# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + tags=[]
# !pip install sentence_transformers
# !pip install samplesizelib
# -
import warnings
warnings.filterwarnings("ignore")
# +
import matplotlib.pyplot as plt
from matplotlib.image import imread
from mpl_toolkits import mplot3d
from matplotlib import gridspec
from mlxtend.plotting import plot_decision_regions
import seaborn as sns
import pandas as pd
from tqdm.notebook import tqdm
import time
import os
from scipy.special import softmax
from scipy.spatial.distance import cdist
import numpy as np
import torch
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC, SVR
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
from sklearn.model_selection import KFold, ParameterGrid
from sklearn.datasets import make_classification, make_regression
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
from sentence_transformers import SentenceTransformer
from samplesizelib.linear.statistical import LikelihoodRatioEstimator
from samplesizelib.linear.models import RegressionModel
from torchvision import datasets
from torchvision import transforms
# + [markdown] id="295SPV-50n1N"
# # Homework
# **Task**: Use the sentence-vectorization model from the seminar. Based on the resulting vectors, solve the sentiment-analysis task on the Twitter dataset (a binary classification problem). Use logistic regression as the model. It is recommended to use the Perceptron model from seminar 3 together with the torch.nn.BCELoss loss function.
#
# # Data
#
# + colab={"base_uri": "https://localhost:8080/", "height": 238} id="jDzLuQrC0ujY" outputId="e9dfd81d-a838-449d-b0eb-37bfbc34bd11"
# data = pd.read_csv("data/Sentiment Analysis Dataset.csv", error_bad_lines=False)
# data.head(5)
# + colab={"base_uri": "https://localhost:8080/"} id="-E5xxgKG1Dvr" outputId="cbde4c10-ec66-4320-c4b4-58dc6a7dbd69"
# x, y = data.SentimentText, data.Sentiment # for now we will take only 1% of original dataset due to computation difficulties
# x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2)
# x_train.shape, x_test.shape
# + id="GqqFc4x921cP"
# Sentences to vectors
# vectorizer = SentenceTransformer('LaBSE', device='cuda', cache_folder='vectorizer_params/')
# X_train = vectorizer.encode(x_train.to_list())
# X_test = vectorizer.encode(x_test.to_list())
# Y_train = y_train.to_numpy().astype(np.byte)
# Y_test = y_test.to_numpy().astype(np.byte)
# # save to files
# np.save(file='data/train_vectors_full', arr=X_train.astype(np.float16))
# np.save(file='data/test_vectors_full', arr=X_test.astype(np.float16))
# np.save(file='data/target_train_full', arr=Y_train)
# np.save(file='data/target_test_full', arr=Y_test)
# + colab={"base_uri": "https://localhost:8080/"} id="i6Eu8-Rb-lI_" outputId="4ba4307e-5585-44a4-de62-d12833f886e8"
X_train = np.load("data/train_vectors_full.npy")
X_test = np.load("data/test_vectors_full.npy")
Y_train = np.load('data/target_train_full.npy')
Y_test = np.load('data/target_test_full.npy')
X_train.shape, X_test.shape, Y_train.shape, Y_test.shape
# + colab={"base_uri": "https://localhost:8080/"} id="_kCp9oqpHIQ7" outputId="b68a45ca-dae8-47e6-89b8-5aab76e9c9ac"
Y_train.dtype, Y_test.dtype
# + [markdown] id="1IlREddp-wQt" tags=[]
# Ideally, we should use K-Fold cross-validation to obtain an objective quality estimate and select hyperparameters, and then measure performance on the test set. However, we are dealing with a fairly large dataset (1.2M train and 0.3M test), and K-Fold on a sample of this size would take a long time even with a small architecture. We will therefore use an ordinary train/val split; given the sample size, the quality estimate should still be reasonably objective.
# +
x_train_fold, x_val_fold, y_train_fold, y_val_fold = train_test_split(X_train, Y_train, test_size=0.1)
x_train_fold = torch.Tensor(x_train_fold)
x_val_fold = torch.Tensor(x_val_fold)
y_train_fold = torch.FloatTensor(y_train_fold)
y_val_fold = torch.FloatTensor(y_val_fold)
traindata = torch.utils.data.TensorDataset(x_train_fold, y_train_fold)
valdata = torch.utils.data.TensorDataset(x_val_fold, y_val_fold)
test_dataset = torch.utils.data.TensorDataset(torch.Tensor(X_test), torch.FloatTensor(Y_test))
x_train_fold.shape, x_val_fold.shape
# + [markdown] id="3ys3_Wwi3iEs"
# ## Model side
#
# As the model, we will use the perceptron from seminar 3.
# + id="W81q8QDtFk5t"
INPUT_DIM = 768
# + colab={"base_uri": "https://localhost:8080/"} id="CDDsbADxFPMG" outputId="0602b7cc-6ace-4212-974f-d3a4f4e6dd63"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# device = 'cpu'
# + id="Bo59vWmh3i3A"
class Perceptron(torch.nn.Module):
@property
def device(self):
for p in self.parameters():
return p.device
def __init__(self, input_dim=INPUT_DIM, num_layers=0,
hidden_dim=100, output_dim=1, p=0.0):
super(Perceptron, self).__init__()
self.layers = torch.nn.Sequential()
prev_size = input_dim
for i in range(num_layers):
self.layers.add_module('layer{}'.format(i),
torch.nn.Linear(prev_size, hidden_dim))
self.layers.add_module('relu{}'.format(i), torch.nn.ReLU())
self.layers.add_module('dropout{}'.format(i), torch.nn.Dropout(p=p))
prev_size = hidden_dim
self.layers.add_module('classifier',
torch.nn.Linear(prev_size, output_dim))
def forward(self, input):
return self.layers(input)
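# -
# A side note on the loss (an illustration added here, not part of the original training code): the task statement mentions logistic regression and `torch.nn.BCELoss`. With the `Perceptron` class above, logistic regression corresponds to `num_layers=0`, i.e. a single linear layer on top of the 768-dimensional sentence vectors. The minimal sketch below shows the two equivalent ways of wiring the binary loss: `BCELoss` on sigmoid outputs versus `BCEWithLogitsLoss` on raw logits, which is what the training below actually uses (it is more numerically stable).
# +
# Minimal sketch with dummy data (assumed shapes: a batch of 768-dim vectors, binary targets)
logreg = Perceptron(input_dim=INPUT_DIM, num_layers=0)  # logistic regression = single linear layer
dummy_x = torch.randn(4, INPUT_DIM)
dummy_y = torch.tensor([0., 1., 1., 0.])
logits = logreg(dummy_x).squeeze()
loss_bce = torch.nn.BCELoss()(torch.sigmoid(logits), dummy_y)    # BCELoss expects probabilities
loss_bce_logits = torch.nn.BCEWithLogitsLoss()(logits, dummy_y)  # fuses the sigmoid into the loss
print(loss_bce.item(), loss_bce_logits.item())  # the two values coincide
# -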
# + id="zWb7CrfYNYkt"
from IPython.display import clear_output
def testing_binary(model, dataset):
generator = torch.utils.data.DataLoader(dataset, batch_size=51200)
pred = []
real = []
for x, y in generator:
x = x.to(device)
pred.extend((model(x).squeeze() >= 0).cpu().numpy().tolist())
real.extend(y.cpu().numpy().tolist())
return np.mean(np.array(real) == np.array(pred)), \
classification_report(real, pred)
def trainer(model, dataset, loss_function, optimizer, epochs, val_data=None):
loss_history = []
acc_history = []
for epoch in tqdm(range(epochs), leave=False):
generator = torch.utils.data.DataLoader(dataset, batch_size=51200,
shuffle=True)
model.train()
for x, y in generator:
optimizer.zero_grad()
x = x.to(device)
y = y.to(device)
output = model(x)
loss = loss_function(output.squeeze(), y)
loss.backward()
optimizer.step()
loss_history.append(loss.data.cpu().numpy())
model.eval()
acc, _ = testing_binary(model, val_data)
acc_history.append(acc)
clear_output(True)
# print intermediate results
plt.subplots(1, 2)
plt.subplot(1, 2, 1)
plt.plot(loss_history)
plt.subplot(1, 2, 2)
plt.plot(acc_history)
time.sleep(0.1)
plt.pause(0.0001)
# -
# Again, because the dataset is large, it is useful to have a prior idea of what to expect before hyperparameter tuning. For this we will use a baseline model whose hyperparameters are chosen by intuition.
baseline_model = Perceptron(hidden_dim=200, num_layers=5, p=0.1).to(device)
optimizer = torch.optim.Adam(baseline_model.parameters(), lr=1e-4)
# + tags=[]
baseline_model.train()
trainer(baseline_model,
traindata,
torch.nn.BCEWithLogitsLoss(),
optimizer,
epochs=1000,
val_data=valdata)
# +
acc, report = testing_binary(baseline_model, test_dataset)
# -
acc
# save model weights
baseline_model.eval()
torch.save(baseline_model.state_dict(), "models/baseline.pth")
# **Note:** the quality on the test set matches the quality on the validation set closely, so the assumption that a simple validation split is sufficient is justified.
#
# From the plots above and the obtained quality, we can draw the following conclusions:
# - There is no point making the network deeper or wider: despite the dataset size, it still overfits.
# - There is no point training for more than 200 epochs; after that the model starts to overfit.
# - The learning rate is well chosen: the model learns neither too fast nor too slowly.
# - Dropout can be increased.
#
# ### Grid search
# A deep grid search makes no sense here because of the time it would take. So we will experiment a little with the network width and the Dropout parameter **p**, keeping the other parameters (except the number of epochs) the same.
param_grid = ParameterGrid(
{'hid_dim': [200, 250],
'p': [0.3, 0.6]})
for item in tqdm(param_grid):
model = Perceptron(input_dim=INPUT_DIM, hidden_dim=item['hid_dim'], num_layers=5, p=item['p']).to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
trainer(model, traindata, torch.nn.BCEWithLogitsLoss(), optimizer, epochs=200, val_data=valdata)
    torch.save(model.state_dict(), f"models/perc_hid_dim_{item['hid_dim']}_p_{item['p']}.pth")  # .pth extension added so the filenames match model_names below
# Now let's evaluate the quality of each of the available models.
path = "models/"
# model_names = os.listdir(path)
model_names = ['baseline.pth',
'perc_hid_dim_200_p_0.3.pth',
'perc_hid_dim_200_p_0.6.pth',
'perc_hid_dim_250_p_0.3.pth',
'perc_hid_dim_250_p_0.6.pth']
# +
accs = {}
for name in model_names:
if '250' in name:
model = Perceptron(input_dim=INPUT_DIM, hidden_dim=250, num_layers=5).to(device)
else:
model = Perceptron(input_dim=INPUT_DIM, hidden_dim=200, num_layers=5).to(device)
model.load_state_dict(torch.load(path + name))
model.eval()
acc, _ = testing_binary(model, valdata)
accs[name] = acc
# -
accs
# +
best_model = Perceptron(input_dim=INPUT_DIM, hidden_dim=250, num_layers=5).to(device)
best_model.load_state_dict(torch.load('models/perc_hid_dim_250_p_0.3.pth'))
best_model.eval()
acc, report = testing_binary(best_model, test_dataset)
print(f"best_accuracy = {acc}\n", report)
# -
# As we can see, the best model outperformed the baseline by almost 2%, which is quite good. The best result on the validation set was achieved by the model with **p=0.3** and **hid_dim=250**, i.e. the most expressive model (and the one most prone to overfitting).
#
# It is worth noting that the test and validation results still differ slightly, since we have to some extent "fitted" the models to the validation set. However, the difference is small, so overall the procedure can be considered valid.
#
# **P.S.** I cannot say what caused such a high baseline quality on validation (0.88). Most likely it is due to restarting the training process, which changed the train/val split. The Train/Test split, however, was fixed, so the final test result can be considered valid.
| sem7/homework_7.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 0.3.7
# language: julia
# name: julia 0.3
# ---
using TikzGraphs
using TikzPictures
using Graphs
# +
g = simple_graph(13)
add_edge!(g, 1, 3)
add_edge!(g, 1, 4)
add_edge!(g, 1, 5)
add_edge!(g, 1, 6)
add_edge!(g, 1, 7)
add_edge!(g, 1, 8)
add_edge!(g, 3, 2)
add_edge!(g, 4, 2)
add_edge!(g, 5, 2)
add_edge!(g, 6, 2)
add_edge!(g, 7, 2)
add_edge!(g, 8, 2)
add_edge!(g, 3, 9)
add_edge!(g, 4, 9)
add_edge!(g, 5, 9)
add_edge!(g, 6, 9)
add_edge!(g, 7, 9)
add_edge!(g, 8, 9)
add_edge!(g, 3, 10)
add_edge!(g, 4, 10)
add_edge!(g, 5, 10)
add_edge!(g, 6, 10)
add_edge!(g, 7, 10)
add_edge!(g, 8, 10)
add_edge!(g, 3, 11)
add_edge!(g, 4, 11)
add_edge!(g, 5, 11)
add_edge!(g, 6, 11)
add_edge!(g, 7, 11)
add_edge!(g, 8, 11)
add_edge!(g, 3, 12)
add_edge!(g, 4, 12)
add_edge!(g, 5, 12)
add_edge!(g, 6, 12)
add_edge!(g, 7, 12)
add_edge!(g, 8, 12)
add_edge!(g, 3, 13)
add_edge!(g, 4, 13)
add_edge!(g, 5, 13)
add_edge!(g, 6, 13)
add_edge!(g, 7, 13)
add_edge!(g, 8, 13)
TikzGraphs.plot(g)
# -
randperm(5)
# +
function create_experiment(nr_of_states::Int64)
v = [1:nr_of_states]
end
create_experiment(10)
# -
| Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import numpy as np
from modelsDefault import defaultModelsRun
from models import modelsRun
from sklearn import preprocessing
# +
DATA='data/mamografias.csv'
# Function to read the data
def readData(data_file):
return pd.read_csv(data_file,sep=',', na_values='?')
data = readData(DATA) # Read the data
# -
data.head() # Check the data format: 6 columns, the last one being the variable to predict
data.rename(columns = {'BI-RADS':'BiRads'}, inplace = True) # So we can refer to this column as data.BiRads
data.head()
data.shape[0] # 961 instances
# Check that the classes are balanced: roughly 46% vs 54%
print(data[data['Severity']=='maligno'].shape[0])
print(data[data['Severity']=='benigno'].shape[0])
data.isna().sum() # There are some missing values
data.BiRads.replace(0,pd.NA,inplace=True) # BI-RADS 0 means the radiograph was insufficient
data.Shape.replace('N',pd.NA,inplace=True) # The same applies to Shape N
data.isna().sum()
data=data.dropna() # Drop the instances with missing values
data.shape[0] # We are left with 825 instances
print(data[data['Severity']=='maligno'].shape[0]) # The classes remain balanced: 48.5% vs 51.5%
print(data[data['Severity']=='benigno'].shape[0])
# Sklearn needs numeric data (even for nominal attributes)
le = preprocessing.LabelEncoder()
data.Shape = le.fit_transform(data.Shape)
data.Severity = le.fit_transform(data.Severity)
print(le.inverse_transform([0,1])) # We will treat maligno (malignant) as the positive class
data.head()
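# A side note (not part of the original pipeline): the same `LabelEncoder` instance is refitted on `Shape` and then on `Severity`, so `le.inverse_transform` above only reflects the last fit (the `Severity` mapping). If the `Shape` mapping also needed to be recovered later, one encoder per column could be used; the sketch below is illustrative only and works on a fresh copy of the data so it does not interfere with the pipeline.
# +
demo = readData(DATA).dropna()   # throwaway copy, just for illustration
le_shape = preprocessing.LabelEncoder()
le_severity = preprocessing.LabelEncoder()
demo['Shape'] = le_shape.fit_transform(demo['Shape'])
demo['Severity'] = le_severity.fit_transform(demo['Severity'])
print(le_shape.classes_)         # categories kept for Shape
print(le_severity.classes_)      # categories kept for Severity
# -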
dataArray=np.array(data)
dataArray
# Split into features and target (label)
x=dataArray[:,:-1]
y=dataArray[:,-1]
defaultModelsRun(x,y,'dropna')
modelsRun(x,y,'dropna')
| practica1/mamografias_dropna.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pyplot as plt
from math import log10, log
# %matplotlib inline
# ## Q1
N = 1000
def f(order, x = None):
b = 13
pi = np.pi
if x is None:
x = np.repeat(np.linspace(0,3.3, N)[:, np.newaxis], order, axis=1).T
else:
x = np.repeat(x[:, np.newaxis], order, axis=1).T
a = 0.5
n = np.repeat(np.arange(0,order)[:, np.newaxis],N , axis=1)
return ((a**n)*np.cos((b**n)*pi*x)).sum(axis=0)
# Analytical integral
def integral_f(order, l, u):
b = 13
a = 0.5
pi = np.pi
n = np.arange(0,order)
I_l = ((a**n)*np.sin((b**n)*pi*l)/((b**n)*pi)).sum()
I_u = ((a**n)*np.sin((b**n)*pi*u)/((b**n)*pi)).sum()
return I_u - I_l
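# The analytic form above comes from integrating the series term by term: $\int a^n \cos(b^n \pi x)\,dx = \frac{a^n \sin(b^n \pi x)}{b^n \pi}$, so the definite integral is the sum of these antiderivatives at the upper limit minus the sum at the lower limit.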
avg_error = []
for i in range(5,20):
avg_error.append(((f(i)-f(i+1))**2).sum()/N)
plt.plot(list(range(5,20)), list(map(log10,avg_error)))
plt.ylabel("log(epsilon)")
plt.xlabel("n")
plt.figure(figsize=(15,8))
plt.plot(np.linspace(0,3.3, N), f(15))
samples = []
for j in range(30000):
samples.append((f(15, np.random.rand(N)*3.3).sum())*3.3/N)
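# Each entry of `samples` is a plain Monte Carlo estimate $\int_0^{3.3} f(x)\,dx \approx \frac{3.3}{N}\sum_{i=1}^{N} f(x_i)$ with $x_i \sim U(0, 3.3)$; averaging the 30000 independent estimates below reduces the variance of the final value.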
# Computed value
sum(samples)/len(samples)
# Exact value
integral_f(15, 0, 3.3)
# ## Q2
sample_pi = []
for k in range(5000):
M = 100000
centre = np.random.ranf(M)*2
theta = np.random.ranf(M)*np.pi/2
m = (centre-(1/2)*np.cos(theta) <= 0) | (centre+(1/2)*np.cos(theta) >= 2)
sample_pi.append(M/m.sum())
# Computed value
sum(sample_pi)/len(sample_pi)
# Exact value
np.pi
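# The simulation above is essentially Buffon's needle: a needle of length $l = 1$ is dropped with its centre uniform on a strip of width $d = 2$ and a uniform angle, so it crosses a boundary with probability $\frac{2l}{\pi d} = \frac{1}{\pi}$, and the ratio of drops to crossings, $M / \sum m$, estimates $\pi$.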
| Assignment7.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import glob
import math
import scicm
import numpy as np
from viscm import viscm
import matplotlib.cm as cm
import matplotlib.pyplot as plot
import matplotlib.gridspec as gs
# %matplotlib inline
# -
cmaps_monochromat=['Blue','Cyan','Green','Magenta','Orange','Purple','Red','Stone','Teal','Yellow']
cmaps_soft=[f.split('/')[-1].replace('.txt','') for f in sorted(glob.glob('../scicm/cm_data/Soft*txt'))]
cmaps_bichromat=[f.split('/')[-1].replace('.txt','') for f in sorted(glob.glob('../scicm/cm_data/[A-Z]2[A-Z].txt'))]
cmaps_bichromat_grey=[f.split('/')[-1].replace('.txt','') for f in sorted(glob.glob('../scicm/cm_data/[A-Z]grey[A-Z].txt'))]
cmaps_diverging_k=[f.split('/')[-1].replace('.txt','') for f in sorted(glob.glob('../scicm/cm_data/[A-Z]k[A-Z].txt'))]
cmaps_diverging_w=[f.split('/')[-1].replace('.txt','') for f in sorted(glob.glob('../scicm/cm_data/[A-Z]w[A-Z].txt'))]
cmaps_iso=[f.split('/')[-1].replace('.txt','') for f in sorted(glob.glob('../scicm/cm_data/iso*.txt'))]
cmaps_segmented=[f.split('/')[-1].replace('.txt','') for f in sorted(glob.glob('../scicm/cm_data/*2080.txt'))]+['Quartile']
cmaps_miscellaneous=['Day','Night','Garnet','Ripe','Tropical']
cmaps_all=cmaps_monochromat+cmaps_soft+cmaps_bichromat+cmaps_bichromat_grey+cmaps_diverging_k+cmaps_diverging_w+cmaps_segmented+cmaps_iso+cmaps_miscellaneous
gradient=np.linspace(0,1,256)
gradient=np.vstack((gradient,gradient))
# # Colour map samples
# +
fig=plot.figure(figsize=(12,(len(cmaps_monochromat)+1)*0.7),facecolor='w',layout='constrained')
spec=gs.GridSpec(nrows=len(cmaps_monochromat),ncols=1,figure=fig)
axes=[fig.add_subplot(spec[i,0]) for i in range(len(cmaps_monochromat))]
axes[0].set_title('Monochromatic',fontsize=40)
for ax,name in zip(axes,cmaps_monochromat):
ax.imshow(gradient, aspect='auto', cmap=f'scicm.{name}')
lab=ax.set_ylabel(name,labelpad=10,fontsize=30,y=0.2,ha='right')
lab.set_rotation(0)
# Turn off *all* ticks & spines, not just the ones with colormaps.
for ax in axes:
ax.xaxis.set_ticks([])
ax.yaxis.set_ticks([])
ax.spines['top'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.spines['right'].set_visible(False)
plot.savefig(f'scicm_monochromatic.png',dpi=200)
plot.show()
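# -
# The cell above is repeated almost verbatim for every colour-map category below. A small helper like the following (a sketch, not used by the rest of the notebook) could factor out the repetition; it assumes the `gradient` array and the category name lists defined earlier.
# +
def plot_cmap_strips(names, title, filename, row_height=0.7):
    """Draw one horizontal colour strip per colormap name and save the figure."""
    fig = plot.figure(figsize=(12, (len(names) + 1) * row_height), facecolor='w', layout='constrained')
    spec = gs.GridSpec(nrows=len(names), ncols=1, figure=fig)
    axes = [fig.add_subplot(spec[i, 0]) for i in range(len(names))]
    axes[0].set_title(title, fontsize=40)
    for ax, name in zip(axes, names):
        ax.imshow(gradient, aspect='auto', cmap=f'scicm.{name}')
        lab = ax.set_ylabel(name, labelpad=10, fontsize=30, y=0.2, ha='right')
        lab.set_rotation(0)
        ax.set_xticks([])
        ax.set_yticks([])
        for side in ('top', 'bottom', 'left', 'right'):
            ax.spines[side].set_visible(False)
    plot.savefig(filename, dpi=200)
    plot.show()
# Example: plot_cmap_strips(cmaps_soft, 'Soft', 'scicm_soft.png')
# -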
# +
fig=plot.figure(figsize=(12,(len(cmaps_soft)+1)*0.7),facecolor='w',layout='constrained')
spec=gs.GridSpec(nrows=len(cmaps_soft),ncols=1,figure=fig)
axes=[fig.add_subplot(spec[i,0]) for i in range(len(cmaps_soft))]
axes[0].set_title('Soft',fontsize=40)
for ax,name in zip(axes,cmaps_soft):
ax.imshow(gradient, aspect='auto', cmap=f'scicm.{name}')
lab=ax.set_ylabel(name,labelpad=10,fontsize=30,y=0.2,ha='right')
lab.set_rotation(0)
# Turn off *all* ticks & spines, not just the ones with colormaps.
for ax in axes:
ax.xaxis.set_ticks([])
ax.yaxis.set_ticks([])
ax.spines['top'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.spines['right'].set_visible(False)
plot.savefig(f'scicm_soft.png',dpi=200)
plot.show()
# +
fig=plot.figure(figsize=(12,(len(cmaps_bichromat)+1)*0.7),facecolor='w',layout='constrained')
spec=gs.GridSpec(nrows=len(cmaps_bichromat),ncols=1,figure=fig)
axes=[fig.add_subplot(spec[i,0]) for i in range(len(cmaps_bichromat))]
axes[0].set_title('Bichromatic',fontsize=40)
for ax,name in zip(axes,cmaps_bichromat):
ax.imshow(gradient, aspect='auto', cmap=f'scicm.{name}')
lab=ax.set_ylabel(name,labelpad=10,fontsize=30,y=0.2,ha='right')
lab.set_rotation(0)
# Turn off *all* ticks & spines, not just the ones with colormaps.
for ax in axes:
ax.xaxis.set_ticks([])
ax.yaxis.set_ticks([])
ax.spines['top'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.spines['right'].set_visible(False)
plot.savefig(f'scicm_bichromatic.png',dpi=200)
plot.show()
# +
fig=plot.figure(figsize=(12,(len(cmaps_bichromat_grey)+1)*0.7),facecolor='w',layout='constrained')
spec=gs.GridSpec(nrows=len(cmaps_bichromat_grey),ncols=1,figure=fig)
axes=[fig.add_subplot(spec[i,0]) for i in range(len(cmaps_bichromat_grey))]
axes[0].set_title('Bichromatic (grey)',fontsize=40)
for ax,name in zip(axes,cmaps_bichromat_grey):
ax.imshow(gradient, aspect='auto', cmap=f'scicm.{name}')
lab=ax.set_ylabel(name,labelpad=10,fontsize=30,y=0.2,ha='right')
lab.set_rotation(0)
# Turn off *all* ticks & spines, not just the ones with colormaps.
for ax in axes:
ax.xaxis.set_ticks([])
ax.yaxis.set_ticks([])
ax.spines['top'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.spines['right'].set_visible(False)
plot.savefig(f'scicm_bichromatic_grey.png',dpi=200)
plot.show()
# +
fig=plot.figure(figsize=(12,(len(cmaps_diverging_k)+1)*0.7),facecolor='w',layout='constrained')
spec=gs.GridSpec(nrows=len(cmaps_diverging_k),ncols=1,figure=fig)
axes=[fig.add_subplot(spec[i,0]) for i in range(len(cmaps_diverging_k))]
axes[0].set_title('Diverging (black)',fontsize=40)
for ax,name in zip(axes,cmaps_diverging_k):
ax.imshow(gradient, aspect='auto', cmap=f'scicm.{name}')
lab=ax.set_ylabel(name,labelpad=10,fontsize=30,y=0.2,ha='right')
lab.set_rotation(0)
# Turn off *all* ticks & spines, not just the ones with colormaps.
for ax in axes:
ax.xaxis.set_ticks([])
ax.yaxis.set_ticks([])
ax.spines['top'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.spines['right'].set_visible(False)
plot.savefig(f'scicm_diverging_k.png',dpi=200)
plot.show()
# +
fig=plot.figure(figsize=(12,(len(cmaps_diverging_w)+1)*0.7),facecolor='w',layout='constrained')
spec=gs.GridSpec(nrows=len(cmaps_diverging_w),ncols=1,figure=fig)
axes=[fig.add_subplot(spec[i,0]) for i in range(len(cmaps_diverging_w))]
axes[0].set_title('Diverging (white)',fontsize=40)
for ax,name in zip(axes,cmaps_diverging_w):
ax.imshow(gradient, aspect='auto', cmap=f'scicm.{name}')
lab=ax.set_ylabel(name,labelpad=10,fontsize=30,y=0.2,ha='right')
lab.set_rotation(0)
# Turn off *all* ticks & spines, not just the ones with colormaps.
for ax in axes:
ax.xaxis.set_ticks([])
ax.yaxis.set_ticks([])
ax.spines['top'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.spines['right'].set_visible(False)
plot.savefig(f'scicm_diverging_w.png',dpi=200)
plot.show()
# +
fig=plot.figure(figsize=(12,(len(cmaps_segmented)+1)*0.7),facecolor='w',layout='constrained')
spec=gs.GridSpec(nrows=len(cmaps_segmented),ncols=1,figure=fig)
axes=[fig.add_subplot(spec[i,0]) for i in range(len(cmaps_segmented))]
axes[0].set_title('Segmented',fontsize=40)
for ax,name in zip(axes,cmaps_segmented):
ax.imshow(gradient, aspect='auto', cmap=f'scicm.{name}')
lab=ax.set_ylabel(name,labelpad=10,fontsize=30,y=0.2,ha='right')
lab.set_rotation(0)
# Turn off *all* ticks & spines, not just the ones with colormaps.
for ax in axes:
ax.xaxis.set_ticks([])
ax.yaxis.set_ticks([])
ax.spines['top'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.spines['right'].set_visible(False)
plot.savefig(f'scicm_segmented.png',dpi=200)
plot.show()
# +
fig=plot.figure(figsize=(12,(len(cmaps_miscellaneous)+1)*0.6),facecolor='w',layout='constrained')
spec=gs.GridSpec(nrows=len(cmaps_miscellaneous),ncols=1,figure=fig)
axes=[fig.add_subplot(spec[i,0]) for i in range(len(cmaps_miscellaneous))]
axes[0].set_title('Miscellaneous',fontsize=40)
for ax,name in zip(axes,cmaps_miscellaneous):
ax.imshow(gradient, aspect='auto', cmap=f'scicm.{name}')
lab=ax.set_ylabel(name,labelpad=10,fontsize=30,y=0.2,ha='right')
lab.set_rotation(0)
# Turn off *all* ticks & spines, not just the ones with colormaps.
for ax in axes:
ax.xaxis.set_ticks([])
ax.yaxis.set_ticks([])
ax.spines['top'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.spines['right'].set_visible(False)
plot.savefig(f'scicm_miscellaneous.png',dpi=200)
plot.show()
# +
fig=plot.figure(figsize=(18,(math.ceil(len(cmaps_all)/2)+1)*0.6),facecolor='w',layout='constrained')
spec=gs.GridSpec(nrows=math.ceil(len(cmaps_all)/2),ncols=2,figure=fig)
axes=[fig.add_subplot(spec[i,0]) for i in range(math.ceil(len(cmaps_all)/2))]+[fig.add_subplot(spec[i,1]) for i in range(math.floor(len(cmaps_all)/2))]
for ax,name in zip(axes,cmaps_all):
ax.imshow(gradient, aspect='auto', cmap=f'scicm.{name}')
lab=ax.set_ylabel(name,labelpad=10,fontsize=30,y=0.1,ha='right')
lab.set_rotation(0)
# Turn off *all* ticks & spines, not just the ones with colormaps.
for ax in axes:
ax.xaxis.set_ticks([])
ax.yaxis.set_ticks([])
ax.spines['top'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.spines['right'].set_visible(False)
plot.savefig(f'scicm_all.png',dpi=200)
plot.show()
# -
# # Examples for each colour map
# +
# Generating random data
rng=np.random.default_rng() # moved up: rng must be defined before it is first used below
x0=np.concatenate([rng.normal(0,0.6,100000),rng.normal(1,0.4,100000)])
y0=np.concatenate([rng.normal(0,0.6,100000),rng.normal(1,0.4,100000)])
z0=np.concatenate([rng.uniform(0,0.2,100000),rng.uniform(0.2,0.4,100000)])
x1=np.array([np.cos(np.linspace(-3.14,3.14,300)) for i in range(300)])
y1=np.array([np.sin(np.linspace(-3.14,3.14,300)) for i in range(300)]).T
x2=rng.uniform(0,1,1000)
y2=x2+rng.normal(0,0.2,1000)
z2=x2+3*y2+rng.normal(0,0.05,1000)
x3=np.linspace(0,1,101)
# +
cmap_val=np.linspace(0,1,12)
pow_val=np.concatenate([1/np.linspace(1,3,6)[::-1],np.linspace(1,3,6)])
#for c in [f'scicm.{cmaps_all[0]}']:
for c in [f'scicm.{ca}' for ca in cmaps_all]:
# Plotting
print(c)
fig,axes=plot.subplots(nrows=2,ncols=2,figsize=(12,12),gridspec_kw=dict(wspace=0.0,hspace=0.0))
axes[0,0].hexbin(x0,y0,lw=0,cmap=c,gridsize=80)
axes[0,0].set_xlim([-1.5,2.5])
axes[0,0].set_ylim([-1.5,2.5])
axes[0,1].imshow(x1*y1,cmap=c)
axes[1,0].scatter(x2,y2,lw=0,c=z2,cmap=c,s=50)
cmap=cm.get_cmap(c)
for cv,pv in zip(cmap_val,pow_val):
axes[1,1].plot(x3,x3**pv,lw=4,c=cmap(cv))
for i in [0,1]:
for j in [0,1]:
axes[i,j].set_xticks([])
axes[i,j].set_yticks([])
plot.tight_layout()
plot.savefig(f'cmap_examples/{c.split(".")[1]}.png',dpi=200)
plot.close()
#plot.show()
# -
#for c in [f'scicm.{cmaps_all[0]}']:
for c in [f'scicm.{ca}' for ca in cmaps_all]:
print(c)
viscm(c)
fig=plot.gcf()
fig.set_size_inches(16,12)
fig.set_facecolor('w')
plot.savefig(f'viscm/{c.split(".")[1]}.png',dpi=200)
plot.close()
#plot.show()
# # Examples of use
# +
import scicm
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import patheffects as pe
# %matplotlib inline
# -
# Generating random data
rng=np.random.default_rng()
x=rng.multivariate_normal((0,0),((0.5,0.1),(0.1,0.3)),10000)
y=x[:,1]
x=x[:,0]
fig,axes=plt.subplots(nrows=2,ncols=2,figsize=(12,12),gridspec_kw=dict(wspace=0.0,hspace=0.0))
axes[0,0].hexbin(x,y,lw=0,cmap='scicm.Stone',gridsize=40)
axes[0,1].hexbin(x,y,lw=0,cmap='scicm.Stone_r',gridsize=40)
axes[1,0].hexbin(x,y,lw=0,cmap=scicm.cm.BkR,gridsize=40)
axes[1,1].hexbin(x,y,lw=0,cmap='scicm.RkB',gridsize=40)
for ax in axes.flatten():
ax.set_xlim(np.min(x),np.max(x))
ax.set_ylim(np.min(y),np.max(y))
ax.set_axis_off()
plt.tight_layout()
plt.savefig('use_examples/cmap_example.png',dpi=200)
plt.show()
# +
shortMagma=scicm.tools.crop('magma',0.4,1.0)
fig,axes=plt.subplots(nrows=1,ncols=2,figsize=(12,6),gridspec_kw=dict(wspace=0.0,hspace=0.0))
axes[0].hexbin(x,y,lw=0,cmap='magma',gridsize=40)
axes[0].set_title('magma',fontsize=25,path_effects=[pe.withStroke(linewidth=2,foreground='w')])
axes[1].hexbin(x,y,lw=0,cmap=shortMagma,gridsize=40)
axes[1].set_title('shortened magma',fontsize=25,path_effects=[pe.withStroke(linewidth=2,foreground='w')])
for ax in axes.flatten():
ax.set_xlim(np.min(x),np.max(x))
ax.set_ylim(np.min(y),np.max(y))
ax.set_axis_off()
plt.tight_layout()
plt.savefig('use_examples/tools_example_1.png',dpi=200)
plt.show()
# +
BgreyG=scicm.tools.merge(['scicm.BgreyY','scicm.PgreyG'],[0.5])
fig,axes=plt.subplots(nrows=1,ncols=3,figsize=(18,6),gridspec_kw=dict(wspace=0.0,hspace=0.0))
axes[0].hexbin(x,y,lw=0,cmap='scicm.BgreyY',gridsize=40)
axes[0].set_title('scicm.BgreyY',fontsize=25,path_effects=[pe.withStroke(linewidth=2,foreground='w')])
axes[1].hexbin(x,y,lw=0,cmap=BgreyG,gridsize=40)
axes[1].set_title('BgreyG',fontsize=25,path_effects=[pe.withStroke(linewidth=2,foreground='w')])
axes[2].hexbin(x,y,lw=0,cmap='scicm.PgreyG',gridsize=40)
axes[2].set_title('scicm.PgreyG',fontsize=25,path_effects=[pe.withStroke(linewidth=2,foreground='w')])
for ax in axes.flatten():
ax.set_xlim(np.min(x),np.max(x))
ax.set_ylim(np.min(y),np.max(y))
ax.set_axis_off()
plt.tight_layout()
plt.savefig('use_examples/tools_example_2.png',dpi=200)
plt.show()
# +
diverging_BgreyG=scicm.tools.stitch(['scicm.RgreyB_r','scicm.PgreyG'],[[0,0.5],[0.5,1]],[0.5],name_newcmap='custom.diverging_BgreyG')
fig,axes=plt.subplots(nrows=2,ncols=2,figsize=(12,12),gridspec_kw=dict(wspace=0.0))
axes[0,0].hexbin(x,y,lw=0,cmap='scicm.RgreyB_r',gridsize=40)
axes[0,0].set_title('scicm.RgreyB_r',fontsize=25,path_effects=[pe.withStroke(linewidth=2,foreground='w')])
axes[1,0].hexbin(x,y,lw=0,cmap='custom.diverging_BgreyG',gridsize=40)
axes[1,0].set_title('custom.diverging_BgreyG',fontsize=25,path_effects=[pe.withStroke(linewidth=2,foreground='w')])
axes[0,1].hexbin(x,y,lw=0,cmap='scicm.PgreyG',gridsize=40)
axes[0,1].set_title('scicm.PgreyG',fontsize=25,path_effects=[pe.withStroke(linewidth=2,foreground='w')])
axes[1,1].hexbin(x,y,lw=0,cmap='scicm.BkG',gridsize=40)
axes[1,1].set_title('scicm.BkG',fontsize=25,path_effects=[pe.withStroke(linewidth=2,foreground='w')])
for ax in axes.flatten():
ax.set_xlim(np.min(x),np.max(x))
ax.set_ylim(np.min(y),np.max(y))
ax.set_axis_off()
plt.tight_layout()
plt.savefig('use_examples/tools_example_3.png',dpi=200)
plt.show()
# -
| images/.ipynb_checkpoints/Example_images-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import warnings
warnings.filterwarnings(action='ignore')
from implicit.evaluation import *
from implicit.als import AlternatingLeastSquares as ALS
from implicit.bpr import BayesianPersonalizedRanking as BPR
import numpy as np
import os
os.environ['KMP_DUPLICATE_LIB_OK']='True'
from sklearn.utils import shuffle
from scipy.sparse import *
from collections import Counter
import pandas as pd
# -
SONG_TOP_X = 50000
TAG_TOP_X = 2500
raw_train = pd.read_json("data/train.json")
raw_val = pd.read_json("data/val.json")
# ## Average num of tags
# +
songs_list = list()
total = 0
for playlist in raw_train.tags.tolist():
total += len(playlist)
print(total/len(raw_train))
# -
# ## Total number of tags and songs
def count_col(data):
tmp = dict()
ret = 0
for row in (data):
for song in row:
if song not in tmp:
tmp[song] = 0
ret +=1
return ret
count_col(raw_train.songs.tolist())
count_col(raw_train.tags.tolist())
# ## Calc Portion
def calc_portion_x(cols,x):
all_cols = []
for col in cols:
all_cols += col
cols_cnt = dict(Counter(all_cols))
cols_cnt_list = sorted(cols_cnt.items(), key=lambda t: -t[1])
x_cnt = sum(x[1] for x in cols_cnt_list[:x])
total = sum(x[1] for x in cols_cnt_list)
return x_cnt/total
# ### tag portion
tag_portion = calc_portion_x(raw_train.tags.tolist(),TAG_TOP_X)
tag_portion
# ### song portion
song_portion = calc_portion_x(raw_train.songs.tolist(),SONG_TOP_X)
song_portion
# ## Extract TOP_X songs and tags
def get_top_x(cols, x):
"""
cols : 2D array
x: int
---------------------------
song_id : cnt
in descending order
"""
all_cols = []
for col in cols:
all_cols += col
cols_cnt = dict(Counter(all_cols))
cols_cnt_list = sorted(cols_cnt.items(), key=lambda t: -t[1])
top_cols_cnt = dict()
for col, cnt in cols_cnt_list[:x]:
top_cols_cnt[col] = cnt
return top_cols_cnt
# +
top_songs = get_top_x(raw_train.songs.tolist(), SONG_TOP_X)
assert len(top_songs) == SONG_TOP_X, "top_songs are not extracted correctly"
# +
top_tags = get_top_x(raw_train.tags.tolist(), TAG_TOP_X)
assert len(top_tags) == TAG_TOP_X, "top_tags are not extracted correctly"
# -
# ## Remove songs that are not in top_songs and map song_id to idx
#
# song_to_idx:
# - key: song_id (from raw data)
# - value: idx [0 : SONG_TOP_X-1]
#
# idx_to_song:
# - key: idx [0 : SONG_TOP_X-1]
# - value: song_id (from raw data)
# +
song_to_idx = dict()
idx_to_song = dict()
idx = 0
#make song to idx
#make idx to song
for songs in raw_train.songs.tolist():
for song in songs:
if song not in song_to_idx and song in top_songs:
song_to_idx[song] = idx
idx_to_song[idx] = song
idx+=1
#change song id to idx
for i, row in raw_train.iterrows():
tmp = []
for songs in raw_train.loc[i,["songs"]]:
for song in songs:
if song in top_songs: tmp.append(song_to_idx[song])
raw_train.at[i,'songs'] = tmp
# -
#change te song id to idx
for i, row in raw_val.iterrows():
tmp = []
for songs in raw_val.loc[i,["songs"]]:
for song in songs:
if song in top_songs: tmp.append(song_to_idx[song])
raw_val.at[i,'songs'] = tmp
assert len(song_to_idx) == SONG_TOP_X, "song_to_idx has problem"
# ## Remove tags that are not in top_tags and map tags from str to idx
#
# tag_to_idx:
# - key: tag_id (from raw data, str)
# - value: idx [SONG_TOP_X : TAG_TOP_X+SONG_TOP_X-1]
#
# idx_to_tag:
# - key: idx [SONG_TOP_X : TAG_TOP_X+SONG_TOP_X-1]
# - value: tag (from raw data)
# +
tag_to_idx = dict()
idx_to_tag = dict()
#make song to idx
#make idx to song
for tags in raw_train.tags.tolist():
for tag in tags:
if tag not in tag_to_idx and tag in top_tags:
tag_to_idx[tag] = idx
idx_to_tag[idx] = tag
idx+=1
#change song id to idx
for i, row in raw_train.iterrows():
tmp = []
for tags in raw_train.loc[i,["tags"]]:
for tag in tags:
if tag in top_tags: tmp.append(tag_to_idx[tag])
raw_train.at[i,'tags'] = tmp
# -
for i, row in raw_val.iterrows():
tmp = []
for tags in raw_val.loc[i,["tags"]]:
for tag in tags:
if tag in top_tags: tmp.append(tag_to_idx[tag])
raw_val.at[i,'tags'] = tmp
assert len(tag_to_idx) == TAG_TOP_X, "tag_to_idx has problem"
n_items = len(song_to_idx)
# ## Make playlist X (songs + tags ids) table
tr_songs = raw_train.songs.tolist()
tr_tags = raw_train.tags.tolist()
te_songs = raw_val.songs.tolist()
te_tags = raw_val.tags.tolist()
# tr & te:
# - row: playlist
# - col: {song| tag}_idx (from 0 to SONG_TOP_X + TAG_TOP_X)
#
# +
tr = []
for songs in tr_songs:
tr.append(songs)
for i, tags in enumerate(tr_tags):
tr[i].extend(tags)
# +
te = []
for songs in te_songs:
te.append(songs)
for i, tags in enumerate(te_tags):
te[i].extend(tags)
# -
# ## The conversion of te was already done above
def lil_to_csr(playlists,playlists2 = []):
"""
playlists: playlist with top songs and tags
"""
row = []
col = []
data = []
te_row = len(te)
for row_idx, playlist in enumerate(playlists):
for idx in playlist:
col.append(idx)
data.append(1)
row.append(row_idx)
for row_idx, playlist in enumerate(playlists2):
for idx in playlist:
col.append(idx)
data.append(1)
row.append(te_row + row_idx)
return row, col, data
# NOTE: this second definition overrides the one above; unlike the first, it only adds tag indices (idx >= 50000) to the sparse matrix, leaving the song columns empty.
def lil_to_csr(playlists,playlists2 = []):
"""
playlists: playlist with top songs and tags
"""
row = []
col = []
data = []
te_row = len(te)
for row_idx, playlist in enumerate(playlists):
for idx in playlist:
if idx >= 50000:
col.append(idx)
data.append(1)
row.append(row_idx)
for row_idx, playlist in enumerate(playlists2):
for idx in playlist:
if idx >= 50000:
col.append(idx)
data.append(1)
row.append(te_row + row_idx)
return row, col, data
csr_row, csr_col, csr_data = lil_to_csr(te, tr)
r = csr_matrix((csr_data, (csr_row, csr_col)))
te_r= r[:len(te)]
tr_r = r[len(te):]
als_model = ALS(factors=128, regularization=0.08)
als_model.fit(r.T * 15.0)
als_model.user_factors
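# A note on the two lines above (this is an assumption about the installed `implicit` version, not something stated in the notebook): `fit` is given `r.T` presumably because older releases of `implicit` expect an item-by-user matrix rather than user-by-item, and multiplying the binary entries by `15.0` turns them into confidence weights for the implicit-feedback ALS objective.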
item_model = ALS(use_gpu=False)
tag_model = ALS(use_gpu=False)
item_model.user_factors = als_model.user_factors
tag_model.user_factors = als_model.user_factors
item_model.item_factors = als_model.item_factors[:n_items]
tag_model.item_factors = als_model.item_factors[n_items:]
item_rec_csr = r[:, :n_items]
tag_rec_csr = r[:, n_items:]
# +
item_ret = []
for u in range(te_r.shape[0]):
item_rec = item_model.recommend(u, item_rec_csr, N=100)
item_rec = [idx_to_song[x[0]] for x in item_rec]
item_ret.append(item_rec)
# -
tag_ret = []
for u in range(te_r.shape[0]):
tag_rec = tag_model.recommend(u, tag_rec_csr, N=10)
tag_rec = [idx_to_tag[x[0]+50000] for x in tag_rec if x[0]+50000 in idx_to_tag]
tag_ret.append(tag_rec)
# + jupyter={"outputs_hidden": true}
tag_ret
# -
valvalval = pd.read_json("data/val.json")
te_ids = valvalval.id.tolist()
returnval = []
for _id, rec, tag_rec in zip(te_ids, item_ret, tag_ret):
returnval.append({
"id": _id,
"songs": rec[:100],
"tags": tag_rec[:10]
})
import json
with open('ret.json', 'w', encoding='utf-8') as f:
f.write(json.dumps(returnval, ensure_ascii=False))
# !pwd
# !ls
| MF_Base.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.3 64-bit (''base'': conda)'
# language: python
# name: python373jvsc74a57bd0210f9608a45c0278a93c9e0b10db32a427986ab48cfc0d20c139811eb78c4bbc
# ---
# # WorkFlow
# ## Classes
# ## Load the data
# ## Test Modelling
# ## Modelling
# **<hr>**
# ## Classes
import os
import cv2
import torch
import numpy as np
def load_data(img_size=112):
data = []
index = -1
labels = {}
for directory in os.listdir('./data/'):
index += 1
labels[f'./data/{directory}/'] = [index,-1]
print(len(labels))
for label in labels:
for file in os.listdir(label):
filepath = label + file
img = cv2.imread(filepath,cv2.IMREAD_GRAYSCALE)
img = cv2.resize(img,(img_size,img_size))
img = img / 255.0
data.append([
np.array(img),
np.eye(len(labels))[labels[label][0]]
])
labels[label][1] += 1
for _ in range(12):
np.random.shuffle(data)
print(len(data))
np.save('./data.npy',data)
return data
import torch
def other_loading_data_proccess(data):
X = []
y = []
print('going through the data..')
for d in data:
X.append(d[0])
y.append(d[1])
print('splitting the data')
VAL_SPLIT = 0.25
VAL_SPLIT = len(X)*VAL_SPLIT
VAL_SPLIT = int(VAL_SPLIT)
X_train = X[:-VAL_SPLIT]
y_train = y[:-VAL_SPLIT]
X_test = X[-VAL_SPLIT:]
y_test = y[-VAL_SPLIT:]
print('turning data to tensors')
X_train = torch.from_numpy(np.array(X_train))
y_train = torch.from_numpy(np.array(y_train))
X_test = torch.from_numpy(np.array(X_test))
y_test = torch.from_numpy(np.array(y_test))
return [X_train,X_test,y_train,y_test]
# **<hr>**
# ## Load the data
REBUILD_DATA = True
if REBUILD_DATA:
data = load_data()
np.random.shuffle(data)
X_train,X_test,y_train,y_test = other_loading_data_proccess(data)
# ## Test Modelling
import torch
import torch.nn as nn
import torch.nn.functional as F
class Test_Model(nn.Module):
def __init__(self,output:int=36):
super().__init__()
self.conv1 = nn.Conv2d(1,32,3)
self.conv2 = nn.Conv2d(32,64,3)
self.conv3 = nn.Conv2d(64,128,3)
self.conv4 = nn.Conv2d(128,256,3)
self.conv5 = nn.Conv2d(256,384,3)
self.relu = nn.ReLU()
self.max_pool2d = F.max_pool2d
self.fc1 = nn.Linear(384*1*1,32)
self.fc2 = nn.Linear(32,64)
self.fc3 = nn.Linear(64,128)
self.fc4 = nn.Linear(128,256)
self.fc5 = nn.Linear(256,512)
self.fc6 = nn.Linear(512,output)
def forward(self,X):
preds = self.conv1(X)
preds = self.relu(preds)
preds = self.max_pool2d(preds,(2,2))
preds = self.conv2(preds)
preds = self.relu(preds)
preds = self.max_pool2d(preds,(2,2))
preds = self.conv3(preds)
preds = self.relu(preds)
preds = self.max_pool2d(preds,(2,2))
preds = self.conv4(preds)
preds = self.relu(preds)
preds = self.max_pool2d(preds,(2,2))
preds = self.conv5(preds)
preds = self.relu(preds)
preds = self.max_pool2d(preds,(2,2))
preds = preds.view(-1,384*1*1)
preds = self.fc1(preds)
preds = self.relu(preds)
preds = self.fc2(preds)
preds = self.relu(preds)
preds = self.fc3(preds)
preds = self.relu(preds)
preds = self.fc4(preds)
preds = self.relu(preds)
preds = self.fc5(preds)
preds = self.relu(preds)
preds = self.fc6(preds)
# preds = self.relu(preds)
# return F.softmax(preds,dim=1)
return preds
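# Note: `forward` returns raw logits (the softmax line is commented out) because `nn.CrossEntropyLoss`, used below, applies log-softmax internally. Also, `y_batch` in the training loop is one-hot encoded; passing float targets to `CrossEntropyLoss` only works on fairly recent PyTorch versions, otherwise class indices (e.g. `torch.argmax(y_batch, dim=1)`) would be expected.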
device = torch.device('cuda')
model = Test_Model().to(device)
# +
# preds = model(X_test.reshape(-1,1,112,112).float())
# +
# preds[0]
# -
optimizer = torch.optim.Adam(model.parameters(),lr=0.1)
criterion = nn.CrossEntropyLoss()
BATCH_SIZE = 32
EPOCHS = 5
loss_logs = []
from tqdm import tqdm
PROJECT_NAME = "Sign-Language-Recognition"
def test(net,X,y):
correct = 0
total = 0
net.eval()
with torch.no_grad():
for i in range(len(X)):
real_class = torch.argmax(y[i]).to(device)
net_out = net(X[i].view(-1,1,112,112).to(device).float())
net_out = net_out[0]
predictied_class = torch.argmax(net_out)
if predictied_class == real_class:
correct += 1
total += 1
return round(correct/total,3)
import wandb
import random
index = random.randint(0,29)
print(index)
wandb.init(project=PROJECT_NAME,name='test-CrossEntropyLoss-Adam-0.1')
for _ in tqdm(range(EPOCHS)):
for i in range(0,len(X_train),BATCH_SIZE):
X_batch = X_train[i:i+BATCH_SIZE].view(-1,1,112,112).to(device)
y_batch = y_train[i:i+BATCH_SIZE].to(device)
model.to(device)
preds = model(X_batch.float())
loss = criterion(preds.float(),y_batch.float())
optimizer.zero_grad()
loss.backward()
optimizer.step()
wandb.log({'loss':loss.item(),'accuracy':test(model,X_train,y_train)*100,'val_accuracy':test(model,X_test,y_test)*100,'pred':torch.argmax(preds[index]),'real':torch.argmax(y_batch[index])})
wandb.finish()
import matplotlib.pyplot as plt
import pandas as pd
df = pd.Series(loss_logs)
df.plot.line(figsize=(12,6))
test(model,X_test,y_test)
test(model,X_train,y_train)
| wandb/run-20210517_205534-1c4rmzu2/tmp/code/main.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Copy Task Plots
# +
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from glob import glob
import json
import os
import sys
sys.path.append(os.path.abspath(os.getcwd() + "./../"))
# %matplotlib inline
# -
# ## Load training history
#
# To generate the models and training history used in this notebook, run the following commands:
#
# ```
# # mkdir ./notebooks/copy
# ./train.py --seed 1 --task copy --checkpoint-interval 500 --checkpoint-path ./notebooks/copy
# ./train.py --seed 10 --task copy --checkpoint-interval 500 --checkpoint-path ./notebooks/copy
# ./train.py --seed 100 --task copy --checkpoint-interval 500 --checkpoint-path ./notebooks/copy
# ./train.py --seed 1000 --task copy --checkpoint-interval 500 --checkpoint-path ./notebooks/copy
# ```
batch_num = 10000
files = glob("./copy_l/*-{}.json".format(batch_num))
files
# Read the metrics from the .json files
history = [json.loads(open(fname, "rt").read()) for fname in files]
training = np.array([(x['cost'], x['loss'], x['seq_lengths']) for x in history])
print("Training history (seed x metric x sequence) =", training.shape)
# Average every dv values across each (seed, metric)
dv = 100
training = training.reshape(len(files), 3, -1, dv).mean(axis=3)
print(training.shape)
# Average the seeds
training_mean = training.mean(axis=0)
training_std = training.std(axis=0)
print(training_mean.shape)
print(training_std.shape)
# +
fig = plt.figure(figsize=(12, 5))
# X axis is normalized to thousands
x = np.arange(dv / 1000, (batch_num / 1000) + (dv / 1000), dv / 1000)
# Plot the cost
# plt.plot(x, training_mean[0], 'o-', linewidth=2, label='Cost')
plt.errorbar(x, training_mean[0], yerr=training_std[0], fmt='o-', elinewidth=2, linewidth=2, label='Cost')
plt.grid()
plt.yticks(np.arange(0, training_mean[0][0]+5, 5))
plt.ylabel('Cost per sequence (bits)')
plt.xlabel('Sequence (thousands)')
plt.title('Training Convergence', fontsize=16)
ax = plt.axes([.57, .55, .25, .25], facecolor=(0.97, 0.97, 0.97))
plt.title("BCELoss")
plt.plot(x, training_mean[1], 'r-', label='BCE Loss')
plt.yticks(np.arange(0, training_mean[1][0]+0.2, 0.2))
plt.grid()
plt.show()
# -
history[0]
# +
loss = history[0]['loss']
cost = history[0]['cost']
seq_lengths = history[0]['seq_lengths']
unique_sls = set(seq_lengths)
all_metric = list(zip(range(1, batch_num+1), seq_lengths, loss, cost))
fig = plt.figure(figsize=(12, 5))
plt.ylabel('Cost per sequence (bits)')
plt.xlabel('Iteration (thousands)')
plt.title('Training Convergence (Per Sequence Length)', fontsize=16)
for sl in unique_sls:
sl_metrics = [i for i in all_metric if i[1] == sl]
x = [i[0] for i in sl_metrics]
y = [i[3] for i in sl_metrics]
num_pts = len(x) // 50
total_pts = num_pts * 50
x_mean = [i.mean()/1000 for i in np.split(np.array(x)[:total_pts], num_pts)]
y_mean = [i.mean() for i in np.split(np.array(y)[:total_pts], num_pts)]
#40000
plt.plot(x_mean, y_mean, label='Seq-{}'.format(sl))
plt.yticks(np.arange(0, 80, 5))
plt.legend(loc=0)
plt.show()
# -
# # Evaluate
import torch
from IPython.display import Image as IPythonImage
from PIL import Image, ImageDraw, ImageFont
import io
from tasks.copytask import dataloader
from train import evaluate
from tasks.copytask import CopyTaskModelTraining
model = CopyTaskModelTraining()
# !ls ./copy_l/
model.net.load_state_dict(torch.load("./copy_l/copy-task-1-batch-9900.model"))
seq_len = 60
_, x, y = next(iter(dataloader(1, 1, 8, seq_len, seq_len)))
x.shape
def evaluate(net, criterion, X, Y):
"""Evaluate a single batch (without training)."""
inp_seq_len = X.size(0)
outp_seq_len, batch_size, _ = Y.size()
# New sequence
net.init_sequence(batch_size)
# Feed the sequence + delimiter
states = []
for i in range(inp_seq_len):
o, state = net(X[i])
states += [state]
# Read the output (no input given)
y_out = torch.zeros(Y.size())
for i in range(outp_seq_len):
y_out[i], state = net()
states += [state]
loss = criterion(y_out, Y)
y_out_binarized = y_out.clone().data
y_out_binarized.apply_(lambda x: 0 if x < 0.5 else 1)
# The cost is the number of error bits per sequence
cost = torch.sum(torch.abs(y_out_binarized - Y.data))
#print(loss.data)
result = {
'loss': loss.data,#[0],
'cost': cost / batch_size,
'y_out': y_out,
'y_out_binarized': y_out_binarized,
'states': states
}
return result
result = evaluate(model.net, model.criterion, x, y)
y_out = result['y_out']
# +
def cmap(value):
pixval = value * 255
low = 64
high = 240
factor = (255 - low - (255-high)) / 255
return int(low + pixval * factor)
def draw_sequence(y, u=12):
seq_len = y.size(0)
seq_width = y.size(2)
inset = u // 8
pad = u // 2
width = seq_len * u + 2 * pad
height = seq_width * u + 2 * pad
im = Image.new('L', (width, height))
draw = ImageDraw.ImageDraw(im)
draw.rectangle([0, 0, width, height], fill=250)
for i in range(seq_len):
for j in range(seq_width):
val = 1 - y[i, 0, j]#.data[0]
draw.rectangle([pad + i*u + inset,
pad + j*u + inset,
pad + (i+1)*u - inset,
pad + (j+1)*u - inset], fill=cmap(val))
return im
def im_to_png_bytes(im):
png = io.BytesIO()
im.save(png, 'PNG')
return bytes(png.getbuffer())
def im_vconcat(im1, im2, pad=8):
assert im1.size == im2.size
w, h = im1.size
width = w
height = h * 2 + pad
im = Image.new('L', (width, height), color=255)
im.paste(im1, (0, 0))
im.paste(im2, (0, h+pad))
return im
# +
def make_eval_plot(y, y_out, u=12):
im_y = draw_sequence(y, u)
im_y_out = draw_sequence(y_out, u)
im = im_vconcat(im_y, im_y_out, u//2)
w, h = im.size
pad_w = u * 7
im2 = Image.new('L', (w+pad_w, h), color=255)
im2.paste(im, (pad_w, 0))
# Add text
font = ImageFont.truetype("./fonts/PT_Sans-Web-Regular.ttf", 13)
draw = ImageDraw.ImageDraw(im2)
draw.text((u,4*u), "Targets", font=font)
draw.text((u,13*u), "Outputs", font=font)
return im2
im = make_eval_plot(y, y_out, u=8)
IPythonImage(im_to_png_bytes(im))
# -
# ## Create an animated GIF
#
# Let's see what the prediction looks like at each checkpoint we saved.
# +
seq_len = 80
_, x, y = next(iter(dataloader(1, 1, 8, seq_len, seq_len)))
frames = []
font = ImageFont.truetype("./fonts/PT_Sans-Web-Regular.ttf", 13)
for batch_num in range(500, 10500, 500):
model = CopyTaskModelTraining()
model.net.load_state_dict(torch.load("./copy_l/copy-task-1-batch-{}.model".format(batch_num)))
result = evaluate(model.net, model.criterion, x, y)
y_out = result['y_out']
frame = make_eval_plot(y, y_out, u=10)
w, h = frame.size
frame_seq = Image.new('L', (w, h+40), color=255)
frame_seq.paste(frame, (0, 40))
draw = ImageDraw.ImageDraw(frame_seq)
draw.text((10, 10), "Sequence Num: {} (Cost: {})".format(batch_num, result['cost']), font=font)
frames += [frame_seq]
# +
im = frames[0]
im.save("./copy_l-train-80.gif", save_all=True, append_images=frames[1:], loop=0, duration=1000)
im = frames[0]
im.save("./copy-l-train-80-fast.gif", save_all=True, append_images=frames[1:], loop=0, duration=100)
# -
| notebooks/copy-task-plots.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# #1 Writing Python statements cleanly and correctly
#
# ### Clean formatting
#
# Often the issue is not just aesthetics but also program readability:
testnum = True
print testnum
testnum = True;print testnum
# ### Correct formatting
#
# Use four spaces or one tab for indentation.
#
# Incorrect indentation:
i = 42
print "Value is ,{0}".format(i)
# Correct indentation:
for i in xrange(5):
if i > 3:
break
else:
print i,
else:
print "Expectedly Finished"
# #2 Python control flow (if/else/for/while, etc.)
#
#
# ### 2.1 if and else
#
# In Python, the standard if-else (or single if) conditional statement looks like this:
# +
expression = True
if expression:
print "do something"
else:
print "do something_else"
# -
# A programmer joke:
# +
hot = True
watermelon = True
cnt_steamdumpling = 12
if hot & watermelon:
cnt_steamdumpling = 1
print cnt_steamdumpling
# -
# Operators have precedence:
# +
yourage = 25
had_a_girlfriend = False
if yourage > 18 and not had_a_girlfriend:
print "Damn..."
else:
print "Ok."
# -
# For multiple if-else branches, use elif:
your_salary = 7000
your_location = "Beijing"
if your_salary > 100000:
print "( ̄︶ ̄)> []"
elif your_salary >= 25000:
print "<( ̄︶ ̄)/*"
else:
print "( ̄﹏ ̄) ( ̄ˇ ̄)"
# Writing if-else statements is another place where indentation needs attention.
#
# If you write an if-else statement in IPython, it will automatically indent the line after the ":", which is very convenient. However, the following style of code is not recommended:
if True: print "do something"
# ### 2.2 Conditional expressions (the ternary operator)
# The normal if/else version is a bit bulky:
x,y = 111,17
if x < y:
smaller = x
else:
smaller = y
print smaller
# A concise ternary expression, Ruby-flavoured:
x , y = 25,10
smaller = x if x < y else y
print smaller
# The short-circuit evaluation idiom:
# +
x , y = 3 , 5
smaller = x < y and x or y # x = a?b:c None,'',0,False
print smaller
x , y = 5 , 3
smaller = x < y and x or y
print smaller
# -
# A case where this trick fails: x is treated as False
# +
x , y = None , 0
print x < y
smaller = x > y and x or y
print smaller
# -
# ### 2.3 For loops
# First, the range(start, end, step) function (it generates an iterable sequence):
print range(10)
print range(1,5,1)
print range(5,1,-1)
print xrange(10)
for i in xrange(10):
print i ** 2,
s = "string"
# A for loop can conveniently iterate over a string
for eachletter in s:
print eachletter
# By default, Python adds a newline after each printed letter.
#
# If you do not want this behaviour, add a comma "," after the statement:
for eachletter in s:
print eachletter,
# #### Getting the loop index together with the element
#
# Recall the len function and string slicing introduced in the basic syntax; the following style is not recommended:
a = "Be Enumerated"
lena = len(a)
print lena
print range(lena)
for eachnum in range(lena):
print "{0} {1:>2}".format(a[eachnum],eachnum)
# Use enumerate to get the loop index as you iterate:
for idx, element in enumerate(a):
if idx%2==0 or element=='e':
print idx, element
# ### 2.4 while syntax and its peculiar else
#
# The while statement has a form similar to the if statement.
#
# If the condition after while is true, the code block under the colon keeps executing in a loop until the condition becomes 0 or False.
#
# A while statement can be followed by an else clause: if the loop is not interrupted, the else block runs when the loop finishes.
# +
count = 0
while count <= 3:
print "looping {0}".format(count)
count += 1
print count
else:
print "Finite loop"
count = 0
while True:
print "looping {0}".format(count)
count += 1
print count
if count > 3:
break
else:
print "Broken loop"
# -
# The while-else combination is a bit unusual; here is an even more unusual one: for-else.
# +
for i in xrange(3):
print i
else:
print "Finished"
print "=" * 20
for i in xrange(3):
if i > 1:
break
print i
else:
print "Finished"
# -
# ### 2.5 Controlling loop behaviour (break/continue/pass)
#
# * pass: do nothing.
# * continue: tell Python to skip the remaining statements in the current loop body and continue with the next iteration.
# * break: end the current loop and jump to the next statement.
#
# These three statements are often used together with an if statement.
# +
def foo():
pass
a = [1, 0, 2, 4]
for element in a:
if element == 0:
continue
print 1. / element
# -
z = 1 + 1j
while True:
if abs(z) > 100:
break
z = z ** 2 + 1
print z
# #3 Python's basic data structures in detail
# Python has four built-in data structures (lists, tuples, dictionaries and sets), and three different kinds of brackets are enough to represent them.
#
# A list is a data structure for handling an ordered group of items; its elements are wrapped in [] (square brackets), and both the number of elements and their values can change.
#
# A tuple's elements are wrapped in (); a tuple can be seen as a read-only list.
#
# Both lists and tuples use slicing to access elements, with numeric indices starting from 0.
#
# Through slicing ([] and [:]), lists and tuples yield subsets. A slice of a list is a list, and a slice of a tuple is still a tuple (immutable).
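# A quick preview of the literal syntax of all four structures (a minimal sketch; each one is covered in detail in the sections below):
# +
sample_list = [1, 2, 3]          # list: square brackets, mutable
sample_tuple = (1, 2, 3)         # tuple: parentheses, immutable
sample_dict = {'a': 1, 'b': 2}   # dict: curly braces with key:value pairs
sample_set = {1, 2, 3}           # set: curly braces, unique elements only
print type(sample_list), type(sample_tuple), type(sample_dict), type(sample_set)
# -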
# ### 3.1 List (mutable type)
#
# A list can be defined by simply wrapping the data in square brackets.
#
# The familiar slicing rules [start:end:step]:
L = ['red','blue','green','black','white']
print isinstance(L,list)
print L[0],L[-1]
print L[2:4]
print L[:3],L[::2]
# As a mutable type, a list can be modified in place:
M = ['red','blue','green','black','white',42,True] # a list can contain elements of different types
M[2:4] = ['Ruby','sapphire']
N = M
print id(M),id(N)
M[-1] = False # elements can also be modified through indexing/slicing
print N # the hallmark of a mutable type: N was never touched directly, yet its contents changed
import numpy as np
a = np.array(range(10))
print a,type(a),a.dtype
import scipy as sp
import statsmodels as smodel
import pandas as pd
# For numeric data of a single type, the more efficient numpy is recommended.
#
# Lists offer many methods for manipulation:
LM = ['red','blue','green','black','white']
print LM
LM.append('pink')
print LM # append adds one element to the end of the list
popped = LM.pop() # pop removes and returns the last element of the list
print LM,popped
# try LM.pop(0)
popped2 = LM.pop(0)
print LM,popped2
LM.extend(['pink','purple','purple']) # extend appends the given sequence to the list; its argument must be iterable
print LM
LM.remove('purple') # remove deletes one element with the given value
print LM
print popped
print LM[::-1]
LL = LM.reverse # note: this only binds the in-place reverse method; it is not called until LL() below
LL()
print LL
print LM
print LM*2 # like strings, lists support the * operator
print LM+LM # and the + operator, although + does not add elements in place
#LL_law = M.sor
#M.sort()
#print M
#print LL_law
print M
Mnew = sorted(M)
print M
M.sort()
print M
# To check whether an element is in a list, use the in operator
LL_law = ['red', 'blue', 'Ruby', 'sapphire', 'white', 42, False]
my_precious = "silmarils"
print my_precious in LL_law
if "Ruby" in LL_law:
print "Ok"
# Tired of immutable strings?
string = 'Mukatsuku'
ls_str = list(string)
print ls_str
print ''.join(ls_str)
# ### 3.2 Tuple (immutable type)
# Tuple elements are separated by commas and may be wrapped in parentheses (recommended):
# +
war3 = ('Orc','Humans','Undead','Night Elves')
heros = 'Blade Master','Farseer','<NAME>','Shadow Hunter'
print type(war3),type(heros)
# -
# To explicitly delete a list or a tuple, use del:
war3copy = war3
print war3copy
print war3copy[1]
war3copy[1]="Trans_Humans"
# Like lists, tuples also support the +, * and in operations.
#
# A compromise: use a "mutable" tuple (one that contains a mutable element):
t = (42,False,[True],-203+1j)
t[2][0] = False
print t
print list(t)
# ### 3.3 Set (mutable type) and frozenset (immutable type)
# Sets are defined with curly braces and manipulated with set operations; use set or frozenset to convert other sequences.
war3 = ('Orcs','Humans','Undead','Night Elves')
Lord_of_ring = ('Ainur','Dragons','Dwarves','Elves','Ents','Hobbits','Humans','Orcs')
test_set = set(war3)
train = set(Lord_of_ring)
ya_test_set = {'Orcs','Humans','Undead','Night Elves'}
print 'Orcs' in test_set
print 'Orcs' in train
print 'Orcs' in ya_test_set
# Operations within a single set are very convenient:
test_set.add('Xmen')
print test_set
test_set.update(['No.16','No.17','No.18'])
print test_set
for item in ['Xmen','No.16','No.17','No.18']:
test_set.remove(item)
print test_set
# The immutable frozenset:
ftest = frozenset(test_set)
print ftest
ftest.add('Xmen')
# All the basic operations between sets apply to both set and frozenset.
#
# Let us verify two set-theory identities:
#
# $A \hat{} B = (A \backslash B) \cup (B \backslash A)$
#
# $A \hat{} B = (A \cup B) \backslash ( A \cap B)$
# +
print test_set==train # equality test
print test_set<train # proper-subset test
print test_set>train # proper-superset test
print test_set&train # intersection
print test_set|train # union
print train-test_set # difference
print test_set^train # symmetric difference
print test_set^train == ((train-test_set) | (test_set-train))
print test_set^train == (train | test_set) - (train & test_set)
# -
# ### 3.4 Dict (mutable type)
#
# Wrapped in curly braces, in the form {key1:value1, key2:value2, key3:value3}.
#
# Keys must be unique; each value corresponds to a key, and values do not need to be unique.
language={"Scala":"<NAME>","Clojure":"<NAME>",\
"C":"<NAME>","Standard ML":"<NAME>"}
print language.keys() # get the keys
print language.values() # get the values
print language.items() # get the key-value pairs
print language.iterkeys() # iterator versions of the above
print language.itervalues()
print language.iteritems()
# Get the value for a given key, or add a key-value pair:
print language['Standard ML']
language["Python"]="<NAME>"
print language['Python']
# Let's try iterating over the dictionary:
for key in language:
print 'key={0},value={1}'.format(key,language[key])
# Accessing a key that does not exist in the dictionary raises an error:
print language["Ruby"]
# So before using a key, you can first check whether it is in the dictionary:
print language.has_key('Scala')
print 'Ruby' in language
# Or use a very handy method: dict.get(key, default=None)
print language.get("Haskell","They hardly understand IT")
print language.get("Python",None)
# To delete certain keys from a dictionary, use del somedict[some_key];
#
# to delete the dictionary itself, simply use del somedict.
#
# Adding key-value pairs, or updating a value by key, is very convenient:
language["Ruby"] = "Matz"
print language["Ruby"] + " is a short form, renew it."
language["Ruby"] = "<NAME>"
print language["Ruby"] + " is the full name of Ruby's Creator."
| Series_0_Python_Tutorials/S0EP2_Control_Flow_Data_Structure.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import torch
from torchvision import datasets
import numpy as np
import random
import matplotlib.pyplot as plt
from math import exp
data = datasets.MNIST(root = './' , download=True , train=True)
X = data.data.to(torch.int64).numpy()
Y = data.targets.to(torch.int64).numpy()
X = X[np.logical_or(Y == 3 , Y == 8)]
Y = Y[np.logical_or(Y == 3 , Y == 8)]
x_train = X[0 : 600 , : , :]
y_train = Y[0 : 600]
x_validation = X[600 : 800 , :]
y_validation = Y[600 : 800]
x_test = X[800 : 1000 , :]
y_test = Y[800 : 1000]
X.shape , Y.shape , x_train.shape ,y_train.shape
x_train = x_train.reshape(600 , 28 * 28)
x_validation = x_validation.reshape(200 , 28 * 28)
x_test = x_test.reshape(200 , 28 * 28)
img = x_train[0].reshape(28 , 28)
c = 10
ids = np.where(y_train == 8)
y_train[ids] = 1
ids = np.where(y_train == 3)
y_train[ids] = -1
ids = np.where(y_validation == 8)
y_validation[ids] = 1
ids = np.where(y_validation == 3)
y_validation[ids] = -1
ids = np.where(y_test== 8)
y_test[ids] = 1
ids = np.where(y_test == 3)
y_test[ids] = -1
plt.subplot(121) , plt.imshow(img , 'gray')
plt.subplot(122) , plt.imshow(X[16] , 'gray')
def selectRandom(l , h , i):
while(True):
j = random.randint(l , h)
if i != j:
return j
def dataPoints(i , j):
return x_train[i , : ] , y_train[i] , x_train[j , :] , y_train[j]
def kernelfunc(x , y):
return np.dot(x , y.T)
def RBFkernel(x , y):
gamma = 1 / (28 * 28)
    return np.exp(-gamma * np.sum((x - y) ** 2))  # squared Euclidean distance inside the exponential
def alphaBounds(a1 , a2 , y1 ,y2 , c):
if y1 != y2:
return max(-a1 + a2 , 0) , min(c , c - a1 + a2)
else:
return max(a1 + a2 - c , 0) , min(c , a1 + a2)
def calculate_w(alpha , x_train , y_train):
return np.dot(x_train.T , np.multiply(alpha , y_train))
def calculate_b(w , x_train , y_train):
b = y_train - np.dot(w.T , x_train.T)
return np.mean(b)
def calculateError(x , y , w , b):
t = np.sign((np.dot(w.T , x.T) + b)).astype(int)
return t - y
tparr,fparr=[],[]
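# Simplified SMO-style training loop: on each pass a random partner j is chosen for every i,
# alpha[j] is updated and clipped to the box [L, H], alpha[i] is adjusted to keep the constraint,
# then w and b are recomputed and validation TP/FP counts are recorded once per epoch.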
def fit():
alpha = np.zeros(x_train.shape[0])
epochs = 0
    while(True): # run for a fixed number of epochs (breaks after 10 below)
for i in range(0 , x_train.shape[0]):
j = selectRandom(0 , x_train.shape[0] - 1 , i) # selecting j randomly
xi , yi , xj , yj = dataPoints(i , j)
kij = 2 * kernelfunc(xi, xj) - kernelfunc(xi , xi) - kernelfunc(xj , xj)
if kij == 0:
continue
alpha_i , alpha_j = alpha[i] , alpha[j]
L , H = alphaBounds(alpha_i , alpha_j , yi , yj , c)
w = calculate_w(alpha , x_train , y_train)
b = calculate_b(w , x_train , y_train)
Ei = calculateError(xi , yi , w , b)
Ej = calculateError(xj , yj , w , b)
alpha[j] = alpha_j - float((yj * (Ei - Ej)) / kij)
alpha[j] = max(L , alpha[j])
alpha[j] = min(H , alpha[j])
alpha[i] = alpha_i + yi * yj * (alpha_j - alpha[j])
b1 = b - Ei - yi*kernelfunc(xi , xi)*(alpha[i] - alpha_i) - yj*kernelfunc(xi , xj)*(alpha[j] - alpha_j)
b2 = b - Ej - yi*kernelfunc(xi , xj)*(alpha[i] - alpha_i) - yj*kernelfunc(xj , xj)*(alpha[j] - alpha_j)
if b1 > 0 and b1 < c:
b = b1
elif b2 > 0 and b2 < c:
b = b2
else:
b = (b1 + b2) / 2
epochs += 1
count = 0
        ypred = np.sign(np.dot(w.T , x_validation.T) + b)  # evaluate on the validation set
tp=y_validation[np.logical_and(y_validation==1,ypred==1)].shape[0]
fp=y_validation[np.logical_and(y_validation==-1,ypred==1)].shape[0]
fn = y_validation[np.logical_and(y_validation==1,ypred== -1)].shape[0]
tn = y_validation[np.logical_and(y_validation==-1,ypred== -1)].shape[0]
# tp = tp / (tp + fn)
# fp = fp / (fp + tn)
tparr.append(tp)
fparr.append(fp)
if epochs == 10:
break
w = calculate_w(alpha , x_train , y_train)
b = calculate_b(w , x_train , y_train)
supportIndex = np.where(alpha != 0)[0]
supports = x_train[supportIndex , :]
return w , b , supports , tp , fp
carr = [0.01 , 0.1 , 1 , 10 , 100 , 1000 , 10000 , 100000]
warr = []
barr = []
farr = []
tarr = []
for i in range(8):
c = carr[i]
w , b , s , tp , fp = fit()
warr.append(w)
barr.append(b)
farr.append(fp)
tarr.append(tp)
ids = np.argsort(farr)
farr = np.asarray(farr)
tarr = np.asarray(tarr)
farr = farr[ids]
tarr = tarr[ids]
plt.scatter(farr,tarr)
w = warr[ids[0]]
b = barr[ids[0]]
ypred = np.sign(np.dot(w.T , x_test.T) + b)
count = 0
for i in range(200):
if ypred[i] == y_test[i]:
count += 1
print((count / 200) * 100)
| SVM/SVM.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Overview
# > overview of examples
# The examples section shows how to work with nnanno and the Newspaper Navigator data. Currently, there is an 'end-to-end' example for exploring advertisements using computer vision. More examples will be added in the future.
| nbs/examples/example_overview.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Used car analysis
#
# We'll work with a dataset of used car ads scraped from eBay. This dataset was supplied by Dataquest.
# +
import pandas as pd
import numpy as np
import re
autos = pd.read_csv('autos.csv', encoding='Latin-1')
# -
autos.head(5)
autos.info()
autos.head()
# Pro observation: **we got some columns**
autos_columns = autos.columns
autos_columns
autos.rename({
'yearOfRegistration':'registration_year',
'monthOfRegistration':'registration_month',
'notRepairedDamage':'unrepaired_damage',
'dateCreated':'ad_created',
'dateCrawled': 'date_crawled',
'offerType': 'offer_type',
'vehicleType': 'vehicle_type',
'powerPS': 'power_ps',
'fuelType': 'fuel_type',
'nrOfPictures': 'num_of_pictures',
'postalCode': 'postal_code',
'lastSeen': 'last_seen',
}, inplace=True, axis='columns')
autos.head()
# ### Renamed all of the columns to snakecase to make them easier to navigate and also to suit column name best practices
autos.describe(include='all')
# ### num_of_pictures seems to be zero for every row. postal_code is stored as a number even though it should be treated as a string. registration_year ranges from 1000 to 9999. I don't know anybody in the year 9999.... yet
# ### price should be stored as a number but it's stored as a string. odometer is also stored as a string. seller and offer_type have almost all of their values the same
autos['price'].value_counts().head(10)
autos['price'] = autos['price'].str.replace("$", "").str.replace(",", "").astype(int)
autos['odometer'].value_counts()
autos['odometer'] = autos['odometer'].str.replace(",", "").str.replace("km", "").astype(int)
autos.rename({'odometer': 'odometer_km'}, inplace=True, axis='columns')
autos.head()
# ### Let's take a closer look at the odometer_km column
autos['odometer_km'].unique().shape
autos['odometer_km'].describe()
autos['odometer_km'].value_counts(ascending=False)
# ### Doesn't seem to be too many outliers. Let's explore the price column
autos['price'].unique().shape
autos['price'].describe()
# ### Use format(x, 'f') in order to suppress scientific notation
autos['price'].describe().apply(lambda x: format(x, 'f'))
# ### It seems like there's a LOAD of outliers sitting at the top of the price column
autos['price'].value_counts().sort_index(ascending=False).head(20)
autos['price'].value_counts().sort_index(ascending=False).tail(20)
# ### there are a ton of really REALLY cheap cars. These should be considered outliers and stripped
#
# Let's remove cars sold for free and anything over 350k. Really, I should be stripping a lot more out of the bottom
autos = autos[autos['price'].between(1,350000)]
autos.shape
# ### We're left with 48.5k cars
autos['date_crawled'].str[:10].value_counts(normalize=True, dropna=False).sort_index(ascending=True)
autos['date_crawled'].isnull().value_counts(normalize=True, dropna=False)
# ### No null values. Pages were crawled between 5th March and 7th April
autos['date_crawled'].describe()
autos['ad_created'].describe()
autos['ad_created'].str[:10].value_counts(dropna=False).sort_index(ascending=False).tail(20)
autos['ad_created'].str[:10].value_counts(dropna=False).head(20)
# ### Majority of ads were created between March and April.
autos['last_seen'].str[:10].describe(include='all')
autos['last_seen'].str[:10].value_counts(dropna=False).tail(10)
autos['last_seen'].str[:10].value_counts(dropna=False).head(10)
# ### Majority of ads last seen in April and March
autos['registration_year'].describe(include='all')
# ### Apparently some were registered in the year 1000 and 9999.... Let's strip out those mothafuckas
autos['registration_year'].value_counts().sort_index(ascending=False).head(20)
# Can strip out everything after 2018. There's only a couple from 2019
autos['registration_year'].value_counts().sort_index(ascending=False).tail(70)
# ### Need to work on outlier recognition. I'm not sure how to use box plots yet so let's just strip out everything before 1960
autos = autos[autos['registration_year'].between(1960,2018)]
autos.shape
autos['registration_year'].describe(include='all')
autos['registration_year'].value_counts().sort_index()
# ### All of the years remaining in the dataset are believable. The majority of the cars are in the 90s-2010s
top_brands = autos['brand'].value_counts().head(20).index
top_brands
autos['brand'].describe(include='all')
# ### Brand contains the brand of car in the dataset. I've decided to explore the data for the top 20 listed in the dataset and find the mean price for those brands
brand_mean_price = {}
for brand in top_brands:
brand_mean_price[brand] = autos[autos['brand'] == brand]['price'].mean()
# +
# .items() returns a tuple of dictionary values. By using the lambda function one can sort the items by value
sorted(brand_mean_price.items(), key=lambda x: x[1], reverse=True)
# -
# ### Sonstige_autos are mad expensive, fam. Renault comes in at the cheapest
autos[autos['brand']=='sonstige_autos']['price'].value_counts().sort_index(ascending=False).head(10)
# ### The 345000 car could be considered an outlier which is why the sonstige_autos cost is coming out on top
autos.columns
autos['odometer_km'].value_counts()
brand_mean_mileage = {}
for brand in top_brands:
brand_mean_mileage[brand] = autos[autos['brand'] == brand]['odometer_km'].mean()
brand_mean_mileage
brand_mileage = pd.Series(brand_mean_mileage)
brand_mileage
df = pd.DataFrame(brand_mileage, columns=['mean_mileage'])
brand_price = pd.Series(brand_mean_price)
df
df['mean_price'] = brand_price
brand_price
df
df['mileage_to_price'] = df['mean_mileage'] / df['mean_price']
df
df.sort_values(by='mileage_to_price', ascending=False)
# ### Renault has the best mileage-to-price ratio
autos.columns
autos.groupby(['brand', 'model']) \
.size() \
.reset_index() \
.rename({0: 'count'}, axis='columns') \
.sort_values('count', ascending=False) \
.head(10)
# ### Volkswagen Golf is the most common combination, closely followed by VW Polo
| ebay_car_sales/UsedCarAnalysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Try setting OMP_NUM_THREADS=1.
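# For example (an added illustration, not from the original notebook), the variable can be pinned from within Python; it typically needs to be set before the numerical libraries are imported:
# +
import os
os.environ['OMP_NUM_THREADS'] = '1'  # limit OpenMP-backed libraries (e.g. BLAS) to a single thread
# -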
# +
import glob
import itertools
import logging
from operator import attrgetter
import os
import pprint
import time
import matplotlib.pyplot as plt
# %matplotlib inline
import scipy.misc
import numpy as np
import pandas as pd
import scipy.cluster.hierarchy as hc
from scipy.spatial.distance import pdist
import sklearn.utils
from mrfitty.base import ReferenceSpectrum
from mrfitty.base import InterpolatedSpectrumSet
logging.basicConfig(level=logging.WARN)
# -
iron_archived_cores_data_dir_path = '/home/jlynch/host/project/th_sln/archived_tills_for_trees_Jan_30_2017/'
os.path.exists(iron_archived_cores_data_dir_path)
# Read all iron spectra in the core directories.
iron_archived_reference_glob = os.path.join(iron_archived_cores_data_dir_path, 'Fe_references/*.e')
print('references glob: {}'.format(iron_archived_reference_glob))
iron_archived_cores_spectrum_glob = os.path.join(iron_archived_cores_data_dir_path, '*/*_Fe_XANES/*.e')
print('cores glob: {}'.format(iron_archived_cores_spectrum_glob))
iron_archived_reference_list, _ = list(ReferenceSpectrum.read_all([iron_archived_reference_glob]))
print('reference count: {}'.format(len(iron_archived_reference_list)))
iron_archived_cores_spectrum_list, _ = list(ReferenceSpectrum.read_all([iron_archived_cores_spectrum_glob]))
print('core spectrum count: {}'.format(len(iron_archived_cores_spectrum_list)))
# What are the maximum and minimum reference energies?
reference_min_energy = np.max([r.data_df.energy.values[0] for r in iron_archived_reference_list])
reference_max_energy = np.min([r.data_df.energy.values[-1] for r in iron_archived_reference_list])
print('reference minimum energy: {:5.2f}'.format(reference_min_energy))
print('reference maximum energy: {:5.2f}'.format(reference_max_energy))
# What are the maximum and minimum core spectrum energies?
min_energy = np.max([r.data_df.energy.values[0] for r in iron_archived_cores_spectrum_list])
max_energy = np.min([r.data_df.energy.values[-1] for r in iron_archived_cores_spectrum_list])
print('minimum energy: {:5.2f}'.format(min_energy))
print('maximum energy: {:5.2f}'.format(max_energy))
interpolate_energy_range = np.linspace(start=7100.0, stop=7250.0, num=200)
print('interpolate_energy_range.shape: {}'.format(interpolate_energy_range.shape))
print('interpolate_energy_range:\n{}'.format(pprint.pformat(interpolate_energy_range.tolist()[:10])))
# interpolate references and spectra in one data frame because concatenating data frames with a
# floating point index is not working for me
interpolated_iron_archived_ref_and_cores_df = InterpolatedSpectrumSet.get_interpolated_spectrum_set_df(
energy_range=interpolate_energy_range,
spectrum_set=set(itertools.chain(iron_archived_reference_list, iron_archived_cores_spectrum_list)))
interpolated_iron_archived_ref_and_cores_df.plot().legend(loc='center left', bbox_to_anchor=(1, 0.5))
interpolated_iron_archived_ref_and_cores_df.head()
def permute_row_elements(df):
for i in range(df.shape[0]):
df.values[i, :] = sklearn.utils.shuffle(df.values[i, :])
return df
# demonstrate permuting row elements
x_df = pd.DataFrame(data=np.array(range(9)).reshape((3,3)))
print('before permuting row elements:')
print(x_df.head())
permute_row_elements(x_df)
print('after permuting row elements:')
print(x_df)
# +
def cluster_with_sig_cut(variable_by_sample_df, title, pdist_metric, linkage_method):
#pdist_metric = 'correlation'
distance_for_sample_pairs = pdist(X=np.transpose(variable_by_sample_df.values), metric=pdist_metric)
print('{}: {} sample pairs'.format(title, len(distance_for_sample_pairs)))
plt.figure()
plt.title(title)
plt.hist(distance_for_sample_pairs)
plt.xlabel('{} distance'.format(pdist_metric))
plt.ylabel('{} pairs'.format(variable_by_sample_df.shape))
plt.show()
resample_count = 1000
expected_distance_list = []
for i in range(resample_count):
# permute the elements of each row of variable_by_sample_df
p_variable_by_sample_df = permute_row_elements(variable_by_sample_df.copy())
p_distance_for_sample_pairs = pdist(X=np.transpose(p_variable_by_sample_df.values), metric=pdist_metric)
p_linkage_distance_variable_by_sample = hc.linkage(y=p_distance_for_sample_pairs, method=linkage_method)
p_dendrogram = hc.dendrogram(Z=p_linkage_distance_variable_by_sample, no_plot=True)
expected_distance_list.extend([d for (_, _, d, _) in p_dendrogram['dcoord']])
p = 95.0
alpha = 1.0 - p/100.0
cutoff_distance = np.percentile(expected_distance_list, q=p)
print('cutoff distance is {}'.format(cutoff_distance))
plt.figure()
plt.hist(expected_distance_list)
plt.title('dendrogram distance null distribution')
plt.show()
linkage_distance_variable_by_sample = hc.linkage(y=distance_for_sample_pairs, method=linkage_method)
plt.figure(figsize=(3.7, 7))
dendrogram = hc.dendrogram(
Z=linkage_distance_variable_by_sample,
orientation='left',
labels=variable_by_sample_df.columns)
icoords = [i for i in itertools.chain(dendrogram['icoord'])]
plt.vlines(cutoff_distance, ymin=np.min(icoords), ymax=np.max(icoords))
plt.title('{}\n{} linkage'.format(title, linkage_method))
plt.xlabel('{} distance'.format(pdist_metric))
plt.savefig(title + '.pdf', format='pdf')
plt.show()
# -
for core in ['OTT3', 'TG3', 'UMRB2']:
# combine core and references
ref_column_list = tuple([c for c in interpolated_iron_archived_ref_and_cores_df.columns if 'standard' in c])
print('reference column list has {} elements:\n{}'.format(len(ref_column_list), pprint.pformat(ref_column_list)))
core_column_list = tuple([c for c in interpolated_iron_archived_ref_and_cores_df.columns if core in c])
print('core {} column list has {} elements:\n{}'.format(core, len(core_column_list), pprint.pformat(core_column_list)))
core_interpolated_iron_archived_df = interpolated_iron_archived_ref_and_cores_df.loc[:, core_column_list]
core_interpolated_iron_archived_df.plot().legend(loc='center left', bbox_to_anchor=(1, 0.5))
core_interpolated_iron_archived_df.head()
core_and_ref_column_list = tuple(itertools.chain(core_column_list, ref_column_list))
core_and_ref_interpolated_iron_archived_df = interpolated_iron_archived_ref_and_cores_df.loc[:, core_and_ref_column_list]
core_and_ref_interpolated_iron_archived_df.plot().legend(loc='center left', bbox_to_anchor=(1, 0.5))
core_and_ref_interpolated_iron_archived_df.head()
cluster_with_sig_cut(
core_interpolated_iron_archived_df,
title='Fe core {} ({} spectra)'.format(core, core_interpolated_iron_archived_df.shape[1]),
pdist_metric='correlation',
linkage_method='complete')
cluster_with_sig_cut(
core_and_ref_interpolated_iron_archived_df,
title='Fe core {} and references ({} spectra)'.format(core, core_and_ref_interpolated_iron_archived_df.shape[1]),
pdist_metric='correlation',
linkage_method='complete')
# +
# all cores
ref_column_list = tuple([c for c in interpolated_iron_archived_ref_and_cores_df.columns if 'standard' in c])
print('reference column list has {} elements:\n{}'.format(len(ref_column_list), pprint.pformat(ref_column_list)))
core_column_list = tuple([c for c in interpolated_iron_archived_ref_and_cores_df.columns if 'standard' not in c])
print('all cores column list has {} elements:\n{}'.format(len(core_column_list), pprint.pformat(core_column_list)))
core_interpolated_iron_archived_df = interpolated_iron_archived_ref_and_cores_df.loc[:, core_column_list]
core_interpolated_iron_archived_df.plot().legend(loc='center left', bbox_to_anchor=(1, 0.5))
core_interpolated_iron_archived_df.head()
core_and_ref_column_list = tuple(itertools.chain(core_column_list, ref_column_list))
core_and_ref_interpolated_iron_archived_df = interpolated_iron_archived_ref_and_cores_df.loc[:, core_and_ref_column_list]
core_and_ref_interpolated_iron_archived_df.plot().legend(loc='center left', bbox_to_anchor=(1, 0.5))
core_and_ref_interpolated_iron_archived_df.head()
cluster_with_sig_cut(
core_interpolated_iron_archived_df,
title='Fe all cores({} spectra)'.format(core_interpolated_iron_archived_df.shape[1]),
pdist_metric='correlation',
linkage_method='complete')
cluster_with_sig_cut(
core_and_ref_interpolated_iron_archived_df,
title='Fe all cores and references ({} spectra)'.format(core_and_ref_interpolated_iron_archived_df.shape[1]),
pdist_metric='correlation',
linkage_method='complete')
# -
| notebooks/hc_sig_cut_archived_tills_Fe.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Logistic Regression
#
# This function shows how to use TensorFlow to solve logistic regression.
# $ \textbf{y} = sigmoid(\textbf{A}\times \textbf{x} + \textbf{b})$
#
# We will use the low birth weight data, specifically:
# ```
# # y = 0 or 1 = low birth weight
# # x = demographic and medical history data
# ```
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import requests
from tensorflow.python.framework import ops
import os.path
import csv
# +
ops.reset_default_graph()
# Create graph
sess = tf.Session()
# -
# ## Obtain and prepare data for modeling
# +
# name of data file
birth_weight_file = 'birth_weight.csv'
# download data and create data file if file does not exist in current directory
if not os.path.exists(birth_weight_file):
birthdata_url = 'https://github.com/nfmcclure/tensorflow_cookbook/raw/master/01_Introduction/07_Working_with_Data_Sources/birthweight_data/birthweight.dat'
birth_file = requests.get(birthdata_url)
birth_data = birth_file.text.split('\r\n')
birth_header = birth_data[0].split('\t')
birth_data = [[float(x) for x in y.split('\t') if len(x)>=1] for y in birth_data[1:] if len(y)>=1]
with open(birth_weight_file, "w") as f:
writer = csv.writer(f)
writer.writerows(birth_data)
f.close()
# read birth weight data into memory
birth_data = []
with open(birth_weight_file, newline='') as csvfile:
csv_reader = csv.reader(csvfile)
birth_header = next(csv_reader)
for row in csv_reader:
birth_data.append(row)
birth_data = [[float(x) for x in row] for row in birth_data]
# Pull out target variable
y_vals = np.array([x[1] for x in birth_data])
# Pull out predictor variables (not id, not target, and not birthweight)
x_vals = np.array([x[2:9] for x in birth_data])
# set for reproducible results
seed = 99
np.random.seed(seed)
tf.set_random_seed(seed)
# Split data into train/test = 80%/20%
train_indices = np.random.choice(len(x_vals), round(len(x_vals)*0.8), replace=False)
test_indices = np.array(list(set(range(len(x_vals))) - set(train_indices)))
x_vals_train = x_vals[train_indices]
x_vals_test = x_vals[test_indices]
y_vals_train = y_vals[train_indices]
y_vals_test = y_vals[test_indices]
# Normalize by column (min-max norm)
def normalize_cols(m):
col_max = m.max(axis=0)
col_min = m.min(axis=0)
return (m-col_min) / (col_max - col_min)
x_vals_train = np.nan_to_num(normalize_cols(x_vals_train))
x_vals_test = np.nan_to_num(normalize_cols(x_vals_test))
# -
# ## Define TensorFlow computational graph
# +
# Declare batch size
batch_size = 25
# Initialize placeholders
x_data = tf.placeholder(shape=[None, 7], dtype=tf.float32)
y_target = tf.placeholder(shape=[None, 1], dtype=tf.float32)
# Create variables for logistic regression
A = tf.Variable(tf.random_normal(shape=[7,1]))
b = tf.Variable(tf.random_normal(shape=[1,1]))
# Declare model operations
model_output = tf.add(tf.matmul(x_data, A), b)
# Declare loss function (Cross Entropy loss)
loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=model_output, labels=y_target))
# Declare optimizer
my_opt = tf.train.GradientDescentOptimizer(0.01)
train_step = my_opt.minimize(loss)
# -
# ## Train model
# +
# Initialize variables
init = tf.global_variables_initializer()
sess.run(init)
# Actual Prediction
prediction = tf.round(tf.sigmoid(model_output))
predictions_correct = tf.cast(tf.equal(prediction, y_target), tf.float32)
accuracy = tf.reduce_mean(predictions_correct)
# Training loop
loss_vec = []
train_acc = []
test_acc = []
for i in range(1500):
rand_index = np.random.choice(len(x_vals_train), size=batch_size)
rand_x = x_vals_train[rand_index]
rand_y = np.transpose([y_vals_train[rand_index]])
sess.run(train_step, feed_dict={x_data: rand_x, y_target: rand_y})
temp_loss = sess.run(loss, feed_dict={x_data: rand_x, y_target: rand_y})
loss_vec.append(temp_loss)
temp_acc_train = sess.run(accuracy, feed_dict={x_data: x_vals_train, y_target: np.transpose([y_vals_train])})
train_acc.append(temp_acc_train)
temp_acc_test = sess.run(accuracy, feed_dict={x_data: x_vals_test, y_target: np.transpose([y_vals_test])})
test_acc.append(temp_acc_test)
if (i+1)%300==0:
print('Loss = ' + str(temp_loss))
# -
# ## Display model performance
# +
# Plot loss over time
plt.plot(loss_vec, 'k-')
plt.title('Cross Entropy Loss per Generation')
plt.xlabel('Generation')
plt.ylabel('Cross Entropy Loss')
plt.show()
# Plot train and test accuracy
plt.plot(train_acc, 'k-', label='Train Set Accuracy')
plt.plot(test_acc, 'r--', label='Test Set Accuracy')
plt.title('Train and Test Accuracy')
plt.xlabel('Generation')
plt.ylabel('Accuracy')
plt.legend(loc='lower right')
plt.show()
| ch03_regression/08_logistic_regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
import numpy as np
import networkx as nx
# +
dataset = 'blogcatalog'
raw_folder = '../../dropbox/data/%s' % dataset
output_folder = '../../dropbox/data/%s-connect' % dataset
if not os.path.isdir(output_folder):
os.makedirs(output_folder)
with open('%s/adj_list.txt' % output_folder, 'w') as fout:
with open('%s/meta.txt' % raw_folder, 'r') as f_meta:
w = f_meta.readline().split()
n_nodes = int(w[0])
with open('%s/label.txt' % output_folder, 'w') as flabel:
for i in range(n_nodes):
flabel.write('0 1\n')
for i in range(n_nodes):
flabel.write('1 0\n')
with open('%s/meta.txt' % output_folder, 'w') as f_meta:
f_meta.write('%d %d\n' % (n_nodes * 2, 2))
with open('%s/adj_list.txt' % raw_folder, 'r') as fin:
for row in fin:
fout.write(row)
with open('%s/adj_list.txt' % raw_folder, 'r') as fin:
for row in fin:
row = row.split()
fout.write('%s' % row[0])
for i in range(1, len(row)):
fout.write(' %d' % (int(row[i]) + n_nodes))
fout.write('\n')
# +
dataset = 'n-chains'
output_folder = '../../dropbox/data/%s-connect' % dataset
if not os.path.isdir(output_folder):
os.makedirs(output_folder)
n_nodes = 1000
with open('%s/meta.txt' % output_folder, 'w') as f_meta:
f_meta.write('%d %d\n' % (n_nodes * 2, 2))
with open('%s/adj_list.txt' % output_folder, 'w') as fout:
with open('%s/label.txt' % output_folder, 'w') as flabel:
for i in range(n_nodes):
flabel.write('0 1\n')
for i in range(n_nodes):
flabel.write('1 0\n')
for i in range(n_nodes):
if i > 0 and i < n_nodes - 1:
fout.write('2')
else:
fout.write('1')
if i > 0:
fout.write(' %d' % (i - 1))
if i < n_nodes - 1:
fout.write(' %d' % (i + 1))
fout.write('\n')
for i in range(n_nodes):
if i > 0 and i < n_nodes - 1:
fout.write('2')
else:
fout.write('1')
if i > 0:
fout.write(' %d' % (n_nodes + i - 1))
if i < n_nodes - 1:
fout.write(' %d' % (n_nodes + i + 1))
fout.write('\n')
| code/data_process/connectivity.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %run -p ../train_cnf_disentangle_rl.py --data cifar10 --dims 64,64,64 --strides 1,1,1,1 --num_blocks 2 --layer_type concat --multiscale True --rademacher True --batch_size 8000 --test_batch_size 5000 --save ../experiments_published/cnf_conditional_disentangle_cifar10_bs8K_sratio_0_5_drop_0_5_rl_stdscale_15_run1 --seed 1 --conditional True --controlled_tol True --train_mode semisup --lr 0.01 --warmup_iters 1000 --atol 1e-4 --rtol 1e-4 --weight_y 0.5 --condition_ratio 0.5 --dropout_rate 0.5 --scale_fac 1.0 --scale_std 15.0 --max_grad_norm 20.0
#
| conditional/main_conditional_disentangle_cifar_bs8K_sratio_0_5_drop_0_5_rl_stdscale_15_run1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import populartimes
import math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Get popular times data from Google Maps API
restaurant_data = populartimes.get_id("AIzaSyAutnmsRe7lL1gNZlAleHY1F4Tt5bG8qoo", "ChIJx7dVTY0B3IARvISOSzCb8Uc")
# -
# # How can placing a SurfPod in JRDN restaurant increase traffic and revenue?
#
# By analyzing the "popular times" data provided by Google Maps API, we are able to get an estimate of how full (%capacity) the beachfront restaurant is at different times of the day, on different days of the week.
#
# Upon quick visual analysis, we observe the following:
# - JRDN does not receive a lot of morning traffic
# - Things pick up around noon (especially on weekends)
# - There is an afternoon lull
# - Traffic peaks around dinner hours (6-8pm)
# - JRDN is busiest on weekends
# +
times = ["9am", "10am", "11am", "12pm", "1pm", "2pm", "3pm", "4pm", "5pm", "6pm", "7pm", "8pm", "9pm"]
times_data = restaurant_data["populartimes"]
d = {times_data[0]["name"]: pd.Series(times_data[0]["data"][9:22], index=times),
times_data[1]["name"]: pd.Series(times_data[1]["data"][9:22], index=times),
times_data[2]["name"]: pd.Series(times_data[2]["data"][9:22], index=times),
times_data[3]["name"]: pd.Series(times_data[3]["data"][9:22], index=times),
times_data[4]["name"]: pd.Series(times_data[4]["data"][9:22], index=times),
times_data[5]["name"]: pd.Series(times_data[5]["data"][9:22], index=times),
times_data[6]["name"]: pd.Series(times_data[6]["data"][9:22], index=times)}
df = pd.DataFrame(d)
df.plot.bar(figsize=(20,10));
# -
# ## JRDN Restaurant Weekly Traffic Estimate
#
# Working backwards, we can use this data to get a rough estimate of how many customers JRDN receives on a weekly basis. Using the popular times values as percentages, we multiply the data points by the restaurant's capacity* to estimate hourly traffic, and sum these values to calculate the weekly average.
#
# \**I called the restaurant and was told by the manager that the restaurant seats a maximum of 90 people.*
# +
restaurant_capacity = 90
restaurant_traffic = 0
for d in times_data:
for t in d["data"][9:22]:
restaurant_traffic += t/100*restaurant_capacity
restaurant_traffic = math.floor(restaurant_traffic)
restaurant_traffic
# -
# ## SurfPod Weekly Traffic Estimate
#
# Now, we estimate the weekly traffic of a SurfPod within walking distance of the beach using assumed figures based on preliminary experiments.
# +
surfpod_capacity = 4
surfpod_popularity = [
75, # Experienced surfer traffic (9am-10am)
75,
50,
25, # Mid-day lull (Noon)
50,
75,
75,
100, # Traffic picks up (rented out after 4pm)
100,
100,
100,
100
]
surfpod_traffic = 0
for p in surfpod_popularity:
surfpod_traffic += p/100 * surfpod_capacity
surfpod_traffic = math.floor(surfpod_traffic*7) # Multiply daily traffic by 7
surfpod_traffic
# -
# **Based on these figures, we can hypothesize that a SurfPod placed inside JRDN might increase restaurant traffic by approximately 5-10%.**
# ## Additional Revenue Estimate
#
# If the SurfPod can bring in an additional 250-300 potential customers a week, how many of these people will actually sit down at the restaurant and dine?
#
# Assume a rough estimate of 20% for a surfer's propensity to consume.*
#
# \**This figure is loosely based on survey answers from https://www.statista.com/markets/420/topic/494/restaurants/ but, in reality, not enough data has been collected to produce an accurate number.*
propensity = 0.2
math.floor(surfpod_traffic*propensity)
# If approximately 50 more people dine every week at JRDN because of the SurfPod, approximately how much additional weekly revenue does that generate for the restaurant?
#
# After entering the menu item prices in an Excel spreadsheet, we can get the following mean prices for meals at different times of day:
#
# <img src="menu-pricing.png" width="140">
# <div style="text-align: center">
# Breakfast (9am-11am): \\$14.55<br>
# Brunch (9am-3:30pm Sat + Sun): \\$15.24<br>
# Lunch (11am-4pm): \\$17.15<br>
# Dinner (5pm-9pm): \\$21.87<br>
# </div>
# +
breakfast_mean_price = 14.55
brunch_mean_price = 15.24
lunch_mean_price = 17.15
dinner_mean_price = 21.87
# Monday - Friday
mf_revenue = 0
for i, p in enumerate(surfpod_popularity):
if i < 2: mf_revenue += (p/100*surfpod_capacity)*propensity*breakfast_mean_price
elif i > 1 and i < 7: mf_revenue += (p/100*surfpod_capacity)*propensity*lunch_mean_price
elif i > 7: mf_revenue += (p/100*surfpod_capacity)*propensity*dinner_mean_price
mf_revenue = mf_revenue*5
# Saturday - Sunday
ss_revenue = 0
for i, p in enumerate(surfpod_popularity):
if i < 7: ss_revenue += (p/100*surfpod_capacity)*propensity*brunch_mean_price
elif i > 7: ss_revenue += (p/100*surfpod_capacity)*propensity*dinner_mean_price
ss_revenue = ss_revenue*2
additional_revenue = mf_revenue + ss_revenue
additional_revenue
# -
# According to this calculation, the SurfPod would bring in approximately \\$869.47 worth of additional weekly revenue. Annually, this would translate to an additional \\$45,336.77 in revenue generated from the SurfPod.
#
# **Based on these assumptions and calculations, we can hypothesize that a SurfPod would generate an additional \\$40,000-50,000 in revenue for JRDN.**
# # Conclusion
#
# In summary, we hypothesize that placing a SurfPod inside JRDN or on a plot of grass in front of the restaurant would provide a net benefit to the restaurant by **increasing**:
# - restaurant traffic approximately 5-10%
# - annual revenue approximately \\$40,000-50,000
#
# ### Additional Notes and Strategies
#
# Recall some of our additional qualitative observations regarding the popular times data:
# - JRDN does not receive a lot of morning traffic
# - There is an afternoon lull
#
# SurfUp's mobile platform has the potential to help boost sales during these low-traffic periods through incentives and promotions:
# - Experienced surfers tend to rent surfboards in the morning and evening. Bringing hungry surfers into the restaurant in the morning could help boost morning traffic and sales.
# - Beginner surfers tend to rent surfboards in the afternoon. Using a promotion deployed through a push notification to SurfUp users within a given radius, you could aggregate demand and bring lots of beginners to the restaurant.
# - Most beginner surfers are tourists, one of the primary target markets for the restaurant, who likely have a higher consumption propensity than local residents.
#
| JRDN SurfPod Analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from dask.distributed import Client
import dask.bag as db
import string
client = Client(n_workers=4)
def replace(word):
for a in string.punctuation+'¿¡«»':
word = word.replace(a,'')
return word
# %%time
data = db.read_text('quevedo_10000.txt',encoding='latin-1')
datanp = data.map(replace)
words_bag = datanp.map(lambda x: x.split()).flatten()
#if compute() is not called, nothing is output
#print(words_bag.compute()[:10])
appearances_bag = words_bag.map(lambda x: (x.lower(),1))
#print (appearances_bag.compute()[:10])
result_bag = words_bag.map(lambda x: x.lower()).frequencies()
print ('RESULTS------------------')
#print ('words frequency', result_bag.compute()[:10])
sorted_bag = words_bag.map(lambda x: x.lower()).frequencies(sort=True)
print ('RESULTS------------------')
print ('words frequency sorted', sorted_bag.compute()[:10])
| Dask/word_frecuency_sort_dask.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: drlnd
# language: python
# name: drlnd
# ---
# # Multi Agent DDPG Agent for Tennis Game (Report)
#
# ---
# This is a brief report on how the tennis game solution was developed, implemented and executed.
#
# It was understood that, in this tennis game, the observations of the two players are symmetrical. Thus, for the same state inputs, the players should act with the same actions. Therefore, a single DDPG agent was implemented and used simultaneously to compute the actions of both players. Moreover, the same replay buffer was used to store the experiences of both players.
#
# The environment was solved within 1134 episodes and the average score continued to rise to 1.5+ as the training progressed.
#
#
# ## Steps taken for training the agent
# ### 1. Initiation of environment and agent
#
# Necessary packages were imported, then the environment and the agent were initialised.
# +
import torch
import numpy as np
from collections import deque
from unityagents import UnityEnvironment
from ddpg_agent import Agent
import matplotlib.pyplot as plt
# %matplotlib inline
# instantiate the environment and agent
env = UnityEnvironment(file_name="/home/labassistant/deep-reinforcement-learning/p3_collab-compet/Tennis_Linux/Tennis.x86_64")
agent = Agent(state_size=24, action_size=2, random_seed=2)
# get the default brain
brain_name = env.brain_names[0]
brain = env.brains[brain_name]
# get the number of agents in the environment
env_info = env.reset(train_mode=True)[brain_name]
num_agents = len(env_info.agents)
# -
# ### 2.The network architecture for Actor and Critic
print('Local Actor Network:\n', agent.actor_local, '\n')
print('Target Actor Network:\n', agent.actor_target, '\n')
print('Local Critic Network:\n', agent.critic_local, '\n')
print('Target Critic Network:\n', agent.critic_target, '\n')
# ### 3. Training process details
#
# #### Hyper Parameters
# - BUFFER_SIZE = 100000 (replay buffer size)
# - BATCH_SIZE = 300 (minibatch size)
# - GAMMA = 0.99 (discount factor)
# - TAU = 1e-3 (for soft update of target parameters)
# - LR_ACTOR = 1e-4 (learning rate of the actor)
# - LR_CRITIC = 1e-3 (learning rate of the critic)
def ddpg(n_episodes=2000):
scores_deque = deque(maxlen=100)
scores = []
best_score = 0
print_every=400
environment_solved = False
environment_solved_episode = 0
best_performance_episode = 0
for i_episode in range(1, n_episodes+1):
env_info = env.reset(train_mode=True)[brain_name]
states = env_info.vector_observations
agent.reset()
score = np.zeros(num_agents)
while True:
if i_episode > 1000:
actions = agent.act(states, noise_level=0)
else:
actions = agent.act(states, noise_level=1)
env_info = env.step(actions)[brain_name]
next_states = env_info.vector_observations
rewards = env_info.rewards
dones = env_info.local_done
agent.memorize(states, actions, rewards, next_states, dones)
states = next_states
score += rewards
agent.learn()
if np.any(dones):
break
episode_score = np.max(score)
scores_deque.append(episode_score)
scores.append(episode_score)
if (not environment_solved) and np.mean(scores_deque)>0.5 :
environment_solved_episode = i_episode
environment_solved = True
if np.mean(scores_deque) > best_score:
torch.save(agent.actor_local.state_dict(), 'tennisDDPG_actor.pth')
torch.save(agent.critic_local.state_dict(), 'tennisDDPG_critic.pth')
best_score = np.mean(scores_deque)
best_performance_episode = i_episode
print('\rEpisode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_deque)), end="")
if i_episode % print_every == 0:
print('\rEpisode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_deque)))
print(
'Environment solved in {} episodes. Best average score of {} reached at {} episodes.'.format(
environment_solved_episode, best_score, best_performance_episode))
return scores
# ### 4. Training and result
# +
scores = ddpg()
fig = plt.figure()
ax = fig.add_subplot(111)
plt.plot(np.arange(1, len(scores)+1), scores)
plt.ylabel('Score')
plt.xlabel('Episode')
plt.show()
# -
# ### 5. Future ideas
#
# Fine-tuning various hyperparameters of the system could further improve its performance. Specifically, the size of the replay buffer had a noticeable impact on learning efficiency. Moreover, adding more hidden layers and changing the number of units per layer was found to affect the training speed of the agent. Additionally, prioritised experience replay could possibly improve the training speed.
# ### 6. Testing of the learned Agent
# +
import torch
import numpy as np
from unityagents import UnityEnvironment
from ddpg_agent import Agent
# instantiate the environment and agent
env = UnityEnvironment(file_name="/home/labassistant/deep-reinforcement-learning/p3_collab-compet/Tennis_Linux/Tennis.x86_64")
agent = Agent(state_size=24, action_size=2, random_seed=2)
# get the default brain
brain_name = env.brain_names[0]
brain = env.brains[brain_name]
# get the number of agents in the environment
env_info = env.reset(train_mode=False)[brain_name]
num_agents = len(env_info.agents)
agent.actor_local.load_state_dict(torch.load( 'tennisDDPG_actor.pth'))
agent.critic_local.load_state_dict(torch.load('tennisDDPG_critic.pth'))
for i in range(1, 6): # play game for 5 episodes
env_info = env.reset(train_mode=False)[brain_name] # reset the environment
states = env_info.vector_observations # get the current state (for each agent)
scores = np.zeros(num_agents) # initialize the score (for each agent)
while True:
actions = agent.act(states, noise_level=0) # all actions between -1 and 1
        env_info = env.step(actions)[brain_name]           # send all actions to the environment
next_states = env_info.vector_observations # get next state (for each agent)
rewards = env_info.rewards # get reward (for each agent)
dones = env_info.local_done # see if episode finished
scores += env_info.rewards # update the score (for each agent)
states = next_states # roll over states to next time step
if np.any(dones): # exit loop if episode finished
break
print('Score (max over agents) from episode {}: {}'.format(i, np.max(scores)))
# -
env.close()
| Report.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Testing area for Exception in thread
# +
from threading import Thread
from time import sleep
def test(waittime=3):
print('started')
sleep(waittime)
raise Exception('thats the one')
return 1
t = Thread(target=test)
t.start()
sleep(2)
print('joining')
t.join()
print('all done')
# -
# # Example wrapper thread
# +
from threading import Thread
class PropagatingThread(Thread):
def run(self):
self.exc = None
try:
if hasattr(self, '_Thread__target'):
# Thread uses name mangling prior to Python 3.
self.ret = self._Thread__target(*self._Thread__args, **self._Thread__kwargs)
else:
self.ret = self._target(*self._args, **self._kwargs)
except BaseException as e:
self.exc = e
def join(self):
super(PropagatingThread, self).join()
if self.exc:
raise self.exc
return self.ret
# +
def f(*args, **kwargs):
print(args)
print(kwargs)
raise Exception('I suck')
t = PropagatingThread(target=f, args=(5,), kwargs={'hello':'world'})
t.start()
t.join()
# -
| python/Tread exception.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # "[OptimizationTheory] CH01. Introduction"
# > Optimization theory summary note.
#
# - toc: false
# - badges: false
# - comments: false
# - categories: [optimization-theory]
# - hide_{github,colab,binder,deepnote}_badge: true
# + [markdown] tags=[]
# #### 1.0. Optimization
# > Optimization is the process of creating something that is as effective as possible. From a mathematical perspective, optimization deals with finding the maxima and minima of a function that depends on one or more variables.
#
# For example, determine the optimum of the following height function analytically:
#
# $$
# z = z_0 + \frac{m}{c}\left(v_0 + \frac{mg}{c}\right)\left(1 - \exp(-(c/m)t)\right) - \frac{mg}{c}t \quad \text{where} \,\ g = 9.81, \,\ z_0 = 100, \,\ v_0 = 55, \,\ m = 80, \,\ c = 15
# $$
# -
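# As a quick numerical cross-check (an added sketch, not part of the original note; the symbols follow the formula above), the optimum can also be located by evaluating $z(t)$ on a fine grid:
# +
import numpy as np

g, z0, v0, m, c = 9.81, 100.0, 55.0, 80.0, 15.0

def z(t):
    return z0 + (m / c) * (v0 + m * g / c) * (1 - np.exp(-(c / m) * t)) - (m * g / c) * t

t = np.linspace(0.0, 10.0, 100001)
heights = z(t)
i = np.argmax(heights)
print('maximum height ~ {:.2f} m at t ~ {:.3f} s'.format(heights[i], t[i]))
# -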
# ##### Definition.1.1. Optimization Problem
# The following problem of finding the optimal solution $\mathbf{x}$ is called an __optimization problem__.
#
# $$
# \mathbf{x}^* = \underset{\mathbf{x}}{\mathrm{argmin}} \,\ f(\mathbf{x})
# $$
# where $\mathbf{x}$ is the optimization variable and $f : \mathbb{R}^n \rightarrow \mathbb{R}$ is the objective function.<br>
# In this situation, $\mathbf{x}^*$ is called the optimal solution.
# It is very difficult to solve a general optimization problem. Many optimization methods involve some compromise, e.g., very long computation time or not always finding the solution. However, certain classes of problems can be solved efficiently and reliably, for example:
#
# - Least-squares problems
# - Linear programming problems
# - Convex optimization problems
# #### 1.1. Classification of Optimization Method
#
# There are several kinds of optimization methods, such as:
#
# - Search
# - Least-Squares
# - Linear Programming/Nonlinear Optimization
# - Convex Optimization
# - Gradient based Optimization
#
# The methods above are not divided into analytical and numerical methods; each can be approached either way.
# #### 1.2. Search based Algorithm
#
# Search-based algorithms are the simplest methods in optimization theory. They simply compute the objective function value for many candidate $x$ values and pick the minimum point. Examples of such algorithms are as follows.
#
# - Grid Search
# - Golden-Section Search
#
# These algorithms can be used effectively for single-variable functions, but for high-dimensional multivariate functions the amount of computation grows exponentially. Moreover, the solution is not guaranteed to be a global optimum.
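# Below is a minimal golden-section search sketch (an added illustration, assuming a unimodal objective on $[a, b]$; the test function is arbitrary):
# +
import math

def golden_section_search(f, a, b, tol=1e-6):
    """Minimize a unimodal function f on the interval [a, b]."""
    invphi = (math.sqrt(5) - 1) / 2  # 1 / golden ratio ~ 0.618
    c, d = b - invphi * (b - a), a + invphi * (b - a)
    while abs(b - a) > tol:
        if f(c) < f(d):      # minimum lies in [a, d]
            b, d = d, c
            c = b - invphi * (b - a)
        else:                # minimum lies in [c, b]
            a, c = c, d
            d = a + invphi * (b - a)
    return (a + b) / 2

print(golden_section_search(lambda x: (x - 2.0) ** 2, 0.0, 5.0))  # ~2.0
# -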
# #### 1.3. Least-Squares(Mature technology)
#
# Form like
#
# $$
# \mathbf{x}^* = \underset{\mathbf{x}}{\mathrm{argmin}} \,\ || A\mathbf{x} - \mathbf{b} ||_2^2
# $$
#
# are called least-squares problems. The optimal solution can be obtained analytically, e.g., $\mathbf{x}^* = (A^T A)^{-1} A^T \mathbf{b}$. There are reliable and efficient algorithms and software with roughly $n^2k \,\ (A \in \mathbb{R}^{k \times n})$ time complexity. Flexibility can be increased with a few standard techniques, such as including weights or adding regularization terms.
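# A minimal NumPy sketch of the closed-form solution (an added illustration; `A` and `b` are synthetic):
# +
import numpy as np

rng = np.random.RandomState(0)
A = rng.randn(50, 3)                       # k = 50 observations, n = 3 unknowns
x_true = np.array([1.0, -2.0, 0.5])
b = A @ x_true + 0.01 * rng.randn(50)

x_normal = np.linalg.solve(A.T @ A, A.T @ b)      # (A^T A)^{-1} A^T b via a linear solve
x_lstsq, *_ = np.linalg.lstsq(A, b, rcond=None)   # library least-squares routine for comparison
print(x_normal, x_lstsq)
# -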
# #### 1.4. Linear Programming(Mature technology)/Nonlinear Programming
#
# Form like
#
# $$
# \mathbf{x}^* = \underset{\mathbf{x}}{\mathrm{argmin}} \,\ \mathbf{c}^T \mathbf{x} \quad s.t. \quad \mathbf{a}_i^T \mathbf{x} \le \mathbf{b}_i, \,\ i = 1,\cdots,m
# $$
#
# are called linear programming. There is no analytical formula for the solution, but there are reliable and efficient algorithms and software with roughly $n^2m$ time complexity.<br><br>
#
# Form like
#
# $$
# \mathbf{x}^* = \underset{\mathbf{x}}{\mathrm{argmin}} \,\ f(\mathbf{x}) \quad s.t. \quad g_i(\mathbf{x}) \le 0, \,\ h_j(\mathbf{x}) = 0, \,\ i=1,\cdots,p, \,\ j=1,\cdots,q
# $$
# are called nonlinear programming. There are local optimization methods and global optimization methods.
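# A minimal linear-programming sketch using `scipy.optimize.linprog` (an added illustration; assumes SciPy is available and uses a toy problem, not one from this note):
# +
import numpy as np
from scipy.optimize import linprog

# maximize x1 + 2*x2  <=>  minimize -(x1 + 2*x2)
c = np.array([-1.0, -2.0])
A_ub = np.array([[1.0, 1.0],
                 [1.0, 3.0]])
b_ub = np.array([4.0, 6.0])
res = linprog(c, A_ub=A_ub, b_ub=b_ub, bounds=[(0, None), (0, None)])
print(res.x, -res.fun)  # optimal point and the maximized objective value
# -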
# #### 1.5. Convex Optimization
#
# Form like
#
# $$
# \mathbf{x}^* = \underset{\mathbf{x}}{\mathrm{argmin}} \,\ f(\mathbf{x}) \quad s.t. \quad g_i(\mathbf{x}) \le 0, \,\ h_j(\mathbf{x}) = 0, \,\ i=1,\cdots,p, \,\ j=1,\cdots,q, \quad \text{where} \,\ f \,\ \text{and} \,\ g_i \,\ \text{are convex and} \,\ h_j \,\ \text{are affine.}
# $$
#
# This problem includes least-squares problems and linear programming as special cases. $g_i$ is called an inequality constraint function and $h_j$ an equality constraint function. There is no analytical solution in general, but there are reliable and efficient algorithms with roughly $\text{max}\{n^3, n^2p, n^2q\}$ time complexity.
#
# Convex optimization problems are often difficult to recognize. However, there are many tricks for transforming problems into convex form, and many problems can be solved via convex optimization.
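# A minimal convex-optimization sketch with CVXPY (an added illustration; assumes the `cvxpy` package is installed and uses synthetic data):
# +
import numpy as np
import cvxpy as cp

rng = np.random.RandomState(0)
A = rng.randn(30, 5)
b = rng.randn(30)

x = cp.Variable(5)
objective = cp.Minimize(cp.sum_squares(A @ x - b))   # convex objective
constraints = [x >= 0, cp.sum(x) == 1]               # affine (hence convex) constraints
problem = cp.Problem(objective, constraints)
problem.solve()
print(problem.value, x.value)
# -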
# #### 1.6. Gradient based Optimization
# A gradient-based optimization method is an algorithm to solve problems of the form
# $$
# \mathbf{x}^* = \underset{\mathbf{x}}{\mathrm{argmin}} \,\ f(\mathbf{x})
# $$
# with the search directions defined by the gradient of the function at the current point.
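# A minimal gradient-descent sketch (an added illustration; the quadratic objective and step size are arbitrary choices):
# +
import numpy as np

def gradient_descent(grad, x0, lr=0.1, n_iters=200):
    x = np.asarray(x0, dtype=float)
    for _ in range(n_iters):
        x = x - lr * grad(x)   # step against the gradient direction
    return x

# f(x) = ||x - (1, 2)||^2 has gradient 2 * (x - (1, 2))
grad = lambda x: 2.0 * (x - np.array([1.0, 2.0]))
print(gradient_descent(grad, x0=[0.0, 0.0]))   # converges to approximately [1., 2.]
# -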
| _notebooks/math/optimization-theory/ch01-introduction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # Stochastic Segmentation Networks: Modelling Spatially Correlated Aleatoric Uncertainty
#
# Monteiro et al. 2020, accepted at NeurIPS 2020.
#
# 
# + slideshow={"slide_type": "skip"}
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import torch
import tqdm.notebook as tqdm
plt.rcParams.update({
'font.size': 8,
'text.usetex': True,
'text.latex.preamble': r'\usepackage{amsfonts}'
})
# + slideshow={"slide_type": "skip"}
MARGIN = 8
DIM = 32
rng = np.random.RandomState(7)
def build_input(batch_size, shape):
h, w = shape
img = np.ones((batch_size, h, w), dtype=np.float32) + 0.5*rng.randn(h, w)
line_pos = rng.randint(0, h - MARGIN - 1, size=batch_size)
for i, lp in enumerate(line_pos):
img[i, lp:lp+MARGIN] = 0.9*rng.randn(MARGIN, w)
img[i, :lp] = -1.0 + 0.5*rng.randn(lp, w)
return img.astype(np.float32), line_pos
def build_output(inp, line_pos):
batch_size = inp.shape[0]
offsets = line_pos
output = np.zeros_like(inp)
line_pos = rng.randint(0, MARGIN, size=batch_size) + offsets
for i, lp in enumerate(line_pos):
output[i, lp:] = 1.0
return output.astype(np.float32)
def produce_data(batch_size, dim):
while True:
xs, line_pos = build_input(batch_size, (dim, dim))
ys = build_output(xs, line_pos)
yield np.expand_dims(xs, 1), np.expand_dims(ys, 1)
def plot_im(xs, cmap, title=None):
fig, ax = plt.subplots(1, 1, figsize=(6, 6))
xs = xs.squeeze().T[:16]
ax.imshow(xs, cmap, interpolation="none")
ax.set_xticks([])
ax.set_yticks([])
ax.set_title(title, fontsize=40, pad=20)
def sample_bernoulli_im(im, n_samples=1):
im = np.concatenate([1 - im, im], axis=1)[0].transpose(1, 2, 0)
n_ims = np.tile(im[None, ...], [n_samples, 1, 1, 1])
sample = (n_ims.cumsum(-1) >= rng.uniform(size=n_ims.shape[:-1])[..., None]).argmax(-1)
return sample
# + slideshow={"slide_type": "skip"}
inp, lp = build_input(1, (DIM, DIM))
out = np.concatenate([build_output(inp, lp) for _ in range(100)], axis=0)
# + [markdown] slideshow={"slide_type": "slide"}
# ## Some data is messy
#
# For the same image $x$ we have multiple ambiguous labels $y_{1,2,\dots}$.
# + slideshow={"slide_type": "fragment"}
plot_im(inp[0], cmap="coolwarm", title=r"$$x$$")
# + slideshow={"slide_type": "subslide"}
plot_im(inp[0], cmap="coolwarm", title=r"$$x$$")
plot_im(out[0], cmap="gray", title=r"$$y_1$$")
# + slideshow={"slide_type": "subslide"}
plot_im(inp[0], cmap="coolwarm", title=r"$$x$$")
plot_im(out[4], cmap="gray", title=r"$$y_2$$")
# + slideshow={"slide_type": "subslide"}
plot_im(out.mean(0), cmap="gray", title=r"$$\mathbb{E}[y \mid x]$$")
# + [markdown] slideshow={"slide_type": "slide"}
# ## Probabilistic perspective
#
# #### Typical modelling approach:
#
# * Assume an underlying data distribution $p(y \mid x)$,
# * construct a parametric model: $p_\phi(y \mid x) = \int p(y \mid \phi) p(\phi \mid x)d\phi$,
# * optimise $\phi$ to minimise a divergence between $p$ and $p_\phi$.
# + [markdown] slideshow={"slide_type": "subslide"}
# #### Simplifications:
#
# * Model only an "output" variable $\eta$ (e.g. the logits before softmax);
# - the model becomes $p_\phi(y \mid x) = \int p(y \mid \eta) p_\phi(\eta \mid x)d\eta$
# * set $p_\phi(\eta \mid x)$ to be a delta function, i.e. $\eta = f_\phi(x)$ is deterministic
# - the integral reduces to the likelihood: $p_\phi(y \mid x) = p(y \mid f_\phi(x))$
# * set $p(y \mid f_\phi(x))$ to be a factorised categorical distribution
# * optimise KL-divergence between $p$ and $p_\phi$ using MC approximation:
#
# Using these assumptions, we can derive an easy to implement model:
#
#
# $$
# \begin{align}
# &\text{argmin}_\phi D_\text{KL}\left(p(y \mid x) \mid\mid p(y \mid f_\phi(x))\right) \\
# &= \text{argmin}_\phi \int p(y \mid x) \log{\frac{p(y \mid x)}{p(y \mid f_\phi(x)))}} \\
# &= \text{...steps left as an exercise to the reader...} \\
# &\approx \text{argmin}_\phi -\frac{1}{N}\sum_{n,i,c}y^n_{i,c}\log{\text{softmax}(f_\phi(x^n))_{i,c}},\quad (x^n, y^n) \sim p(x, y) \quad \text{for} \, n=1, 2, \dots, N
# \end{align}
# $$
#
# **For a good solution $\phi^*$ we will have $p(y_i \mid f_{\phi^*}(x)_i) = p(y_i \mid x)$**
# + [markdown] slideshow={"slide_type": "subslide"}
# 
# + slideshow={"slide_type": "subslide"}
class MLP(torch.nn.Module):
def __init__(self, inp_size, n_hidden):
super(MLP, self).__init__()
self.layers = [torch.nn.Linear(inp_size, n_hidden[0])]
for cur, nxt in zip(n_hidden, n_hidden[1:]):
self.layers.append(torch.nn.Linear(cur, nxt))
self.layers.append(torch.nn.Linear(n_hidden[-1], inp_size))
self.model = torch.nn.Sequential(*self.layers)
def forward(self, xs, as_probs=False):
orig_shape = xs.shape
out = xs.view(xs.shape[0], -1)
for layer in self.layers[:-1]:
out = torch.nn.functional.relu(layer(out))
out = self.layers[-1](out)
if as_probs:
out = torch.sigmoid(out)
return out.view(*orig_shape)
def log_likelihood(self, xs, ys):
params = self.forward(xs)
logprobs = torch.distributions.Bernoulli(logits=params).log_prob(ys).sum((1, 2, 3))
return logprobs
# + slideshow={"slide_type": "subslide"}
def run_step(xs_t, ys_t, opt, net):
opt.zero_grad()
loss = -torch.mean(net.log_likelihood(xs_t, ys_t))
loss.backward()
opt.step()
return loss.detach().numpy()
# + slideshow={"slide_type": "subslide"}
producer = produce_data(10, DIM)
net = MLP(inp_size=DIM*DIM, n_hidden=[64, 64, 64])
opt = torch.optim.Adam(net.parameters(), lr=3e-4)
N_ITERS = 1000
pbar = tqdm.tqdm(range(N_ITERS))
for i in pbar:
xs, ys = next(producer)
xs_t = torch.as_tensor(xs)
ys_t = torch.as_tensor(ys)
loss = run_step(xs_t, ys_t, opt, net)
pbar.set_description_str(f"loss: {loss:4.2f} ")
ys_pred = net(torch.as_tensor(inp[:, None, ...]), as_probs=True).detach().numpy()
plot_im(ys_pred[0], cmap="gray", title=r"$$p(y \mid f_{\phi^*}(x))$$")
# + [markdown] slideshow={"slide_type": "subslide"}
# * What we see is a distribution, not a sample from it.
# * We can obtain a useful sample by applying argmax/thresholding operation (mode extraction)
# + slideshow={"slide_type": "fragment"}
plot_im(ys_pred[0] > 0.5, cmap="gray", title=r"$$y = \mathrm{argmax}(p(y \mid f_{\phi^*}(x)))$$")
# + [markdown] slideshow={"slide_type": "subslide"}
# * Alternatively we can also sample from $p(y \mid f_{\phi^*}(x))$ at the cost of label inconsistencies.
# + slideshow={"slide_type": "fragment"}
fd_sample = sample_bernoulli_im(ys_pred)
plot_im(fd_sample, cmap="gray", title=r"$$y \sim p(y \mid f_{\phi^*}(x))$$")
# + [markdown] slideshow={"slide_type": "slide"}
# ## Modelling dependence in $y$
#
# Before:
# 
#
# Now:
# 
# + [markdown] slideshow={"slide_type": "subslide"}
# #### Complications:
#
# Discard two simplifying assumptions of the older model:
# 1. Logits are independent
# 2. $p(\eta \mid x)$ is a delta function
# + [markdown] slideshow={"slide_type": "fragment"}
# Both can be tackled by letting $p(\eta \mid x) = \mathcal{N}\left(\eta \mid \mu(x), \Sigma(x)\right)$
# + [markdown] slideshow={"slide_type": "fragment"}
# However, $\Sigma(x)$ will become intractable if one has to model all $\frac{1}{2}(H\times W\times C)^2$ correlations!
# + [markdown] slideshow={"slide_type": "fragment"}
# Introduce low-rank parameterisation for $\Sigma = PP^T + D$ where $P\in \mathbb{R}^{(H\times W\times C)\times R}$ and $R \ll H\times W\times C$.
#
# $R$ is a hyperparameter controlling the rank of the covariance factor matrix $P$ and $D$ is a diagonal matrix.
# + [markdown] slideshow={"slide_type": "subslide"}
# This modifies the objective slightly:
#
# * the model becomes $p_\phi(y \mid x) = \int p(y \mid \eta) \mathcal{N}\left(\eta \mid \mu(x),\Sigma(x)\right)d\eta \approx \frac{1}{M}\sum_i p(y \mid \eta_i),\quad \eta_i \sim \mathcal{N}\left(\mu(x),\Sigma(x)\right)$
# $$
# \begin{align}
# &\text{argmin}_\phi -\frac{1}{N}\sum_{n,i,c}y^n_{i,c}\log{\text{softmax}(\underset{\eta^n}{\underbrace{f_\phi(x^n)}})_{i,c}},\quad (x^n, y^n) \sim p(x, y)\\
# &\rightarrow \text{argmin}_\phi -\frac{1}{N}\sum_n \log{\left(\frac{1}{M}\sum_j\exp{\left(\sum_{i,c}y^n_{i,c}\log{\text{softmax}\left(\eta^n_j\right)_{i,c}}\right)}\right)},\quad \eta^n\sim \mathcal{N}\left(\mu(x^n), \Sigma(x^n)\right);\quad (x^n, y^n) \sim p(x, y)
# \end{align}
# $$
# + [markdown] slideshow={"slide_type": "subslide"}
# Lastly, we need to learn $\mu(x)$ and $\Sigma(x)$. Apply reparametrisation trick:
#
# $$
# \eta \sim \mathcal{N}\left(\mu(x), \Sigma(x)\right) \iff \mu(x) + \sqrt{\Sigma(x)}\epsilon,\quad \epsilon \sim \mathcal{N}\left(0, I\right)
# $$
# + slideshow={"slide_type": "subslide"}
class StochasticMLP(torch.nn.Module):
def __init__(self, inp_size, n_hidden, cov_factor=1):
super(StochasticMLP, self).__init__()
self.cov_factor = cov_factor
self.layers = [torch.nn.Linear(inp_size, n_hidden[0])]
for cur, nxt in zip(n_hidden, n_hidden[1:]):
self.layers.append(torch.nn.Linear(cur, nxt))
self.mu = torch.nn.Linear(n_hidden[-1], inp_size)
self.P = torch.nn.Linear(n_hidden[-1], inp_size*cov_factor)
self.D = torch.nn.Linear(n_hidden[-1], inp_size)
self.layers.extend([self.mu, self.P, self.D])
self.model = torch.nn.Sequential(*self.layers)
def forward(self, xs, as_probs=False, n_samples=1, use_mode=False):
out = xs.view(xs.shape[0], -1)
for layer in self.layers[:-3]:
out = torch.nn.functional.relu(layer(out))
if not use_mode:
cov_factor = self.P(out).view(xs.shape[0], -1, self.cov_factor)
cov_diag = torch.exp(self.D(out)) + 1e-3
out = torch.distributions.LowRankMultivariateNormal(
self.mu(out), cov_factor, cov_diag
).rsample((n_samples,)).permute(1, 0, 2)
else:
out = self.mu(out)
if as_probs:
out = torch.sigmoid(out)
return out.view(xs.shape[0], n_samples, *xs.shape[1:])
def log_likelihood(self, xs, ys, n_samples=8):
params = self.forward(xs, n_samples=n_samples).view(xs.shape[0], n_samples, -1)
logprobs = torch.distributions.Bernoulli(logits=params).log_prob(ys.view(xs.shape[0], 1, -1))
logprobs = torch.logsumexp(logprobs.sum(-1), dim=1) - np.log(n_samples)
return logprobs
# + slideshow={"slide_type": "skip"}
def compute_cov(self, xs):
out = xs.view(xs.shape[0], -1)
for layer in self.layers[:-3]:
out = torch.nn.functional.relu(layer(out))
cov_factor = self.P(out).view(xs.shape[0], -1, self.cov_factor)
cov_diag = torch.exp(self.D(out)) + 1e-3
return cov_factor, cov_diag
StochasticMLP.compute_cov = compute_cov
# + slideshow={"slide_type": "subslide"}
net = StochasticMLP(inp_size=DIM*DIM, n_hidden=[64, 64, 64], cov_factor=2)
opt = torch.optim.Adam(net.parameters(), lr=5e-4)
N_ITERS = 20000
pbar = tqdm.tqdm(range(N_ITERS))
for i in pbar:
xs, ys = next(producer)
xs_t = torch.as_tensor(xs)
ys_t = torch.as_tensor(ys)
loss = run_step(xs_t, ys_t, opt, net)
pbar.set_description_str(f"loss: {loss:4.2f} ")
# + [markdown] slideshow={"slide_type": "subslide"}
# Sampling from $\mathcal{N}\left(\eta \mid \mu(x),\Sigma(x)\right)$ results in different probabilities for the output. Applying argmax to each gives samples $y_{1,2,\dots}$:
# + slideshow={"slide_type": "fragment"}
ys_pred = net(torch.as_tensor(inp[:, None, ...]), as_probs=True, n_samples=200).detach().numpy()[0]
plot_im(ys_pred[0] > 0.5, cmap="gray", title=r"$$y_1 = \mathrm{argmax}(p(y \mid \eta_1))$$")
plot_im(ys_pred[1] > 0.5, cmap="gray", title=r"$$y_2 = \mathrm{argmax}(p(y \mid \eta_2))$$")
# -
# Verify that the average of all samples resembles the average of all ground-truth labels.
# + slideshow={"slide_type": "subslide"}
plot_im(ys_pred.mean(0), cmap="gray", title=r"$$\mathbb{E}[\mathrm{softmax}(\eta)\mid x]$$")
# + slideshow={"slide_type": "subslide"}
mode_pred = net(torch.as_tensor(inp[:, None, ...]), as_probs=True, use_mode=True).detach().numpy()
plot_im(mode_pred > 0.5, cmap="gray", title=r"$$y = \mathrm{argmax}(p(y \mid \mu(x)))$$")
# + [markdown] slideshow={"slide_type": "slide"}
# ## Extra: explore the covariance
# + slideshow={"slide_type": "fragment"}
cf, cd = net.compute_cov(torch.as_tensor(inp[:, None, ...]))
# + slideshow={"slide_type": "fragment"}
plot_im(cf.view(1, 32, 32, 2).detach().numpy().sum(-1), cmap="gray")
# + slideshow={"slide_type": "subslide"}
cf = cf.detach().numpy()[0]
cd = cd.detach().numpy()[0]
cov_mat = cf @ cf.T + np.diag(cd)
# + slideshow={"slide_type": "fragment"}
plt.imshow(cov_mat, cmap="gray")
plt.gca().set_xticks([])
plt.gca().set_yticks([])
| stochastic_segmentation_networks.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5"
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
import os
print(os.listdir("../input"))
# Any results you write to the current directory are saved as output.
# + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a"
import os
import time
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from tqdm import tqdm
import math
from sklearn.model_selection import train_test_split
from sklearn import metrics
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Dense, Input, CuDNNLSTM, Embedding, Dropout, Activation, CuDNNGRU, Conv1D
from keras.layers import Bidirectional, GlobalMaxPool1D, GlobalMaxPooling1D, GlobalAveragePooling1D
from keras.layers import Input, Embedding, Dense, Conv2D, MaxPool2D, concatenate, AvgPool2D
from keras.layers import Reshape, Flatten, Concatenate, Dropout, SpatialDropout1D
from keras.optimizers import Adam
from keras.models import Model
from keras import backend as K
from keras.engine.topology import Layer
from keras import initializers, regularizers, constraints, optimizers, layers
# + _uuid="af438c8f72480c46532509408d084bd1abb7db54"
train_df = pd.read_csv("../input/train.csv")
test_df = pd.read_csv("../input/test.csv")
# + _uuid="aacea99c59a3ffc259fc1aa216566a4529242cf2"
# train_df, val_df = train_test_split(train_df, test_size=0.1, random_state=2018)
# + _uuid="569e2ae8aefab137c4010af679b1a3d49e17b3e6"
train_df = train_df.sample(frac=1)
# val_df = val_df.sample(frac=1)
# + _uuid="af8a666fdf9ed74809816278f5657f25d862c502"
train_df['l'] = train_df['ciphertext'].apply(lambda t: len(str(t)))
# + _uuid="203c0a05531454dbb221ea7152dbc358a08081d7"
train_df.l.describe()
# + _uuid="2c8b670ce344ecbc57c28dc04940eb5c5d877a08"
chars = set([])
for a in train_df.ciphertext.values:
for c in str(a):
chars.add(c)
# + _uuid="c3538923051fafde208d05b837abca192143984b"
len(chars)
# + _uuid="fedbd6af680f1dd5252ba549edd22f308352c8d5"
tokenizer = Tokenizer(num_words=110)
tokenizer.fit_on_texts(list(map(lambda w: list(str(w)), train_df.ciphertext.values)))
train_X = tokenizer.texts_to_sequences(list(map(lambda w: list(str(w)), train_df.ciphertext.values)))
test_X = tokenizer.texts_to_sequences(list(map(lambda w: list(str(w)), test_df.ciphertext.values)))
# + _uuid="8cbd897da5f6c14e663b8e262425554eeac09ca1"
train_X = pad_sequences(train_X, maxlen=300)
test_X = pad_sequences(test_X, maxlen=300)
# + _uuid="2a9a12ffc73a183fe36e3a3b7dec2725297ac14b"
from keras.utils import to_categorical
train_y = to_categorical(train_df['target'].values)
# + _uuid="e15ed313aa3f2246aecbcee862fea0c582c9983a"
# https://www.kaggle.com/hireme/fun-api-keras-f1-metric-cyclical-learning-rate/code
from keras.callbacks import Callback
class CyclicLR(Callback):
"""This callback implements a cyclical learning rate policy (CLR).
The method cycles the learning rate between two boundaries with
some constant frequency, as detailed in this paper (https://arxiv.org/abs/1506.01186).
The amplitude of the cycle can be scaled on a per-iteration or
per-cycle basis.
This class has three built-in policies, as put forth in the paper.
"triangular":
A basic triangular cycle w/ no amplitude scaling.
"triangular2":
A basic triangular cycle that scales initial amplitude by half each cycle.
"exp_range":
A cycle that scales initial amplitude by gamma**(cycle iterations) at each
cycle iteration.
For more detail, please see paper.
# Example
```python
clr = CyclicLR(base_lr=0.001, max_lr=0.006,
step_size=2000., mode='triangular')
model.fit(X_train, Y_train, callbacks=[clr])
```
Class also supports custom scaling functions:
```python
clr_fn = lambda x: 0.5*(1+np.sin(x*np.pi/2.))
clr = CyclicLR(base_lr=0.001, max_lr=0.006,
step_size=2000., scale_fn=clr_fn,
scale_mode='cycle')
model.fit(X_train, Y_train, callbacks=[clr])
```
# Arguments
base_lr: initial learning rate which is the
lower boundary in the cycle.
max_lr: upper boundary in the cycle. Functionally,
it defines the cycle amplitude (max_lr - base_lr).
The lr at any cycle is the sum of base_lr
and some scaling of the amplitude; therefore
max_lr may not actually be reached depending on
scaling function.
step_size: number of training iterations per
half cycle. Authors suggest setting step_size
2-8 x training iterations in epoch.
mode: one of {triangular, triangular2, exp_range}.
Default 'triangular'.
Values correspond to policies detailed above.
If scale_fn is not None, this argument is ignored.
gamma: constant in 'exp_range' scaling function:
gamma**(cycle iterations)
scale_fn: Custom scaling policy defined by a single
argument lambda function, where
0 <= scale_fn(x) <= 1 for all x >= 0.
            mode parameter is ignored
scale_mode: {'cycle', 'iterations'}.
Defines whether scale_fn is evaluated on
cycle number or cycle iterations (training
iterations since start of cycle). Default is 'cycle'.
"""
def __init__(self, base_lr=0.001, max_lr=0.006, step_size=2000., mode='triangular',
gamma=1., scale_fn=None, scale_mode='cycle'):
super(CyclicLR, self).__init__()
self.base_lr = base_lr
self.max_lr = max_lr
self.step_size = step_size
self.mode = mode
self.gamma = gamma
if scale_fn == None:
if self.mode == 'triangular':
self.scale_fn = lambda x: 1.
self.scale_mode = 'cycle'
elif self.mode == 'triangular2':
self.scale_fn = lambda x: 1/(2.**(x-1))
self.scale_mode = 'cycle'
elif self.mode == 'exp_range':
self.scale_fn = lambda x: gamma**(x)
self.scale_mode = 'iterations'
else:
self.scale_fn = scale_fn
self.scale_mode = scale_mode
self.clr_iterations = 0.
self.trn_iterations = 0.
self.history = {}
self._reset()
def _reset(self, new_base_lr=None, new_max_lr=None,
new_step_size=None):
"""Resets cycle iterations.
Optional boundary/step size adjustment.
"""
if new_base_lr != None:
self.base_lr = new_base_lr
if new_max_lr != None:
self.max_lr = new_max_lr
if new_step_size != None:
self.step_size = new_step_size
self.clr_iterations = 0.
def clr(self):
cycle = np.floor(1+self.clr_iterations/(2*self.step_size))
x = np.abs(self.clr_iterations/self.step_size - 2*cycle + 1)
if self.scale_mode == 'cycle':
return self.base_lr + (self.max_lr-self.base_lr)*np.maximum(0, (1-x))*self.scale_fn(cycle)
else:
return self.base_lr + (self.max_lr-self.base_lr)*np.maximum(0, (1-x))*self.scale_fn(self.clr_iterations)
def on_train_begin(self, logs={}):
logs = logs or {}
if self.clr_iterations == 0:
K.set_value(self.model.optimizer.lr, self.base_lr)
else:
K.set_value(self.model.optimizer.lr, self.clr())
def on_batch_end(self, epoch, logs=None):
logs = logs or {}
self.trn_iterations += 1
self.clr_iterations += 1
self.history.setdefault('lr', []).append(K.get_value(self.model.optimizer.lr))
self.history.setdefault('iterations', []).append(self.trn_iterations)
for k, v in logs.items():
self.history.setdefault(k, []).append(v)
K.set_value(self.model.optimizer.lr, self.clr())
def f1(y_true, y_pred):
'''
metric from here
https://stackoverflow.com/questions/43547402/how-to-calculate-f1-macro-in-keras
'''
def recall(y_true, y_pred):
"""Recall metric.
Only computes a batch-wise average of recall.
Computes the recall, a metric for multi-label classification of
how many relevant items are selected.
"""
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
recall = true_positives / (possible_positives + K.epsilon())
return recall
def precision(y_true, y_pred):
"""Precision metric.
Only computes a batch-wise average of precision.
Computes the precision, a metric for multi-label classification of
how many selected items are relevant.
"""
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
precision = true_positives / (predicted_positives + K.epsilon())
return precision
precision = precision(y_true, y_pred)
recall = recall(y_true, y_pred)
return 2*((precision*recall)/(precision+recall+K.epsilon()))
# + _uuid="250e647d3bcdfcb1665925148a0dc3e26630b624"
maxlen = 300
max_features = 110
embed_dim = 20
inp = Input(shape=(maxlen,))
x = Embedding(max_features, embed_dim, input_length=train_X.shape[1])(inp)
x = Bidirectional(CuDNNGRU(64, return_sequences=True))(x)
avg_pool = GlobalAveragePooling1D()(x)
max_pool = GlobalMaxPooling1D()(x)
conc = concatenate([avg_pool, max_pool])
conc = Dense(64, activation="relu")(conc)
# conc = Dropout(0.4)(conc)
outp = Dense(20, activation="softmax")(conc)
model = Model(inputs=inp, outputs=outp)
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=[f1])
# + _uuid="2f020443d1a462f9516809c34dc3a6853103d8a3"
model.summary()
# + _uuid="773323820981848aefa7ff40dea495970c00a7e7"
history = model.fit(train_X, train_y, batch_size=32, epochs=20, validation_split=0.1)
# + _uuid="624b884a26da076d0c5e1a2e6ac93f9eea36ec14"
import matplotlib.pyplot as plt
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
plt.plot(history.history['f1'])
plt.plot(history.history['val_f1'])
plt.title('model f1')
plt.ylabel('f1')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
# + _uuid="7e5f87b2a9a52ff082feb2d07273bbd7e2216b80"
y_hat_pred = model.predict(test_X)
def to_v(v):
return list(map(lambda x: np.argmax(x), v))
# + _uuid="dc6f6d25805cb2df5761781f88729df966ff45ac"
sub_df = pd.DataFrame(list(zip(test_df.Id.values, to_v(y_hat_pred))), columns=['Id', 'Predicted'])
# + _uuid="771ec5123fed2a7a2a4aa60bca55080ef8cba013"
sub_df
# + _uuid="348621e9a41f15ca64ff3954edb20bb1e9a5c553"
sub_df.to_csv('submission.csv', index=False)
| extension/examples/8570777.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import xnat
from datetime import timedelta
# Note: this cell uses get_scan_dates_project, get_clinical_dates and plot_dates,
# which are defined in the cells below, so run those cells first.
subject = 'EGD-0125'
csv_file = 'clinical_data.csv'
xnat_path = 'https://bigr-rad-xnat.erasmusmc.nl'
user = ''
project = 'EGD'
## Difference in dates between sources, due to anonymization
num_days_anon = -1
res = pd.read_csv(csv_file, sep=';')
res = res.set_index('Anon_ID')
dtime = timedelta(days=num_days_anon)
with xnat.connect(xnat_path, user=user) as connection:
xnat_project = connection.projects[project]
for subject in res.index.values:
dates = get_scan_dates_project(subject, xnat_project)
dates = [d - dtime for d in dates]
clin_dates, clin_labels = get_clinical_dates(subject)
print(subject)
filename = 'timelines_egd/{}.png'.format(subject)
plot_dates(clin_dates, clin_labels, dates, filename)
# +
import xnat
def get_scan_dates(subject):
dates = []
with xnat.connect('https://bigr-rad-xnat.erasmusmc.nl', user='KvanGarderen') as connection:
xnat_project = connection.projects['EGD']
xnat_subject = xnat_project.subjects[subject]
for exp in xnat_subject.experiments:
date = xnat_subject.experiments[exp].date
dates.append(date)
return dates
def get_scan_dates_project(subject, project):
    xnat_subject = project.subjects[subject]
dates = []
for exp in xnat_subject.experiments:
date = xnat_subject.experiments[exp].date
dates.append(date)
return dates
# +
import matplotlib.dates as mdates
def plot_dates(dates, labels, scan_dates, filename):
fig, ax = plt.subplots(figsize=(10, 8), constrained_layout=True)
ax.set(title=subject)
levels = np.tile([-5, 5, -3, 3, -1, 1, -7, 7, -9, 9],
int(np.ceil(len(dates)/10)))[:len(dates)]
markerline, stemline, baseline = ax.stem(dates, levels,
linefmt="C3-", basefmt="k-",
use_line_collection=True)
ax.stem(scan_dates, [0]*len(scan_dates),
linefmt="C3-", basefmt="k-",
use_line_collection=True)
plt.setp(markerline, mec="k", mfc="w", zorder=3)
# annotate lines
vert = np.array(['top', 'bottom'])[(levels > 0).astype(int)]
for d, la, l, va in zip(dates, labels, levels, vert):
ax.annotate(la, xy=(d, l), xytext=(-3, np.sign(l)*3),
textcoords="offset points", va=va, ha="right")
ax.annotate(d.strftime('%d-%m-%Y'), xy=(d, l), xytext=(-3, np.sign(l)*15),
textcoords="offset points", va=va, ha="right")
# remove y axis and spines
ax.get_yaxis().set_visible(False)
for spine in ["left", "top", "right"]:
ax.spines[spine].set_visible(False)
    # format x-axis with yearly tick intervals
ax.get_xaxis().set_major_locator(mdates.YearLocator())
ax.get_xaxis().set_major_formatter(mdates.DateFormatter("%Y"))
plt.setp(ax.get_xticklabels(), rotation=30, ha="right")
ax.margins(y=0.5, x=0.2)
plt.savefig(filename)
plt.show()
# +
import pandas as pd
from datetime import datetime as dt
import matplotlib.dates as mdates
def get_clinical_dates(subject):
res = pd.read_csv(csv_file, sep=';')
res = res.set_index('Anon_ID')
labels = ['Date_first_symptom',
'Date_diagn_MRI',
'Death_Date',
'Date_progression',
'Date_pre-ok_scan',
'Date_post-ok_scan',
'Date OK1', 'Date OK2', 'Date OK3', 'Date Biopsy',
'RT1_start', 'RT1_end', 'RT2_start', 'RT2_end',
'Chemotherapy_start', 'Chemotherapy_end', 'Chemotherapy2_start', 'Chemotherapy2_end',
'Chemotherapy3_start', 'Chemotherapy3_end']
dates = []
used_labels = []
for l in labels:
date = res.loc[subject, l]
try:
parsed_date = dt.strptime(date, '%m/%d/%Y')
dates.append(parsed_date.date())
used_labels.append(l)
except:
pass
try:
parsed_date = dt.strptime(date, '%d-%m-%Y')
dates.append(parsed_date.date())
used_labels.append(l)
except:
pass
return dates, used_labels
# -
| clinical/timelines.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python2
# ---
# # Project 1
#
# ## Step 1: Open the `sat_scores.csv` file. Investigate the data, and answer the questions below.
#
# ##### 1. What does the data describe?
# The SAT score data describes scores on the SAT, a standardized college entrance exam, for the 50 states and the District of Columbia in the 2001 test-taking year, as provided by the College Board. The data also contains participation in the SAT exam for each state, presumably expressed as a percentage. Finally, the last row of the data aggregates all 50 states plus DC, giving the nationwide participation rate and SAT scores with Verbal and Math stated separately.
# ##### 2. Does the data look complete? Are there any obvious issues with the observations?
# The data contains a complete listing of SAT scores for all states plus the District of Columbia. The last row contains the nationwide SAT scores and participation rate, which should be excluded from the 50-states-plus-DC view of the data. Another issue is that the scores are reported only as separate Verbal and Math components, so the two are summed to obtain a total SAT score.
# ##### 3. Create a data dictionary for the dataset.
# SAT Scores in 2001
# Description
# The SAT score data describes scores on the SAT, a standardized college entrance exam, for the 50 states and the District of Columbia in the 2001 test-taking year, as provided by the College Board. The data also contains participation in the SAT exam, presumably in percentages. Finally, the last row of the data aggregates all 50 states plus DC, giving participation rates and SAT scores with Verbal and Math stated separately.
#
# Methodology
# Format a pandas DataFrame from a comma-delimited file containing 51 observations on the following four variables; Total SAT is derived from Verbal and Math.
#
# State
# The 50 states of the U.S., plus the District of Columbia
#
# Rate
# Test participation rate; denoted in percentage by State
#
# Verbal
# Result of Verbal component of the SAT exam; section graded on a scale of 200–800
#
# Math
# Result of Math component of the SAT exam; section graded on a scale of 200–800
#
# Total SAT
# Calculated from source data. Combines the Math and Verbal components of the exam issued in 2001.
# ## Step 2: Load the data.
# ##### 4. Load the data into a list of lists
import numpy as np
import scipy.stats as stats
import csv
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
satscores = '/Users/DES/DSI-NYC-5/Projects/project-1-sat-scores/assets/sat_scores.csv'
rows = []
with open(satscores, 'r') as f:
reader = csv.reader(f)
for row in reader:
rows.append(row)
# ##### 5. Print the data
print rows
# ##### 6. Extract a list of the labels from the data, and remove them from the data.
#Header is list of labels from data
header = rows[0]
header
#Data minus Header list
data = rows[1:]
data[0:10]
# ##### 7. Create a list of State names extracted from the data. (Hint: use the list of labels to index on the State column)
# +
#Exclusive List of States
list_states =[]
for t in data:
list_states.append(t[0])
# -
list_states[0:10]
# +
#List of Lists of Rate, SAT Scores
scores_rate = []
for t in data:
scores_rate.append(t[1:])
# -
scores_rate[0:10]
# ##### 8. Print the types of each column
type(scores_rate)
# ##### 9. Do any types need to be reassigned? If so, go ahead and do it.
scores_rate[0:10]
# +
numerical_list = []
index = []
for x in scores_rate:
index = list(map(int, x))
numerical_list.append(index)
# -
print numerical_list[0:10]
type(numerical_list)
# ##### 10. Create a dictionary for each column mapping the State to its respective value for that column.
header
header_m_s = header[1:]
header_m_s
numerical_list[0:10]
sat_data = {}
for name in header_m_s:
sat_data[name] = [x[header_m_s.index(name)] for x in numerical_list]
sat_data.values()
type(sat_data)
sat_data.keys()
type(list_states)
# ##### 11. Create a dictionary with the values for each of the numeric columns
sat_data['Math'][0:10]
for i, j in sat_data.items():
j = [float(x) for x in j]
sat_data[i] = j
sat_data['Math'][0:10]
sat_data.keys()
temp = []
dictlist = []
#convert dictionary to list
for key, value in sat_data.iteritems():
temp = [key,value]
dictlist.append(temp)
dictlist
# ## Step 3: Describe the data
# ##### 12. Print the min and max of each column
import pandas as pd
satscores = pd.read_csv('/Users/DES/DSI-NYC-5/Projects/project-1-sat-scores/assets/sat_scores.csv')
satscores.head()
#Build the working DataFrame from the loaded CSV and add an aggregate SAT column
sat = satscores[['State','Rate','Verbal','Math']].copy()
sat['Total_SAT'] = sat['Verbal'] + sat['Math'] #Included an aggregate version of SAT
#Exclude the 'All' (nationwide) row from the state-level data
sats = sat.iloc[:51]
sat[0:10]
print "Participation Rate Min:",sats["Rate"].min()
print "Participation Rate Max:",sats["Rate"].max()
print "SAT Math Min:",sats["Math"].min()
print "SAT Math Max:",sats["Math"].max()
print "SAT Verbal Min:",sat["Verbal"].min()
print "SAT Verbal Max:",sats["Verbal"].max()
print "Total SAT Min:",sat["Total_SAT"].min()
print "Total SAT Max:",sats["Total_SAT"].max()
def summary_stats(col, data):
print 'COLUMN: ' + col
print 'mean: ' + str(np.mean(data))
print 'median: ' + str(np.median(data))
print 'mode: ' + str(stats.mode([round(d) for d in data]))
print 'variance: ' + str(np.var(data))
print 'standard deviation: ' + str(np.std(data))
summary_stats('Rate', sats['Rate'])
summary_stats('Math', sats['Math'])
summary_stats('Verbal', sats['Verbal'])
summary_stats('Total_SAT', sats['Total_SAT'])
# ##### 13. Write a function using only list comprehensions, no loops, to compute Standard Deviation. Print the Standard Deviation of each numeric column.
def stddev(data):
"""returns the standard deviation of lst"""
m = np.mean(data)
variance = sum([(i - m)**2 for i in data]) / len(data)
return np.sqrt(variance)
stddev(sats['Rate'])
stddev(sats['Math'])
stddev(sats['Verbal'])
stddev(sats['Total_SAT'])
# +
#Hypothesis testing where
# H0 (null hypothesis): There is no difference between Math and Verbal SAT Scores
# HA (alternative hypothesis): There is a difference between Math and Verbal SAT Scores
a_mean = sats['Math'].mean()
b_mean = sats['Verbal'].mean()
a_var = sats['Math'].var()
b_var = sats['Verbal'].var()
a_n = len(sats['Math'])
b_n = len(sats['Verbal'])
numerator = a_mean - b_mean
denominator = np.sqrt((a_var / a_n) + (b_var / b_n))
z = numerator / denominator
z
# -
p_val = 1 - stats.norm.cdf(z)
p_val
alpha = .01
print p_val, alpha, p_val > alpha
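# As an added cross-check (not part of the original analysis): since each state contributes
# one Math and one Verbal score, scipy's paired t-test compares the two columns directly.
stats.ttest_rel(sats['Math'], sats['Verbal'])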
# ## Step 4: Visualize the data
# ##### 14. Using MatPlotLib and PyPlot, plot the distribution of the Rate using histograms.
# +
ax = sns.distplot(sats['Rate'], bins=10)
sns.distplot(sats['Rate'], color='darkred', bins=10, ax=ax)
ax = plt.axes()
ax.set_title('Distribution SAT Participation Rate')
plt.show()
# -
# ##### 15. Plot the Math distribution
# +
ax = sns.distplot(sats['Math'], bins=10)
sns.distplot(sats['Math'], color='yellow', bins=10, ax=ax)
ax = plt.axes()
ax.set_title('Distribution of Math SAT Scores')
plt.show()
# -
# ##### 16. Plot the Verbal distribution
# +
ax = sns.distplot(sats['Verbal'], bins=10)
sns.distplot(sats['Verbal'], color='darkblue', bins=10, ax=ax)
ax = plt.axes()
ax.set_title('Distribution of Verbal SAT Scores')
plt.show()
# +
ax = sns.distplot(sats['Total_SAT'], bins=10)
sns.distplot(sats['Total_SAT'], color='darkblue', bins=10, ax=ax)
ax = plt.axes()
ax.set_title('Distribution of Total SAT Scores')
plt.show()
# -
# ##### 17. What is the typical assumption for data distribution?
# A typical assumption about a data distribution is that it is normal, i.e. the data are bell-curve shaped.
# ##### 18. Does that distribution hold true for our data?
# No, these numeric fields do not have a normal distribution. The SAT Verbal component is negatively skewed, whereas both Participation Rate and SAT Math have right-skewed (positively skewed) distributions.
#
# ##### 19. Plot some scatterplots. **BONUS**: Use a PyPlot `figure` to present multiple plots at once.
import seaborn as sns
sns.pairplot(sats)
plt.show()
# ##### 20. Are there any interesting relationships to note?
# There seems to be a proportional relationship between SAT Math, SAT Verbal, and Total scores overall. That is, as Verbal scores increase, Math scores increase roughly in proportion. Other variable relationships, however, appear to have an inconclusive linear relationship. When considering a linear regression model to describe Math, Verbal, or Total scores, one would need to address the outliers that the scatter plots above display for each respective score.
# ##### 21. Create box plots for each variable.
# +
data = [sats['Math'], sats['Verbal']]
fig, ax1 = plt.subplots(figsize=(12, 8))
plt.boxplot(data)
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax1.set_axisbelow(True)
ax1.set_title('Box Plot of SAT Math / Verbal Scores', y =1.03, fontsize = 24)
ax1.set_xlabel('Features', fontsize = 18)
ax1.set_ylabel('SAT Scores', fontsize = 18)
# Set the axes ranges and axes labels
numBoxes = 2
ax1.set_xlim(0.5, numBoxes + 0.5)
ax1.set_ylim(400, 625)
xtickNames = plt.setp(ax1, xticklabels=['SAT Math Score', 'SAT Verbal Score'])
plt.setp(xtickNames, fontsize=14)
plt.axhline(625, color = 'darkgreen')
plt.axvline(1, color = 'darkgreen', linewidth = 1, alpha = 0.4)
plt.show()
# +
data = [sats['Total_SAT']]
fig, ax1 = plt.subplots(figsize=(12, 8))
plt.boxplot(data)
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax1.set_axisbelow(True)
ax1.set_title('Box Plot of Total SAT Scores', y =1.03, fontsize = 24)
ax1.set_xlabel('Feature', fontsize = 18)
ax1.set_ylabel('Combined SAT Scores', fontsize = 18)
# Set the axes ranges and axes labels
numBoxes = 1
ax1.set_xlim(0.5, numBoxes + 0.5)
ax1.set_ylim(900, 1300)
xtickNames = plt.setp(ax1, xticklabels=['Total SAT Scores'])
plt.setp(xtickNames, fontsize=14)
plt.axhline(1300, color = 'darkgreen')
plt.axvline(1, color = 'darkgreen', linewidth = 1, alpha = 0.4)
plt.show()
# +
data = [sats['Rate']]
fig, ax1 = plt.subplots(figsize=(12, 8))
plt.boxplot(data)
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax1.set_axisbelow(True)
ax1.set_title('Box Plot of Participation Rate in SAT Examination', y =1.03, fontsize = 24)
ax1.set_xlabel('Feature', fontsize = 18)
ax1.set_ylabel('Participation Rate', fontsize = 18)
# Set the axes ranges and axes labels
numBoxes = 1
ax1.set_xlim(0.5, numBoxes + 0.5)
ax1.set_ylim(0, 100)
xtickNames = plt.setp(ax1, xticklabels=['Participation Rate'])
plt.setp(xtickNames, fontsize=14)
plt.axhline(100, color = 'darkgreen')
plt.axvline(1, color = 'darkgreen', linewidth = 1, alpha = 0.4)
plt.show()
# -
# ##### BONUS: Using Tableau, create a heat map for each variable using a map of the US.
sat.to_csv("/Users/DES/DSI-NYC-5/Projects/project-1-sat-scores/assets/SAT_Scores_DC.csv", sep='\t')
| _posts/project-1-sat-scores/project_1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Part 1
import pandas as pd
households = pd.read_csv("lcfs_2019_dvhh_ukanon.tab", delimiter="\t")
persons = pd.read_csv("lcfs_2019_dvper_ukanon201920.tab", delimiter="\t")
spending = pd.read_csv("lcfs_2019_dv_set89_ukanon.tab", delimiter="\t")
#spending is weekly
#Codes for larger categories
categories = {"Food and non-alcoholic beverages": "1.",
"Alcoholic beverages, tobacco and narcotics": "2.",
"Clothing and footwear": "3.",
"Housing, water, electricity, gas and other fuels": "4.",
"Furnishings, household equipment and routine maintenance of house": "5.",
"Health": "6.",
"Transport": "7.",
"Communication": "8.",
"Recreation and culture": "9.",
"Education": "10",
"Restaurants and hotels": "11",
"Miscellaneous goods and services": "12",
#"Non-consumption expenditure": "20"
}
# +
spending["COI_PLUS"] = spending["COI_PLUS"].transform(lambda x: x[:2])
#if code is 20 change to 12 to include non-consumption expenditure in misc goods & services
spending["COI_PLUS"].replace({"20": "12"}, inplace=True)
# -
#Total expenditure for each category
y = spending.groupby(["COI_PLUS"]).pdamount.sum()
total_expenditure = {}
for n in categories.keys():
value = y.loc[categories[n]] * 52 #because weekly
total_expenditure[n] = value
#Total emissions grouped by NCFS categories in Ktonnes
emissions = pd.read_csv("emissions.csv")
#NCFS categories included in each LCFS larger category, excluding non-consumption expenditure
categories_emissions = {'Food and non-alcoholic beverages': ['Food', 'Non-alcoholic beverages'],
'Alcoholic beverages, tobacco and narcotics': ['Alcoholic beverages', 'Tobacco'],
'Clothing and footwear': ['Clothing', 'Footwear'],
'Housing, water, electricity, gas and other fuels': [
#'Actual rentals for households',
'Imputed rentals for households',
'Electricity, gas and other fuels'],
'Furnishings, household equipment and routine maintenance of house': ['Furniture, furnishings, carpets etc', 'Household textiles',
'Household appliances', 'Glassware, tableware and household utensils',
'Tools and equipment for house and garden',
'Goods and services for household maintenance'],
'Health': ['Medical products, appliances and equipment', 'Hospital services'],
'Transport': ['Purchase of vehicles', 'Operation of personal transport equipment',
'Transport services'],
'Communication': ['Postal services',
'Telephone and telefax equipment', 'Telephone and telefax services'],
'Recreation and culture': ['Audio-visual, photo and info processing equipment',
'Other major durables for recreation and culture',
'Other recreational equipment etc',
'Recreational and cultural services','Newspapers, books and stationery'],
'Education': ['Education'],
'Restaurants and hotels': ['Restaurants and hotels'],
'Miscellaneous goods and services': ['Miscellaneous goods and services']}
# +
#Total emissions for each LCFS larger category
emissions_per_cat = {}
for n in categories_emissions.keys():
items = categories_emissions[n]
total = 0
for m in items:
total = total + float(emissions[m][0])
#since units is in ktonnes
emissions_per_cat[n] = total * 10000
#Emissions per pound spent (tonnes per £)
emissions_per_pound = {}
for category in categories_emissions.keys():
    emissions_per_pound[category] = emissions_per_cat[category]/total_expenditure[category]
emissions_per_pound
# +
#Emissions for a household
def get_category_total(household, category):
cat_code = categories.get(category)
household_rows = spending.loc[spending["case"] == household]
household_rows["COI_PLUS"] = household_rows["COI_PLUS"].transform(lambda x: x[:2])
#if code is 20 change to 12; include non-consumption in misc goods & services
household_rows["COI_PLUS"].replace({"20": "12"}, inplace=True)
relevant_rows = household_rows.loc[household_rows["COI_PLUS"] == cat_code]
#multiply by 52 to get annual
expenditure = relevant_rows["pdamount"].sum() * 52
return expenditure * emissions_per_pound[category]
def get_household_total(household):
emissions = 0
for n in categories:
category_emissions = (get_category_total(household, n))
emissions = emissions + category_emissions
return emissions
# -
#This is a simpler version of the above, but gets an error with 4th category
x = spending.groupby(["case", "COI_PLUS"]).pdamount.sum()
def get_h_emissions(household):
emits = 0
for n in categories:
category_expenditure = x[household].loc[categories[n]] * 52 #because weekly
print(category_expenditure)
emit = category_expenditure * emissions_per_pound[n]
emits = emits + emit
return emits
#Example
get_household_total(780)
#Add emissions column to households dataframe
households["Emissions"] = pd.Series(get_household_total(n) for n in households["case"])
# # Part 2
#LCFS variables using field names
def get_LCFS_variables():
from openfisca_uk.entities import Person, BenUnit, Household
#Household size
class A049(Variable):
value_type = float
entity = Household
definition_period = ETERNITY
#Gross household income (weekly)
class P352p(Variable):
value_type = float
entity = Household
definition_period = YEAR
#Equivalized income (McClements Scale)
class EqIncDMp(Variable):
value_type = float
entity = Household
definition_period = YEAR
#Equivalised income (OECD Scale)
class EqIncDOp(Variable):
value_type = float
entity = Household
definition_period = YEAR
#Location (gov office region)
class Gorx(Variable):
value_type = float
entity = Household
definition_period = ETERNITY
#Rent
class B010(Variable):
value_type = float
entity = Household
definition_period = ETERNITY
def get_input_variables():
#Household size
class household_size(Variable):
value_type = float
entity = Household
label = "Number of people in household"
definition_period = ETERNITY
def formula(household, period, parameters):
return household("A049", period) * WEEKS_IN_YEAR
#Gross household income
class gross_income(Variable):
value_type = float
entity = Household
label = "Gross household income"
definition_period = YEAR
def formula(household, period, parameters):
return household("P352p", period) * WEEKS_IN_YEAR
#Equivalized income (McClements Scale)
class equivalized_income(Variable):
value_type = float
entity = Household
definition_period = YEAR
def formula(household, period, parameters):
return household("EqIncDMp", period) * WEEKS_IN_YEAR
#if OECD scale, replace with "EqIncDOp"
class region(Variable):
value_type = Enum
possible_values = Region
default_value = Region.UNKNOWN
entity = Household
label = "Region of the UK"
definition_period = ETERNITY
def formula(household, period, parameters):
region = household("Gorx", period)
reg = select(
[
region == 1,
region == 2,
region == 4,
region == 5,
region == 6,
region == 7,
region == 8,
region == 9,
region == 10,
region == 11,
region == 12,
region == 13,
],
[
Region.NORTH_EAST,
Region.NORTH_WEST,
Region.YORKSHIRE,
Region.EAST_MIDLANDS,
Region.WEST_MIDLANDS,
Region.EAST_OF_ENGLAND,
Region.LONDON,
Region.SOUTH_EAST,
Region.SOUTH_WEST,
Region.WALES,
Region.SCOTLAND,
Region.NORTHERN_IRELAND,
],
)
return reg
class rent(Variable):
value_type = float
entity = Household
label = "Gross rent for the household"
definition_period = YEAR
def formula(household, period, parameters):
return household("B010", period) * WEEKS_IN_YEAR
# +
from openfisca_uk_data.datasets.frs.raw_frs import RawFRS
from pathlib import Path
from typing import List
from openfisca_core.model_api import *
from openfisca_uk_data.utils import dataset
import pandas as pd
import shutil
from openfisca_uk_data.utils import (
CAPITAL_INCOME_VARIABLES,
LABOUR_INCOME_VARIABLES,
uprated,
)
import h5py
from openfisca_uk_data.datasets.frs.base_frs.dataset import BaseFRS
from openfisca_uk_data.datasets.frs.base_frs.model_input_variables import (
get_input_variables,
)
def from_FRS(year: int = 2018):
from openfisca_uk import CountryTaxBenefitSystem
system = CountryTaxBenefitSystem()
variables = []
for variable in get_input_variables():
try:
variables += [type(system.variables[variable.__name__])]
except:
variables += [variable]
for i in range(len(variables)):
variable = variables[i]
if variable.__name__ in LABOUR_INCOME_VARIABLES:
variables[i] = uprated(
"uprating.labour_income", from_year=year + 1
)(variable)
elif variable.__name__ in CAPITAL_INCOME_VARIABLES:
variables[i] = uprated(
"uprating.labour_income", from_year=year + 1
)(variable)
else:
variables[i] = uprated(from_year=year + 1)(variable)
class reform(Reform):
def apply(self):
for var in variables:
self.update_variable(var)
return reform
@dataset
class FRS:
name = "frs"
openfisca_uk_compatible = True
input_reform_from_year = from_FRS
def generate(year) -> None:
base_frs_years = BaseFRS().years
if len(base_frs_years) == 0:
raw_frs_years = RawFRS().years
if len(raw_frs_years) == 0:
raise Exception("No FRS microdata to generate from")
else:
base_frs_year = max(raw_frs_years)
else:
base_frs_year = max(base_frs_years)
from openfisca_uk import Microsimulation
base_frs_sim = Microsimulation(dataset=BaseFRS, year=base_frs_year)
person_vars, benunit_vars, household_vars = [
[
var.__name__
for var in get_input_variables()
if var.entity.key == entity
]
for entity in ("person", "benunit", "household")
]
with h5py.File(FRS.file(year), mode="w") as f:
for variable in person_vars + benunit_vars + household_vars:
f[f"{variable}/{year}"] = base_frs_sim.calc(
variable, year
).values
# +
from openfisca_uk_data.utils import (
MAIN_INPUT_VARIABLES,
dataset,
uprate_variables,
)
import synthimpute as si
import numpy as np
import h5py
class FRS_LCFS_Adjusted:
name = "frs_lcfs_adj"
openfisca_uk_compatible = True
input_reform_from_year = uprate_variables(MAIN_INPUT_VARIABLES)
def generate(year):
from openfisca_uk import Microsimulation
from openfisca_uk_data.datasets.frs.frs import FRS
LCFS = households
frs_sim = Microsimulation(dataset=FRS)
lcfs_sim = Microsimulation(dataset=LCFS)
#common variables
lcfs_common_variables = np.array(
[
lcfs_sim.calc("household_size", year).values,
lcfs_sim.calc("gross_household_income", year).values,
lcfs_sim.calc("equivalized_income", year).values,
lcfs_sim.calc("region", year).values,
lcfs_sim.calc("rent", year).values,
]
).T
frs_common_variables = np.array(
[
frs_sim.calc("household_size", year).values,
frs_sim.calc("gross_household_income", year).values,
frs_sim.calc("equivalized_income", year).values,
frs_sim.calc("region", year).values,
frs_sim.calc("rent", year).values,
]
).T
#imputed variables
        lcfs_emissions = lcfs_sim.calc("emissions", year).values
frs_weight = frs_sim.calc("household_weight", year).values
print(
"Imputing emissions for FRS respondents from LCFS values...",
end="",
)
imputed_emissions = si.rf_impute(
x_train=lcfs_common_variables,
y_train=lcfs_emissions,
x_new=frs_common_variables,
sample_weight_train=frs_weight,
mean_quantile=0.18,
)
print(" completed.")
imputed_emissions *= (
frs_sim.calc("emissions", year).values > 0
)
frs_sim.simulation.set_input(
"emissions", year, imputed_emissions
)
with h5py.File(
            FRS_LCFS_Adjusted.data_dir / FRS_LCFS_Adjusted.filename(year), "w"
) as f:
for variable in MAIN_INPUT_VARIABLES:
f[f"{variable}/{year}"] = frs_sim.calc(variable, year).values
# -
| carbon.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# + [markdown] colab_type="text" id="kR-4eNdK6lYS"
# Deep Learning
# =============
#
# Assignment 2
# ------------
#
# Previously in `1_notmnist.ipynb`, we created a pickle with formatted datasets for training, development and testing on the [notMNIST dataset](http://yaroslavvb.blogspot.com/2011/09/notmnist-dataset.html).
#
# The goal of this assignment is to progressively train deeper and more accurate models using TensorFlow.
# + cellView="both" colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="JLpLa8Jt7Vu4"
# These are all the modules we'll be using later. Make sure you can import them
# before proceeding further.
from __future__ import print_function
import numpy as np
import tensorflow as tf
from six.moves import cPickle as pickle
from six.moves import range
# + [markdown] colab_type="text" id="1HrCK6e17WzV"
# First reload the data we generated in `1_notmnist.ipynb`.
# + cellView="both" colab={"autoexec": {"startup": false, "wait_interval": 0}, "output_extras": [{"item_id": 1}]} colab_type="code" executionInfo={"elapsed": 19456, "status": "ok", "timestamp": 1449847956073, "user": {"color": "", "displayName": "", "isAnonymous": false, "isMe": true, "permissionId": "", "photoUrl": "", "sessionId": "0", "userId": ""}, "user_tz": 480} id="y3-cj1bpmuxc" outputId="0ddb1607-1fc4-4ddb-de28-6c7ab7fb0c33"
pickle_file = 'notMNIST.pickle'
with open(pickle_file, 'rb') as f:
save = pickle.load(f)
train_dataset = save['train_dataset']
train_labels = save['train_labels']
valid_dataset = save['valid_dataset']
valid_labels = save['valid_labels']
test_dataset = save['test_dataset']
test_labels = save['test_labels']
del save # hint to help gc free up memory
print('Training set', train_dataset.shape, train_labels.shape)
print('Validation set', valid_dataset.shape, valid_labels.shape)
print('Test set', test_dataset.shape, test_labels.shape)
# + [markdown] colab_type="text" id="L7aHrm6nGDMB"
# Reformat into a shape that's more adapted to the models we're going to train:
# - data as a flat matrix,
# - labels as float 1-hot encodings.
# + cellView="both" colab={"autoexec": {"startup": false, "wait_interval": 0}, "output_extras": [{"item_id": 1}]} colab_type="code" executionInfo={"elapsed": 19723, "status": "ok", "timestamp": 1449847956364, "user": {"color": "", "displayName": "", "isAnonymous": false, "isMe": true, "permissionId": "", "photoUrl": "", "sessionId": "0", "userId": ""}, "user_tz": 480} id="IRSyYiIIGIzS" outputId="2ba0fc75-1487-4ace-a562-cf81cae82793"
image_size = 28
num_labels = 10
def reformat(dataset, labels):
dataset = dataset.reshape((-1, image_size * image_size)).astype(np.float32)
# Map 0 to [1.0, 0.0, 0.0 ...], 1 to [0.0, 1.0, 0.0 ...]
labels = (np.arange(num_labels) == labels[:,None]).astype(np.float32)
return dataset, labels
train_dataset, train_labels = reformat(train_dataset, train_labels)
valid_dataset, valid_labels = reformat(valid_dataset, valid_labels)
test_dataset, test_labels = reformat(test_dataset, test_labels)
print('Training set', train_dataset.shape, train_labels.shape)
print('Validation set', valid_dataset.shape, valid_labels.shape)
print('Test set', test_dataset.shape, test_labels.shape)
# + [markdown] colab_type="text" id="nCLVqyQ5vPPH"
# We're first going to train a multinomial logistic regression using simple gradient descent.
#
# TensorFlow works like this:
# * First you describe the computation that you want to see performed: what the inputs, the variables, and the operations look like. These get created as nodes over a computation graph. This description is all contained within the block below:
#
# with graph.as_default():
# ...
#
# * Then you can run the operations on this graph as many times as you want by calling `session.run()`, providing it outputs to fetch from the graph that get returned. This runtime operation is all contained in the block below:
#
# with tf.Session(graph=graph) as session:
# ...
#
# Let's load all the data into TensorFlow and build the computation graph corresponding to our training:
# + cellView="both" colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="Nfv39qvtvOl_"
# With gradient descent training, even this much data is prohibitive.
# Subset the training data for faster turnaround.
train_subset = 10000
graph = tf.Graph()
with graph.as_default():
# Input data.
# Load the training, validation and test data into constants that are
# attached to the graph.
tf_train_dataset = tf.constant(train_dataset[:train_subset, :])
tf_train_labels = tf.constant(train_labels[:train_subset])
tf_valid_dataset = tf.constant(valid_dataset)
tf_test_dataset = tf.constant(test_dataset)
# Variables.
# These are the parameters that we are going to be training. The weight
  # matrix will be initialized using random values following a (truncated)
# normal distribution. The biases get initialized to zero.
weights = tf.Variable(
tf.truncated_normal([image_size * image_size, num_labels]))
biases = tf.Variable(tf.zeros([num_labels]))
# Training computation.
# We multiply the inputs with the weight matrix, and add biases. We compute
# the softmax and cross-entropy (it's one operation in TensorFlow, because
# it's very common, and it can be optimized). We take the average of this
# cross-entropy across all training examples: that's our loss.
logits = tf.matmul(tf_train_dataset, weights) + biases
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels))
# Optimizer.
# We are going to find the minimum of this loss using gradient descent.
optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
# Predictions for the training, validation, and test data.
# These are not part of training, but merely here so that we can report
# accuracy figures as we train.
train_prediction = tf.nn.softmax(logits)
valid_prediction = tf.nn.softmax(
tf.matmul(tf_valid_dataset, weights) + biases)
test_prediction = tf.nn.softmax(tf.matmul(tf_test_dataset, weights) + biases)
# + [markdown] colab_type="text" id="KQcL4uqISHjP"
# Let's run this computation and iterate:
# + cellView="both" colab={"autoexec": {"startup": false, "wait_interval": 0}, "output_extras": [{"item_id": 9}]} colab_type="code" executionInfo={"elapsed": 57454, "status": "ok", "timestamp": 1449847994134, "user": {"color": "", "displayName": "", "isAnonymous": false, "isMe": true, "permissionId": "", "photoUrl": "", "sessionId": "0", "userId": ""}, "user_tz": 480} id="z2cjdenH869W" outputId="4c037ba1-b526-4d8e-e632-91e2a0333267"
num_steps = 801
def accuracy(predictions, labels):
return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1))
/ predictions.shape[0])
with tf.Session(graph=graph) as session:
# This is a one-time operation which ensures the parameters get initialized as
# we described in the graph: random weights for the matrix, zeros for the
# biases.
tf.initialize_all_variables().run()
print('Initialized')
for step in range(num_steps):
# Run the computations. We tell .run() that we want to run the optimizer,
# and get the loss value and the training predictions returned as numpy
# arrays.
_, l, predictions = session.run([optimizer, loss, train_prediction])
if (step % 100 == 0):
print('Loss at step %d: %f' % (step, l))
print('Training accuracy: %.1f%%' % accuracy(
predictions, train_labels[:train_subset, :]))
# Calling .eval() on valid_prediction is basically like calling run(), but
# just to get that one numpy array. Note that it recomputes all its graph
# dependencies.
print('Validation accuracy: %.1f%%' % accuracy(
valid_prediction.eval(), valid_labels))
print('Test accuracy: %.1f%%' % accuracy(test_prediction.eval(), test_labels))
# + [markdown] colab_type="text" id="x68f-hxRGm3H"
# Let's now switch to stochastic gradient descent training instead, which is much faster.
#
# The graph will be similar, except that instead of holding all the training data in a constant node, we create a `Placeholder` node which will be fed actual data at every call of `session.run()`.
# + cellView="both" colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="qhPMzWYRGrzM"
batch_size = 128
graph = tf.Graph()
with graph.as_default():
# Input data. For the training data, we use a placeholder that will be fed
# at run time with a training minibatch.
tf_train_dataset = tf.placeholder(tf.float32,
shape=(batch_size, image_size * image_size))
tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
tf_valid_dataset = tf.constant(valid_dataset)
tf_test_dataset = tf.constant(test_dataset)
# Variables.
weights = tf.Variable(
tf.truncated_normal([image_size * image_size, num_labels]))
biases = tf.Variable(tf.zeros([num_labels]))
# Training computation.
logits = tf.matmul(tf_train_dataset, weights) + biases
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels))
# Optimizer.
optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
# Predictions for the training, validation, and test data.
train_prediction = tf.nn.softmax(logits)
valid_prediction = tf.nn.softmax(
tf.matmul(tf_valid_dataset, weights) + biases)
test_prediction = tf.nn.softmax(tf.matmul(tf_test_dataset, weights) + biases)
# + [markdown] colab_type="text" id="XmVZESmtG4JH"
# Let's run it:
# + cellView="both" colab={"autoexec": {"startup": false, "wait_interval": 0}, "output_extras": [{"item_id": 6}]} colab_type="code" executionInfo={"elapsed": 66292, "status": "ok", "timestamp": 1449848003013, "user": {"color": "", "displayName": "", "isAnonymous": false, "isMe": true, "permissionId": "", "photoUrl": "", "sessionId": "0", "userId": ""}, "user_tz": 480} id="FoF91pknG_YW" outputId="d255c80e-954d-4183-ca1c-c7333ce91d0a"
num_steps = 3001
with tf.Session(graph=graph) as session:
tf.initialize_all_variables().run()
print("Initialized")
for step in range(num_steps):
# Pick an offset within the training data, which has been randomized.
# Note: we could use better randomization across epochs.
offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
# Generate a minibatch.
batch_data = train_dataset[offset:(offset + batch_size), :]
batch_labels = train_labels[offset:(offset + batch_size), :]
# Prepare a dictionary telling the session where to feed the minibatch.
# The key of the dictionary is the placeholder node of the graph to be fed,
# and the value is the numpy array to feed to it.
feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}
_, l, predictions = session.run(
[optimizer, loss, train_prediction], feed_dict=feed_dict)
if (step % 500 == 0):
print("Minibatch loss at step %d: %f" % (step, l))
print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels))
print("Validation accuracy: %.1f%%" % accuracy(
valid_prediction.eval(), valid_labels))
print("Test accuracy: %.1f%%" % accuracy(test_prediction.eval(), test_labels))
# + [markdown] colab_type="text" id="7omWxtvLLxik"
# ---
# Problem
# -------
#
# Turn the logistic regression example with SGD into a 1-hidden layer neural network with rectified linear units (nn.relu()) and 1024 hidden nodes. This model should improve your validation / test accuracy.
#
# ---
# +
batch_size = 128
image_size_flat = image_size * image_size
num_hidden_nodes = 1024
graph = tf.Graph()
with graph.as_default():
weights_hidden = tf.Variable(tf.truncated_normal([image_size_flat, num_hidden_nodes]))
biases_hidden = tf.Variable(tf.zeros([num_hidden_nodes]))
weights_logits = tf.Variable(tf.truncated_normal([num_hidden_nodes, num_labels]))
biases_logits = tf.Variable(tf.zeros([num_labels]))
tf_train_dataset = tf.placeholder(tf.float32,shape=(batch_size, image_size_flat))
tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
tf_valid_dataset = tf.constant(valid_dataset)
tf_test_dataset = tf.constant(test_dataset)
train_hidden = tf.nn.relu(tf.matmul(tf_train_dataset, weights_hidden) + biases_hidden)
train_logits = tf.matmul(train_hidden, weights_logits) + biases_logits
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(train_logits, tf_train_labels))
optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
train_prediction = tf.nn.softmax(train_logits)
valid_hidden = tf.nn.relu(tf.matmul(tf_valid_dataset, weights_hidden) + biases_hidden)
valid_logits = tf.matmul(valid_hidden, weights_logits) + biases_logits
valid_prediction = tf.nn.softmax(valid_logits)
test_hidden = tf.nn.relu(tf.matmul(tf_test_dataset, weights_hidden) + biases_hidden)
test_logits = tf.matmul(test_hidden, weights_logits) + biases_logits
test_prediction = tf.nn.softmax(test_logits)
# +
num_steps = 3001
with tf.Session(graph=graph) as session:
tf.initialize_all_variables().run()
for step in range(num_steps):
offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
batch_data = train_dataset[offset:(offset + batch_size), :]
batch_labels = train_labels[offset:(offset + batch_size), :]
feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}
_, l, predictions = session.run([optimizer, loss, train_prediction], feed_dict=feed_dict)
if (step % 500 == 0):
print("Minibatch loss at step %d: %f" % (step, l))
print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels))
print("Validation accuracy: %.1f%%" % accuracy(valid_prediction.eval(), valid_labels))
print("Test accuracy: %.1f%%" % accuracy(test_prediction.eval(), test_labels))
print('')
| 2_fullyconnected.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# # T81-558: Applications of Deep Neural Networks
# **Class 3: Training a Neural Network**
# * Instructor: [<NAME>](https://sites.wustl.edu/jeffheaton/), School of Engineering and Applied Science, [Washington University in St. Louis](https://engineering.wustl.edu/Programs/Pages/default.aspx)
# * For more information visit the [class website](https://sites.wustl.edu/jeffheaton/t81-558/).
# # Building the Feature Vector
#
# Neural networks require their input to be a fixed number of columns. This is very similar to spreadsheet data. This input must be completely numeric.
#
# It is important to represent the data in a way that the neural network can train from it. In class 6, we will see even more ways to preprocess data. For now, we will look at several of the most basic ways to transform data for a neural network.
#
# Before we look at specific ways to preprocess data, it is important to consider four basic types of data, as defined by [Stanley Smith Stevens](https://en.wikipedia.org/wiki/Stanley_Smith_Stevens). These are commonly referred to as the [levels of measure](https://en.wikipedia.org/wiki/Level_of_measurement):
#
# * Character Data (strings)
# * **Nominal** - Individual discrete items, no order. For example: color, zip code, shape.
# * **Ordinal** - Individual discrete items that can be ordered. For example: grade level, job title, Starbucks(tm) coffee size (tall, grande, venti)
# * Numeric Data
# * **Interval** - Numeric values, no defined start. For example, temperature. You would never say "yesterday was twice as hot as today".
# * **Ratio** - Numeric values, clearly defined start. For example, speed. You would say that "The first car is going twice as fast as the second."
#
# The following code contains several useful functions to encode the feature vector for various types of data. Encoding data:
#
# * **encode_text_dummy** - Encode text fields, such as the iris species as a single field for each class. Three classes would become "0,0,1" "0,1,0" and "1,0,0". Encode non-target predictors this way. Good for nominal.
# * **encode_text_index** - Encode text fields, such as the iris species as a single numeric field as "0" "1" and "2". Encode the target field for a classification this way. Good for nominal.
# * **encode_numeric_zscore** - Encode numeric values as a z-score. Neural networks deal well with "centered" fields, zscore is usually a good starting point for interval/ratio.
#
# *Ordinal values can be encoded as dummy or index. Later we will see a more advanced means of encoding*
#
# Dealing with missing data:
#
# * **missing_median** - Fill all missing values with the median value.
#
# Creating the final feature vector:
#
# * **to_xy** - Once all fields are numeric, this function can provide the x and y matrixes that are used to fit the neural network.
#
# Other utility functions:
#
# * **hms_string** - Print out an elapsed time string.
# * **chart_regression** - Display a chart to show how well a regression performs.
#
# +
from sklearn import preprocessing
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import shutil
import os
# Encode text values to dummy variables(i.e. [1,0,0],[0,1,0],[0,0,1] for red,green,blue)
def encode_text_dummy(df, name):
dummies = pd.get_dummies(df[name])
for x in dummies.columns:
dummy_name = "{}-{}".format(name, x)
df[dummy_name] = dummies[x]
df.drop(name, axis=1, inplace=True)
# Encode text values to a single dummy variable. The new columns (which do not replace the old) will have a 1
# at every location where the original column (name) matches each of the target_values. One column is added for
# each target value.
def encode_text_single_dummy(df, name, target_values):
for tv in target_values:
l = list(df[name].astype(str))
l = [1 if str(x) == str(tv) else 0 for x in l]
name2 = "{}-{}".format(name, tv)
df[name2] = l
# Encode text values to indexes(i.e. [1],[2],[3] for red,green,blue).
def encode_text_index(df, name):
le = preprocessing.LabelEncoder()
df[name] = le.fit_transform(df[name])
return le.classes_
# Encode a numeric column as zscores
def encode_numeric_zscore(df, name, mean=None, sd=None):
if mean is None:
mean = df[name].mean()
if sd is None:
sd = df[name].std()
df[name] = (df[name] - mean) / sd
# Convert all missing values in the specified column to the median
def missing_median(df, name):
med = df[name].median()
df[name] = df[name].fillna(med)
# Convert all missing values in the specified column to the default
def missing_default(df, name, default_value):
df[name] = df[name].fillna(default_value)
# Convert a Pandas dataframe to the x,y inputs that TensorFlow needs
def to_xy(df, target):
result = []
for x in df.columns:
if x != target:
result.append(x)
# find out the type of the target column. Is it really this hard? :(
target_type = df[target].dtypes
target_type = target_type[0] if hasattr(target_type, '__iter__') else target_type
# Encode to int for classification, float otherwise. TensorFlow likes 32 bits.
if target_type in (np.int64, np.int32):
# Classification
dummies = pd.get_dummies(df[target])
        return df[result].values.astype(np.float32), dummies.values.astype(np.float32)
else:
# Regression
        return df[result].values.astype(np.float32), df[[target]].values.astype(np.float32)
# Nicely formatted time string
def hms_string(sec_elapsed):
h = int(sec_elapsed / (60 * 60))
m = int((sec_elapsed % (60 * 60)) / 60)
s = sec_elapsed % 60
return "{}:{:>02}:{:>05.2f}".format(h, m, s)
# Regression chart.
def chart_regression(pred,y,sort=True):
t = pd.DataFrame({'pred' : pred, 'y' : y.flatten()})
if sort:
t.sort_values(by=['y'],inplace=True)
a = plt.plot(t['y'].tolist(),label='expected')
b = plt.plot(t['pred'].tolist(),label='prediction')
plt.ylabel('output')
plt.legend()
plt.show()
# Remove all rows where the specified column is +/- sd standard deviations
def remove_outliers(df, name, sd):
drop_rows = df.index[(np.abs(df[name] - df[name].mean()) >= (sd * df[name].std()))]
df.drop(drop_rows, axis=0, inplace=True)
# Encode a column to a range between normalized_low and normalized_high.
def encode_numeric_range(df, name, normalized_low=-1, normalized_high=1,
data_low=None, data_high=None):
if data_low is None:
data_low = min(df[name])
data_high = max(df[name])
df[name] = ((df[name] - data_low) / (data_high - data_low)) \
* (normalized_high - normalized_low) + normalized_low
# -
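# The short example below uses a small, made-up DataFrame (not one of the course datasets) to show how these helper functions fit together to produce a purely numeric feature set.
# +
demo_df = pd.DataFrame({
    'color':  ['red', 'green', 'blue', 'green'],   # nominal predictor (made-up values)
    'height': [1.2, 2.4, np.nan, 3.1],             # ratio predictor with a missing value
    'label':  ['a', 'b', 'a', 'b']                 # nominal target
})
missing_median(demo_df, 'height')                    # fill the missing height with the median
encode_numeric_zscore(demo_df, 'height')             # center and scale the numeric column
encode_text_dummy(demo_df, 'color')                  # one dummy column per color
demo_classes = encode_text_index(demo_df, 'label')   # target becomes 0/1; class names are returned
print(demo_df)
print(demo_classes)
# -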
# # Training with a Validation Set and Early Stopping
#
# **Overfitting** occurs when a neural network is trained to the point that it begins to memorize rather than generalize.
#
# 
#
# It is important to segment the original dataset into several datasets:
#
# * **Training Set**
# * **Validation Set**
# * **Holdout Set**
#
# There are several different ways that these sets can be constructed. The following programs demonstrate some of these.
#
# The first method is a training and validation set. The training data are used to train the neural network until performance on the validation set no longer improves. This attempts to stop at a near optimal training point. This method will only give accurate "out of sample" predictions for the validation set, which is usually 20% or so of the data. The predictions for the training data will be overly optimistic, as these were the data that the neural network was trained on.
#
# 
#
# +
import pandas as pd
import io
import requests
import numpy as np
import os
from sklearn.model_selection import train_test_split
from sklearn import metrics
from keras.models import Sequential
from keras.layers.core import Dense, Activation
from keras.callbacks import EarlyStopping
path = "./data/"
filename = os.path.join(path,"iris.csv")
df = pd.read_csv(filename,na_values=['NA','?'])
species = encode_text_index(df,"species")
x,y = to_xy(df,"species")
# Split into train/test
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.25, random_state=42)
model = Sequential()
model.add(Dense(10, input_dim=x.shape[1], activation='relu'))
model.add(Dense(1,activation='relu'))
model.add(Dense(y.shape[1],activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam')
monitor = EarlyStopping(monitor='val_loss', min_delta=1e-3, patience=5, verbose=1, mode='auto')
model.fit(x_train,y_train,validation_data=(x_test,y_test),callbacks=[monitor],verbose=2,epochs=1000)
# -
# Now that the neural network is trained, we can make predictions about the test set. The following code predicts the type of iris for the test set and displays the first five irises.
# +
from sklearn import metrics
import tensorflow as tf
pred = model.predict(x_test)
print(pred[0:5]) # print first five predictions
# -
# These numbers are in scientific notation. Each line provides the probability that the iris is one of the 3 types of iris in the data set. For the first line, the second type of iris has a 91% probability of being the species of iris.
# # Calculate Classification Accuracy
#
# Accuracy is the number of rows where the neural network correctly predicted the target class. Accuracy is only used for classification, not regression.
#
# $ accuracy = \frac{\#\ correct}{N} $
#
# Where $N$ is the size of the evaluated set (training or validation). Higher accuracy numbers are desired.
#
# As we just saw, by default, Keras will return the percent probability for each class. We can change these prediction probabilities into the actual iris predicted with **argmax**.
pred = np.argmax(pred,axis=1) # raw probabilities to chosen class (highest probability)
print(pred)
# Now that we have the actual iris flower predicted, we can calculate the percent accuracy (how many were correctly classified).
y_compare = np.argmax(y_test,axis=1)
score = metrics.accuracy_score(y_compare, pred)
print("Accuracy score: {}".format(score))
# # Calculate Classification Log Loss
#
# Accuracy is like a final exam with no partial credit. However, neural networks can predict a probability of each of the target classes. Neural networks will give high probabilities to predictions that are more likely. Log loss is an error metric that penalizes confidence in wrong answers. Lower log loss values are desired.
#
# The following code shows the predicted class probabilities:
#
# +
from IPython.display import display
# Don't display numpy in scientific notation
np.set_printoptions(precision=4)
np.set_printoptions(suppress=True)
# Generate predictions
pred = model.predict(x_test)
print("Numpy array of predictions")
print(pred[0]*100)
print("As percent probability")
display(pred[0:5])
score = metrics.log_loss(y_test, pred)
print("Log loss score: {}".format(score))
# -
# [Log loss](https://www.kaggle.com/wiki/LogarithmicLoss) is calculated as follows:
#
# $ \text{log loss} = -\frac{1}{N}\sum_{i=1}^N {( {y}_i\log(\hat{y}_i) + (1 - {y}_i)\log(1 - \hat{y}_i))} $
#
# The log function is useful for penalizing wrong answers. The following code demonstrates the shape of the log function:
# +
# %matplotlib inline
from matplotlib.pyplot import figure, show
from numpy import arange, sin, pi
t = arange(1e-5, 5.0, 0.00001)
#t = arange(1.0, 5.0, 0.00001) # computer scientists
#t = arange(0.0, 1.0, 0.00001) # data scientists
fig = figure(1,figsize=(12, 10))
ax1 = fig.add_subplot(211)
ax1.plot(t, np.log(t))
ax1.grid(True)
ax1.set_ylim((-8, 1.5))
ax1.set_xlim((-0.1, 2))
ax1.set_xlabel('x')
ax1.set_ylabel('y')
ax1.set_title('log(x)')
show()
# -
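# As a quick numeric illustration (toy values, not output from the iris model above), a single confidently wrong prediction dominates the binary log loss:
# +
y_true = np.array([1, 1, 1])
p_good = np.array([0.95, 0.95, 0.95])   # confident and correct
p_bad  = np.array([0.95, 0.95, 0.05])   # the last prediction is confidently wrong
def simple_log_loss(y, p):
    return -np.mean(y*np.log(p) + (1-y)*np.log(1-p))
print(simple_log_loss(y_true, p_good))  # about 0.05
print(simple_log_loss(y_true, p_bad))   # about 1.03; the one bad prediction dominates
# -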
# # Evaluating Regression Results
#
# Regression results are evaluated differently than classification. Consider the following code that trains a neural network for the [MPG dataset](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/datasets_mpg.ipynb).
# +
from sklearn.model_selection import train_test_split
import pandas as pd
import os
import numpy as np
from sklearn import metrics
from scipy.stats import zscore
path = "./data/"
filename_read = os.path.join(path,"auto-mpg.csv")
df = pd.read_csv(filename_read,na_values=['NA','?'])
cars = df['name']
df.drop('name',1,inplace=True)
missing_median(df, 'horsepower')
x,y = to_xy(df,"mpg")
# Split into train/test
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.25, random_state=45)
model = Sequential()
model.add(Dense(10, input_dim=x.shape[1], activation='relu'))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
monitor = EarlyStopping(monitor='val_loss', min_delta=1e-3, patience=5, verbose=1, mode='auto')
model.fit(x_train,y_train,validation_data=(x_test,y_test),callbacks=[monitor],verbose=2,epochs=1000)
# -
# ### Mean Square Error
#
# The mean square error is the average of the squared differences between the prediction ($\hat{y}$) and the expected ($y$). MSE values are not of a particular unit. If an MSE value has decreased for a model, that is good. However, beyond this, there is not much more you can determine. Low MSE values are desired.
#
# $ \text{MSE} = \frac{1}{n} \sum_{i=1}^n \left(\hat{y}_i - y_i\right)^2 $
#
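# A quick sanity check with toy numbers (not taken from the model above): the MSE is just the mean of the squared differences, so a manual calculation and scikit-learn agree.
# +
y_true_demo = np.array([3.0, 5.0, 2.5])
y_pred_demo = np.array([2.5, 5.0, 4.0])
print(np.mean((y_pred_demo - y_true_demo)**2))               # manual: 0.8333...
print(metrics.mean_squared_error(y_pred_demo, y_true_demo))  # same value from scikit-learn
# -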
# +
# Predict
pred = model.predict(x_test)
# Measure MSE error.
score = metrics.mean_squared_error(pred,y_test)
print("Final score (MSE): {}".format(score))
# -
# ### Root Mean Square Error
#
# The root mean square error (RMSE) is essentially the square root of the MSE. Because of this, the RMSE is in the same units as the training data outcome. Low RMSE values are desired.
#
# $ \text{RMSE} = \sqrt{\frac{1}{n} \sum_{i=1}^n \left(\hat{y}_i - y_i\right)^2} $
# Measure RMSE error. RMSE is common for regression.
score = np.sqrt(metrics.mean_squared_error(pred,y_test))
print("Final score (RMSE): {}".format(score))
# # Training with Cross Validation
#
# Cross validation uses a number of folds, and multiple models, to generate out of sample predictions on the entire dataset. It is important to note that there will be one model (neural network) for each fold. Each model contributes part of the final out-of-sample prediction.
#
# 
#
# For new data, which is data not present in the training set, predictions from the fold models can be handled in several ways.
#
# * Choose the model that had the highest validation score as the final model.
# * Present new data to the 5 models and average the result (this is an [ensemble](https://en.wikipedia.org/wiki/Ensemble_learning)); a minimal sketch of this option follows this list.
# * Retrain a new model (using the same settings as the cross-validation) on the entire dataset, training for as many steps and with the same hidden layer structure.
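# The following is a minimal sketch (not part of the original course code) of the second option above. It assumes `fold_models` holds the Keras models trained in a cross-validation loop like the ones below, and `x_new` is new data with the same feature layout.
# +
def ensemble_predict(fold_models, x_new):
    # One prediction per fold model, then an element-wise average across the models
    preds = [m.predict(x_new) for m in fold_models]
    return np.mean(preds, axis=0)
# -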
#
# ## Regression with Cross Validation
#
# The following code trains the MPG dataset using a 5-fold cross validation. The expected performance of a neural network, of the type trained here, would be the score for the generated out-of-sample predictions.
# +
import pandas as pd
import os
import numpy as np
from sklearn import metrics
from scipy.stats import zscore
from sklearn.model_selection import KFold
from keras.models import Sequential
from keras.layers.core import Dense, Activation
path = "./data/"
filename_read = os.path.join(path,"auto-mpg.csv")
filename_write = os.path.join(path,"auto-mpg-out-of-sample.csv")
df = pd.read_csv(filename_read,na_values=['NA','?'])
# Shuffle
np.random.seed(42)
df = df.reindex(np.random.permutation(df.index))
df.reset_index(inplace=True, drop=True)
# Preprocess
cars = df['name']
df.drop('name',1,inplace=True)
missing_median(df, 'horsepower')
# Encode to a 2D matrix for training
x,y = to_xy(df,'mpg')
# Cross validate
kf = KFold(5)
oos_y = []
oos_pred = []
fold = 0
for train, test in kf.split(x):
fold+=1
print("Fold #{}".format(fold))
x_train = x[train]
y_train = y[train]
x_test = x[test]
y_test = y[test]
model = Sequential()
model.add(Dense(20, input_dim=x.shape[1], activation='relu'))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
monitor = EarlyStopping(monitor='val_loss', min_delta=1e-3, patience=5, verbose=1, mode='auto')
model.fit(x_train,y_train,validation_data=(x_test,y_test),callbacks=[monitor],verbose=0,epochs=1000)
pred = model.predict(x_test)
oos_y.append(y_test)
oos_pred.append(pred)
# Measure this fold's RMSE
score = np.sqrt(metrics.mean_squared_error(pred,y_test))
print("Fold score (RMSE): {}".format(score))
# Build the oos prediction list and calculate the error.
oos_y = np.concatenate(oos_y)
oos_pred = np.concatenate(oos_pred)
score = np.sqrt(metrics.mean_squared_error(oos_pred,oos_y))
print("Final, out of sample score (RMSE): {}".format(score))
# Write the cross-validated prediction
oos_y = pd.DataFrame(oos_y)
oos_pred = pd.DataFrame(oos_pred)
oosDF = pd.concat( [df, oos_y, oos_pred],axis=1 )
oosDF.to_csv(filename_write,index=False)
# -
# ## Classification with Cross Validation
#
# The following code trains and fits the iris dataset with cross validation. It also writes out the out of sample (predictions on the test set) results.
# +
import pandas as pd
import os
import numpy as np
from sklearn import metrics
from scipy.stats import zscore
from sklearn.model_selection import KFold
from keras.models import Sequential
from keras.layers.core import Dense, Activation
path = "./data/"
filename_read = os.path.join(path,"iris.csv")
filename_write = os.path.join(path,"iris-out-of-sample.csv")
df = pd.read_csv(filename_read,na_values=['NA','?'])
# Shuffle
np.random.seed(42)
df = df.reindex(np.random.permutation(df.index))
df.reset_index(inplace=True, drop=True)
# Encode to a 2D matrix for training
species = encode_text_index(df,"species")
x,y = to_xy(df,"species")
# Cross validate
kf = KFold(5)
oos_y = []
oos_pred = []
fold = 0
for train, test in kf.split(x):
fold+=1
print("Fold #{}".format(fold))
x_train = x[train]
y_train = y[train]
x_test = x[test]
y_test = y[test]
model = Sequential()
model.add(Dense(50, input_dim=x.shape[1], activation='relu')) # Hidden 1
model.add(Dense(25, activation='relu')) # Hidden 2
model.add(Dense(y.shape[1],activation='softmax')) # Output
model.compile(loss='categorical_crossentropy', optimizer='adam')
monitor = EarlyStopping(monitor='val_loss', min_delta=1e-3, patience=25, verbose=1, mode='auto')
    model.fit(x_train,y_train,validation_data=(x_test,y_test),callbacks=[monitor],verbose=0,epochs=1000)
pred = model.predict(x_test)
oos_y.append(y_test)
pred = np.argmax(pred,axis=1) # raw probabilities to chosen class (highest probability)
oos_pred.append(pred)
# Measure this fold's accuracy
y_compare = np.argmax(y_test,axis=1) # For accuracy calculation
score = metrics.accuracy_score(y_compare, pred)
print("Fold score (accuracy): {}".format(score))
# Build the oos prediction list and calculate the error.
oos_y = np.concatenate(oos_y)
oos_pred = np.concatenate(oos_pred)
oos_y_compare = np.argmax(oos_y,axis=1) # For accuracy calculation
score = metrics.accuracy_score(oos_y_compare, oos_pred)
print("Final score (accuracy): {}".format(score))
# Write the cross-validated prediction
oos_y = pd.DataFrame(oos_y)
oos_pred = pd.DataFrame(oos_pred)
oosDF = pd.concat( [df, oos_y, oos_pred],axis=1 )
oosDF.to_csv(filename_write,index=False)
# -
# # Training with Cross Validation and a Holdout Set
#
# If you have a considerable amount of data, it is always valuable to set aside a holdout set before you cross-validate. This holdout set will be the final evaluation before you put your model to its real-world use.
#
# 
#
# The following program makes use of a holdout set, and then still cross validates.
# +
from sklearn.model_selection import train_test_split
import pandas as pd
import os
import numpy as np
from sklearn import metrics
from scipy.stats import zscore
from sklearn.model_selection import KFold
from keras.callbacks import EarlyStopping
path = "./data/"
filename_read = os.path.join(path,"auto-mpg.csv")
filename_write = os.path.join(path,"auto-mpg-holdout.csv")
df = pd.read_csv(filename_read,na_values=['NA','?'])
# create feature vector
missing_median(df, 'horsepower')
df.drop('name',1,inplace=True)
encode_text_dummy(df, 'origin')
# Shuffle
np.random.seed(42)
df = df.reindex(np.random.permutation(df.index))
df.reset_index(inplace=True, drop=True)
# Encode to a 2D matrix for training
x,y = to_xy(df,'mpg')
# Keep a 10% holdout
x_main, x_holdout, y_main, y_holdout = train_test_split(
x, y, test_size=0.10)
# Cross validate
kf = KFold(5)
oos_y = []
oos_pred = []
fold = 0
for train, test in kf.split(x_main):
fold+=1
print("Fold #{}".format(fold))
x_train = x_main[train]
y_train = y_main[train]
x_test = x_main[test]
y_test = y_main[test]
model = Sequential()
model.add(Dense(20, input_dim=x.shape[1], activation='relu'))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
monitor = EarlyStopping(monitor='val_loss', min_delta=1e-3, patience=5, verbose=1, mode='auto')
model.fit(x_train,y_train,validation_data=(x_test,y_test),callbacks=[monitor],verbose=0,epochs=1000)
pred = model.predict(x_test)
oos_y.append(y_test)
oos_pred.append(pred)
    # Measure this fold's RMSE
score = np.sqrt(metrics.mean_squared_error(pred,y_test))
print("Fold score (RMSE): {}".format(score))
# Build the oos prediction list and calculate the error.
oos_y = np.concatenate(oos_y)
oos_pred = np.concatenate(oos_pred)
score = np.sqrt(metrics.mean_squared_error(oos_pred,oos_y))
print()
print("Cross-validated score (RMSE): {}".format(score))
# Score the holdout set with the model from the final cross-validation fold
holdout_pred = model.predict(x_holdout)
score = np.sqrt(metrics.mean_squared_error(holdout_pred,y_holdout))
print("Holdout score (RMSE): {}".format(score))
# -
# # Scikit-Learn Versions: model_selection vs cross_validation
#
# Scikit-Learn changed a bit in how cross-validation is handled. Both versions still work, but you should use the **sklearn.model_selection** import, rather than **sklearn.cross_validation**. The following shows both the new and old forms of cross validation. All examples from this class will use the newer form.
#
# The following two sections show both forms:
# +
# Older scikit-learn syntax for splits/cross validation
# Still valid, but going away. Do not use.
# (Note the red box warning below)
from sklearn.cross_validation import train_test_split
from sklearn.cross_validation import KFold
path = "./data/"
filename_read = os.path.join(path,"auto-mpg.csv")
df = pd.read_csv(filename_read,na_values=['NA','?'])
kf = KFold(len(df), n_folds=5)
fold = 0
for train, test in kf:
fold+=1
print("Fold #{}: train={}, test={}".format(fold,len(train),len(test)))
# +
# Newer scikit-learn syntax for splits/cross validation
# Use this method (as shown above)
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
path = "./data/"
filename_read = os.path.join(path,"auto-mpg.csv")
df = pd.read_csv(filename_read,na_values=['NA','?'])
kf = KFold(5)
fold = 0
for train, test in kf.split(df):
fold+=1
print("Fold #{}: train={}, test={}".format(fold,len(train),len(test)))
# -
# # How Kaggle Competitions are Scored
#
# [Kaggle](https://www.kaggle.com/) is a platform for competitive data science. Competitions are posted onto Kaggle by companies seeking the best model for their data. Competing in a Kaggle competition is quite a bit of work; I've [competed in one Kaggle competition](https://www.kaggle.com/jeffheaton) myself.
#
# Kaggle awards "tiers", such as:
#
# * Kaggle Grandmaster
# * Kaggle Master
# * Kaggle Expert
#
# Your [tier](https://www.kaggle.com/progression) is based on your performance in past competitions.
#
# To compete in Kaggle you simply provide predictions for a dataset that they post. You do not need to submit any code. Your prediction output will place you onto the [leaderboard of a competition](https://www.kaggle.com/c/otto-group-product-classification-challenge/leaderboard/public).
#
# 
# An original dataset is sent to Kaggle by the company. From this dataset, Kaggle posts public data that includes "train" and "test" portions. For the "train" data, the outcomes (y) are provided. For the test data, no outcomes are provided. Your submission file contains your predictions for the "test data". When you submit your results, Kaggle will calculate a score on part of your prediction data. They do not publish which part of the submission data is used for the public and private leaderboard scores (this is a secret to prevent overfitting). While the competition is still running, Kaggle publishes the public leaderboard ranks. Once the competition ends, the private leaderboard is revealed to designate the true winners. Due to overfitting, there is sometimes an upset in positions when the final private leaderboard is revealed.
# # Managing Hyperparameters
#
# There are many different settings that you can use for a neural network. These can affect performance. The following code changes some of these, beyond their default values:
#
# * **activation:** relu, sigmoid, tanh
# * Layers/Neuron Counts
# * **optimizer:** adam, sgd, rmsprop, and [others](https://keras.io/optimizers/)
# +
# %matplotlib inline
from matplotlib.pyplot import figure, show
from sklearn.model_selection import train_test_split
import pandas as pd
import os
import numpy as np
from sklearn import metrics
from scipy.stats import zscore
import tensorflow as tf
path = "./data/"
preprocess = False
filename_read = os.path.join(path,"auto-mpg.csv")
df = pd.read_csv(filename_read,na_values=['NA','?'])
# create feature vector
missing_median(df, 'horsepower')
encode_text_dummy(df, 'origin')
df.drop('name',1,inplace=True)
if preprocess:
encode_numeric_zscore(df, 'horsepower')
encode_numeric_zscore(df, 'weight')
encode_numeric_zscore(df, 'cylinders')
encode_numeric_zscore(df, 'displacement')
encode_numeric_zscore(df, 'acceleration')
# Encode to a 2D matrix for training
x,y = to_xy(df,'mpg')
# Split into train/test
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.20, random_state=42)
model = Sequential()
model.add(Dense(50, input_dim=x.shape[1], activation='relu'))
model.add(Dense(25, activation='relu'))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
monitor = EarlyStopping(monitor='val_loss', min_delta=1e-3, patience=5, verbose=1, mode='auto')
model.fit(x_train,y_train,validation_data=(x_test,y_test),callbacks=[monitor],verbose=0,epochs=1000)
# Predict and measure RMSE
pred = model.predict(x_test)
score = np.sqrt(metrics.mean_squared_error(pred,y_test))
print("Score (RMSE): {}".format(score))
# -
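# As a small follow-up sketch (not part of the original notebook), the same train-and-score cycle can be wrapped in a loop to compare optimizers; everything it uses (x, x_train, y_train, x_test, y_test, Sequential, Dense, EarlyStopping) comes from the cells above.
# +
for opt in ['adam', 'sgd', 'rmsprop']:
    m = Sequential()
    m.add(Dense(50, input_dim=x.shape[1], activation='relu'))
    m.add(Dense(25, activation='relu'))
    m.add(Dense(1))
    m.compile(loss='mean_squared_error', optimizer=opt)
    stopper = EarlyStopping(monitor='val_loss', min_delta=1e-3, patience=5, verbose=0, mode='auto')
    m.fit(x_train, y_train, validation_data=(x_test, y_test),
          callbacks=[stopper], verbose=0, epochs=1000)
    rmse = np.sqrt(metrics.mean_squared_error(m.predict(x_test), y_test))
    print("{}: RMSE {}".format(opt, rmse))
# -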
| t81_558_class3_training.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # P2P Architecture
#
# P2P (peer-to-peer) can be defined as terminals sharing computing resources and services by exchanging them directly, without relaying through a server. The benefits are obvious: with no server in the middle you are not limited by the server's bandwidth, and the load on the server is greatly reduced. P2P applications include IM (QQ, MSN), BitTorrent, and so on.
#
# P2P is a peer-to-peer structure. Unlike the client-server model there is no fixed ordering of roles, and nothing is relayed through some authority (middleman), so P2P cannot simply rely on the usual server-mediated TCP connection. If you have read my HTML5 guide, the section on WebRTC describes a typical P2P protocol, and from it we can see the basic shape of P2P:
#
# + every node is an independent client
# + clients can connect to each other directly
# + clients locate each other through a server called a STUN server, which acts only as a bulletin board for exchanging information and does not relay the data itself
#
#
# Before discussing the implementation, let's look at a few concepts:
#
# + NAT
# + hole punching
# + STUN
# ## NAT
#
# NAT (Network Address Translation) is the process of translating the IP address in an IP packet header into another IP address; in plain terms, a whole LAN shares one public IP. It is easy to check that your host's IP address and your public IP address are not the same: the local address can be viewed with `ifconfig (Unix) or ipconfig (Windows)`, while the public one shows up if you simply search for `ip` on Baidu.
#
# So what problem was NAT designed to solve?
#
# Back in the 1980s, when network addresses were being designed, people assumed there would never be more than 2^32 terminal devices (the 32-bit address length) connected to the Internet. Besides, making the IP address longer (even from 4 bytes to 6) carried a considerable cost in computation, storage, and transmission for the equipment of the day. Think of the Y2K bug, which happened because the first two digits of the year were not stored; today a few bytes seem trivial (skip one meal and you have saved several GB), but back then addresses really were a scarce resource.
#
# Later, IP addresses began to run out, and NAT was born! (IPv6 is also a solution, but it has never fully caught on.) The essence of NAT is to let a group of machines share the same IP, which temporarily solved the IP shortage. NAT has another important use as well: it protects the hosts behind it from outside attacks, because the NAT device always sits between the public network and the internal network, so the information and interfaces exposed to the outside are kept under control.
#
# ### Types of NAT
#
# NAT opens a path between the internal network and the public network by means of mappings.
# Suppose the router's IP is `172.16.31.10`, the public server's IP is `172.16.31.10`, and the internal machine is `192.168.0.240:5060`. The internal machine first sends to the router `172.16.31.10`; the router allocates a port, say `54333`, and then sends to the server on the internal machine's behalf, i.e. `172.16.31.10:54333 -> 172.16.31.10:80`. At this point the router leaves a "hole" in its mapping table: packets sent from `172.16.31.10:80` to `172.16.31.10:54333` will be forwarded to `192.168.0.240:5060`.
#
# But not every packet sent to `172.16.31.10:54333` will be forwarded. Different NAT types go about this same job in different ways.
#
# #### Full Cone NAT
#
#
# Neither the IP nor the port is restricted. Once the client has punched a hole from the inside out (`NatIP:NatPort -> A:P1`), hosts with other IPs (`B`) or other ports (`A:P2`) can all use this hole to send data to the client.
#
#
#
# #### Restricted Cone NAT
#
#
# The IP is restricted but the port is not. After the client punches a hole from the inside out (`NatIP:NatPort -> A:P1`), machine A may use its other ports (`P2`) to contact the client, but machine B may not.
#
#
#
# #### Port Restricted Cone NAT
#
# Both the IP and the port are restricted. Returned data is accepted only from the peer the hole was punched to (`A:P1`); data sent from `A:P2` or `B:P1` will not be accepted by `NatIP:NatPort`.
#
#
# #### Symmetric NAT
#
# A symmetric NAT has the restrictions of a port restricted cone NAT, but more importantly, it maps each session with a different external host or port to a different external port (a different hole). Only requests coming from the same internal address (`IP:PORT`) and sent to the same external address (`X:x`) are mapped to the same external port on the NAT, i.e. share the same mapping.
#
# For example:
#
# 1. the client reaching `A:p1` takes the path: `Client --> NatIP:Pa1 --> A:P1`
# 2. the client reaching `A:p2` takes the path: `Client --> NatIP:Pa2 --> A:P2`
#
# (With the previous three NAT types, as long as the client stays the same, the "hole" left on the router does not change; with a symmetric NAT it does, because the port changes.)
#
#
# ### How to determine your own NAT type
#
#
# Why do we need to know our NAT type? It prepares us for the hole punching that comes later. The RFCs define a dedicated protocol for exactly this purpose (RFC 5389), called STUN (Session Traversal Utilities for NAT). Its algorithm outputs:
#
# + the public IP and port
# + whether a firewall is in place
# + whether we are behind a NAT, and the type of NAT
#
# We can use [pystun](https://github.com/jtriley/pystun) to check our own NAT; unfortunately the project is no longer maintained and stopped at Python 2.
import stun
nat_type, external_ip, external_port = stun.get_ip_info()
nat_type
external_ip
external_port
# ## "打洞"
#
#
# 既然有NAT守关,那么我们如何才能够直接和远端通信呢?这就需要所谓的`打洞`.
#
# > 问题也就归结为:有两个需要互联的`client A`和`client B`,如何让他们可以互联
#
# 方案:
#
# 1. A,B分别与stun server交互获得自己的NAT类型
#
# + A,B连接一个公网服务器(turn server,RFC5766),把自己的NAT发给turn server,此时turn server发现A和B想要互连,把对方的ip,port,NAT类型发给对方
# Client根据自身NAT类型做出相应的策略。
#
# + Client根据自身NAT类型做出相应的策略。
#
# 
# + if one side is behind a symmetric NAT
#
# Because the port is different for every connection, the other side cannot know which port the client behind the symmetric NAT will use next time. Fully P2P transfer is not possible (short of port prediction), so the TURN server has to act as a relay, i.e. every message is forwarded through the TURN server.
#
# + if one side is behind a full cone NAT
#
# The other side simply talks to the full cone side's public IP and port directly, and P2P communication is achieved.
#
# + if one side is behind a restricted NAT (either kind)
#
# The restricted side sends a "punching packet" to the other side, for example "punching...", and when the other side receives it, it replies with an agreed command, for example "end punching", and communication begins. The reason this works: for the restricted side to let packets from `IPA:portA` in, it must first send a packet to `IPA:portA`. P2P communication is achieved. (A minimal sketch of this punching exchange appears at the end of this section.)
#
#
#
# From the discussion above it would seem that a symmetric NAT cannot do P2P at all. In fact it can, but the cost is very high; the technique is called port prediction.
#
# The basic idea:
#
# A sends packets to every port of B (0~65535), so that A's router learns that packets from any of B's ports should be forwarded to A
# B sends packets to every port of A (0~65535), so that B's router learns that packets from any of A's ports should be forwarded to B
# and with that, the connection is established
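# The sketch below (hypothetical addresses; in a real application the peer's public `IP:port` would be obtained through the STUN/TURN exchange described above) shows the core of the hole-punching step for the cone-NAT cases: both sides keep sending UDP "punching" packets to each other's public address until one gets through.
# +
import socket

LOCAL_PORT = 50000                   # our local UDP port behind the NAT
PEER_ADDR = ("203.0.113.7", 50001)   # peer's public address (hypothetical, learned from the server)

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(("0.0.0.0", LOCAL_PORT))
sock.settimeout(1.0)

for _ in range(5):
    # Sending outward makes our NAT create a mapping toward the peer ("punches the hole")
    sock.sendto(b"punching...", PEER_ADDR)
    try:
        data, addr = sock.recvfrom(1024)  # once the peer has punched too, its packets arrive here
        print("received", data, "from", addr)
        break
    except socket.timeout:
        continue
# -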
# ## STUN
#
# STUN is a set of utilities designed specifically for NAT traversal: a client uses it to learn its public IP and port and the kind of NAT it sits behind, which is exactly the information the hole-punching flow above relies on.
# The content above draws on <http://lifeofzjs.com/blog/2014/07/19/how-p2p-in-symmetric-nat/>; thanks to the author for the excellent write-up.
| 异步socket编程/p2p结构.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Code Reuse
# Let’s put what we learned about code reuse all together.
# <br><br>
# First, let’s look back at **inheritance**. Run the following cell that defines a generic `Animal` class.
class Animal:
name = ""
category = ""
def __init__(self, name):
self.name = name
def set_category(self, category):
self.category = category
# What we have is not enough to do much -- yet. That’s where you come in.
# <br><br>
# In the next cell, define a `Turtle` class that inherits from the `Animal` class. Then go ahead and set its category. For instance, a turtle is generally considered a reptile. Although modern cladistics call this categorization into question, for purposes of this exercise we will say turtles are reptiles!
# Run the following cell to check whether you correctly defined your `Turtle` class and set its category to reptile.
print(Turtle.category)
# Was the output of the above cell reptile? If not, go back and edit your `Turtle` class making sure that it inherits from the `Animal` class and its category is properly set to reptile. Be sure to re-run that cell once you've finished your edits. Did you get it? If so, great!
# Next, let’s practice **composition** a little bit. This one will require a second type of `Animal` that is in the same category as the first. For example, since you already created a `Turtle` class, go ahead and create a `Snake` class. Don’t forget that it also inherits from the `Animal` class and that its category should be set to reptile.
# Now, let’s say we have a large variety of `Animal`s (such as turtles and snakes) in a Zoo. Below we have the `Zoo` class. We’re going to use it to organize our various `Animal`s. Remember, inheritance says a Turtle is an `Animal`, but a `Zoo` is not an `Animal` and an `Animal` is not a `Zoo` -- though they are related to one another.
# Fill in the blanks of the `Zoo` class below so that you can use **zoo.add_animal( )** to add instances of the `Animal` subclasses you created above. Once you’ve added them all, you should be able to use **zoo.total_of_category( )** to tell you exactly how many individual `Animal` types the `Zoo` has for each category! Be sure to run the cell once you've finished your edits.
# +
class Zoo:
def __init__(self):
self.current_animals = {}
def add_animal(self, animal):
self.current_animals[animal.name] = animal.category
def total_of_category(self, category):
result = 0
for animal in self.___.values():
if ___ == category:
result += 1
return result
zoo = Zoo()
# -
# Run the following cell to check whether you properly filled in the blanks of your `Zoo` class.
# +
turtle = Turtle("Turtle") #create an instance of the Turtle class
snake = Snake("Snake") #create an instance of the Snake class
zoo.add_animal(turtle)
zoo.add_animal(snake)
print(zoo.total_of_category("reptile")) #how many zoo animal types in the reptile category
# -
# Was the output of the above cell 2? If not, go back and edit the `Zoo` class making sure to fill in the blanks with the appropriate attributes. Be sure to re-run that cell once you've finished your edits. Did you get it? If so, perfect! You have successfully defined your `Turtle` and `Snake` subclasses as well as your `Zoo` class.
# <br><br>
# Do you see how this could be useful in a wide variety of circumstances?
# Okay, one last bit of practice before we move on. Let’s take another quick peek at **modules**. There are lots of useful modules for Python. Many of them are in the standard library. However, some modules are not loaded by default. So, before we can use any of the built-in methods of a module that is not in the standard library, the module must first be imported. Let’s review this by importing the `calendar` module.
# <br>
# Run the following cell that imports the `calendar` module.
import calendar
# One built-in method of the `calendar` module is the **weekday( )** method. The weekday( ) method returns the day of the week (0 is Monday) for a given year, month, and day passed into the function. The syntax is weekday(year, month, day) where the input parameters are all of number type. By importing the `calendar` module, the weekday( ) method becomes available for us to use.
# <br><br>
# In the following cell, fill in the blanks of the weekday( ) method with date arguments to return the day of the week for that date.
calendar.weekday(___,___,___)
# Did you get an integer representing the day of the week (0 is Monday) for the date arguments passed into the function? If not, go back and edit your input parameters. Once you output an integer representing the day of the week for the date arguments passed into weekday( ), you are all done with this notebook. Great work!
| Crash Course on Python/pygrams_notebooks/utf-8''C1M5L3_Code_Reuse.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# ## [[Stack Overflow] Gathering a sequence of unknown length in dask #255](https://github.com/coiled/dask-community/issues/255)
from time import sleep
from distributed import Client, get_client, worker_client, Queue
def f(x):
sleep(0.5)
return (x - 1)**3
def derivative(x):
sleep(1)
return 3 * (x - 1)**2
def newton_optimization(x, fval, dfdx):
if abs(fval) < 1e-10:
return x
with worker_client() as client:
x = x - fval / dfdx
# client = get_client()
fval = client.submit(f, x)
dfdx = client.submit(derivative, x)
next_step = client.submit(newton_optimization, x, fval, dfdx)
queue.put(next_step)
return x
client = Client()
client
queue = Queue()
task = client.submit(newton_optimization, 0, 1, 3)
task.result()
# + tags=[]
# Drain the queue of intermediate steps. Note that queue.get() blocks, so once the
# optimization has converged and nothing more is enqueued, this loop waits
# indefinitely and has to be interrupted by hand.
while True:
    future = queue.get()
    print(future.result())
# -
client.shutdown()
| notebooks/255_gather_unknown_length.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.7.10 ('lab1')
# language: python
# name: python3
# ---
# +
from torch import Size
from rewards import SingleReward, SizeReward
import pandas as pd
import numpy as np
from scipy.stats import norm, skewnorm
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
from rdkit import Chem
# -
drugs = pd.read_csv('../GraphDecomp/SmallDrug.csv',error_bad_lines=False,delimiter=';')
smiles_values = drugs['Smiles'].values
reward = SizeReward()
# +
class RewardProfiler():
def __init__(self,path):
drugs = pd.read_csv(path,error_bad_lines=False,delimiter=';')
smiles_values = drugs['Smiles']
self.mols = []
self.bad_smiles = []
        for smile in smiles_values:
            try:
                mol = Chem.MolFromSmiles(smile)
                # MolFromSmiles returns None (rather than raising) for SMILES it cannot parse
                if mol is None:
                    self.bad_smiles.append(smile)
                else:
                    self.mols.append(mol)
            except Exception:
                self.bad_smiles.append(smile)
        print(f'successfully read in {len(self.mols)} molecules, failed on {len(self.bad_smiles)}')
def profile(self,reward_module: SingleReward):
rewards = list(map(lambda x: reward_module.giveReward(x), self.mols))
ae, loce, scalee = skewnorm.fit(rewards)
mean = np.mean(rewards)
plt.figure()
plt.hist(rewards, bins=[i for i in range(20)], density=True, alpha=0.6, color='g')
xmin, xmax = plt.xlim()
x = np.linspace(xmin, xmax, 100)
p = skewnorm.pdf(x,ae, loce, scalee)
plt.plot(x, p, 'k', linewidth=2)
plt.xlabel('Reward Value')
plt.ylabel('Probability')
plt.title(f'Histogram of {reward_module.name()} Mean: {mean:.3f}')
rewardProfiler = RewardProfiler('../GraphDecomp/SmallDrug.csv')
# -
rewardProfiler.profile(SizeReward())
| CLEAN/Rewards/reward_profiling.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Import packages
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
# ## Data
mnist_dataset, mnist_info = tfds.load(name='mnist', with_info=True, as_supervised=True)
# +
mnist_train, mnist_test = mnist_dataset['train'], mnist_dataset['test']
# either count the # of train samples or use the mnist_info
num_validation_samples = 0.1 * mnist_info.splits['train'].num_examples
num_validation_samples = tf.cast(num_validation_samples, tf.int64)
num_test_samples = mnist_info.splits['test'].num_examples
num_test_samples = tf.cast(num_test_samples, tf.int64)
# dataset.map(*function*) applies a custom transformation to a given dataset. It takes as input a function which determines the transformation
def scale(image, label):
image = tf.cast(image, tf.float32)
image /= 255.
return image, label
scaled_train_and_validation_data = mnist_train.map(scale)
test_data = mnist_test.map(scale)
# if BUFFER_SIZE is 1, no shuffling will happen
# if BUFFER_SIZE >= num_samples, shuffling will happen at once (uniformly)
# if 1 < BUFFER_SIZE < num_samples, we will be optimizing the computational power
BUFFER_SIZE = 10000
shuffled_train_and_validation_data = scaled_train_and_validation_data.shuffle(BUFFER_SIZE)
validation_data = shuffled_train_and_validation_data.take(num_validation_samples)
train_data = shuffled_train_and_validation_data.skip(num_validation_samples)
BATCH_SIZE = 100 # 1 #10000 # 100
train_data = train_data.batch(BATCH_SIZE)
validation_data = validation_data.batch(num_validation_samples)
test_data = test_data.batch(num_test_samples)
validation_inputs, validation_targets = next(iter(validation_data))
# -
# ## Model
#
# outline the model
# +
input_size = 784
output_size = 10
hidden_layer_size = 200 #100 #50
model = tf.keras.Sequential([
tf.keras.layers.Flatten(input_shape=(28,28,1)),
tf.keras.layers.Dense(hidden_layer_size, activation='relu'), #'sigmoid'), #'relu'), # 1st hidden layer
tf.keras.layers.Dense(hidden_layer_size, activation='tanh'), # 'tanh'), #'sigmoid'), #'relu'), # 2nd hidden layer
tf.keras.layers.Dense(hidden_layer_size, activation='relu'),
tf.keras.layers.Dense(hidden_layer_size, activation='tanh'), # 'tanh'),
tf.keras.layers.Dense(hidden_layer_size, activation='relu'),
tf.keras.layers.Dense(output_size, activation='softmax')
])
# -
# ## Choose the optimizer and loss function
# binary_crossentropy: binary encoding
# categorical_crossentropy: expects targets have been one-hot encoded
# sparse_categorical_crossentropy: applies one-hot encoding
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
# ## Training
# +
# At the beginning of each epoch, the training loss will be set to 0
# The algorithm will iterate over a present number of batches, all from train_data
# The weights and biases will be updates as many times as there are batches
# We will get a value for the loss function, indicating how the training is going
# We will also see a training accuracy
# At the end of the epoch, the algorithm will forward propagate the whole validation set
# When we reach the maximum number of epochs the training will be over
NUM_EPOCHS = 5
STEPS = num_validation_samples / BATCH_SIZE
model.fit(train_data, epochs = NUM_EPOCHS, validation_data=(validation_inputs, validation_targets), validation_steps=STEPS, verbose=2)
# -
# ## Test the model
test_loss, test_accuracy = model.evaluate(test_data)
print('Test Loss: {0:.2f}. Test Accuracy: {1:.2f}%'.format(test_loss, test_accuracy*100))
| MNIST.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import json
import pandas as pd
import numpy as np
import re
from sqlalchemy import create_engine
import psycopg2
from config import db_password
import time
# -
# Add the clean movie function that takes in the argument, "movie".
def clean_movie(movie):
movie = dict(movie) #create a non-destructive copy
alt_titles = {}
# combine alternate titles into one list
for key in ['Also known as','Arabic','Cantonese','Chinese','French',
'Hangul','Hebrew','Hepburn','Japanese','Literally',
'Mandarin','McCune-Reischauer','Original title','Polish',
'Revised Romanization','Romanized','Russian',
'Simplified','Traditional','Yiddish']:
if key in movie:
alt_titles[key] = movie[key]
movie.pop(key)
if len(alt_titles) > 0:
movie['alt_titles'] = alt_titles
# merge column names
def change_column_name(old_name, new_name):
if old_name in movie:
movie[new_name] = movie.pop(old_name)
change_column_name('Adaptation by', 'Writer(s)')
change_column_name('Country of origin', 'Country')
change_column_name('Directed by', 'Director')
change_column_name('Distributed by', 'Distributor')
change_column_name('Edited by', 'Editor(s)')
change_column_name('Length', 'Running time')
change_column_name('Original release', 'Release date')
change_column_name('Music by', 'Composer(s)')
change_column_name('Produced by', 'Producer(s)')
change_column_name('Producer', 'Producer(s)')
change_column_name('Productioncompanies ', 'Production company(s)')
change_column_name('Productioncompany ', 'Production company(s)')
change_column_name('Released', 'Release Date')
change_column_name('Release Date', 'Release date')
change_column_name('Screen story by', 'Writer(s)')
change_column_name('Screenplay by', 'Writer(s)')
change_column_name('Story by', 'Writer(s)')
change_column_name('Theme music composer', 'Composer(s)')
change_column_name('Written by', 'Writer(s)')
return movie
# +
# 1 Add the function that takes in three arguments;
# Wikipedia data, Kaggle metadata, and MovieLens rating data (from Kaggle)
def extract_transform_load(wikipedia, kaggle, ratings):
# Read in the kaggle metadata and MovieLens ratings CSV files as Pandas DataFrames.
kaggle_metadata = pd.read_csv(kaggle, low_memory=False)
ratings = pd.read_csv(ratings, low_memory=False)
# Open and read the Wikipedia data JSON file.
with open(wikipedia, mode='r') as file:
wiki_movies_raw = json.load(file)
# Write a list comprehension to filter out TV shows.
wiki_movies = [movie for movie in wiki_movies_raw
if 'No. of episodes' not in movie]
# Write a list comprehension to iterate through the cleaned wiki movies list
# and call the clean_movie function on each movie.
clean_movies = [clean_movie(movie) for movie in wiki_movies]
# Read in the cleaned movies list from Step 4 as a DataFrame.
wiki_movies_df = pd.DataFrame(clean_movies)
# Write a try-except block to catch errors while extracting the IMDb ID using a regular expression string and
# dropping any imdb_id duplicates. If there is an error, capture and print the exception.
try:
wiki_movies_df['imdb_id'] = wiki_movies_df['imdb_link'].str.extract(r'(tt\d{7})')
wiki_movies_df.drop_duplicates(subset="imdb_id", inplace=True)
except Exception:
print(Exception)
# Write a list comprehension to keep the columns that don't have null values from the wiki_movies_df DataFrame.
wiki_columns_to_keep = [column for column in wiki_movies_df.columns if wiki_movies_df[column].isnull().sum() < len(wiki_movies_df) * 0.9]
wiki_movies_df = wiki_movies_df[wiki_columns_to_keep]
# Create a variable that will hold the non-null values from the “Box office” column.
box_office = wiki_movies_df['Box office'].dropna()
# Convert the box office data created in Step 8 to string values using the lambda and join functions.
box_office = box_office.apply(lambda x: ' '.join(x) if type(x) == list else x)
# Write a regular expression to match the six elements of "form_one" of the box office data.
form_one = r'\$\s*\d+\.?\d*\s*[mb]illi?on'
# Write a regular expression to match the three elements of "form_two" of the box office data.
form_two = r'\$\s*\d{1,3}(?:[,\.]\d{3})+(?!\s[mb]illion)'
# Add the parse_dollars function.
def parse_dollars(s):
# if s is not a string, return NaN
if type(s) != str:
return np.nan
# if input is of the form $###.# million
if re.match(r'\$\s*\d+\.?\d*\s*milli?on', s, flags=re.IGNORECASE):
# remove dollar sign and " million"
s = re.sub('\$|\s|[a-zA-Z]', '', s)
# convert to float and multiply by a million
value = float(s) * 10**6
# return value
return value
# if input is of the form $###.# billion
elif re.match(r'\$\s*\d+\.?\d*\s*billi?on', s, flags=re.IGNORECASE):
# remove dollar sign and " billion"
s = re.sub('\$|\s|[a-zA-Z]', '', s)
# convert to float and multiply by a billion
value = float(s) * 10**9
# return value
return value
# if input is of the form $###,###,###
elif re.match(r'\$\s*\d{1,3}(?:[,\.]\d{3})+(?!\s[mb]illion)', s, flags=re.IGNORECASE):
# remove dollar sign and commas
s = re.sub('\$|,','',s)
# convert to float
value = float(s)
# return value
return value
# otherwise, return NaN
else:
return np.nan
# Clean the box office column in the wiki_movies_df DataFrame.
wiki_movies_df['box_office'] = box_office.str.extract(f'({form_one}|{form_two})',flags=re.IGNORECASE)[0].apply(parse_dollars)
wiki_movies_df.drop('Box office', axis=1, inplace=True)
# Clean the budget column in the wiki_movies_df DataFrame.
budget = wiki_movies_df["Budget"].dropna()
budget = budget.map(lambda x: ' '.join(x) if type(x) == list else x)
budget = budget.str.replace(r'\$.*[-—–](?![a-z])', '$', regex=True)
budget = budget.str.replace(r'\[\d+\]\s*', '')
wiki_movies_df['budget'] = budget.str.extract(f'({form_one}|{form_two})', flags=re.IGNORECASE)[0].apply(parse_dollars)
# Clean the release date column in the wiki_movies_df DataFrame.
release_date = wiki_movies_df['Release date'].dropna().apply(lambda x: ' '.join(x) if type(x) == list else x)
date_form_one = r'(?:January|February|March|April|May|June|July|August|September|October|November|December)\s[123]\d,\s\d{4}'
date_form_two = r'\d{4}.[01]\d.[123]\d'
date_form_three = r'(?:January|February|March|April|May|June|July|August|September|October|November|December)\s\d{4}'
date_form_four = r'\d{4}'
wiki_movies_df['release_date'] = pd.to_datetime(release_date.str.extract(f'({date_form_one}|{date_form_two}|{date_form_three}|{date_form_four})')[0], infer_datetime_format=True)
# Clean the running time column in the wiki_movies_df DataFrame.
running_time = wiki_movies_df['Running time'].dropna().apply(lambda x: ' '.join(x) if type(x) == list else x)
running_time_extract = running_time.str.extract(r'(\d+)\s*ho?u?r?s?\s*(\d*)|(\d+)\s*m')
running_time_extract = running_time_extract.apply(lambda col: pd.to_numeric(col, errors='coerce')).fillna(0)
wiki_movies_df['running_time'] = running_time_extract.apply(lambda row: row[0]*60 + row[1] if row[2] == 0 else row[2], axis=1)
wiki_movies_df.drop('Running time', axis=1, inplace=True)
# 2. Clean the Kaggle metadata.
kaggle_metadata = kaggle_metadata[kaggle_metadata['adult'] == 'False'].drop('adult',axis='columns')
kaggle_metadata['video'] = kaggle_metadata['video'] == 'True'
kaggle_metadata['budget'] = kaggle_metadata['budget'].astype(int)
kaggle_metadata['id'] = pd.to_numeric(kaggle_metadata['id'], errors='raise')
kaggle_metadata['popularity'] = pd.to_numeric(kaggle_metadata['popularity'], errors='raise')
kaggle_metadata['release_date'] = pd.to_datetime(kaggle_metadata['release_date'])
# 3. Merged the two DataFrames into the movies DataFrame.
movies_df = pd.merge(wiki_movies_df, kaggle_metadata, on='imdb_id', suffixes=['_wiki','_kaggle'])
# 4. Drop unnecessary columns from the merged DataFrame.
movies_df.drop(columns=['title_wiki','release_date_wiki','Language','Production company(s)'], inplace=True)
# 5. Add in the function to fill in the missing Kaggle data.
def fill_missing_kaggle_data(df, kaggle_column, wiki_column):
df[kaggle_column] = df.apply(
lambda row: row[wiki_column] if row[kaggle_column] == 0 else row[kaggle_column], axis=1)
df.drop(columns=wiki_column, inplace=True)
# 6. Call the function in Step 5 with the DataFrame and columns as the arguments.
fill_missing_kaggle_data(movies_df, 'runtime', 'running_time')
fill_missing_kaggle_data(movies_df, 'budget_kaggle', 'budget_wiki')
fill_missing_kaggle_data(movies_df, 'revenue', 'box_office')
# 7. Filter the movies DataFrame for specific columns.
for col in movies_df.columns:
lists_to_tuples = lambda x: tuple(x) if type(x) == list else x
value_counts = movies_df[col].apply(lists_to_tuples).value_counts(dropna=False)
num_values = len(value_counts)
if num_values == 1:
print(col)
movies_df['video'].value_counts(dropna=False)
# 8. Rename the columns in the movies DataFrame.
movies_df = movies_df.loc[:, ['imdb_id','id','title_kaggle','original_title','tagline','belongs_to_collection','url','imdb_link',
'runtime','budget_kaggle','revenue','release_date_kaggle','popularity','vote_average','vote_count',
'genres','original_language','overview','spoken_languages','Country',
'production_companies','production_countries','Distributor',
'Producer(s)','Director','Starring','Cinematography','Editor(s)','Writer(s)','Composer(s)','Based on'
]]
movies_df.rename({'id':'kaggle_id',
'title_kaggle':'title',
'url':'wikipedia_url',
'budget_kaggle':'budget',
'release_date_kaggle':'release_date',
'Country':'country',
'Distributor':'distributor',
'Producer(s)':'producers',
'Director':'director',
'Starring':'starring',
'Cinematography':'cinematography',
'Editor(s)':'editors',
'Writer(s)':'writers',
'Composer(s)':'composers',
'Based on':'based_on'
}, axis='columns', inplace=True)
# 9. Transform and merge the ratings DataFrame.
#transform
rating_counts = ratings.groupby(['movieId','rating'], as_index=False).count() \
.rename({'userId':'count'}, axis=1) \
.pivot(index='movieId',columns='rating', values='count')
rating_counts.columns = ['rating_' + str(col) for col in rating_counts.columns]
#merge
movies_with_ratings_df = pd.merge(movies_df, rating_counts, left_on='kaggle_id', right_index=True, how='left')
movies_with_ratings_df[rating_counts.columns] = movies_with_ratings_df[rating_counts.columns].fillna(0)
print(len(movies_df))
# create database engine to communicate with SQL server
# db_string = f"postgresql://postgres:{db_password}@127.0.0.1:5432/movie_data"
# # connection string for local server
# engine = create_engine(db_string)
# # load movie data into database
# movies_df.to_sql(name='movies', con=engine, if_exists='replace')
# # load ratings data into database
# # import raw ratings data
# rows_imported = 0
# # get the start_time from time.time()
# start_time = time.time()
# for data in pd.read_csv(f'{file_dir}ratings.csv', chunksize=1000000):
# print(f'importing rows {rows_imported} to {rows_imported + len(data)}...', end='')
# data.to_sql(name='ratings', con=engine, if_exists='append')
# rows_imported += len(data)
# # add elapsed time to final print out
# print(f'Done. {time.time() - start_time} total seconds elapsed')
# -
# 10. Create the path to your file directory and variables for the three files.
file_dir = '/Users/Tanzich/Desktop/DataScienceLocal/Movies-ETL/Class/'
# The Wikipedia data
wiki_file = f'{file_dir}/wikipedia-movies.json'
# The Kaggle metadata
kaggle_file = f'{file_dir}/movies_metadata.csv'
# The MovieLens rating data.
ratings_file = f'{file_dir}/ratings.csv'
# 11. Call the function created in Step 1 with the three file-path variables.
extract_transform_load(wiki_file, kaggle_file, ratings_file)
| ETL_create_database.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="UQWlGqS0o1ta"
# ## Importing Libraries and Data
# + colab={"base_uri": "https://localhost:8080/", "height": 139} colab_type="code" id="3cRyIWaC_Dhi" outputId="4e282941-fafb-47e8-e5a0-27ca0c23a46e"
# Importing the required libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.linear_model import LogisticRegression
from sklearn import metrics
from sklearn.metrics import confusion_matrix
from sklearn.decomposition import PCA
from imblearn.over_sampling import SMOTE
import xgboost as xgb
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import RandomForestClassifier
import warnings
warnings.filterwarnings('ignore')
# + colab={} colab_type="code" id="NcFutNt08nFV"
# Reading the dataset
churn_data = pd.read_csv("telecom_churn_data.csv")
# + colab={} colab_type="code" id="t3gdGJL5_NKe"
# Settings for viewing the entire columns
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="fE8iEgyMEGYL" outputId="bab38952-262b-4881-889a-70142b68c03a"
churn_data.shape
# + colab={"base_uri": "https://localhost:8080/", "height": 379} colab_type="code" id="ik8ClE-u_VLL" outputId="4d45fcf5-0e00-4d7b-d301-afe7d0f85519"
# Checking the data
churn_data.head(10)
# + [markdown] colab_type="text" id="aojfNxSc_oSr"
# ## Data Cleaning
# + [markdown] colab_type="text" id="Gv7J-qtmUf8z"
# #### `Missing value percentage`
# + colab={} colab_type="code" id="NKIvxKLuD3a1"
def nullPercentage(churn_data):
return round((churn_data.isnull().sum()/len(churn_data)*100),2).sort_values(ascending=False)
# + colab={"base_uri": "https://localhost:8080/", "height": 0} colab_type="code" id="EAqpriEg_d25" outputId="22c19e23-3b31-47d8-e37d-29d2c6a69edf"
percentage_ = nullPercentage(churn_data)
percentage_
# + [markdown] colab_type="text" id="PB07YA5EVK3x"
# #### `Splitting the categorical and continuous columns`
# + colab={"base_uri": "https://localhost:8080/", "height": 0} colab_type="code" id="N05bi0NuBkr0" outputId="19dd34f3-8f62-4c17-d9b0-2495ba9d2e4d"
# Checking the null data again
print(nullPercentage(churn_data))
# + colab={"base_uri": "https://localhost:8080/", "height": 0} colab_type="code" id="QWIIiCb6Bvv7" outputId="c9045bd1-065c-4e4b-9325-1f4d3d4d7496"
# Categorical variables
categorical_columns = churn_data.select_dtypes(exclude=['int','float']).columns
categorical_columns.values
# + colab={"base_uri": "https://localhost:8080/", "height": 0} colab_type="code" id="f0OjGHM9B06Z" outputId="7ce02702-7e3d-40cd-ad6c-9164502ed346"
# Continuous variables
continuous_columns = churn_data.select_dtypes(include=['int','float']).columns
continuous_columns.values
# + colab={"base_uri": "https://localhost:8080/", "height": 0} colab_type="code" id="XsJxDRbRvJw9" outputId="242f5246-cc87-4825-d028-adb7043263fc"
# Checking if there are any non_standard missing values
columns_ = churn_data.columns
for col in columns_:
print ('{} : Unique values {}\n'.format(col, churn_data[col].unique()))
# + colab={} colab_type="code" id="0Uq2Ycs6CTTO"
# Converting the categorical columns into datetime type
for col in categorical_columns.values:
churn_data[col] = pd.to_datetime(churn_data[col])
# + colab={} colab_type="code" id="lh9zI1pGPaB_"
# Converting the categorical features to object type.
churn_data[['mobile_number','circle_id']] = churn_data[['mobile_number','circle_id']].astype(object)
# + [markdown] colab_type="text" id="Qq08InrQ2drT"
# #### `Imputing the meaningfull missing data by 0`
# + colab={"base_uri": "https://localhost:8080/", "height": 0} colab_type="code" id="tSw2g9Xm47vs" outputId="efd5cf6b-555f-40d2-d424-94e8dd2617df"
# Checking the null values in total recharge amount.
churn_data[['total_rech_amt_6','total_rech_amt_7','total_rech_amt_8','total_rech_amt_9']].isnull().sum()
# + colab={"base_uri": "https://localhost:8080/", "height": 0} colab_type="code" id="6rA8nVrJ4-BY" outputId="4e172460-b96e-447b-a1ff-e076b6b7eca9"
# Checking the null values in average reacharge data and total recharge data.
churn_data[['av_rech_amt_data_6','av_rech_amt_data_7','av_rech_amt_data_8','av_rech_amt_data_9','total_rech_data_6','total_rech_data_7','total_rech_data_8','total_rech_data_9']].isnull().sum()
# + colab={} colab_type="code" id="XzIStsjk5cDH"
# Imputing the recharge data columns with 0.
churn_data[['av_rech_amt_data_6','av_rech_amt_data_7','av_rech_amt_data_8','av_rech_amt_data_9']] = churn_data[['av_rech_amt_data_6','av_rech_amt_data_7','av_rech_amt_data_8','av_rech_amt_data_9']].fillna(0)
churn_data[['total_rech_data_6','total_rech_data_7','total_rech_data_8','total_rech_data_9']] = churn_data[['total_rech_data_6','total_rech_data_7','total_rech_data_8','total_rech_data_9']].fillna(0)
# + [markdown] colab_type="text" id="NXintnzmUyhc"
# #### `Dropping the columns which has high percentage of missing values`
# + colab={"base_uri": "https://localhost:8080/", "height": 0} colab_type="code" id="UUCJv7RF_wYO" outputId="72a80df9-288d-4edf-ab00-8c748f424ab8"
# Function to create null_percentage, columns dataframe
def createNullDict(data):
columns_ = data.columns
null_percentage_ = round((data.isnull().sum()/len(data)*100),2)
dataframe = pd.DataFrame({'Columns':columns_,'Percentage':null_percentage_})
dataframe.reset_index(drop=True,inplace=True)
return dataframe
    # Dropping the columns which have more than 20% missing values, except the 9th-month columns
    # creating a dataframe of columns and their respective missing percentages
null_dataframe = createNullDict(churn_data)
columns_to_drop = null_dataframe.loc[(null_dataframe['Columns'].str.contains('_9')==False) & (null_dataframe['Percentage']>20)]['Columns']
# Printing the value which are dropped
print (columns_to_drop)
# Dropping the columns
churn_data.drop(columns_to_drop, axis=1, inplace=True)
# + colab={"base_uri": "https://localhost:8080/", "height": 0} colab_type="code" id="f5zfEmyhBV-f" outputId="59c8538c-c033-4d38-f6a3-56e74376d78c"
# Checking the dataframe after dropping
print('Shape is',churn_data.shape)
churn_data.head(10)
# + [markdown] colab_type="text" id="rgoT9TxKVZ63"
# #### `Checking the Discrete values`
# + colab={"base_uri": "https://localhost:8080/", "height": 0} colab_type="code" id="zr4XScM5OG_r" outputId="e1cd8d84-6efc-49f1-b95d-bfe21011b39c"
churn_data.describe(percentiles=[0.25,0.5,0.75,0.90,0.95,0.99])
# + colab={"base_uri": "https://localhost:8080/", "height": 0} colab_type="code" id="2rJVnd2XPD7L" outputId="d4eb10f2-f292-4fa6-e1c6-428bfdd91128"
print(nullPercentage(churn_data))
# + colab={"base_uri": "https://localhost:8080/", "height": 0} colab_type="code" id="XN1_OhyNOwPf" outputId="2eacbc52-3f84-43dc-bb93-901a0d3a190b"
# Selecting the discrete variables (sachet, aon, and monthly-pack counts)
dataFrame = createNullDict(churn_data)
discrete_variables = dataFrame.loc[(dataFrame['Columns'].str.contains('sachet')) | (dataFrame['Columns'].str.contains('aon')) | (dataFrame['Columns'].str.contains('monthly')) ]['Columns']
discrete_variables.values
for feature in discrete_variables.values:
index = dataFrame[dataFrame['Columns'] == feature].index.values[0]
print ('{} Missing value {}'.format(feature, dataFrame._get_value(index,'Percentage')))
# + [markdown] colab_type="text" id="QsUd9ScSsfS9"
# `As there are no missing discrete variables, we will impute the continuous variables with their mean values`
# + [markdown] colab_type="text" id="hZlN-S-GWZvo"
# #### `Imputing the missing values`
# + colab={"base_uri": "https://localhost:8080/", "height": 0} colab_type="code" id="mJZi_KkWUoH1" outputId="f06bbae8-f1b7-4c3c-c5bf-7b8d54647c2e"
print(nullPercentage(churn_data))
# + colab={} colab_type="code" id="h-iO4w4oKrXs"
# Creating the list of columns which has to be imputed
columns_to_impute = []
for col in churn_data.columns:
if 'ic' in col or 'og' in col or '_mou' in col:
columns_to_impute.append(col)
# + colab={} colab_type="code" id="az3ACgiD1oRE"
# Imputing the columns with mean
for col in columns_to_impute:
churn_data[col].fillna(churn_data[col].mean(),inplace=True)
# + colab={"base_uri": "https://localhost:8080/", "height": 0} colab_type="code" id="Dh4o8gg43RJS" outputId="7e474621-c3c6-400d-cb54-1feb2947fde2"
# Checking the null percentage
nullPercentage(churn_data)
# + [markdown] colab_type="text" id="vNy35RZSH4PZ"
# ## Filtering the High Value Customers
# High-value customers are those whose average recharge amount over the good phase (months 6 and 7) is greater than or equal to the 70th percentile across all customers
# + colab={"base_uri": "https://localhost:8080/", "height": 0} colab_type="code" id="Lc1HrUdKH7Dn" outputId="a25e583f-0e13-4dc3-da5e-d40370c4b710"
[ col for col in churn_data.columns if 'date' in col]
# + colab={"base_uri": "https://localhost:8080/", "height": 0} colab_type="code" id="jjlaE0vI1_wz" outputId="073f3e79-534e-4d19-cb77-00576b40e85e"
[col for col in churn_data.columns if 'rech' in col]
# + colab={} colab_type="code" id="Ji5ycdrAIu2f"
# Deriving the total_recharge_amt columns for months 6 to 9 (months 6 and 7 form the good phase).
churn_data['total_recharge_amt_6'] = churn_data['av_rech_amt_data_6'] + churn_data['total_rech_num_6']
churn_data['total_recharge_amt_7'] = churn_data['av_rech_amt_data_7'] + churn_data['total_rech_num_7']
churn_data['total_recharge_amt_8'] = churn_data['av_rech_amt_data_8'] + churn_data['total_rech_num_8']
churn_data['total_recharge_amt_9'] = churn_data['av_rech_amt_data_9'] + churn_data['total_rech_num_9']
# + colab={} colab_type="code" id="MYxlzh3vpumN"
# Dropping the columns which are used to derive a new column
churn_data.drop(['av_rech_amt_data_6','av_rech_amt_data_7','total_rech_amt_6','total_rech_amt_7'],axis=1,inplace=True)
# + colab={} colab_type="code" id="6m5M5npTFn7Q"
# Deriving the average of 6th and 7th month recharge column
churn_data['average_amt_6n7_month'] = (churn_data['total_recharge_amt_6']+churn_data['total_recharge_amt_7'])/2
# + colab={"base_uri": "https://localhost:8080/", "height": 0} colab_type="code" id="HVccRIsgHEaQ" outputId="85ad0755-3488-4337-bf93-5c5f159ab69e"
# Checking the derived column
churn_data['average_amt_6n7_month'].head()
# + colab={"base_uri": "https://localhost:8080/", "height": 0} colab_type="code" id="bRz6itRGHW-p" outputId="58c26669-a898-46cb-9947-6d0fe4e7253c"
# Calculating the 70th percentile
percentile = churn_data['average_amt_6n7_month'].quantile(0.7)
print(f'70th percentile is {percentile}')
# + colab={} colab_type="code" id="rhrd33wjHxtk"
# Retaining the data of high-value customers, i.e. those at or above the 70th percentile
churn_data_new = churn_data[churn_data['average_amt_6n7_month']>=percentile]
# + colab={"base_uri": "https://localhost:8080/", "height": 0} colab_type="code" id="aapcsjsmJKDX" outputId="0c549a11-f50e-473a-f495-3e5231fa8cd7"
churn_data_new.head(10)
# + colab={"base_uri": "https://localhost:8080/", "height": 0} colab_type="code" id="oaK8YGnlKOCj" outputId="bf7d9e2f-773e-45b7-f44b-a841440724cb"
nullPercentage(churn_data_new)
# + [markdown] colab_type="text" id="Qow0T7zQ7tvo"
# ## Deriving the churn column
# + colab={"base_uri": "https://localhost:8080/", "height": 102} colab_type="code" id="x9gvR38R7zMT" outputId="1e63f8ed-74a9-4c8f-bab2-69855935dad0"
# Checking whether there are any null values before deriving the churn column.
churn_data_new[['total_ic_mou_9','total_og_mou_9','vol_2g_mb_9','vol_3g_mb_9']].isnull().sum()
# + colab={} colab_type="code" id="GQlhZR0M8IUH"
churn_data_new['churned_column'] = np.where(churn_data_new[['total_ic_mou_9','total_og_mou_9','vol_2g_mb_9','vol_3g_mb_9']].sum(axis=1) == 0,1,0)
# + colab={"base_uri": "https://localhost:8080/", "height": 359} colab_type="code" id="0Bu5Ir9k8TeV" outputId="0ce1b478-4f98-4e9e-9878-d42bfdcb7254"
churn_data_new[['total_ic_mou_9','total_og_mou_9','vol_2g_mb_9','vol_3g_mb_9','churned_column']].head(10)
# + colab={} colab_type="code" id="j6rj6FM8_fdF"
# + [markdown] colab_type="text" id="x4iNtwp8A_17"
# #### `Dropping the 9th month data`
# + colab={} colab_type="code" id="nPB09vIwBD3g"
# Month 9 is the churn phase, so we drop all of its columns
dropping_columns = [col for col in churn_data_new.columns if '_9' in col]
# + colab={} colab_type="code" id="GbjSTMz3BPKZ"
churn_data_new.drop(dropping_columns,axis=1,inplace=True)
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="F9FW6QQEBrOx" outputId="98d506ce-e7bc-480b-94fb-3b37b99a1e8e"
churn_data_new.shape
# + colab={"base_uri": "https://localhost:8080/", "height": 224} colab_type="code" id="oPNacUffBt35" outputId="e82efee7-53f0-4672-a546-67c79c6827ce"
churn_data_new.head(5)
# + colab={} colab_type="code" id="ae7JtTRxBxBA"
# + [markdown] colab_type="text" id="XA4ivaiM6rN5"
# ## Data PreProcessing
# + [markdown] colab_type="text" id="oQkLMuXs7lgN"
# #### `Deriving the variable`
# + colab={} colab_type="code" id="-7MzdrKy7s7l"
# Converting the AON(Age on Network) from days to months
churn_data_new['aon_month'] = churn_data_new['aon']/30
# + colab={"base_uri": "https://localhost:8080/", "height": 204} colab_type="code" id="JAXYpYn_-YfK" outputId="07799e90-3361-4957-b3b7-32f3ce94c8a5"
churn_data_new[['aon_month','aon']].head()
# + colab={} colab_type="code" id="RRVeDjp_-y3p"
# Dropping the aon column as we have derived a column from it.
churn_data_new.drop('aon',axis=1,inplace=True)
# + colab={} colab_type="code" id="39--rVbK3Oa7"
# churn_data_new[''] = churn_data_new['total_recharge_amt_6']-churn_data_new['total_recharge_amt_8']
# + [markdown] colab_type="text" id="lcjnXJLUWNLO"
# #### `Outliers Treatment`
# + colab={"base_uri": "https://localhost:8080/", "height": 410} colab_type="code" id="VgtxQj-KOAUm" outputId="c0885d0b-5bae-4fff-c6be-3465731354e3"
churn_data_new.describe(percentiles=[0.25,0.5,0.75,0.90,0.95,0.99])
# + colab={} colab_type="code" id="qbg1tC1zHKX0"
# Excluding the columns which do not have outliers
no_outliers = ['loc_og_t2o_mou','std_og_t2o_mou','loc_ic_t2o_mou','std_og_t2c_mou_6','std_og_t2c_mou_7','std_og_t2c_mou_8','std_ic_t2o_mou_6','std_ic_t2o_mou_7','std_ic_t2o_mou_8','mobile_number','circle_id','churned_column','last_date_of_month_6','last_date_of_month_7','last_date_of_month_8']
# + colab={"base_uri": "https://localhost:8080/", "height": 317} colab_type="code" id="ETfW9wzRAqpT" outputId="efbae6ed-b1f5-47e9-8ec0-a0c02fdee13c"
churn_data_new[no_outliers].describe()
# + colab={} colab_type="code" id="OiRkSrnAuM2p"
columns_to_treat = []
for col in churn_data_new.columns:
if col not in discrete_variables.values and col not in no_outliers and 'date' not in col:
columns_to_treat.append(col)
# + colab={"base_uri": "https://localhost:8080/", "height": 317} colab_type="code" id="NlHahNIcBRx9" outputId="d0629ffd-319d-44b3-d09b-eaf75215fd41"
churn_data_new[columns_to_treat].describe()
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" id="I4nbMSgu2ePB" outputId="ad1c60f2-9dfb-46cb-c76f-e521a89494f5"
# Checking the percentage of outliers
for col in columns_to_treat:
q = churn_data_new[col].quantile(0.99)
percentage = churn_data_new[ churn_data_new[col] >= q ].shape[0]/churn_data_new.shape[0]*100
print (f"Column: {col} Percentage:{percentage} ")
# + [markdown] colab_type="text" id="srsDrm7jOilt"
# `Log-transforming the skewed columns, since the percentage of outliers is very small`
# + colab={"base_uri": "https://localhost:8080/", "height": 410} colab_type="code" id="KrZfYf-TnNy7" outputId="55a7e394-eccd-44bc-b53a-1be4b45933c5"
# Checking the data distribution before the transformation
churn_data_new.describe(percentiles=[0.25,0.5,0.75,0.90,0.95,0.99])
# + colab={} colab_type="code" id="Yp3Yt110-NgL"
# Replacing the negative arpu values with zero
cols = ['arpu_6','arpu_7','arpu_8']
for col in cols:
    churn_data_new.loc[churn_data_new[col] < 0, col] = 0
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" id="rjuxKUwF4Dke" outputId="4aae672d-5345-4727-dbe8-2d264ee1d0f1"
churn_data_new[columns_to_treat].dtypes
# + colab={} colab_type="code" id="u-MMxErhMod-"
# Applying the log transformation to treat the outliers
churn_data_new[columns_to_treat] = churn_data_new[columns_to_treat].apply(lambda x:np.log(1+x))
# + colab={"base_uri": "https://localhost:8080/", "height": 410} colab_type="code" id="E90Uf31E5XA0" outputId="04887898-1a9a-4de7-8c73-27901b528072"
churn_data_new.describe(percentiles=[0.25,0.5,0.75,0.90,0.95,0.99])
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="N4Q3zqT9JIHJ" outputId="3e9acb35-4e8b-42a5-b2b5-7bd96b3badb1"
print(churn_data_new.shape)
# + [markdown] colab_type="text" id="Bg6gIXuFyKiz"
# #### `Graphs`
# + colab={"base_uri": "https://localhost:8080/", "height": 460} colab_type="code" id="PVLirhg-6pHz" outputId="9a3d6dbc-96a4-486a-b0b1-4d9ac01b2c9f"
plt.figure(figsize=(10,10))
f, axes = plt.subplots(2, 2, figsize=(7, 7), sharex=True)
sns.distplot(churn_data_new['total_recharge_amt_6'],bins=50,rug=False,kde=False,ax=axes[0, 0])
sns.distplot(churn_data_new['total_recharge_amt_7'],bins=50,rug=False,kde=False,ax=axes[0, 1])
sns.distplot(churn_data_new['total_recharge_amt_8'],bins=50,rug=False,kde=False,ax=axes[1, 0])
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" id="JX0nTrERK-X5" outputId="c3db8565-5766-485e-add4-7338e81b3224"
# Checking the correlation between churn and the other columns
plt.figure(figsize=(30,30))
churn_data_new.corr()['churned_column'].sort_values(ascending = False).plot(kind='bar')
# + colab={"base_uri": "https://localhost:8080/", "height": 297} colab_type="code" id="ZIrW9euoMHwC" outputId="b6c4dc6c-8bc9-4d18-9f51-326bdd6e3265"
sns.boxplot(x=churn_data_new['churned_column'], y=churn_data_new['aon_month'])
# + [markdown] colab_type="text" id="RgULgWmtNmwb"
# ## Model building
# + [markdown] colab_type="text" id="grUdmuDMNsE8"
# #### `Treating the imbalance dataset`
# + colab={} colab_type="code" id="RtE-jP2WNxlu"
# Removing all the object columns
categorical_columns_ = churn_data_new.select_dtypes(exclude=['int','float']).columns
# + colab={} colab_type="code" id="UrMVKigAShA9"
for col in categorical_columns_:
churn_data_new.drop(col,axis=1,inplace=True)
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="S5thuwP8SiW9" outputId="5287d6ce-1f8e-415c-a45f-a00eac0e7236"
churn_data_new.head(10)
print(churn_data_new.shape)
# + colab={"base_uri": "https://localhost:8080/", "height": 68} colab_type="code" id="D0YvT7EYWRi-" outputId="4363d7e8-70b0-405b-98eb-fe3cffaf6d01"
# We can see that the dataset is imbalanced, since about 92% of customers did not churn.
churn_data_new['churned_column'].value_counts()/churn_data_new.shape[0] * 100
# + colab={} colab_type="code" id="aYNyfLvaUIak"
main_df = churn_data_new
# + colab={} colab_type="code" id="lRNAxOG1Uc4Y"
X = main_df.drop(['churned_column'],axis=1)
y = main_df['churned_column']
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" id="CxIrlovWUvLN" outputId="072d6817-448a-4ec5-abcf-35830c708bb9"
# Splitting the data into train and test sets
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, train_size=0.7, random_state=1)
print("Dimension of X_train:", X_train.shape)
print("Dimension of X_test:", X_test.shape)
# + colab={} colab_type="code" id="UOpN9szyU65j"
from imblearn.over_sampling import SMOTE
sm = SMOTE(kind = "regular")
X_train_,y_train_ = sm.fit_sample(X_train,y_train)
# + colab={"base_uri": "https://localhost:8080/", "height": 68} colab_type="code" id="Nat5OdjcVV4u" outputId="699096bf-fc47-4b18-bc67-243ab6752ba1"
# Checking the shape and also the balanced data after treated using the SMOTE
print("Dimension of X_tr Shape:", X_train_.shape)
print("Dimension of y_tr Shape:", y_train_.shape)
print("Imbalance in Training dataset:",(y_train_ != 0).sum()/(y_train_ == 0).sum())
# + colab={} colab_type="code" id="KhSqkDy1v0IZ"
# Confusion metrics
def confusion_matrix_(y_test,y_preds):
cm1 = confusion_matrix(y_test, y_preds)
print('Confusion Matrix : \n', cm1)
total1=sum(sum(cm1))
#####from confusion matrix calculate accuracy
accuracy1=(cm1[0,0]+cm1[1,1])/total1
print ('Accuracy : ', accuracy1)
    # Sensitivity (recall for the churn class, label 1): TP / (TP + FN)
    sensitivity1 = cm1[1,1]/(cm1[1,0]+cm1[1,1])
    print('Sensitivity : ', sensitivity1)
    # Specificity (recall for the non-churn class, label 0): TN / (TN + FP)
    specificity1 = cm1[0,0]/(cm1[0,0]+cm1[0,1])
    print('Specificity : ', specificity1)
# + [markdown] colab_type="text" id="RsjhdnaGnSQG"
# #### `Logistic Regression`
# + colab={"base_uri": "https://localhost:8080/", "height": 102} colab_type="code" id="5Mvotmw7X05B" outputId="f8772bf8-936a-442e-c323-9bc99e2a788b"
lr = LogisticRegression()
lr.fit(X_train_, y_train_)
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="4ipB3oikZdN7" outputId="bb460c4d-db59-4883-8770-ddf4bbafef73"
y_pred = lr.predict(X_test)
metrics.accuracy_score(y_test, y_pred)
# + colab={"base_uri": "https://localhost:8080/", "height": 119} colab_type="code" id="qhkKIjP7rTBG" outputId="92acbbfb-ea0e-4937-c67e-de37ac9b66ca"
confusion_matrix_(y_test,y_pred)
# + [markdown] colab_type="text" id="e1yDAanx4QHf"
# #### `Random Forest`
# + [markdown] colab_type="text" id="-gGxW_kG55Yi"
# Random Forest with default parameters
# + colab={} colab_type="code" id="ywueEkz86JjD"
# Splitting and applying the SMOTE to balance the data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, train_size=0.7, random_state=1)
sm = SMOTE(kind = "regular")
X_train_sm, y_train_sm = sm.fit_sample(X_train,y_train)
# + colab={} colab_type="code" id="L9PJyblG59tB"
rfc = RandomForestClassifier()
# + colab={"base_uri": "https://localhost:8080/", "height": 153} colab_type="code" id="yPvHBK7Q6DlB" outputId="e27162c4-aa4a-44d3-f9d6-987e7d4cc119"
rfc.fit(X_train_sm,y_train_sm)
# + colab={} colab_type="code" id="Ai5KURD0AVUa"
predictions = rfc.predict(X_test)
# + colab={"base_uri": "https://localhost:8080/", "height": 119} colab_type="code" id="FUeDv-I9AdFm" outputId="1d180917-7370-4fd6-a3e9-ed10e55d5611"
confusion_matrix_(y_test, predictions)
# + colab={} colab_type="code" id="QWE8EYvV-8A8"
# Dictionary of importance of features from the model.
feature_dict={}
# Iterate over all feature columns and store each importance score
for i, col in enumerate(X_train.columns):
    feature_dict[col] = rfc.feature_importances_[i]
# + colab={"base_uri": "https://localhost:8080/", "height": 187} colab_type="code" id="IgX9TCdVJ5Ya" outputId="d206c2ea-f50d-471e-9c1c-300e577671ae"
# Get the 10 most important features
def keyfunction(k):
return feature_dict[k]
for key in sorted(feature_dict, key=keyfunction, reverse=True)[:10]:
print (f"{key} : {feature_dict[key]}")
# + [markdown] colab_type="text" id="hiL-vrlV-3zJ"
# Tuning the hyperparameter
# + colab={} colab_type="code" id="JwAc8GWO4T8j"
# Tuning the hyper-parameter using the GridSearchCV
# Create the parameter grid based on the results of random search
param_grid = {
'max_depth': [10, 20, 30],
'min_samples_leaf': range(1, 5, 10),
'min_samples_split': range(2, 10, 20),
'n_estimators': [50, 75, 100],
'max_features': [10, 20, 30]
}
# Create a based model
rf = RandomForestClassifier()
# Instantiate the grid search model
grid_search = GridSearchCV(estimator = rf, param_grid = param_grid,
cv = 3, n_jobs = -1,verbose = 1)
# + colab={"base_uri": "https://localhost:8080/", "height": 493} colab_type="code" id="TFeCJxhT5txJ" outputId="2b8f2d1f-249f-492c-84c0-c6fc85c829bc"
# Fitting the GridSearchCV to the training data
grid_search.fit(X_train_sm, y_train_sm)
# + colab={"base_uri": "https://localhost:8080/", "height": 54} colab_type="code" id="zia5QuWLB3IR" outputId="b96777f3-d45f-4b07-f418-bef9c903d9a5"
# printing the optimal accuracy score and hyperparameters
print('The accuracy obtained by best parameters is',grid_search.best_score_,'using',grid_search.best_params_)
# + [markdown] colab_type="text" id="FAU438H7KvK2"
# Random Forest with the optimal parameters obtained from hyperparameter tuning
# + colab={} colab_type="code" id="fd5DkIvaK7Od"
rfc = RandomForestClassifier(bootstrap=True,
max_depth=8,
min_samples_leaf=1,
min_samples_split=2,
max_features=30,
n_estimators=75)
# + colab={"base_uri": "https://localhost:8080/", "height": 153} colab_type="code" id="StdMCiRrXrF-" outputId="42b9539e-9962-4ad3-ad6e-3f1f07a96385"
rfc.fit(X_train_sm, y_train_sm)
# + colab={} colab_type="code" id="-vrRAEfsXvbL"
predictions_hp = rfc.predict(X_test)
# + colab={"base_uri": "https://localhost:8080/", "height": 119} colab_type="code" id="LToryMLjX3pO" outputId="f2a6850a-6333-4e28-9165-01a159586c4b"
confusion_matrix_(y_test, predictions_hp)
# + [markdown] colab_type="text" id="ZIn1yMwPnq-d"
# #### `PCA`
#
#
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" id="NewMuUBpntwO" outputId="e5e9fb20-e9d3-40c3-8094-5b7a10799c73"
# Doing the train test split and then applying SMOTE to balance the dataset
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, train_size=0.7, random_state=1)
# Applying SMOTE
sm = SMOTE(kind = "regular")
X_train_sm,y_train_sm = sm.fit_sample(X_train,y_train)
print(X_train_sm.shape)
print(y_train_sm.shape)
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" id="GMvHPovJtMZR" outputId="1915ef4c-45d0-4216-8a2d-5350fb902017"
# Applying PCA on train data
pca = PCA(random_state=100)
pca.fit(X_train_sm)
# + colab={} colab_type="code" id="5dk_jFiptUZj"
X_train_pca = pca.fit_transform(X_train_sm)
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="0oXglSmJtoh4" outputId="0e60c0c4-d518-41a9-bc64-fe584cde5986"
X_train_pca.shape
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="Tjimw01mtq-o" outputId="188cb53f-889d-46c2-e4c4-dfe4d9c53a31"
X_train_sm.shape
# + colab={"base_uri": "https://localhost:8080/", "height": 497} colab_type="code" id="DuiyWe3nvZ_U" outputId="a61249ea-5004-40ea-b399-76aee4b602b7"
# Scree plot to check the cumulative variance explained by the principal components
fig = plt.figure(figsize = (12,8))
plt.plot(np.cumsum(pca.explained_variance_ratio_))
plt.xlabel('Number of principal components')
plt.ylabel('Explained variance - cumulative')
plt.show()
# + colab={} colab_type="code" id="LMLILjvnwelI"
# From the graph we can see that around 30 principal components explain about 90% of the variance
pca_30 = PCA(n_components=30)
X_train_pca_30 = pca_30.fit_transform(X_train_sm)
X_test_pca_30 = pca_30.transform(X_test)
# + colab={"base_uri": "https://localhost:8080/", "height": 85} colab_type="code" id="wSPj5Rld2f4J" outputId="7c7cdc57-f3aa-491b-c759-5eb1a3e58222"
print(X_train_pca_30.shape)
print(y_train_sm.shape)
print(X_test_pca_30.shape)
print(y_test.shape)
# + [markdown] colab_type="text" id="TMYQtyC90BUf"
# #### `XGBoost`
# + colab={} colab_type="code" id="VTUo0crh1iQO"
# Applying the XGBoost on the features obtained after doing the PCA
train = xgb.DMatrix(X_train_pca_30,label=y_train_sm)
test=xgb.DMatrix(X_test_pca_30,label=y_test)
# + colab={} colab_type="code" id="YOgLLLry3E-O"
param={
'max_depth':10,
'eta':0.3,
'objective':'multi:softmax',
'num_class':2
}
epochs=10
# + colab={} colab_type="code" id="jyBIBf7u3fKZ"
model = xgb.train(param, train, epochs)
# + colab={} colab_type="code" id="YCPxgRV53zPF"
y_pred_xg = model.predict(test)
# + colab={"base_uri": "https://localhost:8080/", "height": 119} colab_type="code" id="G6fTIUnV532A" outputId="01370ee1-eab0-49f5-a506-2faa1ee1f751"
confusion_matrix_(y_test,y_pred_xg)
# + [markdown] colab_type="text" id="cwc6GqBY53zP"
# #### `Decision Tree`
# + colab={} colab_type="code" id="d4TJytRe57Ql"
from sklearn.tree import DecisionTreeClassifier
# + colab={} colab_type="code" id="yE4Jy5Uu7Amm"
clf = DecisionTreeClassifier( criterion = "entropy",class_weight= "balanced", random_state = 100, max_depth = 3, min_samples_leaf = 5)
# + colab={"base_uri": "https://localhost:8080/", "height": 119} colab_type="code" id="WUKCnU1c7aAG" outputId="5d14b71b-4908-477a-bea1-18403d06991d"
clf.fit(X_train_pca_30, y_train_sm)
# + colab={} colab_type="code" id="-cOM-VKN9Tdz"
y_pred = clf.predict(X_test_pca_30)
# + colab={"base_uri": "https://localhost:8080/", "height": 119} colab_type="code" id="cem3eDCN9bp1" outputId="eb4f50aa-128e-419c-8709-dfa6cd09c07c"
confusion_matrix_(y_test, y_pred)
# + [markdown] colab_type="text" id="I7rBR-TGXizd"
# #### `Random Forest with PCA and default parameters`
# + colab={} colab_type="code" id="xT8VyJea6ALN"
rfc = RandomForestClassifier()
# + colab={"base_uri": "https://localhost:8080/", "height": 153} colab_type="code" id="A6KrQcLBXPWB" outputId="5c41c4d0-8cc6-4f0d-e9f0-6905a0017c47"
rfc.fit(X_train_pca_30, y_train_sm)
# + colab={} colab_type="code" id="wCbWr7snXY_g"
predictions = rfc.predict(X_test_pca_30)
# + colab={"base_uri": "https://localhost:8080/", "height": 119} colab_type="code" id="Vgb5MdX3XbZ3" outputId="c4a83b8e-b07f-4972-a609-57652cf7f32c"
confusion_matrix_(y_test, predictions)
# + [markdown] colab_type="text" id="G34TfFsdX0iq"
# #### `Random Forest with PCA and hyperparameter tuning`
# + colab={} colab_type="code" id="YCaTlPORX7LP"
param_grid = {
'max_depth': [2, 4, 8],
'min_samples_leaf': range(1, 5, 10),
'min_samples_split': range(2, 10, 20),
'n_estimators': [50, 75, 100],
'max_features': [10, 20, 30]
}
# Create a based model
rf = RandomForestClassifier()
# Instantiate the grid search model
grid_search = GridSearchCV(estimator = rf, param_grid = param_grid,
cv = 3, n_jobs = -1,verbose = 1)
# + colab={"base_uri": "https://localhost:8080/", "height": 476} colab_type="code" id="pfwPTjcBYGs6" outputId="56efe1ab-3d41-43ad-c0d9-feeeaafd9de1"
grid_search.fit(X_train_pca_30, y_train_sm)
# + colab={"base_uri": "https://localhost:8080/", "height": 54} colab_type="code" id="rhY02yQbgk8n" outputId="319c9855-653b-4af7-87fa-55960b4b3b6d"
# printing the optimal accuracy score and hyperparameters
print('The accuracy obtained by best parameters is',grid_search.best_score_,'using',grid_search.best_params_)
# + [markdown] colab_type="text" id="BEddESK1Ye4N"
# Using the optimal parameters obtained above
# + colab={} colab_type="code" id="zwbUr76BYdzO"
rfc = RandomForestClassifier(bootstrap=True,
max_depth=8,
min_samples_leaf=1,
min_samples_split=2,
max_features=30,
n_estimators=100)
# + colab={"base_uri": "https://localhost:8080/", "height": 153} colab_type="code" id="Aeco6xYff0l1" outputId="8efb65e6-5a20-4b74-a27e-e373efb00562"
rfc.fit(X_train_pca_30, y_train_sm)
# + colab={} colab_type="code" id="vCC4dP3BYkMM"
predictions_hp = rfc.predict(X_test_pca_30)
# + colab={"base_uri": "https://localhost:8080/", "height": 119} colab_type="code" id="NOEQHPH4YsIF" outputId="9dd5dd4d-80a6-42d6-da2a-cc1f2f3514d5"
confusion_matrix_(y_test, predictions_hp)
# + [markdown] colab_type="text" id="hsXkIdgqLVfm"
# # Conclusion
# + [markdown] colab_type="text" id="Lfv9UIAPL0Vd"
# Random Forest is the best model, giving a sensitivity of about 97%
# + [markdown] colab_type="text" id="U6YStPQCMDCT"
# Top 10 most important features which are needed to predict the churn were
#
#
# - roam_ic_mou_8 (Minutes of usage of roaming incoming calls in the 8th month)
# - roam_og_mou_8 (Minutes of usage of roaming outgoing calls in the 8th month)
# - total_ic_mou_8 (Total incoming minutes of usage in the 8th month)
# - total_rech_amt_8 (Total recharge amount in the 8th month)
# - last_day_rch_amt_8 (Last recharge amount in the 8th month)
# - total_recharge_amt_8 (Derived total recharge amount in the 8th month)
# - av_rech_amt_data_8 (Average recharge amount for data in the 8th month)
# - arpu_8 (Average revenue per user in the 8th month)
# - total_rech_data_8 (Total data recharge in the 8th month)
# - total_og_mou_8 (Total outgoing minutes of usage in the 8th month)
#
#
#
| CaseStudy/TelecomChurn/CaseStudy_Telecom_Churn_Prediction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/lsuhpchelp/lbrnloniworkshop2020/blob/master/day5/keras_mnist_v3_5layer_fc_dropout.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="DHMY-zMpnVHZ" colab_type="text"
# # MNIST handwritten digits classification with 5-layer dropout softmax regression model.
#
# Ref: https://github.com/CSCfi/machine-learning-scripts
#
# In the third step, we'll improve the five-layer fully connected model for classifying MNIST digits, as shown in the slides. The previous fully connected model suffered from over-fitting; we will use dropout layers to remedy this issue.
#
# Note that you can ignore the initialization part, which may be environment/platform dependent, and focus instead on the part related to the structure of the neural network (NN) to understand how the fully connected layers are implemented in Keras.
# + id="ZWCaCWGtnVHe" colab_type="code" colab={}
# initialization of the environment using keras
# %matplotlib inline
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout, Flatten, MaxPooling2D
from keras.layers.convolutional import Conv2D
from keras.utils import np_utils
from keras import backend as K
from distutils.version import LooseVersion as LV
from keras import __version__
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
print('Using Keras version:', __version__, 'backend:', K.backend())
assert(LV(__version__) >= LV("2.0.0"))
# + [markdown] id="1x31MwNUnVHn" colab_type="text"
# Let's load the MNIST or Fashion-MNIST dataset.
# + id="Mf6kfGotnVHp" colab_type="code" colab={}
from keras.datasets import mnist, fashion_mnist
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# if you want to use the fashion_mnist data
#(X_train, y_train), (X_test, y_test) = fashion_mnist.load_data()
nb_classes = 10
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
# one-hot encoding:
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
print()
print('MNIST data loaded: train:',len(X_train),'test:',len(X_test))
print('X_train:', X_train.shape)
print('y_train:', y_train.shape)
print('Y_train:', Y_train.shape)
# + [markdown] id="eHgnjkkinVHu" colab_type="text"
# We'll have to do a bit of tensor manipulation, depending on the backend used (Theano or TensorFlow).
# + id="mrC7wYDynVHw" colab_type="code" colab={}
# input image dimensions
img_rows, img_cols = 28, 28
if K.common.image_dim_ordering() == 'th':
X_train_disp = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
X_test_disp = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
    # Note how we flatten the images
X_train = X_train.reshape(X_train.shape[0], img_rows*img_cols)
X_test = X_test.reshape(X_test.shape[0], img_rows*img_cols)
input_shape = (img_rows*img_cols,)
else:
X_train_disp = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
X_test_disp = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)
    # Note how we flatten the images
X_train = X_train.reshape(X_train.shape[0], img_rows*img_cols)
X_test = X_test.reshape(X_test.shape[0], img_rows*img_cols)
input_shape = (img_rows*img_cols,)
print('X_train:', X_train.shape)
print('X_test:', X_test.shape)
# + [markdown] id="VMudB0jinVH1" colab_type="text"
# ## Initialization
#
# Build the 5-layer fully connected softmax regression NN model, adding the dropout layers.
# + id="0BaZ2YPUnVH3" colab_type="code" colab={}
nb_classes = 10
model = Sequential()
# using the relu activation function
act_func='relu'
# dropout rate, float between 0 and 1. Fraction of the input units to drop.
pdropout=0.25
model.add(Dense(200,activation=act_func,input_shape=input_shape))
model.add(Dropout(pdropout))
model.add(Dense(100,activation=act_func))
model.add(Dropout(pdropout))
model.add(Dense( 60,activation=act_func))
model.add(Dropout(pdropout))
model.add(Dense( 30,activation=act_func))
model.add(Dropout(pdropout))
#build the softmax regression layer
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
# using the cross-entropy loss function (objective)
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
print(model.summary())
# + id="Qp9gIFHrnVH8" colab_type="code" colab={}
# uncomment the below line to visualize the neural network structure
#SVG(model_to_dot(model, show_shapes=True).create(prog='dot', format='svg'))
# + [markdown] id="nDFCCNXcnVIB" colab_type="text"
# ## Learning
#
# Now let's train the 5-layer fully connected model with dropout. This is still a relatively simple model, so training is fast.
# + id="zLO8wM2mnVIE" colab_type="code" colab={}
# %%time
epochs = 50 # one epoch finishes in 1-2 seconds
history = model.fit(X_train,
Y_train,
epochs=epochs,
batch_size=128,
verbose=2,
# note the use of validation data
validation_data=(X_test, Y_test))
# + id="rxQn01QFnVIJ" colab_type="code" colab={}
# You should see the overfitting issue resolved based on the loss and accuracy
# curves, compare the curves with version 2.
# plot the training and validation loss
plt.figure(figsize=(5,4))
plt.plot(history.epoch,history.history['loss'],label='training loss',color='blue')
plt.plot(history.epoch,history.history['val_loss'],label='test loss',color='red')
plt.legend()
plt.title('loss')
# plot the training and validation accuracy
plt.figure(figsize=(5,4))
plt.plot(history.epoch,history.history['accuracy'],label='training acc',color='blue')
plt.plot(history.epoch,history.history['val_accuracy'],label='test acc',color='red')
plt.legend()
plt.title('accuracy');
# + [markdown] id="6ZAUW9TQnVIO" colab_type="text"
# ## Inference
#
# With enough training epochs, the test accuracy should be around 98% and can be slightly higher than the pure fully connected version.
#
# You can compare your result with the state-of-the art [here](http://rodrigob.github.io/are_we_there_yet/build/classification_datasets_results.html). Even more results can be found [here](http://yann.lecun.com/exdb/mnist/).
# + id="QD3lFIuhnVIP" colab_type="code" colab={}
# %%time
scores = model.evaluate(X_test, Y_test, verbose=2)
print("%s: %.2f%%" % (model.metrics_names[1], scores[1]*100))
# + [markdown] id="uWlwfnRunVIb" colab_type="text"
# We can again take a closer look at the results. Let's begin by defining
# a helper function to show the failure cases of our classifier.
# + id="NF_ZJ1AmnVIh" colab_type="code" colab={}
def show_failures(predictions, trueclass=None, predictedclass=None, maxtoshow=10):
rounded = np.argmax(predictions, axis=1)
errors = rounded!=y_test
print('Showing max', maxtoshow, 'first failures. '
'The predicted class is shown first and the correct class in parenthesis.')
ii = 0
plt.figure(figsize=(maxtoshow, 1))
for i in range(X_test.shape[0]):
if ii>=maxtoshow:
break
if errors[i]:
if trueclass is not None and y_test[i] != trueclass:
continue
            if predictedclass is not None and rounded[i] != predictedclass:
continue
plt.subplot(1, maxtoshow, ii+1)
plt.axis('off')
if K.common.image_dim_ordering() == 'th':
plt.imshow(X_test_disp[i,0,:,:], cmap="gray")
else:
plt.imshow(X_test_disp[i,:,:,0], cmap="gray")
plt.title("%d (%d)" % (rounded[i], y_test[i]))
ii = ii + 1
# + [markdown] id="DzI18wcHnVIo" colab_type="text"
# Here are the first 10 test digits the network classified into a wrong class:
# + id="WtIPJjvQnVIp" colab_type="code" colab={}
predictions = model.predict(X_test)
show_failures(predictions)
# + [markdown] id="NtJrYZuhnVIt" colab_type="text"
# We can use `show_failures()` to inspect failures in more detail. For example, here are failures in which the true class was "6":
# + id="fT1XJWPqnVIv" colab_type="code" colab={}
show_failures(predictions, trueclass=6)
# + [markdown] id="vBxa7Kv3ANRA" colab_type="text"
# # Question
#
# Try different dropout rates in the above deep neural network and see which value gives the best accuracy.
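# One way to approach this is to rebuild and retrain the same architecture for a few candidate
# dropout rates and compare the resulting test accuracies. The sketch below does exactly that,
# reusing the data and settings already defined above; only a handful of epochs are used to keep
# the loop quick, so increase `epochs` for a fairer comparison.

def build_model(pdrop):
    '''Rebuild the 5-layer fully connected model with a given dropout rate.'''
    m = Sequential()
    m.add(Dense(200, activation=act_func, input_shape=input_shape))
    m.add(Dropout(pdrop))
    m.add(Dense(100, activation=act_func))
    m.add(Dropout(pdrop))
    m.add(Dense(60, activation=act_func))
    m.add(Dropout(pdrop))
    m.add(Dense(30, activation=act_func))
    m.add(Dropout(pdrop))
    m.add(Dense(nb_classes, activation='softmax'))
    m.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
    return m

for pdrop in [0.1, 0.25, 0.5]:
    m = build_model(pdrop)
    m.fit(X_train, Y_train, epochs=10, batch_size=128, verbose=0)
    score = m.evaluate(X_test, Y_test, verbose=0)
    print('dropout rate %.2f -> test accuracy %.4f' % (pdrop, score[1]))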
| day5/keras_mnist_v3_5layer_fc_dropout.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Rolling Update Tests
#
# Check that rolling updates function as expected.
import json
import time
# !kubectl create namespace seldon
# !kubectl config set-context $(kubectl config current-context) --namespace=seldon
# ## Change Image
# !kubectl apply -f resources/fixed_v1.yaml
# !kubectl rollout status deploy/$(kubectl get deploy -l seldon-deployment-id=fixed \
# -o jsonpath='{.items[0].metadata.name}')
# !curl -d '{"data": {"ndarray":[[1.0, 2.0, 5.0]]}}' \
# -X POST http://localhost:8003/seldon/seldon/fixed/api/v1.0/predictions \
# -H "Content-Type: application/json"
# !kubectl apply -f resources/fixed_v2.yaml
time.sleep(5) # To allow operator to start the update
for i in range(60):
# responseRaw=!curl -s -d '{"data": {"ndarray":[[1.0, 2.0, 5.0]]}}' -X POST http://localhost:8003/seldon/seldon/fixed/api/v1.0/predictions -H "Content-Type: application/json"
response = json.loads(responseRaw[0])
assert(response['data']['ndarray'][0]==1 or response['data']['ndarray'][0]==5)
# jsonRaw=!kubectl get deploy -l seldon-deployment-id=fixed -o json
data="".join(jsonRaw)
resources = json.loads(data)
numReplicas = int(resources["items"][0]["status"]["replicas"])
if numReplicas == 3:
break
time.sleep(1)
print("Rollout Success")
# !kubectl delete -f resources/fixed_v1.yaml
# ## Separate Service Orchestrator
# !kubectl apply -f resources/fixed_v1_sep.yaml
# !kubectl rollout status deploy/$(kubectl get deploy -l seldon-deployment-id=fixed \
# -o jsonpath='{.items[0].metadata.name}')
# !curl -d '{"data": {"ndarray":[[1.0, 2.0, 5.0]]}}' \
# -X POST http://localhost:8003/seldon/seldon/fixed/api/v1.0/predictions \
# -H "Content-Type: application/json"
# !kubectl apply -f resources/fixed_v2_sep.yaml
time.sleep(5) # To allow operator to start the update
for i in range(60):
# responseRaw=!curl -s -d '{"data": {"ndarray":[[1.0, 2.0, 5.0]]}}' -X POST http://localhost:8003/seldon/seldon/fixed/api/v1.0/predictions -H "Content-Type: application/json"
response = json.loads(responseRaw[0])
assert(response['data']['ndarray'][0]==1 or response['data']['ndarray'][0]==5)
# jsonRaw=!kubectl get deploy -l seldon-deployment-id=fixed -o json
data="".join(jsonRaw)
resources = json.loads(data)
numReplicas = int(resources["items"][0]["status"]["replicas"])
if numReplicas == 1:
break
time.sleep(1)
print("Rollout Success")
# !kubectl delete -f resources/fixed_v1_sep.yaml
# ## Two PodSpecs
# !kubectl apply -f resources/fixed_v1_2podspecs.yaml
# !kubectl rollout status deploy/$(kubectl get deploy -l seldon-deployment-id=fixed \
# -o jsonpath='{.items[0].metadata.name}')
# !curl -d '{"data": {"ndarray":[[1.0, 2.0, 5.0]]}}' \
# -X POST http://localhost:8003/seldon/seldon/fixed/api/v1.0/predictions \
# -H "Content-Type: application/json"
# !kubectl apply -f resources/fixed_v2_2podspecs.yaml
time.sleep(5) # To allow operator to start the update
for i in range(60):
# responseRaw=!curl -s -d '{"data": {"ndarray":[[1.0, 2.0, 5.0]]}}' -X POST http://localhost:8003/seldon/seldon/fixed/api/v1.0/predictions -H "Content-Type: application/json"
response = json.loads(responseRaw[0])
assert(response['data']['ndarray'][0]==1 or response['data']['ndarray'][0]==5)
# jsonRaw=!kubectl get deploy -l seldon-deployment-id=fixed -o json
data="".join(jsonRaw)
resources = json.loads(data)
numReplicas = int(resources["items"][0]["status"]["replicas"])
if numReplicas == 1:
break
time.sleep(1)
print("Rollout Success")
# !kubectl delete -f resources/fixed_v1_2podspecs.yaml
# ## Two Models
# !kubectl apply -f resources/fixed_v1_2models.yaml
# !kubectl rollout status deploy/$(kubectl get deploy -l seldon-deployment-id=fixed \
# -o jsonpath='{.items[0].metadata.name}')
# !curl -d '{"data": {"ndarray":[[1.0, 2.0, 5.0]]}}' \
# -X POST http://localhost:8003/seldon/seldon/fixed/api/v1.0/predictions \
# -H "Content-Type: application/json"
# !kubectl apply -f resources/fixed_v2_2models.yaml
time.sleep(5) # To allow operator to start the update
for i in range(60):
# responseRaw=!curl -s -d '{"data": {"ndarray":[[1.0, 2.0, 5.0]]}}' -X POST http://localhost:8003/seldon/seldon/fixed/api/v1.0/predictions -H "Content-Type: application/json"
response = json.loads(responseRaw[0])
assert(response['data']['ndarray'][0]==1 or response['data']['ndarray'][0]==5)
# jsonRaw=!kubectl get deploy -l seldon-deployment-id=fixed -o json
data="".join(jsonRaw)
resources = json.loads(data)
numReplicas = int(resources["items"][0]["status"]["replicas"])
if numReplicas == 3:
break
time.sleep(1)
print("Rollout Success")
# !kubectl delete -f resources/fixed_v2_2models.yaml
# ## Model name changes
#
# This will not do a rolling update but create a new deployment.
# !kubectl apply -f resources/fixed_v1.yaml
# !kubectl rollout status deploy/$(kubectl get deploy -l seldon-deployment-id=fixed \
# -o jsonpath='{.items[0].metadata.name}')
# !curl -d '{"data": {"ndarray":[[1.0, 2.0, 5.0]]}}' \
# -X POST http://localhost:8003/seldon/seldon/fixed/api/v1.0/predictions \
# -H "Content-Type: application/json"
# !kubectl apply -f resources/fixed_v2_new_name.yaml
time.sleep(5) # To allow operator to start the update
for i in range(60):
# responseRaw=!curl -s -d '{"data": {"ndarray":[[1.0, 2.0, 5.0]]}}' -X POST http://localhost:8003/seldon/seldon/fixed/api/v1.0/predictions -H "Content-Type: application/json"
response = json.loads(responseRaw[0])
assert(response['data']['ndarray'][0]==1 or response['data']['ndarray'][0]==5)
# jsonRaw=!kubectl get deploy -l seldon-deployment-id=fixed -o json
data="".join(jsonRaw)
resources = json.loads(data)
numItems = len(resources["items"])
if numItems == 1:
break
time.sleep(1)
print("Rollout Success")
# !kubectl delete -f resources/fixed_v2_new_name.yaml
| notebooks/rolling_updates.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Setup Code
# +
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from sklearn import linear_model
# %matplotlib inline
if __name__ == '__main__':
#LoadData
DataMatrix = pd.read_csv('voice.csv', sep=',',skipinitialspace=True)
DataMatrix.replace({'male': 1.0, 'female': -1.0},inplace=True)
DataLabels = DataMatrix['label']
# Transform to an NP Array
Data = DataMatrix.values[:,1]
Label = DataLabels.values
maskh = [Label == 1]
maskm = [Label == -1]
# Train Data Set
X_train = (np.concatenate((Data[:1200], Data[1585:2785]))).reshape(-1, 1)
y_train = (np.concatenate((Label[:1200], Label[1585:2785]))).reshape(-1, 1)
# Test
X_test_male = Data[1200:1585].reshape(-1, 1)
X_test_female = Data[2785:].reshape(-1, 1)
y_test_male = Label[1200:1585].reshape(-1, 1)
y_test_female = Label[2785:].reshape(-1, 1)
# Create linear regression object
regr = linear_model.LinearRegression()
# Train the model using the training sets
regr.fit(X_train, y_train)
# Make predictions using the testing set
y_pred_male = regr.predict(X_test_male)
y_pred_female = regr.predict(X_test_female)
# The coefficients
print('Coefficients: \n', regr.coef_)
# -
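# As a quick sanity check (an illustrative sketch): since the labels are encoded as +1 (male)
# and -1 (female), the regression output can be thresholded at zero to obtain class predictions,
# and accuracy computed for each test set.
acc_male = np.mean(np.sign(y_pred_male) == y_test_male)
acc_female = np.mean(np.sign(y_pred_female) == y_test_female)
print('Accuracy on male test set: %.3f' % acc_male)
print('Accuracy on female test set: %.3f' % acc_female)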
C1 = DataMatrix.values
C1>0.5
DataMatrix.values[C1>0.5].shape
X_train = (np.concatenate((Data[:1200], Data[1585:2785])))
X_train.shape
DataMatrix.values.shape
y_pred_male
| Jupyter/Class_ML_Path/03 Linear Regression/BasicRegression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
## This cell just imports necessary modules
# %pylab notebook
from mpl_toolkits.mplot3d import Axes3D # For 3D plotting of Cylindrical and Spherical coordinates
# # Lecture 1 (Coordinate Systems)
#
# This notebook will illustrate how to apply the maths discussed in the lecture using Python.
#
# In these notebooks, we will adopt the following prefix convention when naming variables:
#
# ```
# 's' (e.g. sDotProduct) means the variable is a scalar
# 'v' (e.g. vCrossProduct) means the variable is a vector
# 'm' (e.g. mA) means the variable is a matrix
# ```
#
# To help us apply coordinate transformations, we start by writing a number of functions for each of the different changes of coordinate that we may wish to apply. Note that we could write each of these functions much more efficiently, but as we are quite new to Python we will err on the side of clarity. First, let's consider the function for converting from 2D Cartesian coordinates to Polar coordinates.
def cartesian_to_polar(vCoordinate):
'''Converts 2D Cartesian coordinates to Polar coordinates.'''
# Interpret input
sX = vCoordinate[0] # The first component of the input vector is the x-coordinate
sY = vCoordinate[1] # The second component of the input vector is the y-coordinate
# Coordinate transform
sR = sqrt(sX**2 + sY**2) # Pythagoras' theorem
sTheta = numpy.arctan(sY/sX) # Simple trigonometry: 'TOA', Tangent = opposite/adjacent
# Remember: we might have to modify sTheta
# depending on which 'quadrant' we are in.
sTheta = check_angle(sX, sY, sTheta)
return (sR, sTheta)
# Notice that this function itself calls another function, because there will be circumstances where we need to resolve the ambiguity in the angle theta: the tangent function repeats every 180 degrees (pi radians). Let's write a function `check_angle` to handle these situations.
def check_angle(sX, sY, sAngle):
'''Adjust Polar coordinate angle based on quadrant, returning angle in
radians between 0 and 2*pi.'''
if(sX < 0 and sY >= 0):
# We are in the upper left quadrant
# so add 180 degrees (pi radians)
# onto sAngle
sAngle = sAngle + pi
elif(sX < 0 and sY < 0):
# We are in the lower left quadrant
# so add 180 degrees (pi radians)
# onto sAngle
sAngle = sAngle + pi
elif(sX >= 0 and sY < 0):
# We are in the lower right quadrant
# so add 360 degrees (2*pi radians)
# onto sAngle
sAngle = sAngle + 2*pi
return sAngle
# Now consider the reverse function that converts from Polar coordinates to 2D Cartesian.
def polar_to_cartesian(vCoordinate):
'''A function to convert 2D Polar coordinates to Cartesian coordinates.'''
# Interpret input
sR = vCoordinate[0]
sTheta = vCoordinate[1]
# Coordinate transform
sX = sR*cos(sTheta)
sY = sR*sin(sTheta)
return (sX, sY)
# We can follow this same procedure to define the other functions we might need.
# +
def cartesian_to_cylindrical(vCoordinate):
'''Converts 3D Cartesian coordinates to Cylindrical coordinates.'''
# Interpret input
sX = vCoordinate[0]
sY = vCoordinate[1]
sZ = vCoordinate[2]
# Coordinate transform
sR = sqrt(sX**2 + sY**2)
sPhi = numpy.arctan(sY/sX)
# Again, check that we have the right value of sPhi
# for the quadrant we are in.
sPhi = check_angle(sX, sY, sPhi)
return (sR, sPhi, sZ)
def cylindrical_to_cartesian(vCoordinate):
'''Converts 3D Cylindrical coordinates to Cartesian coordinates.'''
# Interpret input
sR = vCoordinate[0]
sPhi = vCoordinate[1]
sZ = vCoordinate[2]
# Coordinate transform
sX = sR*cos(sPhi)
sY = sR*sin(sPhi)
sZ = sZ
return (sX, sY, sZ)
def spherical_to_cartesian(vCoordinate):
    '''Converts 3D Spherical coordinates to Cartesian coordinates.'''
# Interpret inputs
sR = vCoordinate[0]
sPhi = vCoordinate[1]
sTheta = vCoordinate[2]
# Coordinate transform
sX = sR*sin(sTheta)*cos(sPhi)
sY = sR*sin(sTheta)*sin(sPhi)
sZ = sR*cos(sTheta)
return (sX, sY, sZ)
def cartesian_to_spherical(vCoordinate):
    '''Converts 3D Cartesian coordinates to Spherical coordinates.'''
# Interpret inputs
sX = vCoordinate[0]
sY = vCoordinate[1]
sZ = vCoordinate[2]
# Coordinate transform
sR = sqrt(sX**2 + sY**2 + sZ**2)
sPhi = numpy.arctan(sY/sX)
sTheta = numpy.arccos(sZ/sR)
# Again, check that we have the right value of sPhi
# for the quadrant we are in.
sPhi = check_angle(sX, sY, sPhi)
return (sR, sPhi, sTheta)
# -
# Finally, we'll write a function to help us plot the coordinates for illustrative purposes. Don't worry about this function for now.
def plot_coordinates(vCoordinate, xSystem="Cart2D",
xlimits=[0.,1.], ylimits=[0.,1.], projection=False):
'''Plots 2D or 3D coordinates of a point on formatted axes with the
option of showing the projection of the point onto the x-y-(z) axes.'''
if (xSystem == 'Cart2D'):
fig, ax = plt.subplots()
# Set the spines to go through the origin
ax.spines['left'].set_position(('data',0.))
ax.spines['right'].set_color('none') # turn off the right spine/ticks
ax.yaxis.tick_left()
ax.spines['bottom'].set_position(('data',0.))
ax.spines['top'].set_color('none') # turn off the top spine/ticks
ax.xaxis.tick_bottom()
ax.set_xlabel('x')
ax.xaxis.set_label_coords(1.,0.475)
ax.set_ylabel('y')
ax.yaxis.set_label_coords(0.475,1.)
ax.set_xlim(xlimits)
ax.set_ylim(ylimits)
elif (xSystem == 'Polar'):
fig = pylab.figure()
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], polar=True)
elif (xSystem == 'Cylindrical' or xSystem == 'Spherical'):
fig = pylab.figure()
ax = Axes3D(fig)
ax.set_xlabel("X")
ax.set_ylabel("Y")
ax.set_zlabel("Z")
ax.plot([0.,1.2*vCoordinate[0]],[0.,0.],[0.,0.],'k-')
ax.plot([0.,0.],[0.,1.2*vCoordinate[1]],[0.,0.],'k-')
ax.plot([0.,0.],[0.,0.],[0.,1.2*vCoordinate[2]],'k-')
[sX, sY, sZ] = vCoordinate
# Plot the projections
if (projection):
if (xSystem == 'Cylindrical'):
ax.plot([0.,sX],[0.,sY],[sZ,sZ],'b-',alpha=0.5)
ax.plot([0.,sX],[0.,sY],[0.,0.],'k--',alpha=0.5)
ax.plot([sX,sX],[sY,sY],[0.,sZ],'k:',alpha=0.5)
ax.plot([sX],[sY],[0.],'ko',alpha=0.5)
elif (xSystem == 'Spherical'):
ax.plot([0.,sX],[0.,sY],[0.,sZ],'b-',alpha=0.5)
ax.plot([0.,sX],[0.,sY],[0.,0.],'k--',alpha=0.5)
ax.plot([sX,sX],[sY,sY],[0.,sZ],'k:',alpha=0.5)
ax.plot([sX],[sY],[0.],'ko',alpha=0.5)
else:
ax.plot([0.,vCoordinate[0]],[0.,vCoordinate[1]],'b-',alpha=0.5)
if (xSystem == 'Cart2D'):
ax.plot([vCoordinate[0],vCoordinate[0]],[0.,vCoordinate[1]],'k--',alpha=0.5)
ax.plot([0.,vCoordinate[0]],[vCoordinate[1],vCoordinate[1]],'k--',alpha=0.5)
# Plot the point
if (xSystem == 'Cylindrical' or xSystem == 'Spherical'):
ax.plot([sX],[sY],[sZ],'ro')
else:
ax.plot(vCoordinate[0],vCoordinate[1],'ro')
# Add a grid
ax.grid(True)
# ## CARTESIAN COORDINATES
# **Lecture 1, slide 3**
#
# Right, so let's now utilise our functions to convert between different coordinate systems. Let's start by defining a point in 2D Cartesian coordinates and plotting the point.
# +
###### CARTESIAN COORDINATES ######
###### Lecture 1, slide 3 ######
# A Cartesian coordinate vector in the form (x, y).
vCoordinate = [-3.0, 4.0]
# Let's plot the above vCoordinate on the X-Y plane.
print("Plotting ", vCoordinate)
plot_coordinates(vCoordinate, xlimits=[-5,5], ylimits=[-5,5], projection=True)
# -
# ## CARTESIAN TO POLAR CONVERSION
# **Lecture 1, slide 4**
#
# We can convert these coordinates to Polar coordinates using our function and plot on a Polar diagram.
# +
###### CARTESIAN TO POLAR CONVERSION ######
###### Lecture 1, slide 4 ######
(sR, sTheta) = cartesian_to_polar(vCoordinate)
print("Converting ", vCoordinate, " to polar coordinates:")
print("r = %.3f" % sR)
print("theta = %.3f\n" % sTheta) # Remember: theta will be in RADIANS
print("Plotting ", (sR, sTheta))
plot_coordinates([sTheta, sR], xSystem='Polar', projection=True)
# Plot r and theta.
#fig = pylab.figure()
#axes = fig.add_axes([0.1, 0.1, 0.8, 0.8], polar=True)
#axes.plot(sTheta, sR, 'or')
#pylab.grid(True)
# -
# We can also convert the Polar coordinates back to Cartesian coordinates
###### POLAR TO CARTESIAN CONVERSION ######
###### Lecture 1, slide 4 ######
print("Converting r = %.2f, theta = %.2f back to Cartesian coordinates" % (sR, sTheta))
vCoordinate = [sR, sTheta]
(sX, sY) = polar_to_cartesian(vCoordinate)
print("x = %.2f" % sX)
print("y = %.2f" % sY)
# ## CYLINDRICAL COORDINATES
# **Lecture 1, slide 7**
# +
###### CYLINDRICAL COORDINATES ######
###### Lecture 1, slide 7 ######
vCoordinate = [1.1, 1.6, 0.1] # 3D Cartesian coordinate vector
# Convert to cylindrical coordinates (r, phi, z)
(sR, sPhi, sZ) = cartesian_to_cylindrical(vCoordinate)
print("Plotting cylindrical coordinates ", (sR, sPhi, sZ), " in 3D Cartesian space")
# Convert back to Cartesian for 3D plotting purposes
vCoordinate = [sR, sPhi, sZ]
(sX, sY, sZ) = cylindrical_to_cartesian(vCoordinate)
plot_coordinates([sX,sY,sZ], xSystem = 'Cylindrical', projection=True)
#fig = pylab.figure()
#axes = Axes3D(fig)
#axes.plot([0.,sX],[0.,sY],[sZ,sZ],'b-')
#axes.plot([sX],[sY],[sZ],'or')
#axes.plot([0.,sX],[0.,sY],[0.,0.],'k--')
#axes.plot([sX,sX],[sY,sY],[0.,sZ],'k:')
#axes.plot([sX],[sY],[0.],'ok')
#axes.set_xlabel("X")
#axes.set_ylabel("Y")
#axes.set_zlabel("Z")
# -
# ## SPHERICAL COORDINATES
# **Lecture 1, slide 10**
# +
###### SPHERICAL COORDINATES ######
###### Lecture 1, slide 10 ######
# Let's define some spherical coordinates. . .
sR = 6400.0
# Convert angles in degrees to radians
sPhi = 316.8*(pi/180)
sTheta = 112.9*(pi/180)
print("Plotting spherical coordinates ", (sR, sPhi, sTheta), " in 3D Cartesian space")
# Convert back to Cartesian for 3D plotting purposes
vCoordinate = [sR, sPhi, sTheta]
(sX, sY, sZ) = spherical_to_cartesian(vCoordinate)
# Plot the 3D Cartesian coordinates
plot_coordinates([sX,sY,sZ], xSystem = 'Spherical', projection=True)
#fig = pylab.figure()
#axes = Axes3D(fig)
#axes.plot([0.],[0.],[0.],'oy')
#axes.plot([sX],[sY],[sZ],'or')
#axes.plot([0.,sX],[0.,sY],[0.,sZ],'b-')
#axes.plot([0.,sX],[0.,sY],[0.,0.],'k--')
#axes.plot([sX,sX],[sY,sY],[0.,sZ],'k:')
#axes.plot([sX],[sY],[0.],'ok')
#axes.set_xlabel("X")
#axes.set_ylabel("Y")
#axes.set_zlabel("Z")
# -
# ## Final tip
#
# So what did I mean when I said we could write these functions much more efficiently?
#
# Well, let's try writing the first function again in as few lines as possible:
def cartesian_to_polar(vC):
'''Converts 2D Cartesian coordinates to Polar coordinates.'''
return sqrt(vC[0]**2 + vC[1]**2), check_angle(vC[0], vC[1], numpy.arctan(vC[1]/vC[0]))
# It turns out we can write this function in just one line of code: there was no need to rename the input vector components as scalars, and we can place the transformation formulae (and the `check_angle` function call) directly in the return statement.
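# As a quick check, the condensed version gives the same result as the original function for the point (-3, 4) used earlier:
sR, sTheta = cartesian_to_polar([-3.0, 4.0])
print("r = %.3f, theta = %.3f radians" % (sR, sTheta))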
| mathematics/mm1/Lecture_1_Coordinate_Systems.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Environment (conda_python3)
# language: python
# name: conda_python3
# ---
# # End-to-End Example #1
#
# 1. [Introduction](#Introduction)
# 2. [Prerequisites and Preprocessing](#Prerequisites-and-Preprocessing)
# 1. [Permissions and environment variables](#Permissions-and-environment-variables)
# 2. [Data ingestion](#Data-ingestion)
# 3. [Data inspection](#Data-inspection)
# 4. [Data conversion](#Data-conversion)
# 3. [Training the K-Means model](#Training-the-K-Means-model)
# 4. [Set up hosting for the model](#Set-up-hosting-for-the-model)
# 5. [Validate the model for use](#Validate-the-model-for-use)
#
# ## Introduction
#
# Welcome to our first end-to-end example! Today, we're working through a classification problem, specifically of images of handwritten digits, from zero to nine. Let's imagine that this dataset doesn't have labels, so we don't know for sure what the true answer is. In later examples, we'll show the value of "ground truth", as it's commonly known.
#
# Today, however, we need to get these digits classified without ground truth. A common method for doing this is a set of methods known as "clustering", and in particular, the method that we'll look at today is called k-means clustering. In this method, each point belongs to the cluster with the closest mean, and the data is partitioned into a number of clusters that is specified when framing the problem. In this case, since we know there are 10 clusters, and we have no labeled data (in the way we framed the problem), this is a good fit.
#
# To get started, we need to set up the environment with a few prerequisite steps, for permissions, configurations, and so on.
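# Before diving into the setup, here is a tiny toy illustration of what k-means does, a sketch
# using scikit-learn rather than the SageMaker workflow below. The import is aliased so that it
# won't clash with the `KMeans` estimator imported from `sagemaker` later on.
from sklearn.cluster import KMeans as SkKMeans
import numpy as np

toy = np.array([[1.0, 1.0], [1.2, 0.8], [0.9, 1.1],   # three points near (1, 1)
                [8.0, 8.0], [8.2, 7.9], [7.8, 8.1]])  # three points near (8, 8)
toy_km = SkKMeans(n_clusters=2, n_init=10, random_state=0).fit(toy)
print('cluster assignments:', toy_km.labels_)
print('cluster centers:\n', toy_km.cluster_centers_)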
# ## Prerequisites and Preprocessing
#
# ### Permissions and environment variables
#
# Here we set up the linkage and authentication to AWS services. There are two parts to this:
#
# 1. The role(s) used to give learning and hosting access to your data. Here we extract the role you created earlier for accessing your notebook. See the documentation if you want to specify a different role
# 1. The S3 bucket name and locations that you want to use for training and model data.
# + isConfigCell=true
from sagemaker import get_execution_role
role = get_execution_role()
bucket='<bucket-name>'
# -
# ### Data ingestion
#
# Next, we read the dataset from the existing repository into memory, for preprocessing prior to training. In this case we'll use the MNIST dataset, which contains 70K 28 x 28 pixel images of handwritten digits. For more details, please see [here](http://yann.lecun.com/exdb/mnist/).
#
# This processing could be done *in situ* by Amazon Athena, Apache Spark in Amazon EMR, Amazon Redshift, etc., assuming the dataset is present in the appropriate location. Then, the next step would be to transfer the data to S3 for use in training. For small datasets, such as this one, reading into memory isn't onerous, though it would be for larger datasets.
# +
# %%time
import pickle, gzip, numpy, urllib.request, json
# Load the dataset
urllib.request.urlretrieve("http://deeplearning.net/data/mnist/mnist.pkl.gz", "mnist.pkl.gz")
with gzip.open('mnist.pkl.gz', 'rb') as f:
train_set, valid_set, test_set = pickle.load(f, encoding='latin1')
# -
# ### Data inspection
#
# Once the dataset is imported, it's typical as part of the machine learning process to inspect the data, understand the distributions, and determine what type(s) of preprocessing might be needed. You can perform those tasks right here in the notebook. As an example, let's go ahead and look at one of the digits that is part of the dataset.
# +
# %matplotlib inline
import matplotlib.pyplot as plt
plt.rcParams["figure.figsize"] = (2,10)
def show_digit(img, caption='', subplot=None):
if subplot==None:
_,(subplot)=plt.subplots(1,1)
imgr=img.reshape((28,28))
subplot.axis('off')
subplot.imshow(imgr, cmap='gray')
plt.title(caption)
show_digit(train_set[0][30], 'This is a {}'.format(train_set[1][30]))
# -
# ## Training the K-Means model
#
# Once we have the data preprocessed and available in the correct format for training, the next step is to actually train the model using the data. Since this data is relatively small, it isn't meant to show off the performance of the k-means training algorithm. But Amazon SageMaker's k-means has been tested on, and scales well with, multi-terabyte datasets.
#
# After setting training parameters, we kick off training, and poll for status until training is completed, which in this example, takes between 7 and 11 minutes.
# +
from sagemaker import KMeans
data_location = 's3://{}/kmeans_highlevel_example/data'.format(bucket)
output_location = 's3://{}/kmeans_example/output'.format(bucket)
print('training data will be uploaded to: {}'.format(data_location))
print('training artifacts will be uploaded to: {}'.format(output_location))
kmeans = KMeans(role=role,
train_instance_count=2,
train_instance_type='ml.c4.8xlarge',
output_path=output_location,
k=10,
data_location=data_location)
# +
# %%time
kmeans.fit(kmeans.record_set(train_set[0]))
# -
# ## Set up hosting for the model
# Now, we can deploy the model we just trained behind a real-time hosted endpoint. This next step can take, on average, 7 to 11 minutes to complete.
# +
# %%time
kmeans_predictor = kmeans.deploy(initial_instance_count=1,
instance_type='ml.m4.xlarge')
# -
# ## Validate the model for use
# Finally, we'll validate the model for use. Let's generate a classification for a single observation from the trained model using the endpoint we just created.
result = kmeans_predictor.predict(train_set[0][30:31])
print(result)
# OK, a single prediction works.
#
# Let's do a whole batch and see how well the clustering works.
# +
# %%time
result = kmeans_predictor.predict(valid_set[0][0:100])
clusters = [r.label['closest_cluster'].float32_tensor.values[0] for r in result]
# -
for cluster in range(10):
print('\n\n\nCluster {}:'.format(int(cluster)))
digits = [ img for l, img in zip(clusters, valid_set[0]) if int(l) == cluster ]
height=((len(digits)-1)//5)+1
width=5
plt.rcParams["figure.figsize"] = (width,height)
_, subplots = plt.subplots(height, width)
subplots=numpy.ndarray.flatten(subplots)
for subplot, image in zip(subplots, digits):
show_digit(image, subplot=subplot)
for subplot in subplots[len(digits):]:
subplot.axis('off')
plt.show()
# ### The bottom line
#
# K-Means clustering is not the best algorithm for image analysis problems, but we do see pretty reasonable clusters being built.
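# One rough way to quantify "reasonable" (a sketch, not part of the original walkthrough): map each cluster to the most common true digit among the 100 validation images we just clustered, and see how often that majority digit matches the actual label. This reuses the `clusters` list and `valid_set` from above.
# +
from collections import Counter

true_labels = valid_set[1][0:100]
majority = {}
for c in set(clusters):
    # most common true digit among the members of this cluster
    members = [t for cl, t in zip(clusters, true_labels) if cl == c]
    majority[c] = Counter(members).most_common(1)[0][0]

matches = [majority[cl] == t for cl, t in zip(clusters, true_labels)]
print('majority-label agreement: {:.2f}'.format(sum(matches) / len(matches)))
# -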
# ### (Optional) Delete the Endpoint
# If you're ready to be done with this notebook, make sure to run the cell below. This will remove the hosted endpoint you created and avoid charges from a stray instance being left running.
print(kmeans_predictor.endpoint)
import sagemaker
sagemaker.Session().delete_endpoint(kmeans_predictor.endpoint)
| sagemaker-python-sdk/1P_kmeans_highlevel/kmeans_mnist.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Beginning interactivity with tabular data: ipywidgets
# ## ipywidgets: More details
# Import our usual things:
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
# Also import ipywidgets:
import ipywidgets
# This is how we've used widgets before for the most part with the weird "decorator" function that "decorates" whatever function directly follows it:
@ipywidgets.interact(name = ['Linda', 'Tina', 'Louise'])
def print_name(name):
print(name) # just a simple print out
# ### Let's make sure we can mess with widgets
#
# But now, lets go into ipywidgets in a bit more detail and look at specific functions and call them directly from ipywidgets.
#
# For example, we can create a little display that increments integer numbers:
itext = ipywidgets.IntText()
itext
# But what if that didn't display for you? Depending on what version of jupyter notebook you have, you might have to use "display" to actually show your widgets.
#
# Try (uncommenting) the following:
# +
#from IPython.display import display
#display(itext)
# -
# Still didn't work? Try refreshing and/or restarting your notebook.
#
# *Still* not working? You might have to install the jupyter notebook widget extension:
# +
# #!jupyter nbextension enable --py widgetsnbextension
# -
# If you end up running the above cell (uncommented naturally!) then you might have to refresh or restart your jupyter notebook.
# Moving on:
#
# The value of `itext` is then stored - so we could in theory generate a toggle and then do stuff with its value:
itext.value
# Note if I go up and change the toggle value I have to re-run this cell to print out the newly stored value.
# I can also set the value "by hand":
itext.value = 10
# Once I run this cell, now the toggle values are updated above.
# ### Building up interfaces with ipywidgets
# Let's start thinking about how we can link up widgets together to make interfaces!
# Let's start making up a little progress bar, and I'm going to set it at 90%:
ip = ipywidgets.IntProgress(value = 90)
ip
# Let's think about how we'd link up this with a little integer slider to change our value:
irange = ipywidgets.IntSlider(min = 0, max = 100, step = 1)
irange
# Right now I can change one and not the other, so that's not what we are going for. We have to "link" whatever is in our widgets that controls both our progress bar and our integer slider.
#
# But what do we link? Let's see what's "in" our different widgets:
ip.keys
irange.keys
# So, there are a lot of parameters here, but one thing that is pretty standard across all widgets is that they have some sort of "value" that can be updated, for example, behold:
ip
ip.value=10
# How do we link the values of these two widgets? With a function called `jslink`:
# +
# ipywidgets.jslink?
# -
# Let's give it a shot!
ipywidgets.jslink((ip,'value'), (irange,'value'))
# Let's see!
ip
irange
# Cool! Note that they are all linked -- every instance!
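# (Side note, just a sketch: both `jslink` and `link` return a link object, so if you store it you can later call its `unlink()` method to break the connection. Demonstrated here on a throwaway pair of sliders so the `ip`/`irange` link we just made stays intact.)
# +
a = ipywidgets.IntSlider(description = 'a')
b = ipywidgets.IntSlider(description = 'b')
temp_link = ipywidgets.jslink((a, 'value'), (b, 'value'))
temp_link.unlink()  # a and b now move independently again
ipywidgets.VBox([a, b])
# -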
# So, we probably want to display these two widgets on top of each other - how can we do that?
#
# ipywidgets has a bunch of layout options we can use:
ipywidgets.VBox([ip, irange])
# What does this `jslink` function do? We know it links the values, but how? It does this on the "browser side", but what does that mean?
#
# There are actually 2 different programming languages operating when you are using a jupyter notebook:
# 1. Python is used to do your calculations
# 1. Javascript is used to "report" back the outputs from Python
#
# Here `jslink` means that the link between your progress bar and slider is happening through Javascript, and happens all within your browser - Python is never contacted about anything in this case!
#
# We'll see in a second that there are some instances where we want more options than presented with `jslink` and so we'll actually want Python to get involved in the linking of things.
#
# Let's redo what we just did but getting Python involved:
ip2 = ipywidgets.IntProgress(value = 90)
irange2 = ipywidgets.IntSlider(min = 0, max = 100, step = 1)
ipywidgets.link((ip2,'value'), (irange2,'value')) # here is the change: "jslink" changes to "link"
ipywidgets.VBox([ip2, irange2])
# So, just looking at it, it looks exactly the same! However, this time we did it through *Python*.
# In this sense we add an extra step:
# 1. first the browser senses a change when you move the slider (Javascript)
# 1. sends this info to the kernel (Javascript -> Python)
# 1. Python registers this change and figures out what to do with it, in this case it says to update the progress bar (Python)
# 1. Python then tells the browser to update the progress bar (Python -> Javascript)
# 1. the browser displays the change in the progress bar (Javascript)
# Ok, that's a little complicated. The big question you can ask yourself is: Do we really care? At the moment, no - we can do everything we want using Python/JS in these notebooks, doesn't really affect us one way or the other. When we start thinking about developing for the web however, we have to think carefully about how to design things because a remote browser only has access to JS by default! But by then we'll be learning Javascript anyway, so not to worry!
#
# For now, let's move on and worry about this JS/Python linking distinction later.
#
# Let's try making a button that does something when we press it:
button = ipywidgets.Button(description = "I am a Clicker")
button
# So, obviously, we want to do something cool when the button is clicked. To do that, first we make a function that will do something when we click our button:
# let's just say I've clicked:
def say_click(event):
print("I have clicked. Click.")
# Now we have to link our button click to this function, which is more complicated than what `jslink` can handle, so we use the "on_click" function associated with this button through the Python-linking channel:
button.on_click(say_click)
# Now, note that if we go back and press our button, it now prints our statement. This seems like magic, but it's using "callbacks" to look for changes via Python. Practically, we don't have to worry about what is going on under the hood for what we want to do with ipywidgets, but it is nonetheless pretty neat. :)
# Let's build up a bit more complex interactivity using our new friend the button and this "on_click" function to interactively update our progress bar.
# Let's start by making a progress bar again:
ip = ipywidgets.IntProgress()
# Now, let's add a button that will add 10:
button_plus = ipywidgets.Button(description = "+10")
# ... and one that will subtract 10:
button_minus = ipywidgets.Button(description = "-10")
# Let's see how this looks in a horizontal box row:
ipywidgets.HBox([button_minus, ip, button_plus])
# Note if we click these, nothing happens. This is because we haven't associated actions to our clicks just yet. We have to make some functions that will tell our buttons and progress bar what to do when each respective button is clicked. So, let's do that!
# First, let's remind ourselves what we want to be changing here -- the value of the progress bar:
ip.value
# Let's now associate a change in the value of our progress bar with a click of the down button:
def click_down(event):
ip.value -= 10
# Note that while I'm going to associate the click with the `-10` button, the *action* that happens when I click is to change the value of the progress bar!
# Let's tie this change in value to the click with the "on_click" function of our down button:
button_minus.on_click(click_down)
# Now let's do the same for our "+10" button:
def click_up(event):
ip.value += 10
button_plus.on_click(click_up)
# We can now go back up and click the buttons to change our progress bar, or we can re-display everything again here:
ipywidgets.HBox([button_minus, ip, button_plus])
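# One optional refinement (just a sketch, nothing we rely on later): an `IntProgress` has `min` and `max` attributes, so we can also gray out each button once the bar hits its limit by flipping the buttons' `disabled` flag inside the click handlers:
# +
ip3 = ipywidgets.IntProgress(min = 0, max = 100, value = 50)
minus3 = ipywidgets.Button(description = "-10")
plus3 = ipywidgets.Button(description = "+10")

def refresh_buttons():
    # disable a button once the bar can't move any further in that direction
    minus3.disabled = (ip3.value <= ip3.min)
    plus3.disabled = (ip3.value >= ip3.max)

def click_down3(event):
    ip3.value -= 10
    refresh_buttons()

def click_up3(event):
    ip3.value += 10
    refresh_buttons()

minus3.on_click(click_down3)
plus3.on_click(click_up3)
refresh_buttons()
ipywidgets.HBox([minus3, ip3, plus3])
# -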
# ## A few more ipywidget linking practice examples
#
# Let's try a few more!
# ### #1: More with layouts (and text boxes) -- OPTIONAL-ISH
# We can use ipywidgets to create text boxes:
# +
ta1 = ipywidgets.Textarea("Hi, this is a box of text. (1)")
ta2 = ipywidgets.Textarea("Hi, this is a box of text. (2)")
ta3 = ipywidgets.Textarea("Hi, this is a box of text. (3)")
ta4 = ipywidgets.Textarea("Hi, this is a box of text. (4)")
# -
# There are different ways we can layout these text boxes.
#
# We can make tabs:
tabs = ipywidgets.Tab([ta1, ta2, ta3, ta4])
tabs
# Or we can "accordion" them:
acc = ipywidgets.Accordion([ta1, ta2, ta3, ta4])
acc
# We can organize them into horizontal boxes like we did before:
ipywidgets.HBox([ta1, ta2, ta3, ta4])
# Note this means a few of them are off screen, so this might not be the best option in this case.
#
# We can organize them in a vertical box instead:
ipywidgets.VBox([ta1, ta2, ta3, ta4])
# Or, if we want to be super fancy, we can organize them in a vertical box of sets of horizontal boxes:
ipywidgets.VBox( [ipywidgets.HBox([ta1, ta2]),
ipywidgets.HBox([ta3, ta4])] )
# We can even insert another widget in our array of horizontal boxes and see how ipywidgets would place it -- let's try it with a label widget:
ipywidgets.VBox( [ipywidgets.HBox([ta1, ta2]),
ipywidgets.Label("Hello there!"),
ipywidgets.HBox([ta3, ta4])] )
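# ipywidgets also has a `GridBox` container if you'd rather specify a grid directly instead of nesting boxes by hand -- a quick sketch with the same four text areas (the column width below is an arbitrary choice):
ipywidgets.GridBox([ta1, ta2, ta3, ta4],
                   layout = ipywidgets.Layout(grid_template_columns = "repeat(2, 320px)"))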
# ### #2: More with layouts (and text boxes) -- putting together sliders with play button -- OPTIONAL
# As a quick example, let's link an integer slider to our play button. First, let's define the range of our play button:
play = ipywidgets.Play(interval = 50, value = 50, min = 1, max = 100, step = 1, description = "Press Play")
# Here, we start at 50, and set the min and max values around that. Now, let's create our slider:
slider = ipywidgets.IntSlider()
# Now let's link the min and max of the play interval with the min and max of the integer slider using the `jslink` function:
ipywidgets.jslink((play, 'min'), (slider, 'min'))
ipywidgets.jslink((play, 'max'), (slider, 'max'))
ipywidgets.jslink((play, 'value'), (slider, 'value'))
# Note that we don't have to tell the `jslink` function how to link these objects - it intuitively knows how to link the play widget min & max to the slider min and max.
#
# Finally, let's display them side by side:
ipywidgets.HBox([play, slider])
# ### #3: slider colorpicker and an intro to traitlets and .observe -- NECESSARY FOR TRAITLETS
# Let's first make a slider:
islider = ipywidgets.IntSlider(min = 0, max = 10, step = 1, orientation = 'vertical')
# Let's give this slider a base color that is sort of purple-y, using a hex code. Recall: HTML hex codes are another way to "name" colors:
#
# <img src="https://i.pinimg.com/originals/7b/f7/c6/7bf7c6e53128592dcd608f368571821c.gif">
islider.style.handle_color = "#750075"
# Let's take a quick look:
islider
# It just slides up and down, nothing too exciting. Let's create a new widget object called a color picker to pick the color of our handle:
cp = ipywidgets.ColorPicker()
# cp
# When we show this we can click on the little box and it pops up a color picker we can mess around with! Neato.
#
# Now let's link the slider's color with the color picker's value. For practice, let's use the Python-based `link` function:
ipywidgets.link( (cp, 'value'), (islider.style, 'handle_color') )
# And let's see them both together:
ipywidgets.VBox([cp, islider])
# But what is this `traitlets.traitlets` thing above that occurs when we use `link` instead of `jslink`? I thought we were doing things with widgets! Let's talk a bit about what `traitlets` are in Python.
# ## Traitlets --- SAVE THIS FOR BQPLOT NEXT WEEK??
#
# Traitlets are a way to link the *change in a variable* to an action.
# We'll start by making a class w/o using traitlets. We've worked with classes before, just sort of "under the radar" like with Pandas objects. Now we will define our own:
class MyObject():
name = 'unnamed'
age = 0
# Let's define a new variable of this `class` type:
my_obj = MyObject()
my_obj.name, my_obj.age
# We can update the values of these variables like we would any other object:
my_obj.name = 'Bob'; my_obj.age = 47
# Now, let's check out our new values:
print('Hello, my name is:', my_obj.name, 'and I am', my_obj.age, 'years old')
# Now, let's say I wanted to re-set the values of the variables in my class -- to print out the above statement, I'd have to redo everything:
my_obj.name = 'Linda'; my_obj.age = 45
print('Hello, my name is:', my_obj.name, 'and I am', my_obj.age, 'years old')
# The `traitlets` library allows us to "track" changes so that we can associate actions with updates to variables. Let's try this example again:
import traitlets
class MyTraitletObject(traitlets.HasTraits):
# lets use the unicode trait to let our object have a name
name = traitlets.Unicode("unnamed")
# lets also give our object an age
age = traitlets.Int(0)
my_t_obj = MyTraitletObject()
my_t_obj.age = 47
my_t_obj.name = 'Bob'
print('Hello, my name is:', my_t_obj.name, 'and I am', my_t_obj.age, 'years old')
# So far this is not very exciting - we are back where we started! However, there are options to "observe" changes in our traits:
# Let's create a function that can act on one of the variables of our object; in particular, it's going to react to the name in my object:
# +
def name_changed(change):
print(change) # this is a format that is required of a traitlets class
# lets tell traitlets that we are going to change something
my_t_obj.observe(name_changed, ['name'])
# -
# Now when we update, we get info about our update back:
my_t_obj.name = 'Linda'
# We can see that there are these weird "new" and "old" names -- this is tracking how things change. The "owner" is just pointing to the memory location of `my_t_obj`.
# Let's do something a little more exciting - have a default print-out of the introduction:
# +
def name_changed2(change):
print('Hello, my name is', change['new'], 'and I am', change['owner'].age, 'years old')
# lets tell traitlets that we are going to change something
my_t_obj.observe(name_changed2, ['name'])
# -
my_t_obj.name = 'Linda'
# Note that now, I've got *2* things associated with any changes -- let's take off the first one:
my_t_obj.unobserve(name_changed, ['name'])
my_t_obj.name = "Bob"
# We can practice by making a "watching for a change" function for age as well:
# +
def age_changed(change):
print('Hello, my name is', change['owner'].name, 'and I am', change['new'], 'years old')
my_t_obj.observe(age_changed, ['age'])
# -
my_t_obj.age = 48
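# (An aside, and just a sketch: traitlets also lets you register observers from *inside* the class with the `@traitlets.observe` decorator, so every instance comes pre-wired. The class below is hypothetical and not used elsewhere in this notebook.)
# +
class IntroducingObject(traitlets.HasTraits):
    name = traitlets.Unicode("unnamed")
    age = traitlets.Int(0)

    @traitlets.observe('name', 'age')
    def _introduce(self, change):
        # runs on any change to 'name' or 'age'
        print('Hello, my name is', self.name, 'and I am', self.age, 'years old')

intro = IntroducingObject()
intro.name = 'Tina'
intro.age = 13
# -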
# ### #4. Michigan colormap and scale - 2 ways with widgets
# Let's go back to the Michigan dataset from last week. First we read in and reshape our dataset:
mich_filename = '/Users/jillnaiman/Downloads/michigan_lld.flt'
michigan = np.fromfile(mich_filename, dtype='f4').reshape((5365,4201))
# We can take a quick look at our dataset:
plt.imshow(michigan)
plt.show()
# If we recall, we had to "mask" out the bad measurement at -9999:
michigan[michigan==-9999] = np.nan
# ... so that we could make a meaningful histogram:
plt.hist(michigan[~np.isnan(michigan)])
plt.show()
# Neat! We also talked a bit about some ways we could transform the color scale for our `imshow` call above:
# +
# for data color transformation
import matplotlib.colors as mpl_colors
plt.imshow(michigan, cmap="terrain", norm = mpl_colors.SymLogNorm(10)) # symmetric log scaling
plt.clim(-352,352) # symmetric about positive and negative heights
plt.colorbar()
plt.show()
# -
# What is this `SymLogNorm` function?
# +
# mpl_colors.SymLogNorm?
# -
np.log10([1,10,50]),np.log10(np.abs([-1,-10,-50]))
# We see that 1 to 10 is mapped to a jump of 1, but 10 to 50 is mapped to a jump of only about 0.7 instead of 40.
#
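# To make that concrete, here's a tiny sketch comparing how a linear `Normalize` and `SymLogNorm` map a few sample depth values onto the 0-1 colormap range (using the same ±352 limits as above):
# +
sample = np.array([-352, -50, -10, -1, 0, 1, 10, 50, 352])
lin = mpl_colors.Normalize(vmin = -352, vmax = 352)
sym = mpl_colors.SymLogNorm(10, vmin = -352, vmax = 352)
print('linear :', np.round(np.asarray(lin(sample)), 2))
print('symlog :', np.round(np.asarray(sym(sample)), 2))
# -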
# One natural thing we might want to do is change the color scheme and be able to toggle the SymLogNorm color remapper on and off. We can do this 2 ways - by using our widget `@interact` decorator function again, and by explicitly laying out widgets. Let's try the first way first:
@ipywidgets.interact(colormap = plt.colormaps(),
sym_log=True)
def plot(colormap = 'terrain', sym_log = True):
if sym_log:
norm = mpl_colors.SymLogNorm(10)
else:
norm = mpl_colors.Normalize()
fig, ax = plt.subplots(figsize=(6,8))
# calling colorbar in a different way:
CAX = ax.imshow(michigan, cmap=colormap, norm = norm)
plt.colorbar(CAX, extend = 'both')
plt.show()
# **Bonus:**
@ipywidgets.interact(colormap = plt.colormaps(), color_range = (1.0, 352.0, 1.0),
sym_log=True)
def plot(colormap = 'terrain', color_range = 352, sym_log = True):
if sym_log:
norm = mpl_colors.SymLogNorm(10)
else:
norm = mpl_colors.Normalize()
fig, ax = plt.subplots(figsize=(6,8))
# calling colorbar in a different way:
CAX = ax.imshow(michigan, cmap=colormap, norm = norm)
CAX.set_clim(-color_range, color_range)
plt.colorbar(CAX, extend = 'both')
plt.show()
# We'll need a few extra functions to do this sort of thing "by hand":
from IPython.display import display, clear_output
# +
fig, ax = plt.subplots(figsize=(4,4))
out = ipywidgets.Output()
fig, ax = plt.subplots(figsize=(6,8))
# calling colorbar in a different way:
CAX = ax.imshow(michigan, cmap='terrain')
plt.colorbar(CAX, extend = 'both')
display(fig)
# +
# plt.figure?
# -
#fig, ax = plt.subplots(figsize=(4,4))
plt.close('all')
#fig.clear()
fig = plt.figure(figsize=(8,8))
#fig.clear()
ax = fig.add_axes([0.0, 0.15, 1.0, 0.8]) # first set of axis: will hold color-levels plot
#ax = plt.subplot(111)
CAX = ax.imshow(michigan, cmap='terrain')
#CB = plt.colorbar(CAX, extend = 'both')
CB = fig.colorbar(CAX, extend = 'both')
plt.show()
fig.axes
# +
#fig, ax = plt.subplots(figsize=(4,4))
#plt.close('all')
fig = plt.figure()
#ax = plt.subplots(111)
ax = fig.add_axes([0.0, 0.15, 1.0, 0.8]) # first set of axis: will hold color-levels plot
CAX = ax.imshow(michigan, cmap='terrain')
#CB = plt.colorbar(CAX, extend = 'both')
CB = fig.colorbar(CAX, extend = 'both')
out = ipywidgets.Output(layout=ipywidgets.Layout(height='300px', width = '300px'))
dropdown = ipywidgets.Dropdown(options=plt.colormaps())
vbox=ipywidgets.VBox(children=(out, dropdown))
display(vbox)
def updateDropdown(change):
if change is not None:
cmap=plt.colormaps()[change['owner'].index]
for a in fig.axes:
fig.delaxes(a)
ax = fig.add_axes([0.0, 0.15, 1.0, 0.8]) # first set of axis: will hold color-levels plot
CAX = ax.imshow(michigan, cmap=cmap)
#CB = plt.colorbar(CAX, extend = 'both')
CB = fig.colorbar(CAX, extend = 'both')
#fig.axes.clear()
#CAX = fig.axes[0].imshow(michigan, cmap=cmap)
#fig.axes[1] = plt.colorbar(CAX)
#for a in fig.axes:
# fig.remove(a)
#ax = plt.subplots(111)
#ax = fig.add_axes([0.0, 0.15, 1.0, 0.8]) # first set of axis: will hold color-levels plot
#CAX = ax.imshow(michigan, cmap=cmap)
#CB.remove()
#CB = fig.colorbar(CAX)
#CB = plt.colorbar(CAX, extend = 'both')
#ax.clear()
#CB.clear()
#CB = fig.colorbar(CAX, extend = 'both')
#fig.clear()
#ax = plt.subplot(111)
#CAX = ax.imshow(michigan, cmap=cmap)
#CB = fig.colorbar(CAX, extend = 'both')
#ax.clear()
#ax.imshow(michigan, cmap=plt.colormaps()[change['owner'].index])
#CB.set_cmap(plt.colormaps()[change['owner'].index])
#CAX = ax.imshow(michigan, cmap=plt.colormaps()[change['owner'].index])
#fig.colorbar(CAX, extend = 'both')
#CB = .colorbar(CAX, extend='both')
#CB.set_cmap(plt.colormaps()[change['owner'].index])
#fig.colorbar.set_cmap(plt.colormaps()[change['owner'].index])
#CB.cmap = plt.colormaps()[change['owner'].index]
with out:
clear_output(wait=True)
display(fig)
dropdown.observe(updateDropdown)
#button.on_click(click)
#click(None)
updateDropdown(None)
# +
# fig.delaxes?
# +
# fig.axes.remove?
# +
# working
fig, ax = plt.subplots(figsize=(4,4))
out=ipywidgets.Output(layout=ipywidgets.Layout(height='300px', width = '300px'))
button=ipywidgets.Button(description='Next')
vbox=ipywidgets.VBox(children=(out,button))
display(vbox)
def click(b):
ax.clear()
ax.plot(np.random.randn(100),np.random.randn(100),'+')
with out:
clear_output(wait=True)
display(fig)
button.on_click(click)
click(None)
# -
# +
# works
#slider = ipywidgets.IntSlider(value=40)
#embed_minimal_html('export_slider.html', views=[slider], title='Widgets export')
# +
# # %matplotlib inline
# # To prevent automatic figure display when execution of the cell ends
# # #%config InlineBackend.close_figures=False
# import matplotlib.pyplot as plt
# import numpy as np
# #from IPython.html import widgets
# from ipywidgets import widgets
# from ipywidgets import Layout
# from IPython.display import display,clear_output
# plt.ioff()
# ax=plt.gca()
# out=widgets.Output(layout=Layout(height='300px', width = '400px', border='solid'))
# button=widgets.Button(description='Next')
# vbox=widgets.VBox(children=(out,button))
# display(vbox)
# def click(b):
# ax.clear()
# ax.plot(np.random.randn(100),np.random.randn(100),'+')
# with out:
# clear_output(wait=True)
# display(ax.figure)
# #ax.figure
# button.on_click(click)
# click(None)
# -
# +
dropdown = ipywidgets.Dropdown(options=plt.colormaps())
def plot(colormap = dropdown, sym_log = True):
if sym_log:
norm = mpl_colors.SymLogNorm(10)
else:
norm = mpl_colors.Normalize()
fig, ax = plt.subplots(figsize=(6,8))
# calling colorbar in a different way:
CAX = ax.imshow(michigan, cmap=colormap, norm = norm)
plt.colorbar(CAX, extend = 'both')
plt.show()
dropdown.observe(plot, [''])
# -
dropdown.keys
dropdown.on_trait_change
# +
def print_change(change):
print(change)
dropdown.observe(print_change, ['index'])
# -
dropdown
# 4: Using @interact vs. a .observe call -- NECESSARY
# example: picking a color map for the michigan dataset & changing the log of the color scale on & off
# example: changing the number of bins & the color of the bars of the depth plot using only .observes
# +
# 5: reading in the UFO dataset with column names and taking a look
# +
# 6: sub-setting the dataset for faster processing -> creating a subset dataframe for quick testing
# +
# 7: lab
# -
| week05/_prep_notebook_week04_old.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Largest Rectangle
# +
# #!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'largestRectangle' function below.
#
# The function is expected to return a LONG_INTEGER.
# The function accepts INTEGER_ARRAY h as parameter.
#
def largestRectangle(heights):
    # Monotonic stack solution: hstack keeps bar heights in non-decreasing order,
    # and pstack keeps the left-most index each stacked height can extend back to.
    max_area = 0
    heights.append(0)  # sentinel so every remaining bar gets popped at the end
    h = len(heights)
    pstack, hstack = [], []
    for i in range(h):
        lastwidth = h + 1
        while pstack and hstack[-1] > heights[i]:
            # current bar is lower: close off every taller rectangle ending here
            lastwidth = pstack[-1]
            max_area = max(max_area, (i - pstack.pop()) * hstack.pop())
        if not pstack or hstack[-1] <= heights[i]:
            # push the new bar, letting it extend back to the left-most
            # position of the bars we just popped
            pstack.append(min(i, lastwidth))
            hstack.append(heights[i])
    return max_area
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
n = int(input().strip())
h = list(map(int, input().rstrip().split()))
result = largestRectangle(h)
fptr.write(str(result) + '\n')
fptr.close()
# -
# ## Truck Tour
# +
# #!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'truckTour' function below.
#
# The function is expected to return an INTEGER.
# The function accepts 2D_INTEGER_ARRAY petrolpumps as parameter.
#
def truckTour(petrolpumps):
    # Classic gas-station problem: if the truck runs dry after leaving pump i,
    # no start in [res, i] can work, so restart the candidate from pump i + 1.
    res = tank = 0
    for i in range(len(petrolpumps)):
        petrol, dist = petrolpumps[i][0], petrolpumps[i][1]
        tank += petrol
        if dist <= tank:
            tank -= dist
        else:
            tank = 0
            res = i + 1
    return res
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
n = int(input().strip())
petrolpumps = []
for _ in range(n):
petrolpumps.append(list(map(int, input().rstrip().split())))
result = truckTour(petrolpumps)
fptr.write(str(result) + '\n')
fptr.close()
| contest/Stack & Queue - I (16-05-2021).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Programming Exercise 1: Linear Regression
#
# # Introduction
# In this exercise, you will implement linear regression and get to see it work on data.
#
# # 1 Simple functions
# We can use the `vstack`, `reshape`, or `transpose` numpy functions, or simple broadcasting, to turn a 1-D array into a column vector. We will also define two helper functions: `padOnes`, which prepends a column of ones to a matrix, and `head`, which displays the first 5 rows of a dataset.
import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.linear_model import LinearRegression
from scipy import optimize
arr = np.arange(5)
# all the lines below are equivalent
np.vstack(arr)
arr.reshape(len(arr), 1)
np.transpose([arr])
arr[:, None]
# +
def padOnes(X):
m = len(X) # number of observations
ones = np.vstack(np.ones(m))
return np.append(ones, X, axis=1)
padOnes([[1, 2], [3, 4], [5, 6]])
# +
def head(matrix, n=5):
if len(matrix) < n: return matrix
else: return matrix[:n]
head([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12]])
# -
# # 2 Linear regression with one variable
# We will implement linear regression with one variable to predict profits for a food truck. Suppose you are the CEO of a restaurant franchise and are considering different cities for opening a new
# outlet. The chain already has trucks in various cities and you have data for profits and populations from the cities. You would like to use this data to help you select which city to expand to next.
#
# The file `ex1data1.txt` contains the dataset for our linear regression problem. The first column is the population of a city and the second column is the profit of a food truck in that city. A negative value for profit indicates a loss.
#
# ## 2.1 Plotting the Data
# Before starting on any task, it is often useful to understand the data by visualizing it. For this dataset, you can use a scatter plot to visualize the data, since it has only two properties to plot (profit and population). Many
# other problems that you will encounter in real life are multi-dimensional and can’t be plotted on a 2-d plot.
# +
# reading the textfile, and then plotting the data
data = np.genfromtxt('ex1data1.txt', delimiter=",")
print(head(data))
# extracting X, and y
X = data[:, 0]
y = data[:, 1]
# plotting
plt.figure(dpi=90)
plt.scatter(X, y, alpha=0.7)
plt.ylabel('Profit in $10,000s')
plt.xlabel('Population of City in 10,000s')
plt.show()
# -
# ## 2.2 Gradient Descent
# We will fit the linear regression parameters θ to our dataset using gradient descent.
# ### 2.2.1 Update Equations
# The objective of linear regression is to minimize the cost function:
#
# $$ J(\theta) = {1\over2m} \times \sum_{i=1}^{m} {(h_\theta(x^i) - y^i)^2} $$
#
#
# where the hypothesis $h_\theta(x)$ is given by the linear model:
#
# $$ h_\theta(x) = \theta^T.x = \theta_0 + \theta_1.x$$
#
#
# Recall that the parameters of your model are the θj values. These are the values you will adjust to minimize cost J(θ). One way to do this is to use the batch gradient descent algorithm. In batch gradient descent, each
# iteration performs the update:
#
# $$ \theta_j = \theta_j − α{1\over m} \times \sum_{i=1}^{m} {(h_\theta(x^i) - y^i).x_j^i} $$
#
# Note that we simultaneously update $\theta_j$ for all $j$. With each step of gradient descent, your parameters $\theta_j$ come closer to the optimal values that will achieve the lowest cost J(θ).
#
# ### 2.2.2 Computing the cost J(θ)
# As we perform gradient descent to minimize the cost function J(θ), it is helpful to monitor the convergence by computing the cost. In this section, we implement a function to calculate J(θ) so we can check the convergence of our gradient descent implementation. The function `computeCost` computes J(θ). As you are doing this, remember that the variables X and y are not scalar values, but matrices whose rows represent the examples from the training set.
#
# Once we have completed the function, the next step will be to run `computeCost` once using θ initialized to zeros, and we will see the cost printed to the screen. You should expect to see a cost of `32.07`.
#
# **Implementation Note:** We store each example as a row in the X matrix. To take into account the intercept term (θ0), we add an additional first column to X and set it to all ones. This allows
# us to treat θ0 as simply another ‘feature’.
# +
def computeCost(X, y, theta):
"""COMPUTECOST Compute cost for linear regression
COMPUTECOST(X, y, theta) computes the cost of using theta as the
parameter for linear regression to fit the data points in X and y"""
m = len(y)
dot = X.dot(theta)
costs = np.power(dot - y, 2)
return (sum(costs)) / (2*m)
theta = [0, 0]
X = padOnes(np.vstack(X))
print(head(X))
computeCost(X, y, theta)
# -
# ### 2.2.3 Implementing gradient descent
# Next, you will implement gradient descent. As you program, make sure you understand what you are trying to optimize and what is being updated. Keep in mind that the cost J(θ) is parameterized by the vector θ, not X and y. That is, we minimize the value of J(θ) by changing the values of the vector θ, not by changing X or y.
#
# A good way to verify that gradient descent is working correctly is to look at the value of J(θ) and check that it is decreasing with each step. Assuming you have implemented gradient descent and
# computeCost correctly, your value of J(θ) should never increase, and should converge to a steady value by the end of the algorithm.
# +
def computeCostDerivative(X, y, theta, j):
m = len(y)
dcosts = np.multiply((X.dot(theta) - y), X[:, j])
return sum(dcosts) / m
computeCostDerivative(X, y, theta, 0)
# +
def gradientDescent(X, y, theta = None, alpha = 0.01, num_iters = 5000, verbose = False):
"""
GRADIENTDESCENT Performs gradient descent to learn theta
theta = GRADIENTDESCENT(X, y, theta, alpha, num_iters) updates theta by
taking num_iters gradient steps with learning rate alpha
"""
    m = len(y)  # number of training examples
    if theta is None:
        theta = np.zeros(X.shape[1])
    theta = np.array(theta, dtype=float)
    for i in range(num_iters):
        if verbose:
            cost = computeCost(X, y, theta)
            print('Iteration {}, Cost {:f}'.format(i, cost))
        # simultaneous update: compute every theta_j from the *old* theta
        theta_temp = theta.copy()
        for j in range(len(theta)):
            theta_temp[j] = theta[j] - alpha * computeCostDerivative(X, y, theta, j)
        theta = theta_temp
    return theta
gradientDescent(X, y)
# -
# ### Putting it together
# We will derive the final parameters, and use them to plot the linear fit. The final values for θ will also be used to make predictions on profits in areas of 35,000 and 70,000 people.
# +
iterations, alpha = 1500, 0.01 # Some gradient descent settings
print('\nTesting the cost function ...\n')
# compute and display initial cost
theta = [0, 0]
J = computeCost(X, y, theta);
print('With theta = [0 ; 0]\nCost computed = ', J);
print('Expected cost value (approx) 32.07\n');
# further testing of the cost function
theta = [-1, 2]
J = computeCost(X, y, theta);
print('\nWith theta = [-1 ; 2]\nCost computed = ', J);
print('Expected cost value (approx) 54.24\n');
print('\nRunning Gradient Descent ...\n')
# run gradient descent
theta = gradientDescent(X, y, theta, alpha, iterations);
# print theta to screen
print('Theta found by gradient descent:\n', theta);
print('Expected theta values (approx):');
print(' -3.6303, 1.1664\n\n');
# Predict values for population sizes of 35,000 and 70,000
predict1 = np.dot([1, 3.5], theta);
print('For population = 35,000, we predict a profit of ', predict1*10000);
predict2 = np.dot([1, 7], theta);
print('For population = 70,000, we predict a profit of ', predict2*10000);
# Plot the linear fit
plt.figure(dpi=90)
plt.scatter(X[:,1], y, alpha=0.7, label='Training data')
plt.plot(X[:,1], X.dot(theta), 'r-', alpha=0.7, label='Linear regression')
plt.ylabel('Profit in $10,000s')
plt.xlabel('Population of City in 10,000s')
plt.legend()
plt.show()
# -
# ## Visualizing J(θ)
# To understand the cost function J(θ) better, you will now plot the cost over a 2-dimensional grid of θ0 and θ1 values.
#
# After these lines are executed, you will have a 2-D array of J(θ) values. The script will then use these values to produce surface and contour plots of J(θ).
#
# The purpose of these graphs is to show you how J(θ) varies with changes in θ0 and θ1. The cost function J(θ) is bowl-shaped and has a global minimum. (This is easier to see in the contour plot than in the 3D surface plot). This minimum is the optimal point for θ0 and θ1, and each step of
# gradient descent moves closer to this point.
# +
print('Visualizing J(theta_0, theta_1) ...\n')
# Grid over which we will calculate J
theta0_vals = np.linspace(-10, 10, 100)
theta1_vals = np.linspace(-1, 4, 100)
# initialize J_vals to a matrix of 0's
J_vals = np.zeros((len(theta0_vals), len(theta1_vals)))
# Fill out J_vals
for i in range(len(theta0_vals)):
for j in range(len(theta1_vals)):
t = [theta0_vals[i], theta1_vals[j]]
J_vals[i,j] = computeCost(X, y, t)
plt.plot(theta[0], theta[1], 'rx')
# J_vals is indexed as J_vals[i, j] = J(theta0_vals[i], theta1_vals[j]),
# so it is transposed to match the contour/surface plotting convention
plt.contour(theta0_vals, theta1_vals, J_vals.T, np.logspace(-2, 3, 20))
plt.xlabel(r'$\theta_0$'); plt.ylabel(r'$\theta_1$');
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
plt.xlabel(r'$\theta_0$'); plt.ylabel(r'$\theta_1$');
T0, T1 = np.meshgrid(theta0_vals, theta1_vals)  # plot_surface needs 2-D grids
ax.plot_surface(T0, T1, J_vals.T)
print(theta)
# -
# ## Normal Equations
# In the lecture videos, you learned that the closed-form solution to linear regression is:
# $$ \theta = (X^TX)^{-1}.(X^Ty) $$
#
# Using this formula does not require any feature scaling, and you will get an exact solution in one calculation: there is no "loop until convergence" like in gradient descent. Remember that while we don't need to scale the features here, we still need to add a column of 1's to the X matrix to have an intercept term (θ0).
def normalEq(X, y):
X = np.matrix(X)
y = np.matrix(y).T
thetas = (X.T*X).I * (X.T*y)
return thetas.T
def normalEq(X, y):
term1 = X.T.dot(X)
term2 = X.T.dot(y)
inv = np.linalg.inv
thetas = inv(term1).dot(term2)
return thetas
# %time th1 = normalEq(X, y)
print(th1)
computeCost(X, y, th1)
# %time th2 = gradientDescent(X, y)
print(th2)
computeCost(X, y, th2)
# ## Comparing 3 ways to obtain θ
# +
# using gradient descent
print(gradientDescent(X, y))
# using normalizing equation
print(normalEq(X, y))
# using the sklearn library
unpaddedX = np.vstack(data[:,0])
model = LinearRegression().fit(unpaddedX, y)
print(model.intercept_, model.coef_)
# -
# # Linear regression with multiple variables
# In this part, you will implement linear regression with multiple variables to predict the prices of houses. Suppose you are selling your house and you want to know what a good market price would be. One way to do this is to first collect information on recent houses sold and make a model of housing
# prices.
#
# The file `ex1data2.txt` contains a training set of housing prices in Portland, Oregon. The first column is the size of the house (in square feet), the second column is the number of bedrooms, and the third column is the price of the house.
#
# importing the data
data = np.genfromtxt('ex1data2.txt', delimiter=",")
X = data[:, 0:2]
y = data[:, 2]
print('SQFT of house', 'N bedrooms', 'price')
print(data[:5])
# ## Feature Normalization
# When features differ by orders of magnitude, first performing feature scaling can make gradient descent converge much more quickly.
# The function `featureNormalize` will
# - Subtract the mean value of each feature from the dataset.
# - After subtracting the mean, additionally scale (divide) the feature values by their respective “standard deviations.”
#
# The standard deviation is a way of measuring how much variation there is in the range of values of a particular feature (most data points will lie within ±2 standard deviations of the mean); this is an alternative to taking the range of values (max-min). Note that each column of the matrix X corresponds to one feature.
#
# **Implementation Note:** When normalizing the features, it is important to store the values used for normalization - the mean value and the standard deviation used for the computations. After learning the parameters from the model, we often want to predict the prices of houses we have not seen before. Given a new x value (living room area and number of bedrooms), we must first normalize x using the mean and standard deviation that we had previously computed from the training set.
#
# +
def featureNormalize(X):
"""
FEATURENORMALIZE Normalizes the features in X.
FEATURENORMALIZE(X) returns a normalized version of X where the mean value of each feature is 0 and the standard deviation is 1.
"""
mu = np.mean(X, axis=0)
sigma = np.std(X, axis=0)
X_norm = (X - mu) / sigma
return X_norm, mu, sigma
X_norm, mu, sigma = featureNormalize(X)
print(X_norm[:5])
print('mean =', mu)
print('SD =', sigma)
# -
# ## Gradient Descent
# Previously, you implemented gradient descent on a univariate regression problem. The only difference now is that there is one more feature in the matrix X. The hypothesis function and the batch gradient descent update rule remain unchanged. The code in `gradientDescentMulti` implements gradient descent for linear regression with multiple variables. If your code in the previous part (single variable) already supports multiple variables, you can use it here too. Make sure your code supports any number of features and is well-vectorized. You can use `X.shape[1]` to find out how many features are present in the dataset.
# +
def gradientDescentMulti(X, y, theta = None, alpha = 0.01, num_iters = 5000):
    # same update rule as the univariate case; X should already be
    # normalized and padded with a column of ones
    return gradientDescent(X, y, theta, alpha, num_iters)
X_norm, mu, sigma = featureNormalize(X)
paddedX_norm = padOnes(X_norm)
gradientDescentMulti(paddedX_norm, y)
# -
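# To actually use these parameters, remember that they were learned on *normalized* features: a new example has to be normalized with the same `mu` and `sigma` (and padded with a 1) before multiplying by theta. A small sketch for a hypothetical 1650 sq-ft, 3-bedroom house (the numbers are just an example):
# +
theta_norm = gradientDescentMulti(paddedX_norm, y)

house = np.array([1650.0, 3.0])        # sq-ft, bedrooms (example values)
house_norm = (house - mu) / sigma      # normalize with the training-set statistics
price = np.dot(np.append(1.0, house_norm), theta_norm)
print('Predicted price of a 1650 sq-ft, 3-bedroom house:', price)
# -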
# ## Comparing 3 ways to obtain θ
# +
# using gradient descent
print(gradientDescentMulti(padOnes(featureNormalize(X)[0]), y))
# using normalizing equation
print(normalEq(padOnes(X), y))
# using the sklearn library
model = LinearRegression().fit(X, y)
print(model.intercept_, model.coef_)
# -
# ## Optimizing the cost function using SciPy
# ### Using `optimize.fmin`
# Note that we will have to provide `fmin` with a function whose only input is what we want to optimize (i.e. θ), so we define `cost` as a closure that fixes X and y and returns a function of θ alone.
# +
def cost(X, y):
def on(theta):
m = len(y)
costs = np.power(X.dot(theta) - y, 2)
return (sum(costs)) / (2*m)
return on
th0 = np.array([0.0, 0.0, 0.0])
X_padded = padOnes(X)
X_norm_padded = padOnes(featureNormalize(X)[0])
print(cost(X_padded, y)(th0))
print(cost(X_norm_padded, y)(th0))
cost(X_padded, y)
# -
from scipy import optimize
thiscost = cost(X_padded, y)
optimize.fmin(thiscost, th0)
thiscost_norm = cost(X_norm_padded, y)
optimize.fmin(thiscost_norm, th0)
# ### Using minimze with constrains
#
# We will use the `minimize` function from `SciPy` library.
thiscost = cost(X_padded, y)
# optimize with no constraints
optimize.minimize(thiscost, th0, method='SLSQP')
# +
thiscost = cost(X_padded, y)
# define a constraint where the intercept, ie theta0 is positive
# Equality constraint means that the constraint function result is to be zero
# whereas inequality means that it is to be non-negative.
con = {'type':'ineq', 'fun': lambda theta: theta[0]}
optimize.minimize(thiscost, th0, method='SLSQP', constraints=con)
# +
thiscost = cost(X_padded, y)
# define a constraint where the intercept, ie theta0 is negative
con = {'type':'ineq', 'fun': lambda theta: -theta[0]}
optimize.minimize(thiscost, th0, method='SLSQP', constraints=con)
| Machine Learning - Coursera/machine-learning-ex1/ex1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Recursion
# - A problem is split into a base case and a recursive case
# - The divide-and-conquer approach uses this idea to solve problems
# Pseudocode
def look_for_key(box):
    for item in box:
        if item.is_a_box():
            look_for_key(item) # <= Recursive
        elif item.is_a_key():
            print("Found the key!")
import time
def count_down(i):
print(i)
time.sleep(i)
if i <= 1:
return
else:
count_down(i-1)
count_down(3)
# ### Stack
# - Push : add a new item on top
# - Pop : remove the top item and read it
# - Like a stack of post-it notes!
# - Recursive functions use the call stack
#     - All of the state has to be saved, so this can consume a lot of memory
#     - Memory is used on every function call
# - If the stack gets too big:
#     - Refactor the code to use a loop instead of recursion (see the iterative sketch below)
#     - Use a technique called tail recursion
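# Since Python does not optimize tail calls, the usual refactor is to replace the recursion with a loop. A small sketch doing the same thing as `count_down` above, but iteratively:
# +
def count_down_loop(i):
    # a plain loop: no extra stack frames, so no recursion-depth limit
    while i >= 1:
        print(i)
        time.sleep(i)
        i -= 1

count_down_loop(3)
# -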
def fact(x):
if x==1:
return 1
else:
return x*fact(x-1)
fact(3)
| Grokking-Algorithms/03.recursive.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
import os
os.environ['CUDA_VISIBLE_DEVICES'] = ''
import json
import pandas as pd
from bs4 import BeautifulSoup
# +
# # !wget https://raw.githubusercontent.com/huseinzol05/malay-dataset/master/dictionary/dialect/johor.json
# -
with open('johor.json') as fopen:
johor = json.load(fopen)
# +
res = []
for j in johor:
    s = BeautifulSoup(j)
    s.tbody.table.decompose()  # remove the nested table inside tbody
    table_rows = s.tbody.find_all('tr')
    for tr in table_rows:
        td = tr.find_all('td')
        # keep only the non-empty cell texts for this row
        row = [tr.text.strip() for tr in td if tr.text.strip()]
        if len(row):
            res.append(row)
# -
df = pd.DataFrame(res)
df
df.iloc[0,1]
import malaya
columns = [1, 4, 5]
results = {}
for c in columns:
for i in range(len(df)):
try:
stripped = df.iloc[i, c].strip()
if stripped[0] == '[' and stripped[-1] == ']':
before = df.iloc[i, c - 1].strip()
splitted = malaya.text.function.split_into_sentences(before, minimum_length = 4)
if len(splitted) > 1:
stripped_splitted = stripped.split('] [')[0]
before = splitted[0]
stripped = stripped_splitted + ']'
results[before] = stripped[1:-1]
except Exception as e:
print(e)
malaya.text.function.split_into_sentences('1. Lohe bena suwanye sampai nampak tak kemas. 2. Buwang saje barang yang lohe tu, beli yang baru.', minimum_length = 4)
results
with open('johor-phoneme.json', 'w') as fopen:
json.dump(results, fopen)
| phoneme/parse-johor.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="zvI35BjxZiR_"
# # Decision Tree Classification with Robust Scalar
#
#
# -
# This code template is for a classification task using a simple DecisionTreeClassifier (based on the Classification and Regression Trees algorithm) together with the RobustScaler feature-scaling technique.
# ### Required Packages
# + id="OedHQBiYZiSA"
# !pip install imblearn
# + colab={"base_uri": "https://localhost:8080/"} id="10vyTUoMZiSB" outputId="61bce29b-fc29-403a-ca53-cb0b495f92e6"
import numpy as np
import pandas as pd
import seaborn as se
import warnings
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from imblearn.over_sampling import RandomOverSampler
from sklearn.tree import DecisionTreeClassifier,plot_tree
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import RobustScaler
from sklearn.metrics import classification_report,plot_confusion_matrix
warnings.filterwarnings('ignore')
# + [markdown] id="aiAMTA_xZiSB"
# ### Initialization
#
# Filepath of CSV file
# + id="WN_Jcs-1ZiSC"
file_path= ""
# + [markdown] id="xgJc2G1VZiSC"
# List of features which are required for model training .
# + id="6nRajCZRZiSC"
features=[]
# + [markdown] id="BIZmVzBjZiSD"
# Target feature for prediction.
# + id="MNQJ7614ZiSE"
target=''
# + [markdown] id="CEwzbf2VZiSE"
# ### Data Fetching
#
# Pandas is an open-source, BSD-licensed library providing high-performance, easy-to-use data manipulation and data analysis tools.
#
# We will use the pandas library to read the CSV file from its storage path, and the head function to display the first few rows.
# + colab={"base_uri": "https://localhost:8080/", "height": 203} id="JsEPbAMIZiSF" outputId="202726d1-6aee-4d17-c763-4aa408178f5e"
df=pd.read_csv(file_path);
df.head()
# + [markdown] id="moC498R5ZiSF"
# ### Feature Selections
#
# Feature selection is the process of reducing the number of input variables when developing a predictive model, both to reduce the computational cost of modelling and, in some cases, to improve the performance of the model.
#
# We will assign all the required input features to X and target/outcome to Y.
# + id="VmHf-0JYZiSG"
X=df[features]
Y=df[target]
# + [markdown] id="54AwaONIZiSG"
# ### Data Preprocessing
#
# Since most machine learning models in the sklearn library don't handle string categorical data or null values, we have to explicitly remove or replace them. The snippet below defines functions that fill in null values if any exist, and convert string class labels in the dataset by encoding them as integer classes.
# + id="XXyTDAKjZiSG"
def NullClearner(df):
if(isinstance(df, pd.Series) and (df.dtype in ["float64","int64"])):
df.fillna(df.mean(),inplace=True)
return df
elif(isinstance(df, pd.Series)):
df.fillna(df.mode()[0],inplace=True)
return df
else:return df
def EncodeX(df):
return pd.get_dummies(df)
def EncodeY(df):
if len(df.unique())<=2:
return df
else:
un_EncodedT=np.sort(pd.unique(df), axis=-1, kind='mergesort')
df=LabelEncoder().fit_transform(df)
EncodedT=[xi for xi in range(len(un_EncodedT))]
print("Encoded Target: {} to {}".format(un_EncodedT,EncodedT))
return df
# + [markdown] id="i_Zd49qaZiSH"
# Calling preprocessing functions on the feature and target set.
#
# + colab={"base_uri": "https://localhost:8080/", "height": 220} id="IxtA_U7KZiSH" outputId="33ecd602-3666-48bf-d163-6de844bb13d6"
x=X.columns.to_list()
for i in x:
X[i]=NullClearner(X[i])
X=EncodeX(X)
Y=EncodeY(NullClearner(Y))
X.head()
# + [markdown] id="JdnPBTukZiSH"
# #### Correlation Map
#
# In order to check the correlation between the features, we will plot a correlation matrix. It is effective in summarizing a large amount of data where the goal is to see patterns.
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="HvH0VL5zZiSH" outputId="2b8a42b4-ea18-483a-c1dd-be02d8085e07"
f,ax = plt.subplots(figsize=(18, 18))
matrix = np.triu(X.corr())
se.heatmap(X.corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax, mask=matrix)
# + [markdown] id="IZBxOUyDZiSI"
# #### Distribution Of Target Variable
# + colab={"base_uri": "https://localhost:8080/", "height": 391} id="tmgpFBOTZiSI" outputId="ab07e631-9b42-4a3f-bed2-e84a0caf4a8b"
plt.figure(figsize = (10,6))
se.countplot(Y)
# + [markdown] id="KD4D7V3yZiSI"
# ### Data Splitting
#
# The train-test split is a procedure for evaluating the performance of an algorithm. The procedure involves taking a dataset and dividing it into two subsets. The first subset is utilized to fit/train the model. The second subset is used for prediction. The main motive is to estimate the performance of the model on new data.
# + id="jsm_hZOJZiSI"
x_train,x_test,y_train,y_test=train_test_split(X,Y,test_size=0.2,random_state=123)
# + [markdown] id="u34iJaFShNwY"
# ### Data Scaling
# This Scaler removes the median and scales the data according to the quantile range (defaults to IQR: Interquartile Range). The IQR is the range between the 1st quartile (25th quantile) and the 3rd quartile (75th quantile). Centering and scaling happen independently on each feature by computing the relevant statistics on the samples in the training set.
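# As a tiny illustration (a sketch only, not part of the pipeline below): because RobustScaler uses the median and IQR, a single extreme outlier barely shifts the scaled values of the ordinary points.
# +
import numpy as np
from sklearn.preprocessing import RobustScaler

toy = np.array([[1.0], [2.0], [3.0], [4.0], [1000.0]])  # one extreme outlier
print(RobustScaler().fit_transform(toy).ravel())         # ordinary points stay within roughly [-1, 1]
# -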
# + id="VWXkIQymdeGP"
scaler = RobustScaler()
x_train = scaler.fit_transform(x_train)
x_test = scaler.transform(x_test)  # reuse the statistics fitted on the training set; don't re-fit on test data
# + [markdown] id="u-xPodk5ZiSJ"
# #### Handling Target Imbalance
#
# The challenge of working with imbalanced datasets is that most machine learning techniques will ignore, and in turn have poor performance on, the minority class, although typically it is performance on the minority class that is most important.
#
# One approach to addressing imbalanced datasets is to oversample the minority class. The simplest approach involves duplicating examples in the minority class. We will perform oversampling using the imblearn library.
# + id="LgZMl1m-ZiSJ"
x_train,y_train = RandomOverSampler(random_state=123).fit_resample(x_train, y_train)
# + [markdown] id="DeeOgVK3ZiSJ"
# ### Model
# A decision tree is a popular and powerful tool for classification and prediction. It is a flowchart-like tree structure, where each internal node denotes a test on an attribute, each branch represents an outcome of the test, and each leaf node holds an outcome label.
#
# As with other classifiers, DecisionTreeClassifier takes as input two arrays: an array X, sparse or dense, of shape (n_samples, n_features) holding the training samples, and an array Y of integer values, shape (n_samples,), holding the class labels for the training samples.
# It is capable of both binary ([-1,1] or [0,1]) classification and multiclass ([0, …,K-1]) classification.
#
# #### Model Tuning Parameter
#
# > - criterion -> The function to measure the quality of a split. Supported criteria are “gini” for the Gini impurity and “entropy” for the information gain.
#
# > - max_depth -> The maximum depth of the tree. If None, then nodes are expanded until all leaves are pure or until all leaves contain less than min_samples_split samples.
#
# > - max_leaf_nodes -> Grow a tree with max_leaf_nodes in best-first fashion. Best nodes are defined as relative reduction in impurity. If None then unlimited number of leaf nodes.
#
# > - max_features -> The number of features to consider when looking for the best split: **{auto , sqrt, log2}**
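# For illustration only, an instantiation with these parameters set explicitly might look like the sketch below (the values are arbitrary examples, not tuned for this dataset; the model actually trained in the next cell keeps the defaults):
# +
tuned_model = DecisionTreeClassifier(criterion = 'entropy',
                                     max_depth = 5,
                                     max_leaf_nodes = 20,
                                     max_features = 'sqrt',
                                     random_state = 123)
# -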
# + colab={"base_uri": "https://localhost:8080/"} id="gQPu_E6ZZiSJ" outputId="0b595b3d-f644-4945-8535-febcc30eda8f"
model = DecisionTreeClassifier(random_state=123)
model.fit(x_train,y_train)
# + [markdown] id="I4a9275iZiSK"
# #### Model Accuracy
#
# The score() method returns the mean accuracy on the given test data and labels.
#
# In multi-label classification, this is the subset accuracy which is a harsh metric since you require for each sample that each label set be correctly predicted.
# + colab={"base_uri": "https://localhost:8080/"} id="v5cs4vMxZiSK" outputId="9e465ad3-9681-498b-bd73-af5d01a789d7"
print("Accuracy score {:.2f} %\n".format(model.score(x_test,y_test)*100))
# + [markdown] id="TMYvZztNZiSK"
# #### Confusion Matrix
#
# A confusion matrix is utilized to understand the performance of the classification model or algorithm in machine learning for a given test set where results are known.
# + [markdown] id="b83Zh1ZCZiSK"
# Plotting confusion matrix for the predicted values versus actual values.
# + colab={"base_uri": "https://localhost:8080/", "height": 296} id="eZftbS5tZiSK" outputId="3098edbd-b512-4cdf-fe69-faf7850c1c18"
plot_confusion_matrix(model,x_test,y_test,cmap=plt.cm.Blues)
# + [markdown] id="ROUA5DTYZiSL"
# #### Classification Report
# A classification report is used to measure the quality of predictions from a classification algorithm: how many predictions were correct and how many were not, broken down per class.
#
#   * where:
#     - Precision:- Accuracy of positive predictions.
#     - Recall:- Fraction of actual positives that were correctly identified.
#     - f1-score:- Harmonic mean of precision and recall.
#     - support:- The number of actual occurrences of the class in the specified dataset.
# + colab={"base_uri": "https://localhost:8080/"} id="8qKPD116ZiSL" outputId="175ccbfd-2be7-49c8-eaca-97164b0b90b8"
print(classification_report(y_test,model.predict(x_test)))
# + [markdown] id="55oj5aEUZiSL"
# #### Feature Importances.
#
# The Feature importance refers to techniques that assign a score to features based on how useful they are for making the prediction.
# + colab={"base_uri": "https://localhost:8080/", "height": 405} id="-HmPPbrPZiSL" outputId="b18cdfd7-b20d-402b-d40f-a8abf53feabc"
plt.figure(figsize=(8,6))
n_features = len(X.columns)
plt.barh(range(n_features), model.feature_importances_, align='center')
plt.yticks(np.arange(n_features), X.columns)
plt.xlabel("Feature importance")
plt.ylabel("Feature")
plt.ylim(-1, n_features)
# + [markdown] id="PUMS4b7WZiSM"
# #### Tree Plot
#
# Plot a decision tree. The visualization is fit automatically to the size of the axis. Use the figsize or dpi arguments of plt.figure to control the size of the rendering.
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="vOEQaiqqZiSM" outputId="8758174d-523a-44da-986d-ca1316c9a9b7"
fig, axes = plt.subplots(nrows = 1,ncols = 1,figsize = (3,3), dpi=400)
cls_target = [str(x) for x in pd.unique(y_train)]
cls_target.sort()
plot_tree(model,feature_names = X.columns, class_names=cls_target,filled = True)
fig.savefig('./tree.png')
# + [markdown] id="DFcUtcFxZiSM"
# #### Creator: <NAME>iga B , Github: [Profile - Iamgrootsh7](https://github.com/iamgrootsh7)
| Classification/Decision Tree/DecisionTreeClassifier_RobustScaler.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Counting Sort
# ### Constraints:
# - No negative or floating-point values
# - Values must be in the range 0 <= a[i] <= K
# - Upper bound on K: K should be O(N) (N, 2N, 3N, ...), not N^2
#
#
# +
def CountSort(arr):
    N = len(arr)
    K = get_K(arr)  # if K is not given, use the maximum value in arr

    # count the occurrences of each value 0..K
    K_arr = [0 for i in range(K + 1)]
    for num in arr:
        K_arr[num] += 1

    # prefix sums: K_arr[v] is now the number of elements <= v
    for j in range(1, len(K_arr)):
        K_arr[j] += K_arr[j-1]

    # place elements into their final positions, scanning from the right
    # so that the sort stays stable
    result_arr = [None for i in range(N)]
    for j in range(N - 1, -1, -1):
        K_arr[arr[j]] -= 1
        index = K_arr[arr[j]]
        result_arr[index] = arr[j]

    return result_arr


def get_K(arr):
    N = len(arr)
    maximum = arr[0]
    for i in range(1, N):  # include the last element
        maximum = max(maximum, arr[i])
    return maximum
# -
# Test
arr = [1, 0, 2, 1, 0, 1, 1, 5, 6, 7, 5, 4, 2, 2, 0, 0, 1]
print(CountSort(arr))
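# A quick sanity check (a sketch) against Python's built-in sort, using random inputs that respect the constraints above (non-negative integers with K on the order of N):
# +
import random

for _ in range(100):
    N = random.randint(1, 50)
    test = [random.randint(0, 2 * N) for _ in range(N)]
    assert CountSort(test) == sorted(test)
print("All random tests passed")
# -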
| Algorithms/SearchingAndSorting/Counting_Sort.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: py37
# language: python
# name: py37
# ---
# + executionInfo={"elapsed": 2990, "status": "ok", "timestamp": 1630956134659, "user": {"displayName": "\uc789\uc29d\ube59", "photoUrl": "", "userId": "01566298293687163523"}, "user_tz": -540} id="xCEi4wueLgkw"
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, LSTM, Conv1D, MaxPooling1D, Input,Flatten, Dropout
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.optimizers import Adam
import tensorflow.keras.backend as kb
import warnings
warnings.filterwarnings(action='ignore')
import autokeras as ak
import tensorflow as tf
tf.get_logger().setLevel('ERROR')
# + executionInfo={"elapsed": 4, "status": "ok", "timestamp": 1630956134660, "user": {"displayName": "\uc789\uc29d\ube59", "photoUrl": "", "userId": "01566298293687163523"}, "user_tz": -540} id="IS_hMwJjL-WK"
import tensorflow as tf
gpu_devices = tf.config.experimental.list_physical_devices("GPU")
for device in gpu_devices:
tf.config.experimental.set_memory_growth(device, True)
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 13424, "status": "ok", "timestamp": 1630956148081, "user": {"displayName": "\uc789\uc29d\ube59", "photoUrl": "", "userId": "01566298293687163523"}, "user_tz": -540} id="HPtB2hKwL_UO" outputId="37d7fc27-8803-4551-95e1-6001b291ca25"
def txtToDataframe(filename, flag1, flag2):
file = open(filename, 'r')
lines = file.readlines()
datas = []
for line in lines:
txt = line.replace(' ', ' ').lstrip().rstrip().replace(' ', ',')
data = txt.split(',')
datas.append(data)
df = pd.DataFrame(datas)
    df.columns = ['AF3', 'F7', 'F3', 'FC5', 'T7', 'P7', 'O1', 'O2', 'P8', 'T8', 'FC6', 'F4', 'F8', 'AF4']  # the 14 Emotiv EPOC channels
df['label1']=flag1
df['label2']=flag2
return df
def getData(src) :
file_list = os.listdir(src)
rating = pd.DataFrame(pd.read_csv(src+'ratings.txt'))
file_list.remove('ratings.txt')
dataList=[]
highList=[]
lowList=[]
print(rating.columns)
j=0
for i in rating['subject']:
if i<10:
num = str(0)+str(i)
else:
num = str(i)
dataList.append(txtToDataframe(src+'sub'+num+'_hi.txt', 1,rating['test'][j]))
dataList.append(txtToDataframe(src+'sub'+num+'_lo.txt', 0,rating['rest'][j]))
highList.append(txtToDataframe(src+'sub'+num+'_hi.txt', 1,rating['test'][j]))
lowList.append(txtToDataframe(src+'sub'+num+'_lo.txt', 0,rating['rest'][j]))
j+=1
return dataList, highList, lowList
src = './STEW Dataset/'
originalData, highData, lowData = getData(src)
# + colab={"base_uri": "https://localhost:8080/", "height": 439} executionInfo={"elapsed": 55280, "status": "ok", "timestamp": 1630956203359, "user": {"displayName": "\uc789\uc29d\ube59", "photoUrl": "", "userId": "01566298293687163523"}, "user_tz": -540} id="79KwWryLMkbG" outputId="3e330fbb-090c-4f52-8200-d1b38e1326e6"
mergedData = pd.concat([originalData[0],originalData[1]],ignore_index=True)
for i in range(2,len(originalData)):
mergedData = pd.concat([mergedData,originalData[i]],ignore_index=True)
mergedData = mergedData.apply(pd.to_numeric)
mergedData
# + executionInfo={"elapsed": 4, "status": "ok", "timestamp": 1630956203360, "user": {"displayName": "\uc789\uc29d\ube59", "photoUrl": "", "userId": "01566298293687163523"}, "user_tz": -540} id="J9-m1dXhMbqW"
label=mergedData['label1']
label2=mergedData['label2']
data=mergedData.drop(['label1','label2'],axis=1)
# +
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
scaled = scaler.fit_transform(data)
data = pd.DataFrame(scaled, columns = data.columns, index=data.index)
data
# +
def windowing_dataset(data, label, window_size):
data_list = []
label_list = []
    for i in range(0, len(data) - window_size + 1, window_size):  # step through the data in non-overlapping windows
data_list.append(np.array(data.iloc[i:i+window_size]))
label_list.append(np.array(label.iloc[i]))
return np.array(data_list), np.array(label_list)
dataList, labelList = windowing_dataset(data,label,10)
#labelList = to_categorical(labelList,9)
print(data.shape, label.shape, dataList.shape, labelList.shape)
# -
# Autokeras
import autokeras as ak
from tensorflow.keras.utils import plot_model
from datetime import datetime
X_train, X_test, Y_train, Y_test = train_test_split(dataList,labelList, train_size=0.7, random_state=True ,stratify = labelList)
print(X_train.shape, X_test.shape, Y_train.shape, Y_test.shape)
x_train, x_test, y_train, y_test = train_test_split(data,label, train_size=0.7, random_state=True ,stratify = label)
print(x_train.shape, x_test.shape, y_train.shape, y_test.shape)
trials=[20] #3,5,10,
for trial in trials:
clf_ = ak.ImageClassifier(overwrite=True, max_trials=trial)
clf_.fit(x=X_train, y=Y_train, epochs=20)
predicted_y = clf_.predict(X_test)
print(predicted_y)
loss, acc = clf_.evaluate(X_test, Y_test)
print(clf_.evaluate(X_test, Y_test))
print('Loss: %.3f Accuracy: %.3f' % (loss,acc))
model = clf_.export_model()
model.summary()
plot_model(model, show_shapes=True)
tmp = int(acc*100)
print(tmp)
model.save('model/WindowingLabel2AutoKeras_'+
str(datetime.now().strftime('%Y-%m-%d %H-%M-%S'))+' ACC_'+str(tmp)+'try.h5')
# Model1 + (Model2-1 + Model2-2)
# +
def windowing_dataset(data, label, label2, window_size):
data_list = []
label_list = []
label2_list = []
    for i in range(0, len(data) - window_size + 1, window_size):  # step through the data in non-overlapping windows
data_list.append(np.array(data.iloc[i:i+window_size]))
label_list.append(np.array(label.iloc[i]))
label2_list.append(np.array(label2.iloc[i]))
return np.array(data_list), np.array(label_list), np.array(label2_list)
dataList, labelList, label2List = windowing_dataset(data,label,label2,10)
label2List = to_categorical(label2List,9)
print(data.shape, label.shape, dataList.shape, labelList.shape, label2List.shape)
# -
modelList=os.listdir('model/')
print(modelList)
def makeInputShape(data):
    # wrap a single window in a batch dimension so model.predict() accepts it
    return np.expand_dims(data, axis=0)
# +
model = tf.keras.models.load_model('model/WindowingAutoKeras_2021-09-11 02-37-32 ACC_98try.h5')
data2_1=[]
data2_2=[]
label2_1=[]
label2_2=[]
cnt = 0
for i in range(len(dataList)):
pred=model.predict(makeInputShape(dataList[i]))
if pred < 0.5: #rest
data2_1.append(dataList[i])
label2_1.append(label2List[i])
if labelList[i] != 0 :
#print('No: '+str(i)+', Count : ' + str(cnt)+', True : ' + str(labelList[i]) + ', Predict : ' + str(pred))
cnt+=1
else: #test
data2_2.append(dataList[i])
label2_2.append(label2List[i])
if labelList[i] != 1 :
#print('No: '+str(i)+', Count : ' + str(cnt) + ', True : ' + str(labelList[i]) + ', Predict : ' + str(pred))
cnt+=1
print("Loss: "+str(cnt/len(dataList)))
data2_1 = np.array(data2_1)
data2_2 = np.array(data2_2)
label2_1 = np.array(label2_1)
label2_2 = np.array(label2_2)
# +
X_train, X_test, Y_train, Y_test = train_test_split(data2_1,label2_1, train_size=0.7, random_state=True ,stratify = label2_1)
print(X_train.shape, X_test.shape, Y_train.shape, Y_test.shape)
trials=[3,5,10,20]
for trial in trials:
clf_ = ak.ImageClassifier(overwrite=True, max_trials=trial)
clf_.fit(x=X_train, y=Y_train, epochs=20)
predicted_y = clf_.predict(X_test)
print(predicted_y)
loss, acc = clf_.evaluate(X_test, Y_test)
print(clf_.evaluate(X_test, Y_test))
print('Loss: %.3f Accuracy: %.3f' % (loss,acc))
model = clf_.export_model()
model.summary()
plot_model(model, show_shapes=True)
tmp = int(acc*100)
print(tmp)
model.save('model/model2_1AutoKeras_'+
str(datetime.now().strftime('%Y-%m-%d %H-%M-%S'))+' ACC_'+str(tmp)+'try.h5')
# +
X_train, X_test, Y_train, Y_test = train_test_split(data2_2,label2_2, train_size=0.7, random_state=True ,stratify = label2_2)
print(X_train.shape, X_test.shape, Y_train.shape, Y_test.shape)
trials=[3,5,10,20]
for trial in trials:
clf_ = ak.ImageClassifier(overwrite=True, max_trials=trial)
clf_.fit(x=X_train, y=Y_train, epochs=20)
predicted_y = clf_.predict(X_test)
print(predicted_y)
loss, acc = clf_.evaluate(X_test, Y_test)
print(clf_.evaluate(X_test, Y_test))
print('Loss: %.3f Accuracy: %.3f' % (loss,acc))
model = clf_.export_model()
model.summary()
plot_model(model, show_shapes=True)
tmp = int(acc*100)
print(tmp)
model.save('model/model2_2AutoKeras_'+
str(datetime.now().strftime('%Y-%m-%d %H-%M-%S'))+' ACC_'+str(tmp)+'try.h5')
# + [markdown] id="4XCOOarcj7B_"
# Check the model shapes
# +
modelName=['model1','model2_1','model2_2']
modelList=[]
for name in modelName:
modelList.append(tf.keras.models.load_model('model/'+name+'.h5'))
for i in range(len(modelList)):
print(modelName[i])
model = modelList[i]
model.summary()
| EEG/model/STEW_autoKeras.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # Python performance optimization
# + [markdown] slideshow={"slide_type": "slide"}
# ## Membership testing is faster in dict than in list.
#
# Python dictionaries use hash tables, which means that a lookup operation (e.g., `if x in y`) is O(1) on average. A lookup in a list requires iterating over the entire list, resulting in O(n) for a list of length n. http://www.clips.ua.ac.be/tutorials/python-performance-optimization
# + slideshow={"slide_type": "subslide"}
import timeit
def test_ifin(d):
if 5000 in d:
a = 1
else:
a = 2
d1 = dict.fromkeys(range(10000), True)
d2 = range(10000)
# + slideshow={"slide_type": "subslide"}
print (timeit.timeit(lambda: test_ifin(d1), number=1000))
print (timeit.timeit(lambda: test_ifin(d2), number=1000))
# -
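# + [markdown] slideshow={"slide_type": "subslide"}
# Sets use the same hash-table machinery as dict keys, so membership testing in a set is also O(1) on average. A minimal comparison, added for illustration (exact timings will vary by machine):
# + slideshow={"slide_type": "subslide"}
s1 = set(range(10000))
print (timeit.timeit(lambda: test_ifin(s1), number=1000))
# -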
| code/Python performance optimization.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.7
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# +
import os, types
import pandas as pd
from botocore.client import Config
import ibm_boto3
def __iter__(self): return 0
# @hidden_cell
# The following code accesses a file in your IBM Cloud Object Storage. It includes your credentials.
# You might want to remove those credentials before you share the notebook.
if os.environ.get('RUNTIME_ENV_LOCATION_TYPE') == 'external':
endpoint_e10620ca820f4d14b1736291cd652394 = 'https://s3.us.cloud-object-storage.appdomain.cloud'
else:
endpoint_e10620ca820f4d14b1736291cd652394 = 'https://s3.us.cloud-object-storage.appdomain.cloud'
client_e10620ca820f4d14b1736291cd652394 = ibm_boto3.client(service_name='s3',
ibm_api_key_id='<KEY>',
ibm_auth_endpoint="https://iam.cloud.ibm.com/oidc/token",
config=Config(signature_version='oauth'),
endpoint_url=endpoint_e10620ca820f4d14b1736291cd652394)
body = client_e10620ca820f4d14b1736291cd652394.get_object(Bucket='<KEY>',Key='zomato.csv')['Body']
# add missing __iter__ method, so pandas accepts body as file-like object
if not hasattr(body, "__iter__"): body.__iter__ = types.MethodType( __iter__, body )
df = pd.read_csv(body)
df.head()
# -
rows = df.shape[0]
columns = df.shape[1]
print("Rows : "+str(rows)+" Columns : "+str(columns))
# Having 51717 rows doesn't mean that there are that many unique restaurants in the city.
df['name'].value_counts()
# There are 8792 unique restaurants.
df['name'].value_counts()[:10]
df_temp = df['name'].value_counts()[:10]
sns.barplot(x = df_temp , y = df_temp.index)
del df['url']
del df['address']
del df['phone']
del df['reviews_list']
del df['menu_item']
df['rate'].value_counts()
df['rate'].isnull().sum()
# Extract the numeric rating from the 'rate' column (e.g. '4.1/5' -> 4.1)
df['rating'] = df['rate'].str.split('/',n=2,expand=True)[0]
df = df.drop('rate',axis=1)
del df['location']
df.isnull().sum()
df = df.rename(columns = {"approx_cost(for two people)" : "avg_cost", "listed_in(type)" : "meal_type",
"listed_in(city)" : "city"})
df.head()
# Convert data type of approx cost from character to numeric
df['avg_cost'] = df['avg_cost'].str.replace(',','').apply(lambda x:float(x))
# set non-numeric rating values ('NEW', '-') to null
df['rating'] = df['rating'].apply(lambda x: None if x in ('NEW',None,'-') else float(x))
# Let's look at the distribution of the continuous variables
fig = plt.figure(figsize=(14,10))
ax1 = fig.add_subplot(3,2,1)
ax2 = fig.add_subplot(3,2,2)
sns.distplot(df['votes'],ax=ax1)
sns.boxplot(df['votes'],ax=ax2)
ax3 = fig.add_subplot(3,2,3)
ax4 = fig.add_subplot(3,2,4)
sns.distplot(df['avg_cost'],ax=ax3)
sns.boxplot(df['avg_cost'],ax=ax4)
ax5 = fig.add_subplot(3,2,5)
ax6 = fig.add_subplot(3,2,6)
sns.distplot(df['rating'],ax=ax5)
sns.boxplot(df['rating'],ax=ax6)
sns.set_context("paper", font_scale = 2, rc = {"font.size": 20,"axes.titlesize": 25,"axes.labelsize": 20})
sns.catplot(data = df, kind = 'count', x = 'online_order')
plt.title('Number of restaurants that take order online')
plt.show()
# +
df_temp = df['online_order'].value_counts()
plt.pie(df_temp, labels=df_temp.index)
plt.title('Overall online order service')
plt.show()
popular_restaurant = df['name'].value_counts()[:20].index
df_popular = df[df['name'].isin(popular_restaurant)]
df_temp = df_popular['online_order'].value_counts()
plt.pie(df_temp, labels=df_temp.index)
plt.title('Scenario for top 20 restaurants')
plt.show()
# -
sns.set_context("paper", font_scale = 2, rc = {"font.size": 20, "axes.titlesize": 25, "axes.labelsize": 20})
sns.catplot(data = df, kind = 'count', x = 'book_table')
plt.title('Number of restaurants that have the option to book table')
plt.show()
sns.set_context("paper", font_scale = 0.8, rc = {"font.size": 20,"axes.titlesize": 25,"axes.labelsize": 20})
sns.catplot(data = df, kind = 'count', x = 'meal_type')
plt.title('Number of restaurants according to meal type')
plt.show()
sns.set_context("paper", font_scale = 1, rc={"font.size": 20,"axes.titlesize": 25,"axes.labelsize": 20})
b = sns.catplot(data = df, kind = 'count', x = 'city')
plt.title('Number of restaurants in each area')
b.set_xticklabels(rotation = 90)
plt.show()
sns.set_context("paper", font_scale = 0.8, rc = {"font.size": 20,"axes.titlesize": 25,"axes.labelsize": 15})
b = sns.catplot(data = df, kind = 'count', x = 'rating', order = df['rating'].value_counts().head(30).index)
plt.title('Number of restaurants for each rating')
b.set_xticklabels(rotation = 90)
plt.show()
sns.set_context("paper", font_scale = 1.2, rc = {"font.size": 20,"axes.titlesize": 25,"axes.labelsize": 20})
b = sns.catplot(data = df, kind = 'count', x = 'rest_type', order = df['rest_type'].value_counts().head(30).index)
plt.title('Number of restaurants for each type')
b.set_xticklabels(rotation = 90)
plt.show()
df[['votes', 'name']].groupby(['name']).median().sort_values("votes", ascending = False).head(30).plot.bar()
plt.title('Bar plot of votes vs names')
plt.show()
# +
# finding the details of the most voted restaurant
b=[]
a = df.iloc[:, :].values
for i in range(0, len(a)):
if a[i][0] == 'Byg Brewski Brewing Company' or a[i][0] == 'Byg Brewski Brewing Company ' or a[i][0] == ' Byg Brewski Brewing Company':
b.append(list(a[i]))
b = pd.DataFrame(b)
b.columns = df.columns
b
# +
# plotting the top 10 dishes liked by people
a = df.iloc[:, :].values
for i in range(0, len(a)):
if a[i, 6] == 'Friendly Staff':
#print(a[i, 6])
a[i, 6] = None
a = pd.DataFrame(a)
a.columns = df.columns
sns.set_context("paper", font_scale = 2, rc = {"font.size": 20,"axes.titlesize": 25,"axes.labelsize": 20})
b = sns.countplot(data = a, x = 'dish_liked', order = a.dish_liked.value_counts().iloc[: 10].index)
b.set_xticklabels(b.get_xticklabels(),rotation = 90)
plt.title('Top 10 dishes liked by people of Bangalore')
plt.show()
# -
# !pip install wordcloud
#This is an example of Casual Dining!
df_temp = df[df['rest_type'] == 'Casual Dining']
df_temp.head()
words = list(df_temp['dish_liked'])
word_cloud = []
for i in words:
if(type(i) == str):
temp = i.split(',')
for i in temp:
i=" ".join(i.split())
word_cloud.append(i)
import matplotlib.pyplot as plt
from wordcloud import WordCloud
#convert list to string and generate
unique_string=(" ").join(word_cloud)
wordcloud = WordCloud(width = 1000, height = 500).generate(unique_string)
plt.figure(figsize=(15,8))
plt.imshow(wordcloud)
plt.axis("off")
plt.savefig("your_file_name"+".png", bbox_inches='tight')
plt.show()
plt.close()
| DataAnalytics/Analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from utils import utils
from utils import scale_by_scale_optim
from utils import agent_based_simulation_on_grid
import random
import numpy as np
import time
# %pylab inline
prop_cycle = plt.rcParams['axes.prop_cycle']
# # Simulation example
# +
### Generate parameters
L = 3
comb = [random.randint(20,100) for i in range(L)]
c0 = random.random()*2
c1 = random.random()*2
d = random.random()*2
### Run simulation
model = agent_based_simulation_on_grid.ContainerModel(comb, c0, c1, d)
model.run_simulation(1000)
x,y= zip(*model.positions)
# +
### Visualize
from bokeh.io import output_notebook, show
from bokeh.plotting import figure
output_notebook()
#Colors of transitions
colors = [i['color'] for i in list(prop_cycle)]
transitions = utils.get_scale_labels(model.positions_scales)
tools = "wheel_zoom, pan"
p = figure(tools=tools)
p.scatter(x,y, alpha=0.5, color = 'black')
xs = [[i,i2] for (i,i2) in zip(x[:-1],x[1:])]
ys = [[j,j2] for (j,j2) in zip(y[:-1],y[1:])]
p.multi_line(xs,ys, alpha=0.5, color = [colors[i-1] for i in transitions])
show(p)
# -
# # Evaluate fitting routine
# +
def simulate_fit_evaluate():
random.seed(int(time.time()))
#######################
#### Simulate #######
#######################
#Generate parameters
L = 3
comb = [random.randint(20,100) for i in range(L)]
c0 = random.random()*2
c1 = random.random()*2
d = random.random()*2
#Run simulation
model = agent_based_simulation_on_grid.ContainerModel(comb, c0, c1, d)
model.run_simulation(1000)
x,y= zip(*model.positions)
#######################
#### Fit #######
#######################
stop_locations = np.unique(model.positions, axis = 0)
locs = dict(zip([tuple(i) for i in stop_locations], range(len(stop_locations))))
labels = [locs[i] for i in model.positions]
optimization = scale_by_scale_optim.ScalesOptim(labels, stop_locations, verbose = False, distance_func=utils.euclidean, bootstrap = True)
res = optimization.find_best_scale()
alphas = np.median(list(res[-1].values()),axis = 0)
#######################
#### Evaluate #######
#######################
#Compute correlation
corr = utils.cophenetic_correlation(model.positions_scales, res[0])
#Compute number of scales
n_s = len(res[0][0]) - L
#Compute difference in p
if (len(alphas)-1)==len(model.bernullis):
diff = np.abs(alphas[:-1] - model.bernullis)
else:
diff = np.nan
return(corr, n_s, diff)
# -
from joblib import Parallel, delayed
res4 = Parallel(n_jobs=2)(delayed(simulate_fit_evaluate)() for i in range(100))
corr, n_s, diff = zip(*res4)
#Cophenetic correlation
plt.figure()
plt.hist([i[0] for i in corr])
plt.xlabel('cophenetic correlation')
plt.ylabel('counts')
plt.show()
#Number of levels
plt.figure()
plt.hist([i for i in n_s])
plt.xlabel(r'$n_{original} - n_{recovered}$')
plt.ylabel('counts')
plt.show()
# +
#absolute distance between p recovered and p original
plt.figure()
plt.hist([i.mean() for i in diff if not isinstance(i, float)], bins = 50)
plt.xlabel(r'$|p_{original} - p_{recovered}$|')
plt.ylabel('counts')
plt.show()
# -
| 3_Generate_data_on_a_grid.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # A.1. Data Curation
# ## <NAME>
#
# The necessary imports are below
# +
import sys
import json
import requests
import numpy as np
import pandas as pd
import datetime as dt
from collections import defaultdict
# %matplotlib inline
import matplotlib.pyplot as plt
# -
# # Step 1: Data Acquisition
#
# We use two Wikimedia API endpoints to access the data we need:
# 1. https://wikimedia.org/api/rest_v1/#!/Pagecounts_data_(legacy)/get_metrics_legacy_pagecounts_aggregate_project_access_site_granularity_start_end
# 2. https://wikimedia.org/api/rest_v1/#!/Pageviews_data/get_metrics_pageviews_aggregate_project_access_agent_granularity_start_end
# +
endpoint_legacy = 'https://wikimedia.org/api/rest_v1/metrics/legacy/pagecounts/aggregate/{project}/{access-site}/{granularity}/{start}/{end}'
endpoint_pageviews = 'https://wikimedia.org/api/rest_v1/metrics/pageviews/aggregate/{project}/{access}/{agent}/{granularity}/{start}/{end}'
# -
# ## Retrieving Legacy Data
# +
desktop_params_legacy = {"project" : "en.wikipedia.org",
"access-site" : "desktop-site",
"granularity" : "monthly",
"start" : "2001010100",
# for end use 1st day of month following final month of data
"end" : "2021100100"
}
mobile_params_legacy = {"project" : "en.wikipedia.org",
"access-site" : "mobile-site",
"granularity" : "monthly",
"start" : "2001010100",
# for end use 1st day of month following final month of data
"end" : "2021100100"
}
# -
# ## Retrieving Pageview Data
# +
desktop_params_pageviews = {"project" : "en.wikipedia.org",
"access" : "desktop",
"agent" : "user",
"granularity" : "monthly",
"start" : "2001010100",
# for end use 1st day of month following final month of data
"end" : '2021101000'
}
mobile_app_params_pageviews = {"project" : "en.wikipedia.org",
"access" : "mobile-app",
"agent" : "user",
"granularity" : "monthly",
"start" : "2001010100",
# for end use 1st day of month following final month of data
"end" : '2021101000'
}
mobile_web_params_pageviews = {"project" : "en.wikipedia.org",
"access" : "mobile-web",
"agent" : "user",
"granularity" : "monthly",
"start" : "2001010100",
# for end use 1st day of month following final month of data
"end" : '2021101000'
}
# -
# ## Creating the Call to Pull the Data
# ### Then calling the function
# Customize these with your own information
headers = {
'User-Agent': 'https://github.com/ams884',
'From': '<EMAIL>'
}
def api_call(endpoint,parameters):
call = requests.get(endpoint.format(**parameters), headers=headers)
response = call.json()
return response
desktop_params_monthly_pageviews = api_call(endpoint_pageviews, desktop_params_pageviews)
mobile_app_params_monthly_pageviews = api_call(endpoint_pageviews, mobile_app_params_pageviews)
mobile_web_params_monthly_pageviews = api_call(endpoint_pageviews, mobile_web_params_pageviews)
desktop_monthly_legacy = api_call(endpoint_legacy, desktop_params_legacy)
mobile_monthly_legacy = api_call(endpoint_legacy, mobile_params_legacy)
# ## Saving the data in JSON files
# +
# Write the raw JSON responses to disk
with open('pagecounts_desktop-site_200712-202108.json', 'w') as outfile:
    json.dump(desktop_monthly_legacy, outfile)

with open('pagecounts_mobile-site_200712-202108.json', 'w') as outfile:
    json.dump(mobile_monthly_legacy, outfile)

with open('pageviews_desktop_201507-202109.json', 'w') as outfile:
    json.dump(desktop_params_monthly_pageviews, outfile)

with open('pageviews_mobile-app_201507-202109.json', 'w') as outfile:
    json.dump(mobile_app_params_monthly_pageviews, outfile)

with open('pageviews_mobile-web_201507-202109.json', 'w') as outfile:
    json.dump(mobile_web_params_monthly_pageviews, outfile)
# -
# # Step 2: Data Processing
# Now we need to clean the data to make it functional for analysis.
# First, create the default dictionaries that we will add to
# +
mobile_monthly_pageviews = defaultdict(int)
desktop_monthly_pageviews = defaultdict(int)
mobile_monthly_pagecounts = defaultdict(int)
desktop_monthly_pagecounts = defaultdict(int)
# -
# Set up functions that can then grab the month and year pieces of the timestamp
# +
def yyyy(timestamp):
return int(timestamp[:4])
def mm(timestamp):
return int(timestamp[4:6])
def yyyymm(timestamp):
return str(yyyy(timestamp)) + str(mm(timestamp)).rjust(2, "0")
# -
# The next two functions are used as follows:
# 1. add_counts aggregates the views so that they are grouped by YYYYMM
# 2. make_df converts these dictionaries into dataframes
# +
def add_counts(st_dict, fin_dict):
for item in st_dict['items']:
key = yyyymm(item['timestamp'])
if 'views' in item:
fin_dict[key] += item['views']
else:
fin_dict[key] += item['count']
def make_df(dicty, valkey):
return pd.DataFrame({'yyyymm': list(dicty.keys()), valkey: list(dicty.values())})
# -
# Now we use the first function to begin adding to the blank dictionaries
# Note: We want to combine mobile app views with mobile web views into the same dictionary
# +
add_counts(mobile_app_params_monthly_pageviews, mobile_monthly_pageviews)
add_counts(mobile_web_params_monthly_pageviews, mobile_monthly_pageviews)
add_counts(desktop_params_monthly_pageviews, desktop_monthly_pageviews)
add_counts(mobile_monthly_legacy, mobile_monthly_pagecounts)
add_counts(desktop_monthly_legacy, desktop_monthly_pagecounts)
# -
# We now want to turn these updated dictionaries into dataframes
# +
pageview_mobile = make_df(mobile_monthly_pageviews, 'pageview_mobile_views')
pagecount_mobile = make_df(mobile_monthly_pagecounts, 'pagecount_mobile_views')
pageview_desktop = make_df(desktop_monthly_pageviews, 'pageview_desktop_views')
pagecount_desktop = make_df(desktop_monthly_pagecounts, 'pagecount_desktop_views')
# -
# Since it would be best if all the data is located in one file, we will merge the previous dataframes into one master df
# +
df = pageview_mobile.merge(pagecount_mobile, on='yyyymm', how='outer')
df = df.merge(pageview_desktop, on='yyyymm', how='outer')
df = df.merge(pagecount_desktop, on='yyyymm', how='outer')
# -
# We now have to do the following steps:
# 1. Create new columns for an accumulation of views and counts
# 2. Separate the timestamp into Month and Year
# 3. Drop the timestamp column
# +
df['pagecount_all_views'] = df['pagecount_mobile_views'] + df['pagecount_desktop_views']
df['pageview_all_views'] = df['pageview_mobile_views'] + df['pageview_desktop_views']
df['year'] = df.yyyymm.apply(str).apply(yyyy)
df['month'] = df.yyyymm.apply(str).apply(mm)
df.drop('yyyymm', axis=1, inplace=True)
# -
# Finally we can write out the master df into a single csv file
df.to_csv('en-wikipedia_traffic_200712-202108.csv', index=False)
# # Step 3: Data Analysis
# Our final step is analyzing the data. We will do this by visualizing the dataset as a time series graph.
# We want to track mobile, desktop, and all traffic. Since we are looking at both counts and views, there will be 6 lines to keep track of on the graph. On top of that, the timelines for each api endpoint are different, but there is an overlapping year of data.
# We will now read in the same csv file we just created
df = pd.read_csv('en-wikipedia_traffic_200712-202108.csv')
df = df.replace(to_replace=0, value=np.nan)
df['date'] = df.apply(lambda r: dt.datetime(int(r.year), int(r.month), 1), axis=1)
df = df.sort_values('date')
# Now we will plot the graph we want. This will involve:
# 1. Plotting both the pageviews and pagecounts
# 2. Creating a Legend for the different lines that will be on the graph
# 3. Formatting the graph so that it has proper labels, titles, sizing, etc.
# +
# Create a plot
plt.figure(figsize=(18, 8))
# Plotting the Page Counts
plt.plot(df.date, df.pagecount_all_views)
plt.plot(df.date, df.pagecount_mobile_views)
plt.plot(df.date, df.pagecount_desktop_views)
# Plotting the Page Views
plt.plot(df.date, df.pageview_all_views)
plt.plot(df.date, df.pageview_mobile_views)
plt.plot(df.date, df.pageview_desktop_views)
# Create a legend for the different lines that will be displayed
plt.legend(["PageCount All", "PageCount Mobile", "PageCount Desktop", "PageView All", "PageView Mobile", "PageView Desktop"])
# Formatting the time series graph
plt.xlabel("Year")
plt.ylabel("View Count")
plt.title("Wikimedia Page Visits from 2007 to 2021")
plt.grid()
# -
| hcds-a1-data-curation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# ## Data insight with
# 
#
#
#
#
# __<NAME>__
# _<EMAIL>_
# + [markdown] slideshow={"slide_type": "skip"}
# <img src="http://clp.no/wp-content/uploads/2017/10/webstep-727x409.jpg" alt="Drawing" style="width: 200px; align:left;"/>
# + [markdown] slideshow={"slide_type": "slide"}
# ## But first..
# + [markdown] slideshow={"slide_type": "fragment"}
# <center><img src="https://jupyter.org/assets/main-logo.svg" alt="Drawing" style="width: 300px;"/></center>
# + slideshow={"slide_type": "skip"}
from IPython.display import Image
Image(url='http://clp.no/wp-content/uploads/2017/10/webstep-727x409.jpg')
# + slideshow={"slide_type": "skip"}
import pandas as pd
# ?pd.DataFrame
# + slideshow={"slide_type": "skip"}
# %lsmagic
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Latex
# + [markdown] slideshow={"slide_type": "-"}
# \\( P(A \mid B) = \frac{P(B \mid A) \, P(A)}{P(B)} \\)
# + [markdown] slideshow={"slide_type": "fragment"}
# ### HTML
# + slideshow={"slide_type": "-"} language="html"
#
# <h1>I'm a header</h1>
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Shell commands
# + slideshow={"slide_type": "-"}
# !ls *.csv
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Other Jupyter kernels
#
# - Javascript
# - nodejs
# - Ruby
# - Perl
# - Fortran
# - Go
# - Lua
# - Scala
# - Kotlin
# - Matlab
# - R
# - Brainfuck
#
# + [markdown] slideshow={"slide_type": "skip"}
# L : toggle line numbers
#
# O : toggle output
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Generate the slides and serve them using nbconvert:
#
# `!jupyter nbconvert --to slides 1_introduction.ipynb --post serve`
| 1_introduction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Procedure
# There are many approaches to sentiment analysis on text. Some are more common, for example dictionary approaches (dictionaries of positive and negative words),
# while more recent, state-of-the-art techniques rely on deep learning.
# In the next section I will describe some of them and choose two.
# One very interesting challenge is that we are dealing with multilingual data. We cannot create a model for each language, for several reasons:
# - Some languages lack enough data for training models
# - It would take a lot of time to create all of them
# - Too difficult to maintain
# So we have two solutions:
# 1. Create a multilingual model that supports all the languages.
# 1. Translate all non-English text to English (machine translation is something we know how to do well) and then apply any English sentiment analysis model.
# As I currently have no access to an API that can translate 20k+ texts in a reasonable amount of time, I did not choose this option.
# However, I describe below the procedure I would have followed if I had time.
# 1. Clean text
# 1. Detect language
# 1. Translate to english
# 1. Tokenize the data (words)
# 1. Remove the stop words (words that do not carry any useful information, for example pronouns), which keeps only the relevant words
# 1. Normalize the text (stemming/lemmatization)
# 1. Vectorize data (BOW, TF-IDF)
# 1. Train a machine learning model
# I could also use a pretrained English model like BERT and fine-tune it on the dataset.
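#
# Below is a minimal sketch of the classical pipeline listed above, using scikit-learn. The vectorizer, classifier, and the tiny in-line example texts are illustrative assumptions, not part of the original experiments.

# +
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline

# placeholder texts/labels standing in for the translated reviews and their sentiments
texts = ["great product, loved it", "terrible service", "it was ok, nothing special"]
labels = ["Positive", "Negative", "Neutral"]

clf = make_pipeline(
    TfidfVectorizer(stop_words="english", lowercase=True),  # tokenize, drop stop words, TF-IDF weights
    LogisticRegression(max_iter=1000),                       # simple linear classifier on top
)
clf.fit(texts, labels)
print(clf.predict(["what a great experience"]))
# -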
# ## Solutions I choose
# The solution I have chosen for my experiments is to fine-tune already-existing deep learning models, since many of them are available online and support multilingualism, for example:
# - bert-base-multilingual-cased
# - xlm-roberta-base
# They are transformer models pretrained on many languages; we'll import and fine-tune them using the Hugging Face library, since it is the fastest and easiest way to get started with NLP models.
# ## My two approaches
# ### Classification
# There are two approaches I decided to experiment with to train my model.
# The first one is to treat this problem as a classification task. The model is fine-tuned to predict 3 classes:
# - Neutral
# - Positive
# - Negative
#
# ### Regression
# A second approach I want to try is to do a regression first and then map its output to the final labels.
# My idea was not to consider our 3 sentiments as independent, but as placed on a scale between 0 and 1 where 0 is Negative, 0.5 Neutral and 1 Positive,
# and then compute the best lower and upper bounds for the Neutral class in order to predict the 3 classes (a sketch of this mapping follows below).
#
# My hypotheses were the following:
# - It allows better gradients, since Positive should be further away from Negative than from Neutral
# - It may reduce the rate of complete misclassification (i.e. classifying a Positive as Negative or a Negative as Positive), misclassifying more as Neutral instead (which makes more sense)
# - It may improve the overall results (accuracy)
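#
# A minimal sketch of that mapping from regression scores to labels (the threshold values below are illustrative assumptions; in practice they would be tuned on a validation set):

# +
import numpy as np

def scores_to_labels(scores, low=0.33, high=0.66):
    """Map regression outputs in [0, 1] to Negative / Neutral / Positive."""
    scores = np.asarray(scores, dtype=float)
    labels = np.full(scores.shape, "Neutral", dtype=object)
    labels[scores < low] = "Negative"
    labels[scores > high] = "Positive"
    return labels

print(scores_to_labels([0.1, 0.5, 0.9]))  # ['Negative' 'Neutral' 'Positive']
# -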
# ## Training phase
# To train my models, I decided to use Google Colab since it provides free GPUs, which allows training bigger models faster.
# However, to reduce GPU memory usage, I had to enable mixed precision, hence sacrificing some performance.
| notebooks/2_procedure.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
import psycopg2
import psycopg2.extras
import numpy as np
import pandas as pd
# +
conn = psycopg2.connect(dbname="nyc_taxi_data", user="postgres", password="<PASSWORD>")
cur = conn.cursor( )
# -
addr_df=pd.read_csv('dataset/yellow_tripdata_2020-01.csv', low_memory=False)
addr_df.reset_index(drop=True,inplace=True)
addr_df.head()
# addr_df_=addr_df.insert(0, 'id', range(1, 1 + len(addr_df)))
def create_staging_table(cursor):
cursor.execute("""
DROP TABLE IF EXISTS nyc_cab_data CASCADE;
CREATE UNLOGGED TABLE nyc_cab_data (
vendor_id INTEGER,
tpep_pickup_datetime TIMESTAMP,
tpep_dropoff_datetime TIMESTAMP,
passenger_count DECIMAL,
trip_distance DECIMAL,
RatecodeID DECIMAL,
store_and_fwd_flag TEXT,
PULocationID INTEGER,
DOLocationID INTEGER,
payment_type DECIMAL,
fare_amount DECIMAL,
extra DECIMAL,
mta_tax DECIMAL,
tip_amount DECIMAL,
tolls_amount DECIMAL,
improvement_surcharge DECIMAL,
total_amount DECIMAL,
congestion_surcharge DECIMAL
);""")
with conn.cursor() as cursor:
create_staging_table(cursor)
# +
def send_csv_to_psql(connection,csv,table_):
sql = "COPY %s FROM STDIN WITH CSV HEADER DELIMITER AS ','"
file = open(csv, "r")
table = table_
with connection.cursor() as cur:
cur.execute("truncate " + table + ";")
cur.copy_expert(sql=sql % table, file=file)
connection.commit()
return connection.commit()
send_csv_to_psql(conn,'dataset/yellow_tripdata_2020-01.csv','nyc_cab_data')
# -
def create_id(connection, sql, table_):
    # add an auto-incrementing primary key column by executing the SQL statement passed in
    with connection.cursor() as cur:
        cur.execute(sql)
    connection.commit()
create_id(conn,"ALTER TABLE public.nyc_cab_data ADD COLUMN id SERIAL PRIMARY KEY;",'nyc_cab_data')
sql_="SELECT COUNT(*) FROM nyc_cab_data LIMIT 10;"
cur.execute(sql_)
cur.fetchone()
cur.execute("SELECT * FROM nyc_cab_data LIMIT 10")
cur.fetchall()
| dataset_processing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + colab={} colab_type="code" id="rDUdNeNd6xle"
# Import data from Excel sheet
import pandas as pd
df = pd.read_excel('ADNI combined.xlsx', sheet_name='sample')
#print(df)
sid = df['RID']
grp = df['Group at scan date (1=CN, 2=EMCI, 3=LMCI, 4=AD, 5=SMC)']
age = df['Age at scan']
sex = df['Sex (1=female)']
tiv = df['TIV_CAT12']
field = df['MRI_Field_Strength']
grpbin = (grp > 1) # 1=CN, ...
amybin = df['SUMMARYSUVR_WHOLECEREBNORM_1.11CUTOFF']
# + colab={"base_uri": "https://localhost:8080/", "height": 36} colab_type="code" id="Oi75gi2z9JIS" outputId="3df595b4-66cc-45e0-b162-755e20aa2c9c"
# Scan for nifti file names
import glob
dataADNI3 = sorted(glob.glob('mwp1_MNI/ADNI3/*.nii.gz'))
dataFiles = dataADNI3
numfiles = len(dataFiles)
print('Found ', str(numfiles), ' nifti files')
# + colab={"base_uri": "https://localhost:8080/", "height": 55} colab_type="code" id="Bgn7LbCLbXND" outputId="c78ba415-13f4-4a9c-d3d1-969975e51cf3"
# Match covariate information
import re
import numpy as np
from pandas import DataFrame
from keras.utils import to_categorical
debug = False
cov_idx = [-1] * numfiles # list; array: np.full((numfiles, 1), -1, dtype=int)
print('Matching covariates for loaded files ...')
for i,id in enumerate(sid):
p = [j for j,x in enumerate(dataFiles) if re.search('_%04d_' % id, x)] # extract ID numbers from filename, translate to Excel row index
if len(p)==0:
if debug: print('Did not find %04d' % id) # did not find Excel sheet subject ID in loaded file selection
else:
if debug: print('Found %04d in %s: %s' % (id, p[0], dataFiles[p[0]]))
cov_idx[p[0]] = i # store Excel index i for data file index p[0]
print('Checking for scans not found in Excel sheet: ', sum(x<0 for x in cov_idx))
labels = pd.DataFrame({'Group':grpbin}).iloc[cov_idx, :]
labels = to_categorical(np.asarray(labels)) # use grps to access original labels
grps = pd.DataFrame({'Group':grp, 'RID':sid}).iloc[cov_idx, :]
amy_status = pd.DataFrame({'amy_pos':amybin}).iloc[cov_idx, :]
# + colab={"base_uri": "https://localhost:8080/", "height": 36} colab_type="code" id="4KH0CEUJfcey" outputId="31a78006-0d93-48ca-b668-6b6970e16c2a"
# Load original data from disk
import h5py
hf = h5py.File('orig_images_ADNI3_wb_mwp1_CAT12_MNI.hdf5', 'r')
hf.keys # read keys
images = np.array(hf.get('images'))
hf.close()
print(images.shape)
# -
# determine amyloid status
amy_filter = np.equal(np.transpose(amy_status.to_numpy()), labels[:,1])
# filter index vector by amy status
test_idX = np.array(range(amy_filter.shape[1]))[np.squeeze(amy_filter)]
grps = grps.iloc[test_idX, :]
images = images[test_idX, :]
labels = labels[test_idX, :]
print(images.shape)
# + colab={"base_uri": "https://localhost:8080/", "height": 36} colab_type="code" id="XJliKsql0adU" outputId="3abff222-03f2-42ba-ae71-86c8a0b7b17f"
# specify version of tensorflow
# #%tensorflow_version 1.x # <- use this for Google colab
import tensorflow as tf
# downgrade to specific version
# #!pip install tensorflow-gpu==1.15
#import tensorflow as tf
print(tf.__version__)
# disable tensorflow deprecation warnings
import logging
logging.getLogger('tensorflow').disabled=True
# -
# helper function to obtain performance result values
def get_values(conf_matrix):
assert conf_matrix.shape==(2,2)
tn, fp, fn, tp = conf_matrix.ravel()
sen = tp / (tp+fn)
spec = tn / (fp+tn)
ppv = tp / (tp+fp)
npv = tn / (tn+fn)
f1 = 2 * ((ppv * sen) / (ppv + sen))
bacc = (spec + sen) / 2
return bacc, sen, spec, ppv, npv, f1
# +
# validation
import numpy as np
from sklearn.metrics import roc_curve, auc
from matplotlib import pyplot as plt
# %matplotlib inline
import keras
from keras import models
import tensorflow as tf
from sklearn.metrics import confusion_matrix
acc_AD, acc_MCI, auc_AD, auc_MCI = [], [], [], []
bacc_AD, bacc_MCI = [], []
sen_AD, sen_MCI, spec_AD, spec_MCI = [], [], [], []
ppv_AD, ppv_MCI, npv_AD, npv_MCI = [], [], [], []
f1_AD, f1_MCI = [], []
batch_size = 20
for k in (1,):
print('validating model model_rawdat_checkpoints/rawmodel_wb_whole_ds.hdf5')
mymodel = models.load_model('model_rawdat_checkpoints/rawmodel_wb_whole_ds.hdf5')
# calculate area under the curve
# AUC as optimization function during training: https://stackoverflow.com/questions/41032551/how-to-compute-receiving-operating-characteristic-roc-and-auc-in-keras
pred = mymodel.predict(images, batch_size=batch_size)
fpr = dict()
tpr = dict()
roc_auc = dict()
acc = dict()
for i in range(2): # classes dummy vector: 0 - CN, 1 - MCI/AD
fpr[i], tpr[i], _ = roc_curve(labels[:, i], pred[:,i])
roc_auc[i] = auc(fpr[i], tpr[i])
# Plot the ROC curve
plt.figure()
plt.plot(fpr[1], tpr[1], color='darkorange', label='ROC curve (area = %0.2f)' % roc_auc[1])
plt.plot([0, 1], [0, 1], color='navy', linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic')
plt.legend(loc="lower right")
plt.show()
# redo AUC for binary comparison: AD vs. HC and MCI vs. HC
for i in [3,4]:
        grpi = np.equal(grps.Group.to_numpy(dtype=int), np.ones((grps.shape[0],), dtype=int)*i)
        grp1 = np.equal(grps.Group.to_numpy(dtype=int), np.ones((grps.shape[0],), dtype=int))
grpidx = np.logical_or(grpi, grp1)
fpr[i], tpr[i], _ = roc_curve(labels[grpidx, 1], pred[grpidx, 1])
roc_auc[i] = auc(fpr[i], tpr[i])
acc[i] = np.mean((labels[grpidx, 1] == np.round(pred[grpidx, 1])).astype(int))*100
print('AUC for MCI vs. CN = %0.3f' % roc_auc[3])
print('AUC for AD vs. CN = %0.3f' % roc_auc[4])
print('Acc for MCI vs. CN = %0.1f' % acc[3])
print('Acc for AD vs. CN = %0.1f' % acc[4])
auc_AD.append(roc_auc[4])
auc_MCI.append(roc_auc[3])
acc_AD.append(acc[4])
acc_MCI.append(acc[3])
print('confusion matrix')
confmat = confusion_matrix(grps.Group, np.round(pred[:, 1]))
bacc, sen, spec, ppv, npv, f1 = get_values(confmat[(1,2),0:2]) # MCI
bacc_MCI.append(bacc); sen_MCI.append(sen); spec_MCI.append(spec); ppv_MCI.append(ppv); npv_MCI.append(npv); f1_MCI.append(f1)
bacc, sen, spec, ppv, npv, f1 = get_values(confmat[(1,3),0:2]) # AD
bacc_AD.append(bacc); sen_AD.append(sen); spec_AD.append(spec); ppv_AD.append(ppv); npv_AD.append(npv); f1_AD.append(f1)
print(confmat[1:4,0:2])
# +
# print model performance summary
print('AUC for MCI vs. CN = %0.3f' % auc_MCI[0])
print('AUC for AD vs. CN = %0.3f' % auc_AD[0])
print('Acc for MCI vs. CN = %0.3f' % acc_MCI[0])
print('Acc for AD vs. CN = %0.3f' % acc_AD[0])
print('Bacc for MCI vs. CN = %0.3f' % bacc_MCI[0])
print('Bacc for AD vs. CN = %0.3f' % bacc_AD[0])
print('Sen for MCI vs. CN = %0.3f' % sen_MCI[0])
print('Sen for AD vs. CN = %0.3f' % sen_AD[0])
print('Spec for MCI vs. CN = %0.3f' % spec_MCI[0])
print('Spec for AD vs. CN = %0.3f' % spec_AD[0])
print('PPV for MCI vs. CN = %0.3f' % ppv_MCI[0])
print('PPV for AD vs. CN = %0.3f' % ppv_AD[0])
print('NPV for MCI vs. CN = %0.3f' % npv_MCI[0])
print('NPV for AD vs. CN = %0.3f' % npv_AD[0])
print('F1 for MCI vs. CN = %0.3f' % f1_MCI[0])
print('F1 for AD vs. CN = %0.3f' % f1_AD[0])
| 9_Validate_3D_CNN_whole_ds_wb_rawdat_mwp1_CAT12_MNI_ADNI3_amy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Devise
#
# What if we could get a set of words and images to be in the same space?
#
# ```
# beagle dog input --> model A --> jumbo jet
# beagle dog input --> model B --> corgi
# ```
# Consider models A and B. In traditional terms, both models are equally wrong (they get the same score). But in word-vector space, a corgi (a dog) is much closer to a beagle, so model B is much better than model A.
#
# **idea** - train a model that predicts the word vector of the target word, instead of a class label.
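#
# As a toy illustration of that intuition (the 3-D vectors below are made-up stand-ins for real fastText embeddings, purely to show the cosine-similarity comparison):

# +
import numpy as np

def cosine(a, b):
    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))

beagle = np.array([0.9, 0.8, 0.1])      # hypothetical dog-like embedding
corgi = np.array([0.85, 0.75, 0.2])     # another dog breed: nearby
jumbo_jet = np.array([0.1, 0.2, 0.95])  # unrelated concept: far away

print(cosine(beagle, corgi), cosine(beagle, jumbo_jet))  # the first similarity is much higher
# -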
# %matplotlib inline
# %reload_ext autoreload
# %autoreload 2
# ### Import our libraries
# +
import sys
sys.path.append('../')
from fastai.conv_learner import *
torch.backends.cudnn.benchmark=True
import fastText as ft
import torchvision.transforms as transforms
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
tfms = transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])
# -
# ### Setup our paths
PATH = Path('data/imagenet/')
TMP_PATH = PATH/'tmp'
TRANS_PATH = Path('data/translate/')
PATH_TRN = PATH/'train'
fname = 'valid/n01440764/ILSVRC2012_val_00007197.JPEG'
# ### Load the Word Vectors
ft_vecs = ft.load_model(str((TRANS_PATH/'wiki.en.bin')))
ft_vecs.get_word_vector('king')[:10]
# +
ft_words = ft_vecs.get_words(include_freq=True)
ft_word_dict = {k:v for k,v in zip(*ft_words)}
ft_words = sorted(ft_word_dict.keys(), key=lambda x: ft_word_dict[x])
len(ft_words)
# -
# ### Get Imagenet Classes
from fastai.io import get_data
CLASSES_FN = 'imagenet_class_index.json'
get_data(f'http://files.fast.ai/models/{CLASSES_FN}', TMP_PATH/CLASSES_FN)
# ### Get all nouns in English (WORDNET)
WORDS_FN = 'classids.txt'
get_data(f'http://files.fast.ai/data/{WORDS_FN}', PATH/WORDS_FN)
# ### Create imagenet class number to words
class_dict = json.load((TMP_PATH/CLASSES_FN).open())
classids_1k = dict(class_dict.values())
nclass = len(class_dict); nclass
class_dict['0']
# ### Wordnet class number to Nouns
classid_lines = (PATH/WORDS_FN).open().readlines()
classid_lines[:5]
classids = dict(l.strip().split() for l in classid_lines)
len(classids),len(classids_1k)
# #### Look up all the nouns in fastText
lc_vec_d = {w.lower(): ft_vecs.get_word_vector(w) for w in ft_words[-1000000:]}
syn_wv = [(k, lc_vec_d[v.lower()]) for k,v in classids.items()
if v.lower() in lc_vec_d]
syn_wv_1k = [(k, lc_vec_d[v.lower()]) for k,v in classids_1k.items()
if v.lower() in lc_vec_d]
syn2wv = dict(syn_wv)
len(syn2wv)
# #### Save the lookups
pickle.dump(syn2wv, (TMP_PATH/'syn2wv.pkl').open('wb'))
pickle.dump(syn_wv_1k, (TMP_PATH/'syn_wv_1k.pkl').open('wb'))
# ### CHECKPOINT load
syn2wv = pickle.load((TMP_PATH/'syn2wv.pkl').open('rb'))
syn_wv_1k = pickle.load((TMP_PATH/'syn_wv_1k.pkl').open('rb'))
# #### Because the ImageNet localization data is 157 GB, the rest of this code will not be run here
# +
images = []
img_vecs = []
for d in (PATH/'train').iterdir():
if d.name not in syn2wv: continue
# grab the fast txt word vector
vec = syn2wv[d.name]
for f in d.iterdir():
images.append(str(f.relative_to(PATH)))
img_vecs.append(vec)
n_val=0
for d in (PATH/'valid').iterdir():
if d.name not in syn2wv: continue
vec = syn2wv[d.name]
for f in d.iterdir():
images.append(str(f.relative_to(PATH)))
img_vecs.append(vec)
n_val += 1
# -
img_vecs = np.stack(img_vecs)
img_vecs.shape
pickle.dump(images, (TMP_PATH/'images.pkl').open('wb'))
pickle.dump(img_vecs, (TMP_PATH/'img_vecs.pkl').open('wb'))
# +
# load the images for ImageNet
images = pickle.load((TMP_PATH/'images.pkl').open('rb'))
# have the corresponding vector for each image
img_vecs = pickle.load((TMP_PATH/'img_vecs.pkl').open('rb'))
# -
# ## Create the model architecture + datasets
# +
n = len(images); n
val_idxs = list(range(n-28650, n))

arch = resnet50  # define the backbone before it is used by tfms_from_model below
tfms = tfms_from_model(arch, 224, transforms_side_on, max_zoom=1.1)
# we can pass all the names from imagenet + word vecs
# then pass the indexes
# continuous = True - since we are predicting vectors
md = ImageClassifierData.from_names_and_array(PATH, images, img_vecs, val_idxs=val_idxs,
classes=None, tfms=tfms, continuous=True, bs=256)
"""
arch - resnet 50
md.c - how many classes
is_multi - not multiclass
is_reg - is regression
xtra_fc - extra fully connected layers
ps - how much dropout do you want?
*note no softmax
"""
arch = resnet50
models = ConvnetBuilder(arch, md.c, is_multi=False, is_reg=True, xtra_fc=[1024], ps=[0.2,0.2])
learn = ConvLearner(md, models, precompute=True)
learn.opt_fn = partial(optim.Adam, betas=(0.9,0.99))
# loss function - L1 loss measures elementwise differences,
# but for high-dimensional vectors most points lie far out near the surface,
# so a plain distance metric isn't the best choice; cosine similarity works better here
def cos_loss(inp,targ): return 1 - F.cosine_similarity(inp,targ).mean()
learn.crit = cos_loss
# -
# ### Train the model with `precompute=True` to cut down on training time
#
# Quoted at 1+ hour length
# +
learn.lr_find(start_lr=1e-4, end_lr=1e15)
learn.sched.plot()
lr = 1e-2
wd = 1e-7
learn.precompute=True
learn.fit(lr, 1, cycle_len=20, wds=wd, use_clr=(20,10))
learn.bn_freeze(True)
learn.fit(lr, 1, cycle_len=20, wds=wd, use_clr=(20,10))
lrs = np.array([lr/1000,lr/100,lr])
learn.precompute=False
learn.freeze_to(1)
learn.save('pre0')
learn.load('pre0')
# -
# # Image Searching
# +
syns, wvs = list(zip(*syn_wv_1k))
wvs = np.array(wvs)
# %time pred_wv = learn.predict()
# -
# #### Let's take a look at some of the pictures
# +
denorm = md.val_ds.denorm
def show_img(im, figsize=None, ax=None):
if not ax: fig,ax = plt.subplots(figsize=figsize)
ax.imshow(im)
ax.axis('off')
return ax
def show_imgs(ims, cols, figsize=None):
fig,axes = plt.subplots(len(ims)//cols, cols, figsize=figsize)
for i,ax in enumerate(axes.flat): show_img(ims[i], ax=ax)
plt.tight_layout()
start=300
show_imgs(denorm(md.val_ds[start:start+25][0]), 5, (10,10))
# -
# <img src='https://snag.gy/OtP8k1.jpg' style='width:700px'>
# ### Use Nearest Neighbors search - 300D vector, what are the closest neighbors?
# +
# super fast library, that searches very quickly
import nmslib
def create_index(a):
index = nmslib.init(space='angulardist')
index.addDataPointBatch(a)
index.createIndex()
return index
def get_knns(index, vecs):
return zip(*index.knnQueryBatch(vecs, k=10, num_threads=4))
def get_knn(index, vec): return index.knnQuery(vec, k=10)
# -
nn_wvs = create_index(wvs)
idxs,dists = get_knns(nn_wvs, pred_wv)
[[classids[syns[id]] for id in ids[:3]] for ids in idxs[start:start+10]]
# ### What if we now bring in WordNet
# +
all_syns, all_wvs = list(zip(*syn2wv.items()))
all_wvs = np.array(all_wvs)
nn_allwvs = create_index(all_wvs)
idxs,dists = get_knns(nn_allwvs, pred_wv)
[[classids[all_syns[id]] for id in ids[:3]] for ids in idxs[start:start+10]]
# -
# # Text --> Image Search
# +
nn_predwv = create_index(pred_wv)
en_vecd = pickle.load(open(TRANS_PATH/'wiki.en.pkl','rb'))
## get the vector for boat
vec = en_vecd['boat']
idxs,dists = get_knn(nn_predwv, vec)
# then we only pull images who's vector is close to our 'boat' vector
show_imgs([open_image(PATH/md.val_ds.fnames[i]) for i in idxs[:3]], 3, figsize=(9,3));
# -
# <img src='https://snag.gy/bsOHQ4.jpg'>
vec = (en_vecd['engine'] + en_vecd['boat'])/2
idxs,dists = get_knn(nn_predwv, vec)
show_imgs([open_image(PATH/md.val_ds.fnames[i]) for i in idxs[:3]], 3, figsize=(9,3));
# <img src='https://snag.gy/eqK8dz.jpg'>
vec = (en_vecd['sail'] + en_vecd['boat'])/2
idxs,dists = get_knn(nn_predwv, vec)
show_imgs([open_image(PATH/md.val_ds.fnames[i]) for i in idxs[:3]], 3, figsize=(9,3));
# <img src='https://snag.gy/Bz6Hsw.jpg'>
| live_notes/dl2_042_devise.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda env tensorflow
# language: python
# name: tensorflow
# ---
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
a = np.arange(0,20)
b = np.arange(20,40)
plt.scatter(a,b,c="b")
plt.scatter(a,b,c="b")
plt.xlabel("X axis")
plt.ylabel("Y axis")
plt.title("mat graph")
plt.savefig("mat.png")
b = b*b
plt.plot(a,b,"r--")
plt.plot(a,b,"r*-")
plt.plot(a,b,"ro--")
plt.plot(a,b,"ro",linestyle="dashed",linewidth=3)
plt.plot(a,b,"ro",linestyle="dashed",linewidth=3,markersize=10)
# # creating subplots
plt.subplot(2,2,1)
plt.plot(a,b,c="r")
plt.subplot(2,2,2)
plt.plot(a,b,c="b")
plt.subplot(2,2,3)
plt.plot(a,b,c="y")
np.pi
x = np.arange(0,4*np.pi,0.1)
y = np.sin(x)
plt.subplot(2,1,1)
plt.plot(x,y)
y = np.cos(x)
plt.subplot(2,1,2)
plt.plot(x,y)
# +
a = np.arange(1,5)
b = np.arange(6,10)
a1 = np.arange(11,15)
b1 = np.arange(16,20)
# -
plt.bar(a,b,color="b")
print(a1,b1)
plt.bar(a,b,color="b")
plt.bar(a1,b1,color="g")
a = np.array([22,87,5,43,56,73,55,54,11,20,51,5,79,31,27])
plt.hist(a)
plt.hist(a,bins=20)
data = [np.random.normal(0, std, 100) for std in range(1, 4)]
data
plt.boxplot(data,vert=True,patch_artist=True)
size = [120,54,73,87]
color = ["r","b","y","g"]
labels = ["Python","C","C++","Java"]
explode = [0.4,0,0,0]
plt.pie(size,explode=explode,colors=color,labels=labels,autopct="%1.1f%%",shadow=True)
plt.axis("equal")
| Matplotlib Tutorial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
#
# # <center>Comparisons</center>
# # <center>using</center>
# # <center>logic and conditions</center>
# + [markdown] slideshow={"slide_type": "slide"}
# - Arithmetic operators
# - Comparison operators
# - Conditionals
# - Logical operators
#
# + [markdown] slideshow={"slide_type": "slide"}
# ## Comparison operators
# + slideshow={"slide_type": "slide"}
## create two simple variables
x, y = 5, 10
# + slideshow={"slide_type": "slide"}
## comparison 1
x > y
# + slideshow={"slide_type": "slide"}
## comparison 2
x < y
# + slideshow={"slide_type": "slide"}
## Check if two varibles are equal:
x == y
# + slideshow={"slide_type": "slide"}
## different than a single equal sign
x = y
# + slideshow={"slide_type": "slide"}
## what is x equal to?
x
# + [markdown] slideshow={"slide_type": "slide"}
# - A single equal sign **assigns a value**
# - Double equal signs **check for equality**
# + slideshow={"slide_type": "slide"}
## reassign new values to x and y
x, y = 25, 25
# + slideshow={"slide_type": "slide"}
## comparison 4
x > y
# + slideshow={"slide_type": "slide"}
## comparison 5
x < y
# + slideshow={"slide_type": "slide"}
## comparison 6
x <= y
# + [markdown] slideshow={"slide_type": "slide"}
# #### The greater than/less than sign **always comes before** the equal sign
#
# Just think of the equal sign as assigning a value if it comes first!
# +
## Check if two variables are NOT equal
x != y
# +
## assign new x and y values
x, y = 9, 99
# +
## now Check if two variables are NOT equal
x != y
# + [markdown] slideshow={"slide_type": "slide"}
# ## Conditionals
#
# if...else expressions
#
# + [markdown] slideshow={"slide_type": "slide"}
# 
# + slideshow={"slide_type": "slide"}
## let's try it
person_income = 200_001
minimum_income = 100_101
if person_income >= minimum_income:
decision = "Approved for loan"
else:
decision = "Declined for loan"
print(decision)
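# + slideshow={"slide_type": "slide"}
## the same decision written as a one-line conditional (ternary) expression
decision = "Approved for loan" if person_income >= minimum_income else "Declined for loan"
print(decision)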
# + [markdown] slideshow={"slide_type": "slide"}
# 
# + slideshow={"slide_type": "slide"}
## the code retrieves a data point
person_zip = "11370"
high_risk_zip = "11370"
# + slideshow={"slide_type": "slide"}
if person_income >= minimum_income and person_zip != high_risk_zip:
decision = "Approved for loan"
else:
decision = "Declined for loan"
print(decision)
# -
if person_zip != high_risk_zip:
decision = "approved for loan"
elif person_income >= minimum_income:
decision = "Approved for loan x"
else:
decision = "Still trying to figure out"
print(decision)
count = 0
if person_zip != high_risk_zip:
count = count + 1
elif person_income >= minimum_income:
count = count + 1
else:
decision = "Still trying to figure out"
print(count)
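# +
## the agenda above also lists logical operators; `or` and `not` combine comparisons just like `and`
is_high_risk = person_zip == high_risk_zip
if not is_high_risk or person_income >= minimum_income:
    print("Eligible for further review")
else:
    print("Not eligible")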
| in-class-exerices/wk-05-logic-conditions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/pandemicbat801/daa_2021_1/blob/master/Tarea11.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="raJfQzl6H361"
class NodoArbol:
def __init__(self, value, left=None, right=None):
self.data=value
self.left=left
self.right=right
# + [markdown] id="6FPXhqikH-40"
# # Binary search tree
# Nodes to the left are smaller than the root and nodes to the right are larger than the root. The tree can be traversed in-order, pre-order, or post-order.
# + id="G6HdmDWbIAoV"
class BinarySearchTree:
def __init__(self):
self._root=None
def insert(self, value):
if self._root==None:
self._root=NodoArbol(value)
else:
self.__insert_nodo__(self._root, value)
def __insert_nodo__(self, nodo, value):
if nodo.data==value:
pass
elif value<nodo.data:
if nodo.left==None:
nodo.left=NodoArbol(value)
else:
self.__insert_nodo__(nodo.left,value)
else:
if nodo.right==None:
nodo.right=NodoArbol(value)
else:
self.__insert_nodo__(nodo.right,value)
def buscar(self, value):
if self._root==None:
return None
else:
return self.__busca_nodo(self._root,value)
def __busca_nodo(self, nodo, value):
if nodo==None:
return None
elif nodo.data==value:
return nodo
elif value<nodo.data:
return self.__busca_nodo(nodo.left,value)
else:
return self.__busca_nodo(nodo.right,value)
def transversal(self, format="inorden"):
if format=="inorden":
self.__recorrido_in(self._root)
elif format=="preorden":
self.__recorrido_pre(self._root)
elif format=="posorden":
self.__recorrido_pos(self._root)
else:
print("Formato de recorrido no valido")
def __recorrido_pre(self, nodo):
if nodo!=None:
print(nodo.data,end=",")
self.__recorrido_pre(nodo.left)
self.__recorrido_pre(nodo.right)
def __recorrido_in(self, nodo):
if nodo!=None:
self.__recorrido_in(nodo.left)
print(nodo.data,end=",")
self.__recorrido_in(nodo.right)
def __recorrido_pos(self, nodo):
if nodo!=None:
self.__recorrido_pos(nodo.left)
self.__recorrido_pos(nodo.right)
print(nodo.data,end=",")
def eliminar(self, value):
self.__eliminar_nodo(self._root, self._root, self._root, None, value)
def __eliminar_nodo(self, root, nodo, anterior, actual, value):
if nodo==None:
return print("No existe ese nodo")
if nodo.data==value:
if nodo.left==None and nodo.right==None:
if actual=="izq":
anterior.left=None
elif actual=="der":
anterior.right=None
print("solo se borro el nodo")
elif nodo.left==None and nodo.right!=None:
if actual=="izq":
anterior.left=nodo.right
else:
anterior.right=nodo.right
print("se paso el unico nodo derecho hacia arriba")
elif nodo.left!=None and nodo.right==None:
if actual=="izq":
anterior.left=nodo.left
else:
anterior.right=nodo.left
print("se paso el unico nodo izquierdo hacia arriba")
elif nodo.left!=None and nodo.right!=None:
print("se hizo algo complejo")
tmp,anterior2 =self.nodoMasBajo(nodo.right, nodo)
if nodo.data==anterior2.data:
anterior2.right=tmp.right
elif nodo.data!=anterior2.data:
anterior2.left=tmp.right
if actual=="izq":
anterior.left=tmp
else:
anterior.right=tmp
tmp.left=nodo.left
tmp.right=nodo.right
elif value<nodo.data:
return self.__eliminar_nodo(root, nodo.left, nodo, "izq", value)
else:
return self.__eliminar_nodo(root, nodo.right, nodo, "der", value)
def nodoMasBajo(self, nodo, anterior):
if nodo.left==None:
return nodo, anterior
elif nodo.left!=None:
return self.nodoMasBajo(nodo.left, nodo)
# + colab={"base_uri": "https://localhost:8080/"} id="6ZNnfMlpIuBu" outputId="dc012c3e-0a6d-4d00-85f1-977e11b54613"
arbol=BinarySearchTree()
arbol.insert(50)
arbol.insert(40)
arbol.insert(80)
arbol.insert(20)
arbol.insert(45)
arbol.insert(60)
arbol.insert(90)
arbol.insert(85)
arbol.insert(100)
arbol.insert(95)
print(arbol.transversal())
print("____________")
arbol.eliminar(80)
print("__________________")
print(arbol._root.data)
print(arbol._root.left.data)
print(arbol._root.right.data)
print(arbol._root.left.left.data)
print(arbol._root.right.left.data)
print(arbol.transversal())
print(arbol._root.right.right.right.left.data)
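# Quick sanity check (not part of the original homework): after deleting 80,
# searching for it should return None, while existing keys are still found.
print(arbol.buscar(80))        # None: 80 was removed from the tree
print(arbol.buscar(95).data)   # 95 is still reachable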
| Tarea11.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: .venv
# language: python
# name: .venv
# ---
# +
import matplotlib.pyplot as plt
# #%run ../src/plot_curves_appendix.py
# #%run ../src/plot_curves.py
# #%run ../src/plot_interaction_kernel.py
# #%run ../src/plot_temporal_contribution.py
import warnings
warnings.filterwarnings('ignore')
import sys
sys.path.append("../src/")
from plot_curves import curves
from plot_curves_no_periodic import curves_no_periodic
from plot_temporal_contribution import temporal_contribution
from plot_interaction_kernel import interaction_kernel
from plot_interaction_kernel_appendix import interaction_kernel_app
# %matplotlib notebook
# -
curves()
curves_no_periodic()
interaction_kernel()
temporal_contribution()
interaction_kernel_app()
| notebooks/visualization2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Topic modelling on news data for 10 Topics
#
# - The data is taken from Kaggle and topic modeling is applied to it.
# - Topics related to health begin to emerge.
# - Increasing the number of topics gives more coherent, better-separated results.
import pandas as pd
data=pd.read_csv('abcnews-date-text.csv',error_bad_lines=False)
data_text = data[['headline_text']]
data_text['index'] = data_text.index
documents = data_text
print(len(documents))
print(documents[:5])
# # Data preprocessing
# ## Several preprocessing steps are applied here: tokenization, lemmatization and stemming
# - Words are converted to their normal form; for example, 'stolen' is converted to 'steal'
# ### NLTK is used to remove words from languages other than English (for example, Hindi and Urdu words are removed)
# - Preprocessing also removes punctuation such as exclamation marks and commas
# +
import gensim
from gensim.utils import simple_preprocess
from gensim.parsing.preprocessing import STOPWORDS
from nltk.stem import WordNetLemmatizer, SnowballStemmer
from nltk.stem.porter import *
import numpy as np
np.random.seed(2018)
import nltk
nltk.download('wordnet')
# -
# # Lemmatization
# ## Lemmatization groups together the inflected forms of a word so they can be analysed as a single item (e.g. 'stolen' -> 'steal')
# # Tokenization
# ## Tokenization splits the text into individual tokens and keeps only meaningful words
# - Stop words such as 'if', 'the', 'a', 'an', which carry no topical meaning, are removed
# # Stemming
# ## Stemming reduces words to their root form
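# A minimal illustration (not part of the original pipeline) of the difference between
# the imported SnowballStemmer and WordNetLemmatizer on a sample word; the lemmatizer
# with pos='v' maps the participle to its verb lemma, which is why it is used below.
demo_stemmer = SnowballStemmer('english')
demo_lemmatizer = WordNetLemmatizer()
print(demo_stemmer.stem('stolen'))                  # stemming leaves 'stolen' unchanged
print(demo_lemmatizer.lemmatize('stolen', pos='v')) # lemmatization gives 'steal'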
def lemmatize_stemming(text):
return WordNetLemmatizer().lemmatize(text, pos='v')
def preprocess(text):
result = []
for token in gensim.utils.simple_preprocess(text):
if token not in gensim.parsing.preprocessing.STOPWORDS and len(token) > 3:
result.append(lemmatize_stemming(token))
return result
# # Preview the data after preprocessing
# - How a document looks before and after preprocessing
# +
# Select a document to preview after preprocessing
doc_sample = documents[documents['index'] == 4310].values[0][0]
print('original document: ')
words = []
for word in doc_sample.split(' '):
words.append(word)
print(words)
print('\n\n tokenized and lemmatized document: ')
print(preprocess(doc_sample))
# -
processed_docs = documents['headline_text'].map(preprocess)
processed_docs[:10]
# ### A dictionary is built that maps each unique token in the processed documents to an integer id
dictionary = gensim.corpora.Dictionary(processed_docs)
count = 0
for k, v in dictionary.iteritems():
print(k, v)
count += 1
if count > 10:
break
# ### Filter tokens by document frequency: keep tokens that appear in at least 15 documents and in no more than 50% of the documents, retaining at most the 100,000 most frequent
dictionary.filter_extremes(no_below=15, no_above=0.5, keep_n=100000)
bow_corpus = [dictionary.doc2bow(doc) for doc in processed_docs]
bow_corpus[4310]
import pickle
pickle.dump(bow_corpus, open('bow_corpus.pkl', 'wb'))
dictionary.save('dictionary.gensim')
bow_doc_4310 = bow_corpus[4310]
for i in range(len(bow_doc_4310)):
print("Word {} (\"{}\") appears {} time.".format(bow_doc_4310[i][0],
dictionary[bow_doc_4310[i][0]],
bow_doc_4310[i][1]))
from gensim import corpora, models
tfidf = models.TfidfModel(bow_corpus)
corpus_tfidf = tfidf[bow_corpus]
from pprint import pprint
for doc in corpus_tfidf:
pprint(doc)
break
#
lda_model = gensim.models.LdaMulticore(bow_corpus, num_topics=10, id2word=dictionary, passes=2, workers=2)
lda_model.save('model10.gensim')
for idx, topic in lda_model.print_topics(-1):
print('Topic: {} \nWords: {}'.format(idx, topic))
processed_docs[4310]
# ## Checking the model with an unseen document
unseen_document = 'How a Pentagon de to help him identity crisis for Google'
bow_vector = dictionary.doc2bow(preprocess(unseen_document))
for index, score in sorted(lda_model[bow_vector], key=lambda tup: -1*tup[1]):
print("Score: {}\t Topic: {}".format(score, lda_model.print_topic(index, 10)))
dictionary = gensim.corpora.Dictionary.load('dictionary.gensim')
corpus = pickle.load(open('bow_corpus.pkl', 'rb'))
lda = gensim.models.ldamodel.LdaModel.load('model10.gensim')
# ### Analyse the results with pyLDAvis
import pyLDAvis.gensim
lda_display = pyLDAvis.gensim.prepare(lda, corpus, dictionary, sort_topics=False)
pyLDAvis.display(lda_display)
# ### Topics related to health start to emerge, but they are not yet well separated, so we increase the number of topics.
# #### This way we can obtain more clearly separated health-related topics.
| Topic modeling on text data-10_Topic.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import time
import random
# create an array of 9,999 random integers between 1 and 10,000
array = [random.randint(1, 10000) for i in range(1, 10000)]
# print 10 evenly spaced numbers from the array
print(array[0:len(array):len(array) // 10])
# small testing array
a = [3,4,5,6,3,4,9,5,7,2,0,1]
# same as above
print(a[0:len(a):len(a) // 10])
class MyTimer(object):
def __enter__(self):
self.t0 = time.time()
def __exit__(self, exc_type, exc_val, exc_tb):
print('[spent time: {time:.20f} s]'.format(time = time.time() - self.t0))
# +
def bubble_sort(data):
if len(data) >= 2:
# iterate through the list
for i in range(0, len(data)):
# for each of the elements, compare with all other elements except the ones that are already sorted
# the last i elements are the largest i elements
for j in range(0, len(data) - i - 1):
# switch elements if the right hand side one is smaller
# so the largest element could go all the way to the right end of the list
if data[j] > data[j + 1]:
# swap
temp = data[j]
data[j] = data[j + 1]
data[j + 1] = temp
# return array
return data
else:
# dosen't require sorting
return data
# timer
with MyTimer() as t:
bubble_sort(array)
# +
def quick_sort(data):
if len(data) >= 2:
# mid = data[len(data)//2] # set pivot
mid = data[0]
left, right = [], []
data.remove(mid) # remove pivot
for num in data:
if num >= mid:
right.append(num)
else:
left.append(num)
return quick_sort(left) + [mid] + quick_sort(right)
else:
return data
with MyTimer() as t:
quick_sort(array) # execute algorithm
# arr is a subset of array
# adjust the size of arr according to your computer's performance
# since the worst case of quick sort recurses to depth n, which is O(n^2) in time and can exceed Python's recursion limit
arr = array[0:1000]
# test with unsorted arr
with MyTimer() as t:
quick_sort(arr) # execute algorithm
# test with sorted arr (worst case)
sorted_array = quick_sort(arr)
# timer
with MyTimer() as t:
quick_sort(sorted_array)
# +
def bucket_sort(data):
    max = data[0] # store the maximum number in data
    min = data[0] # store the minimum number in data
for i in data:
if i > max:
max = i # update max number
elif i < min:
min = i # update min number
bucket = [0 for i in range(max - min + 1)] # array starts at 0, which is biased by max - min
for i in data:
bucket[i - min] += 1 # the biased index of bucket + 1
result = []
for i in range(len(bucket)):
for j in range(bucket[i]):
result.append(min + i)
return result
with MyTimer() as t:
bucket_sort(array)
# +
def merge(left, right):
res = []
while left and right:
if left[0] < right[0]:
res.append(left.pop(0))
else:
res.append(right.pop(0))
res = res + left + right
return res
def merge_sort(lists):
if len(lists) <= 1:
return lists
mid = len(lists) // 2
left = merge_sort(lists[:mid])
right = merge_sort(lists[mid:])
return merge(left, right)
with MyTimer() as t:
merge_sort(array)
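
# Sanity check (not in the original notebook): all four implementations should
# agree with Python's built-in sorted() on a small random sample. Copies are
# passed in because bubble_sort and quick_sort modify the list they receive.
sample = [random.randint(1, 100) for _ in range(50)]
expected = sorted(sample)
assert bubble_sort(list(sample)) == expected
assert quick_sort(list(sample)) == expected
assert bucket_sort(list(sample)) == expected
assert merge_sort(list(sample)) == expected
print("all sorting implementations agree with sorted()")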
| Algorithms/Sorting.ipynb |
# ## Manipulating data
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
data = pd.read_csv('data/nyc_data.csv', parse_dates=['pickup_datetime',
'dropoff_datetime'])
fare = pd.read_csv('data/nyc_fare.csv', parse_dates=['pickup_datetime'])
# ### Selecting data
data[['trip_distance', 'trip_time_in_secs']].head(3)
data.loc[0]
data.loc[[0, 100000]]
data.loc[1000:2000:10,
['trip_distance', 'trip_time_in_secs']]
data.loc[data.trip_distance>50]
from ipywidgets import interact
@interact
def show_nrows(distance_threshold=(0, 200)):
return len(data.loc[data.trip_distance > distance_threshold])
# ### Computing with numbers
data['trip_time_in_mins'] = data.trip_time_in_secs / 60.0
data[['trip_time_in_secs', 'trip_time_in_mins']].head(3)
a = data.trip_distance[:5]
a
b = data.trip_distance[2:6]
b
a + b
# ### Working with text
data.medallion.head(3)
data.medallion.str.slice(0, 4).head(3)
# ### Working with dates and times
data.pickup_datetime.dt.dayofweek[::200000]
day_p = data.pickup_datetime.dt.day
day_d = data.dropoff_datetime.dt.day
selection = (day_p != day_d)
print(len(data.loc[selection]))
data.loc[selection].head(3)
# ### Handling missing data
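# The original notebook stops at this heading; below is a minimal sketch of the
# usual operations, assuming `data` may contain missing values (whether these
# particular columns actually do is not checked here).
data.isnull().sum().head(10)   # count missing values per column
data_clean = data.dropna()     # drop rows containing any missing value
data_filled = data.fillna(0)   # or fill missing values with a constant
len(data), len(data_clean)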
| Section 2/22-manipulating.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: TensorFlow 2.3 on Python 3.6 (CUDA 10.1)
# language: python
# name: python3
# ---
# + [markdown] id="oXsz2JFUw_Nq"
# # Sigmoid activation function
# + [markdown] id="wiP3O4uuw_Nr"
# [](https://colab.research.google.com/github/rickiepark/dl-illustrated/blob/master/notebooks/sigmoid_function.ipynb)
# + id="Mw4kUI2Ow_Ns"
from math import e
# + id="2u7mQgEHw_Ns"
def sigmoid(z):
return 1/(1+e**-z)
# + id="pBoFXStpw_Ns" outputId="3a083064-80ba-40e6-8a66-503163957cef" colab={"base_uri": "https://localhost:8080/"}
sigmoid(.00001)
# + id="RsGW7IXqw_Nt" outputId="a920d743-7677-4f4b-97ac-44012e0d51d5" colab={"base_uri": "https://localhost:8080/"}
sigmoid(10000)
# + id="J7qiDVKjw_Nt" outputId="70bfb360-6a0a-4a91-c952-0410bfe89bec" colab={"base_uri": "https://localhost:8080/"}
sigmoid(-1)
# + id="G9CubnUzw_Nu" outputId="352b81d2-0dca-4fd1-a478-bee648d508c2" colab={"base_uri": "https://localhost:8080/"}
sigmoid(-10)
# + id="lR2AsZFsw_Nu" outputId="323414b9-db4c-46fd-991c-e5ed10455608" colab={"base_uri": "https://localhost:8080/"}
sigmoid(-2.0)
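# + [markdown]
# A quick visualization (not part of the original notebook): plotting the sigmoid
# over [-10, 10] makes the saturation behaviour seen in the calls above easy to see.
# +
import numpy as np
import matplotlib.pyplot as plt

z = np.linspace(-10, 10, 200)
plt.plot(z, sigmoid(z))
plt.xlabel('z')
plt.ylabel('sigmoid(z)')
plt.show()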
| notebooks/6-1.sigmoid_function.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _kg_hide-input=true _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" papermill={"duration": 0.925651, "end_time": "2020-11-27T10:48:44.898007", "exception": false, "start_time": "2020-11-27T10:48:43.972356", "status": "completed"} tags=[]
kernel_mode = False
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
import sys
if kernel_mode:
sys.path.append('../input/iterativestratification')
from iterstrat.ml_stratifiers import MultilabelStratifiedKFold
# + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _kg_hide-input=true _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a" papermill={"duration": 1.543715, "end_time": "2020-11-27T10:48:46.469924", "exception": false, "start_time": "2020-11-27T10:48:44.926209", "status": "completed"} tags=[]
import numpy as np
import random
import pandas as pd
import matplotlib.pyplot as plt
import os
import copy
import seaborn as sns
from sklearn import preprocessing
from sklearn.metrics import log_loss
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA,FactorAnalysis
import pickle
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import warnings
warnings.filterwarnings('ignore')
# + papermill={"duration": 0.036352, "end_time": "2020-11-27T10:48:46.535940", "exception": false, "start_time": "2020-11-27T10:48:46.499588", "status": "completed"} tags=[]
from sklearn.preprocessing import QuantileTransformer
# + _kg_hide-input=true papermill={"duration": 0.040257, "end_time": "2020-11-27T10:48:46.604114", "exception": false, "start_time": "2020-11-27T10:48:46.563857", "status": "completed"} tags=[]
dataset_folder = "../input/lish-moa" if kernel_mode else "/workspace/Kaggle/MoA"
model_output_folder = "." if kernel_mode \
else f"{dataset_folder}/simple-nn-using-old-cv-markpeng"
BATCH_SIZE = 128
INFER_BATCH_SIZE = 256
if kernel_mode:
os.listdir(dataset_folder)
# + _kg_hide-input=true papermill={"duration": 6.795642, "end_time": "2020-11-27T10:48:53.430527", "exception": false, "start_time": "2020-11-27T10:48:46.634885", "status": "completed"} tags=[]
train_features = pd.read_csv(f'{dataset_folder}/train_features.csv')
train_targets_scored = pd.read_csv(
f'{dataset_folder}/train_targets_scored.csv')
train_targets_nonscored = pd.read_csv(
f'{dataset_folder}/train_targets_nonscored.csv')
test_features = pd.read_csv(f'{dataset_folder}/test_features.csv')
sample_submission = pd.read_csv(f'{dataset_folder}/sample_submission.csv')
# + papermill={"duration": 0.03754, "end_time": "2020-11-27T10:48:53.496590", "exception": false, "start_time": "2020-11-27T10:48:53.459050", "status": "completed"} tags=[]
GENES = [col for col in train_features.columns if col.startswith('g-')]
CELLS = [col for col in train_features.columns if col.startswith('c-')]
# + papermill={"duration": 0.036164, "end_time": "2020-11-27T10:48:53.561782", "exception": false, "start_time": "2020-11-27T10:48:53.525618", "status": "completed"} tags=[]
IS_TRAIN = True
if IS_TRAIN:
os.makedirs(model_output_folder, exist_ok=True)
# + papermill={"duration": 10.833594, "end_time": "2020-11-27T10:49:04.424733", "exception": false, "start_time": "2020-11-27T10:48:53.591139", "status": "completed"} tags=[]
for col in (GENES + CELLS):
# transformer = QuantileTransformer(n_quantiles=100,random_state=0, output_distribution="normal")
vec_len = len(train_features[col].values)
vec_len_test = len(test_features[col].values)
raw_vec = train_features[col].values.reshape(vec_len, 1)
if IS_TRAIN:
transformer = QuantileTransformer(n_quantiles=100,
random_state=0,
output_distribution="normal")
transformer.fit(raw_vec)
pd.to_pickle(transformer, f'{model_output_folder}/{col}_quantile_transformer.pkl')
else:
transformer = pd.read_pickle(f'{model_output_folder}/{col}_quantile_transformer.pkl')
train_features[col] = transformer.transform(raw_vec).reshape(1, vec_len)[0]
test_features[col] = transformer.transform(
test_features[col].values.reshape(vec_len_test,
1)).reshape(1, vec_len_test)[0]
# + papermill={"duration": 0.046837, "end_time": "2020-11-27T10:49:04.507664", "exception": false, "start_time": "2020-11-27T10:49:04.460827", "status": "completed"} tags=[]
def seed_everything(seed=42):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
seed_everything(seed=42)
# + papermill={"duration": 13.758284, "end_time": "2020-11-27T10:49:18.298556", "exception": false, "start_time": "2020-11-27T10:49:04.540272", "status": "completed"} tags=[]
# GENES
n_comp = 90 #<--Update
data = pd.concat(
[pd.DataFrame(train_features[GENES]),
pd.DataFrame(test_features[GENES])])
#data2 = (FactorAnalysis(n_components=n_comp, random_state=42).fit_transform(data[GENES]))
if IS_TRAIN:
fa = FactorAnalysis(n_components=n_comp,
random_state=1903).fit(data[GENES])
pd.to_pickle(fa, f'{model_output_folder}/factor_analysis_g.pkl')
#umap = UMAP(n_components=n_dim, random_state=1903).fit(data[GENES])
#pd.to_pickle(umap, f'{MODEL_DIR}/{NB}_umap_g.pkl')
else:
fa = pd.read_pickle(f'{model_output_folder}/factor_analysis_g.pkl')
#umap = pd.read_pickle(f'{MODEL_DIR}/{NB}_umap_g.pkl')
data2 = fa.transform(data[GENES])
train2 = data2[:train_features.shape[0]]
test2 = data2[-test_features.shape[0]:]
train2 = pd.DataFrame(train2, columns=[f'pca_G-{i}' for i in range(n_comp)])
test2 = pd.DataFrame(test2, columns=[f'pca_G-{i}' for i in range(n_comp)])
# drop_cols = [f'c-{i}' for i in range(n_comp,len(GENES))]
train_features = pd.concat((train_features, train2), axis=1)
test_features = pd.concat((test_features, test2), axis=1)
# + papermill={"duration": 2.574156, "end_time": "2020-11-27T10:49:20.902084", "exception": false, "start_time": "2020-11-27T10:49:18.327928", "status": "completed"} tags=[]
#CELLS
n_comp = 50 #<--Update
data = pd.concat(
[pd.DataFrame(train_features[CELLS]),
pd.DataFrame(test_features[CELLS])])
if IS_TRAIN:
fa = FactorAnalysis(n_components=n_comp,
random_state=1903).fit(data[CELLS])
pd.to_pickle(fa, f'{model_output_folder}/factor_analysis_c.pkl')
#umap = UMAP(n_components=n_dim, random_state=1903).fit(data[GENES])
#pd.to_pickle(umap, f'{MODEL_DIR}/{NB}_umap_g.pkl')
else:
fa = pd.read_pickle(f'{model_output_folder}/factor_analysis_c.pkl')
#umap = pd.read_pickle(f'{MODEL_DIR}/{NB}_umap_g.pkl')
data2 = fa.transform(data[CELLS])
#data2 = (FactorAnalysis(n_components=n_comp, random_state=42).fit_transform(data[CELLS]))
train2 = data2[:train_features.shape[0]]
test2 = data2[-test_features.shape[0]:]
train2 = pd.DataFrame(train2, columns=[f'pca_C-{i}' for i in range(n_comp)])
test2 = pd.DataFrame(test2, columns=[f'pca_C-{i}' for i in range(n_comp)])
# drop_cols = [f'c-{i}' for i in range(n_comp,len(CELLS))]
train_features = pd.concat((train_features, train2), axis=1)
test_features = pd.concat((test_features, test2), axis=1)
# + papermill={"duration": 0.040369, "end_time": "2020-11-27T10:49:20.973115", "exception": false, "start_time": "2020-11-27T10:49:20.932746", "status": "completed"} tags=[]
train_features.shape
# + papermill={"duration": 9.782993, "end_time": "2020-11-27T10:49:30.788961", "exception": false, "start_time": "2020-11-27T10:49:21.005968", "status": "completed"} tags=[]
from sklearn.feature_selection import VarianceThreshold
#var_thresh = VarianceThreshold(0.8) #<-- Update
var_thresh = QuantileTransformer(n_quantiles=100,
random_state=0,
output_distribution="normal")
data = train_features.append(test_features)
if IS_TRAIN:
transformer = QuantileTransformer(n_quantiles=100,
random_state=123,
output_distribution="normal")
transformer.fit(data.iloc[:, 5:])
pd.to_pickle(transformer, f'{model_output_folder}/{col}_quantile_transformer2.pkl')
else:
transformer = pd.read_pickle(f'{model_output_folder}/{col}_quantile_transformer2.pkl')
data_transformed = transformer.transform(data.iloc[:, 5:])
train_features_transformed = data_transformed[:train_features.shape[0]]
test_features_transformed = data_transformed[-test_features.shape[0]:]
train_features = pd.DataFrame(train_features[['sig_id','cp_type','cp_time','cp_dose']].values.reshape(-1, 4),\
columns=['sig_id','cp_type','cp_time','cp_dose'])
train_features = pd.concat(
[train_features, pd.DataFrame(train_features_transformed)], axis=1)
test_features = pd.DataFrame(test_features[['sig_id','cp_type','cp_time','cp_dose']].values.reshape(-1, 4),\
columns=['sig_id','cp_type','cp_time','cp_dose'])
test_features = pd.concat(
[test_features, pd.DataFrame(test_features_transformed)], axis=1)
train_features.shape
# + papermill={"duration": 0.07808, "end_time": "2020-11-27T10:49:30.898086", "exception": false, "start_time": "2020-11-27T10:49:30.820006", "status": "completed"} tags=[]
train_features
# + papermill={"duration": 0.044226, "end_time": "2020-11-27T10:49:30.978520", "exception": false, "start_time": "2020-11-27T10:49:30.934294", "status": "completed"} tags=[]
from pickle import load, dump
# + papermill={"duration": 93.982281, "end_time": "2020-11-27T10:51:04.994307", "exception": false, "start_time": "2020-11-27T10:49:31.012026", "status": "completed"} tags=[]
from sklearn.cluster import KMeans
def fe_cluster_genes(train, test, n_clusters_g=45, SEED=123):
#features_g = GENES
#features_c = CELLS
features_g = list(train.columns[4:776])
def create_cluster(train,
test,
features,
kind='g',
n_clusters=n_clusters_g):
train_ = train[features].copy()
test_ = test[features].copy()
data = pd.concat([train_, test_], axis=0)
kmeans_genes = KMeans(n_clusters=n_clusters,
random_state=SEED).fit(data)
dump(kmeans_genes, open(f'{model_output_folder}/kmeans_genes.pkl', 'wb'))
train[f'clusters_{kind}'] = kmeans_genes.predict(train_.values)
test[f'clusters_{kind}'] = kmeans_genes.predict(test_.values)
train = pd.get_dummies(train, columns=[f'clusters_{kind}'])
test = pd.get_dummies(test, columns=[f'clusters_{kind}'])
return train, test
train, test = create_cluster(train,
test,
features_g,
kind='g',
n_clusters=n_clusters_g)
# train, test = create_cluster(train, test, features_c, kind = 'c', n_clusters = n_clusters_c)
return train, test
train_features, test_features = fe_cluster_genes(train_features, test_features)
# + papermill={"duration": 13.247756, "end_time": "2020-11-27T10:51:18.292336", "exception": false, "start_time": "2020-11-27T10:51:05.044580", "status": "completed"} tags=[]
def fe_cluster_cells(train, test, n_clusters_c=15, SEED=123):
#features_g = GENES
#features_c = CELLS
features_c = list(train.columns[776:876])
def create_cluster(train,
test,
features,
kind='c',
n_clusters=n_clusters_c):
train_ = train[features].copy()
test_ = test[features].copy()
data = pd.concat([train_, test_], axis=0)
kmeans_cells = KMeans(n_clusters=n_clusters,
random_state=SEED).fit(data)
dump(kmeans_cells, open(f'{model_output_folder}/kmeans_cells.pkl', 'wb'))
train[f'clusters_{kind}'] = kmeans_cells.predict(train_.values)
test[f'clusters_{kind}'] = kmeans_cells.predict(test_.values)
train = pd.get_dummies(train, columns=[f'clusters_{kind}'])
test = pd.get_dummies(test, columns=[f'clusters_{kind}'])
return train, test
# train, test = create_cluster(train, test, features_g, kind = 'g', n_clusters = n_clusters_g)
train, test = create_cluster(train,
test,
features_c,
kind='c',
n_clusters=n_clusters_c)
return train, test
train_features, test_features = fe_cluster_cells(train_features, test_features)
# + papermill={"duration": 5.837022, "end_time": "2020-11-27T10:51:24.162412", "exception": false, "start_time": "2020-11-27T10:51:18.325390", "status": "completed"} tags=[]
def fe_stats(train, test):
features_g = list(train.columns[4:776])
features_c = list(train.columns[776:876])
for df in train, test:
df['g_sum'] = df[features_g].sum(axis=1)
df['g_mean'] = df[features_g].mean(axis=1)
df['g_std'] = df[features_g].std(axis=1)
df['g_kurt'] = df[features_g].kurtosis(axis=1)
df['g_skew'] = df[features_g].skew(axis=1)
df['c_sum'] = df[features_c].sum(axis=1)
df['c_mean'] = df[features_c].mean(axis=1)
df['c_std'] = df[features_c].std(axis=1)
df['c_kurt'] = df[features_c].kurtosis(axis=1)
df['c_skew'] = df[features_c].skew(axis=1)
df['gc_sum'] = df[features_g + features_c].sum(axis=1)
df['gc_mean'] = df[features_g + features_c].mean(axis=1)
df['gc_std'] = df[features_g + features_c].std(axis=1)
df['gc_kurt'] = df[features_g + features_c].kurtosis(axis=1)
df['gc_skew'] = df[features_g + features_c].skew(axis=1)
return train, test
train_features, test_features = fe_stats(train_features, test_features)
# + papermill={"duration": 0.665401, "end_time": "2020-11-27T10:51:24.861177", "exception": false, "start_time": "2020-11-27T10:51:24.195776", "status": "completed"} tags=[]
train = train_features.merge(train_targets_scored, on='sig_id')
train = train[train['cp_type'] != 'ctl_vehicle'].reset_index(drop=True)
test = test_features[test_features['cp_type'] != 'ctl_vehicle'].reset_index(
drop=True)
target = train[train_targets_scored.columns]
# + papermill={"duration": 0.120898, "end_time": "2020-11-27T10:51:25.014579", "exception": false, "start_time": "2020-11-27T10:51:24.893681", "status": "completed"} tags=[]
train = train.drop('cp_type', axis=1)
test = test.drop('cp_type', axis=1)
# + papermill={"duration": 0.076381, "end_time": "2020-11-27T10:51:25.124163", "exception": false, "start_time": "2020-11-27T10:51:25.047782", "status": "completed"} tags=[]
train
# + papermill={"duration": 0.055486, "end_time": "2020-11-27T10:51:25.218602", "exception": false, "start_time": "2020-11-27T10:51:25.163116", "status": "completed"} tags=[]
target_cols = target.drop('sig_id', axis=1).columns.values.tolist()
# + papermill={"duration": 2.991823, "end_time": "2020-11-27T10:51:28.245302", "exception": false, "start_time": "2020-11-27T10:51:25.253479", "status": "completed"} tags=[]
folds = train.copy()
mskf = MultilabelStratifiedKFold(n_splits=5)
for f, (t_idx, v_idx) in enumerate(mskf.split(X=train, y=target)):
folds.loc[v_idx, 'kfold'] = int(f)
folds['kfold'] = folds['kfold'].astype(int)
folds
# + papermill={"duration": 0.060901, "end_time": "2020-11-27T10:51:28.348859", "exception": false, "start_time": "2020-11-27T10:51:28.287958", "status": "completed"} tags=[]
print(train.shape)
print(folds.shape)
print(test.shape)
print(target.shape)
print(sample_submission.shape)
# + [markdown] papermill={"duration": 0.039426, "end_time": "2020-11-27T10:51:28.431210", "exception": false, "start_time": "2020-11-27T10:51:28.391784", "status": "completed"} tags=[]
# # Dataset Classes
# + papermill={"duration": 0.05262, "end_time": "2020-11-27T10:51:28.521990", "exception": false, "start_time": "2020-11-27T10:51:28.469370", "status": "completed"} tags=[]
class MoADataset:
def __init__(self, features, targets):
self.features = features
self.targets = targets
def __len__(self):
return (self.features.shape[0])
def __getitem__(self, idx):
dct = {
'x': torch.tensor(self.features[idx, :], dtype=torch.float),
'y': torch.tensor(self.targets[idx, :], dtype=torch.float)
}
return dct
class TestDataset:
def __init__(self, features):
self.features = features
def __len__(self):
return (self.features.shape[0])
def __getitem__(self, idx):
dct = {'x': torch.tensor(self.features[idx, :], dtype=torch.float)}
return dct
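
# Tiny usage illustration (not part of the training flow): two samples with three
# features and two targets each are wrapped and read back as float tensors.
_demo_ds = MoADataset(np.zeros((2, 3)), np.ones((2, 2)))
print(len(_demo_ds), _demo_ds[0]['x'].shape, _demo_ds[0]['y'].shape)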
# + papermill={"duration": 0.058828, "end_time": "2020-11-27T10:51:28.619840", "exception": false, "start_time": "2020-11-27T10:51:28.561012", "status": "completed"} tags=[]
def train_fn(model, optimizer, scheduler, loss_fn, dataloader, device):
model.train()
final_loss = 0
for data in dataloader:
optimizer.zero_grad()
inputs, targets = data['x'].to(device), data['y'].to(device)
# print(inputs.shape)
outputs = model(inputs)
loss = loss_fn(outputs, targets)
loss.backward()
optimizer.step()
scheduler.step()
final_loss += loss.item()
final_loss /= len(dataloader)
return final_loss
def valid_fn(model, loss_fn, dataloader, device):
model.eval()
final_loss = 0
valid_preds = []
for data in dataloader:
inputs, targets = data['x'].to(device), data['y'].to(device)
outputs = model(inputs)
loss = loss_fn(outputs, targets)
final_loss += loss.item()
valid_preds.append(outputs.sigmoid().detach().cpu().numpy())
final_loss /= len(dataloader)
valid_preds = np.concatenate(valid_preds)
return final_loss, valid_preds
def inference_fn(model, dataloader, device):
model.eval()
preds = []
for data in dataloader:
inputs = data['x'].to(device)
with torch.no_grad():
outputs = model(inputs)
preds.append(outputs.sigmoid().detach().cpu().numpy())
preds = np.concatenate(preds)
return preds
# + papermill={"duration": 0.055085, "end_time": "2020-11-27T10:51:28.713670", "exception": false, "start_time": "2020-11-27T10:51:28.658585", "status": "completed"} tags=[]
import torch
from torch.nn.modules.loss import _WeightedLoss
import torch.nn.functional as F
class SmoothBCEwLogits(_WeightedLoss):
def __init__(self, weight=None, reduction='mean', smoothing=0.0):
super().__init__(weight=weight, reduction=reduction)
self.smoothing = smoothing
self.weight = weight
self.reduction = reduction
@staticmethod
def _smooth(targets: torch.Tensor, n_labels: int, smoothing=0.0):
assert 0 <= smoothing < 1
with torch.no_grad():
targets = targets * (1.0 - smoothing) + 0.5 * smoothing
return targets
def forward(self, inputs, targets):
targets = SmoothBCEwLogits._smooth(targets, inputs.size(-1),
self.smoothing)
loss = F.binary_cross_entropy_with_logits(inputs, targets, self.weight)
if self.reduction == 'sum':
loss = loss.sum()
elif self.reduction == 'mean':
loss = loss.mean()
return loss
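
# Tiny illustration (not part of the original pipeline) of what the label smoothing
# above does: with smoothing=0.001, hard 0/1 targets become 0.0005/0.9995 before the
# BCE-with-logits loss is computed.
print(SmoothBCEwLogits._smooth(torch.tensor([0.0, 1.0]), n_labels=2, smoothing=0.001))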
# + papermill={"duration": 0.051814, "end_time": "2020-11-27T10:51:28.803829", "exception": false, "start_time": "2020-11-27T10:51:28.752015", "status": "completed"} tags=[]
class Model(nn.Module): # <-- Update
def __init__(self, num_features, num_targets, hidden_size):
super(Model, self).__init__()
self.batch_norm1 = nn.BatchNorm1d(num_features)
self.dense1 = nn.utils.weight_norm(nn.Linear(num_features,
hidden_size))
self.batch_norm2 = nn.BatchNorm1d(hidden_size)
self.dropout2 = nn.Dropout(0.25)
self.dense2 = nn.Linear(hidden_size, hidden_size)
self.batch_norm3 = nn.BatchNorm1d(hidden_size)
self.dropout3 = nn.Dropout(0.25)
self.dense3 = nn.utils.weight_norm(nn.Linear(hidden_size, num_targets))
def forward(self, x):
x = self.batch_norm1(x)
x = F.leaky_relu(self.dense1(x))
x = self.batch_norm2(x)
x = self.dropout2(x)
x = F.leaky_relu(self.dense2(x))
x = self.batch_norm3(x)
x = self.dropout3(x)
x = self.dense3(x)
return x
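
# Illustrative shape check (not part of the original training code): a random batch
# of 4 samples with 10 features maps to 3 target logits.
_demo_model = Model(num_features=10, num_targets=3, hidden_size=16)
print(_demo_model(torch.randn(4, 10)).shape)  # expected: torch.Size([4, 3])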
# + papermill={"duration": 0.044943, "end_time": "2020-11-27T10:51:28.885956", "exception": false, "start_time": "2020-11-27T10:51:28.841013", "status": "completed"} tags=[]
def process_data(data):
data = pd.get_dummies(data, columns=['cp_time', 'cp_dose'])
return data
# + papermill={"duration": 0.231478, "end_time": "2020-11-27T10:51:29.154749", "exception": false, "start_time": "2020-11-27T10:51:28.923271", "status": "completed"} tags=[]
feature_cols = [c for c in process_data(folds).columns if c not in target_cols]
feature_cols = [c for c in feature_cols if c not in ['kfold', 'sig_id']]
len(feature_cols)
# + papermill={"duration": 0.422989, "end_time": "2020-11-27T10:51:29.616460", "exception": false, "start_time": "2020-11-27T10:51:29.193471", "status": "completed"} tags=[]
# HyperParameters
DEVICE = ('cuda' if torch.cuda.is_available() else 'cpu')
EPOCHS = 25
LEARNING_RATE = 5e-3
WEIGHT_DECAY = 1e-5
NFOLDS = 5 #<-- Update
EARLY_STOPPING_STEPS = 10
EARLY_STOP = False
num_features = len(feature_cols)
num_targets = len(target_cols)
hidden_size = 2048
# + papermill={"duration": 0.071102, "end_time": "2020-11-27T10:51:29.726239", "exception": false, "start_time": "2020-11-27T10:51:29.655137", "status": "completed"} tags=[]
def run_training(fold, seed):
seed_everything(seed)
train = process_data(folds)
test_ = process_data(test)
trn_idx = train[train['kfold'] != fold].index
val_idx = train[train['kfold'] == fold].index
train_df = train[train['kfold'] != fold].reset_index(drop=True)
valid_df = train[train['kfold'] == fold].reset_index(drop=True)
x_train, y_train = train_df[feature_cols].values, train_df[
target_cols].values
x_valid, y_valid = valid_df[feature_cols].values, valid_df[
target_cols].values
train_dataset = MoADataset(x_train, y_train)
valid_dataset = MoADataset(x_valid, y_valid)
trainloader = torch.utils.data.DataLoader(train_dataset,
batch_size=BATCH_SIZE,
shuffle=True)
validloader = torch.utils.data.DataLoader(valid_dataset,
batch_size=INFER_BATCH_SIZE,
shuffle=False)
model = Model(
num_features=num_features,
num_targets=num_targets,
hidden_size=hidden_size,
)
model.to(DEVICE)
optimizer = torch.optim.Adam(model.parameters(),
lr=5e-3,
weight_decay=WEIGHT_DECAY)
scheduler = optim.lr_scheduler.OneCycleLR(optimizer=optimizer,
pct_start=0.1,
div_factor=1e3,
max_lr=1e-2,
epochs=EPOCHS,
steps_per_epoch=len(trainloader))
loss_fn = nn.BCEWithLogitsLoss()
loss_tr = SmoothBCEwLogits(smoothing=0.001)
early_stopping_steps = EARLY_STOPPING_STEPS
early_step = 0
oof = np.zeros((len(train), target.iloc[:, 1:].shape[1]))
best_loss = np.inf
for epoch in range(EPOCHS):
train_loss = train_fn(model, optimizer, scheduler, loss_tr,
trainloader, DEVICE)
print(
f"SEED: {seed}, FOLD: {fold}, EPOCH: {epoch}, train_loss: {train_loss}"
)
valid_loss, valid_preds = valid_fn(model, loss_fn, validloader, DEVICE)
print(
f"SEED: {seed} ,FOLD: {fold}, EPOCH: {epoch}, valid_loss: {valid_loss}"
)
if valid_loss < best_loss:
best_loss = valid_loss
oof[val_idx] = valid_preds
torch.save(model.state_dict(),
f"{model_output_folder}/SEED{seed}_FOLD{fold}_.pth")
elif (EARLY_STOP == True):
early_step += 1
if (early_step >= early_stopping_steps):
break
#--------------------- PREDICTION---------------------
x_test = test_[feature_cols].values
testdataset = TestDataset(x_test)
testloader = torch.utils.data.DataLoader(testdataset,
batch_size=INFER_BATCH_SIZE,
shuffle=False)
model = Model(
num_features=num_features,
num_targets=num_targets,
hidden_size=hidden_size,
)
model.load_state_dict(
torch.load(f"{model_output_folder}/SEED{seed}_FOLD{fold}_.pth"))
model.to(DEVICE)
predictions = np.zeros((len(test_), target.iloc[:, 1:].shape[1]))
predictions = inference_fn(model, testloader, DEVICE)
return oof, predictions
# + papermill={"duration": 0.047659, "end_time": "2020-11-27T10:51:29.813624", "exception": false, "start_time": "2020-11-27T10:51:29.765965", "status": "completed"} tags=[]
def run_k_fold(NFOLDS, seed):
oof = np.zeros((len(train), len(target_cols)))
predictions = np.zeros((len(test), len(target_cols)))
for fold in range(NFOLDS):
oof_, pred_ = run_training(fold, seed)
predictions += pred_ / NFOLDS
oof += oof_
return oof, predictions
# + papermill={"duration": 1183.480743, "end_time": "2020-11-27T11:11:13.331511", "exception": false, "start_time": "2020-11-27T10:51:29.850768", "status": "completed"} tags=[]
# Averaging on multiple SEEDS
SEED = [940, 1513, 1269, 1392, 1119, 1303] #<-- Update
oof = np.zeros((len(train), len(target_cols)))
predictions = np.zeros((len(test), len(target_cols)))
for seed in SEED:
oof_, predictions_ = run_k_fold(NFOLDS, seed)
oof += oof_ / len(SEED)
predictions += predictions_ / len(SEED)
train[target_cols] = oof
test[target_cols] = predictions
# + papermill={"duration": 0.350112, "end_time": "2020-11-27T11:11:14.010675", "exception": false, "start_time": "2020-11-27T11:11:13.660563", "status": "completed"} tags=[]
train_targets_scored
# + papermill={"duration": 0.326173, "end_time": "2020-11-27T11:11:14.647738", "exception": false, "start_time": "2020-11-27T11:11:14.321565", "status": "completed"} tags=[]
len(target_cols)
# + papermill={"duration": 1.481091, "end_time": "2020-11-27T11:11:16.441615", "exception": false, "start_time": "2020-11-27T11:11:14.960524", "status": "completed"} tags=[]
valid_results = train_targets_scored.drop(columns=target_cols).merge(
train[['sig_id'] + target_cols], on='sig_id', how='left').fillna(0)
y_true = train_targets_scored[target_cols].values
y_pred = valid_results[target_cols].values
score = 0
for i in range(len(target_cols)):
score_ = log_loss(y_true[:, i], y_pred[:, i])
score += score_ / target.shape[1]
print("CV log_loss: ", score)
# + papermill={"duration": 2.144517, "end_time": "2020-11-27T11:11:18.907149", "exception": false, "start_time": "2020-11-27T11:11:16.762632", "status": "completed"} tags=[]
sub = sample_submission.drop(columns=target_cols).merge(test[['sig_id'] +
target_cols],
on='sig_id',
how='left').fillna(0)
sub.to_csv('submission.csv', index=False)
# sub.to_csv('submission_script_simpleNN_oldcv_0.01836.csv', index=False)
# + papermill={"duration": 0.322273, "end_time": "2020-11-27T11:11:19.543603", "exception": false, "start_time": "2020-11-27T11:11:19.221330", "status": "completed"} tags=[]
sub.shape
# + papermill={"duration": 0.321238, "end_time": "2020-11-27T11:11:20.180504", "exception": false, "start_time": "2020-11-27T11:11:19.859266", "status": "completed"} tags=[]
| models/simple-nn-using-old-cv-markpeng.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import sqlite3
with sqlite3.connect("chapter.db") as conn:
cursor = conn.cursor()
rows = cursor.execute('SELECT * FROM user ORDER BY age DESC')
for row in rows:
print(row)
with sqlite3.connect("chapter.db") as conn:
cursor = conn.cursor()
rows = cursor.execute('SELECT * FROM user ORDER BY age')
for row in rows:
print(row)
| Chapter08/.ipynb_checkpoints/Exercise 8.03-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="CwNLv6dKKny3"
# ## Interacting with CerebralCortex Data
# + [markdown] id="GSrq6c-llvwG"
# Cerebral Cortex is MD2K's big data cloud tool designed to support population-scale data analysis, visualization, model development, and intervention design for mobile-sensor data. It provides the ability to do machine learning model development on population scale datasets and provides interoperable interfaces for aggregation of diverse data sources.
#
# This page provides an overview of the core Cerebral Cortex operations to familiarize you with how to discover and interact with the different sources of data that may be contained within the system.
#
# _Note:_ While some of these examples are showing generated data, they are designed to function on real-world mCerebrum data and the signal generators were built to facilitate the testing and evaluation of the Cerebral Cortex platform by those individuals that are unable to see those original datasets or do not wish to collect data before evaluating the system.
# + [markdown] id="YfJoVwtMN-a_"
# ### Download Sample Dataset
# We use the [WESAD](https://archive.ics.uci.edu/ml/datasets/WESAD+%28Wearable+Stress+and+Affect+Detection%29) dataset to demonstrate Cerebral Cortex Kernel capabilities. WESAD is a publicly available dataset for wearable stress and affect detection. This multimodal dataset features physiological and motion data, recorded from both a wrist- and a chest-worn device, of 15 subjects during a lab study. The following sensor modalities are included: blood volume pulse, electrocardiogram, electrodermal activity, electromyogram, respiration, body temperature, and three-axis acceleration. Moreover, the dataset bridges the gap between previous lab studies on stress and emotions by containing three different affective states (neutral, stress, amusement). In addition, self-reports of the subjects, obtained using several established questionnaires, are included in the dataset.
# + [markdown] id="wQykWeXzOyhZ"
# ## Setting Up Environment
# + [markdown] id="nywqzvgnweeS"
# Colab does not contain the runtime environment necessary to run Cerebral Cortex. The following commands download and install the required tools, frameworks, and datasets.
# + colab={"base_uri": "https://localhost:8080/"} id="nxRRhbDX3EaT" executionInfo={"status": "ok", "timestamp": 1627422365684, "user_tz": 300, "elapsed": 151, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh79SoXWdWnHQd17nkp3P-NbcrexOPu5iMhtyuaWg=s64", "userId": "06621486372133451647"}} outputId="0e7346e9-4230-4d54-e1fd-0451a76154a3"
import importlib, sys, os
from os.path import expanduser
sys.path.insert(0, os.path.abspath('..'))
DOWNLOAD_USER_DATA=True
ALL_USERS=False #this will only work if DOWNLOAD_USER_DATA=True
IN_COLAB = 'google.colab' in sys.modules
MD2K_JUPYTER_NOTEBOOK = "MD2K_JUPYTER_NOTEBOOK" in os.environ
IN_JUPYTER_NOTEBOOK = get_ipython().__class__.__name__ == "ZMQInteractiveShell"
JAVA_HOME_DEFINED = "JAVA_HOME" in os.environ
SPARK_HOME_DEFINED = "SPARK_HOME" in os.environ
PYSPARK_PYTHON_DEFINED = "PYSPARK_PYTHON" in os.environ
PYSPARK_DRIVER_PYTHON_DEFINED = "PYSPARK_DRIVER_PYTHON" in os.environ
HAVE_CEREBRALCORTEX_KERNEL = importlib.util.find_spec("cerebralcortex") is not None
SPARK_VERSION = "3.1.2"
SPARK_URL = "https://archive.apache.org/dist/spark/spark-"+SPARK_VERSION+"/spark-"+SPARK_VERSION+"-bin-hadoop2.7.tgz"
SPARK_FILE_NAME = "spark-"+SPARK_VERSION+"-bin-hadoop2.7.tgz"
CEREBRALCORTEX_KERNEL_VERSION = "3.3.14"
DATA_PATH = expanduser("~")
if DATA_PATH[:-1]!="/":
DATA_PATH+="/"
USER_DATA_PATH = DATA_PATH+"cc_data/"
if MD2K_JUPYTER_NOTEBOOK:
print("Java, Spark, and CerebralCortex-Kernel are installed and paths are already setup.")
else:
SPARK_PATH = DATA_PATH+"spark-"+SPARK_VERSION+"-bin-hadoop2.7/"
if(not HAVE_CEREBRALCORTEX_KERNEL):
print("Installing CerebralCortex-Kernel")
# !pip -q install cerebralcortex-kernel==$CEREBRALCORTEX_KERNEL_VERSION
else:
print("CerebralCortex-Kernel is already installed.")
if not JAVA_HOME_DEFINED:
if not os.path.exists("/usr/lib/jvm/java-8-openjdk-amd64/") and not os.path.exists("/usr/lib/jvm/java-11-openjdk-amd64/"):
print("\nInstalling/Configuring Java")
# !sudo apt update
# !sudo apt-get install -y openjdk-8-jdk-headless
os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64/"
elif os.path.exists("/usr/lib/jvm/java-8-openjdk-amd64/"):
print("\nSetting up Java path")
os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64/"
elif os.path.exists("/usr/lib/jvm/java-11-openjdk-amd64/"):
print("\nSetting up Java path")
os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-11-openjdk-amd64/"
else:
print("JAVA is already installed.")
if (IN_COLAB or IN_JUPYTER_NOTEBOOK) and not MD2K_JUPYTER_NOTEBOOK:
if SPARK_HOME_DEFINED:
print("SPARK is already installed.")
elif not os.path.exists(SPARK_PATH):
print("\nSetting up Apache Spark ", SPARK_VERSION)
# !pip -q install findspark
import pyspark
spark_installation_path = os.path.dirname(pyspark.__file__)
import findspark
findspark.init(spark_installation_path)
if not os.getenv("PYSPARK_PYTHON"):
os.environ["PYSPARK_PYTHON"] = os.popen('which python3').read().replace("\n","")
if not os.getenv("PYSPARK_DRIVER_PYTHON"):
os.environ["PYSPARK_DRIVER_PYTHON"] = os.popen('which python3').read().replace("\n","")
else:
print("SPARK is already installed.")
else:
raise SystemExit("Please check your environment configuration at: https://github.com/MD2Korg/CerebralCortex-Kernel/")
if DOWNLOAD_USER_DATA:
if not os.path.exists(USER_DATA_PATH):
if ALL_USERS:
print("\nDownloading all users' data.")
# !rm -rf $USER_DATA_PATH
# !wget -q http://mhealth.md2k.org/images/datasets/cc_data.tar.bz2 && tar -xf cc_data.tar.bz2 -C $DATA_PATH && rm cc_data.tar.bz2
else:
print("\nDownloading a user's data.")
# !rm -rf $USER_DATA_PATH
# !wget -q http://mhealth.md2k.org/images/datasets/s2_data.tar.bz2 && tar -xf s2_data.tar.bz2 -C $DATA_PATH && rm s2_data.tar.bz2
else:
print("Data already exist. Please remove folder", USER_DATA_PATH, "if you want to download the data again")
# + [markdown] id="h37i3y4fKny_"
# ## Import packages
# + id="WwYy9gpCKnzA"
from cerebralcortex.util.helper_methods import get_study_names
from cerebralcortex.kernel import Kernel
# + [markdown] id="C2vSXtv4KnzC"
# ## List all the available studies
#
# Studies are stored on disk as a SQLite database `cc_kernel_database.db` for the metadata which is coupled with a directory structure `study=wesad` beginning with the study name. Typically, a user calls the `get_study_names` method to list out all the possible studies that Cerebral Cortex currently has access to.
# + colab={"base_uri": "https://localhost:8080/"} id="lLv48U46KnzC" executionInfo={"status": "ok", "timestamp": 1627422365830, "user_tz": 300, "elapsed": 5, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh79SoXWdWnHQd17nkp3P-NbcrexOPu5iMhtyuaWg=s64", "userId": "06621486372133451647"}} outputId="9c255679-68e5-418b-acc7-f8608c46f7fe"
get_study_names()
# + [markdown] id="4VCMHBC_KnzD"
# ## Create CerebralCortex object
# The __Kernel__ object is the main entry point to the Cerebral Cortex system. It is necessary to pass a configuration directory that tells it all the different parameters it needs to communicate with its other components. You can examine the details of these configurations for this server by looking at the files contained in the `cc_conf` folder.
# + id="RFwxXEQdKnzD"
CC = Kernel(cc_configs="default", study_name="wesad")
# + [markdown] id="xsTIYM9OKnzE"
# ## Getting help
# These are the typical ways to learn more about the code and objects within Cerebral Cortex.
# 1. Intelligent context help by typing the object or class into a cell followed by the period, `.`, then when you press `<tab>` a popup will appear showing additional information about the object or method. Uncomment the first line to try it out.
# 2. Formatting the commands with a question mark retrieves the documentation strings and examples when appropriate. `? CC.list_streams`
# 3. Reading the documentation on our site: https://cerebralcortex-kernel.readthedocs.io/en/latest/
# + id="hMKtxl_mKnzE"
# CC.list_streams?
# + [markdown] id="Z7tSgshuKnzF"
# ## List available streams in CC
# One of the first things a researcher typically wants to know is what data is available to explore. The kernel offers a couple of methods to facilitate this. The first, `list_streams`, is shown below and exposes all the available streams within the system.
# + colab={"base_uri": "https://localhost:8080/"} id="QRaooFQiKnzF" executionInfo={"status": "ok", "timestamp": 1627422366011, "user_tz": 300, "elapsed": 36, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh79SoXWdWnHQd17nkp3P-NbcrexOPu5iMhtyuaWg=s64", "userId": "06621486372133451647"}} outputId="5f9136db-977b-427a-830b-be3b89ff394f"
CC.list_streams()
# + [markdown] id="7yaPu0TmKnzG"
# ### Search streams by name
# For larger deployments, the list of all streams may be too long to easily sort through, or you may be interested in a specific type of information. In this case, the second method `search_stream` would be more applicable. This search returns streams that have a substring match of the search parameter.
# + colab={"base_uri": "https://localhost:8080/"} id="MjKtR7CPKnzG" executionInfo={"status": "ok", "timestamp": 1627422366011, "user_tz": 300, "elapsed": 34, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh79SoXWdWnHQd17nkp3P-NbcrexOPu5iMhtyuaWg=s64", "userId": "06621486372133451647"}} outputId="c7536d7b-1174-4326-e1ac-214fd55c9d7c"
results = CC.search_stream("acc")
for result in results:
print(result)
# + [markdown] id="i4Bhv7HeKnzG"
# ## Get stream data
# Once a stream is identified by name, it needs to be loaded into a `DataStream` object by calling `get_stream`. This pulls into a single object all the metadata associated with the stream as well as a reference to the data so that it can be accessed as needed.
# + id="DyKHTI6mKnzH"
wrist_accel = CC.get_stream("wesad.wrist.acc")
# + [markdown] id="PFYLVb2dKnzH"
# ## Print stream statistics
# The summary method displays some basic statistics about the datastream such as the number of points as well as max, mean, stdev, and min values. These statistics are shown for each column of data in the stream.
# + colab={"base_uri": "https://localhost:8080/"} id="qGp5tiDKKnzH" executionInfo={"status": "ok", "timestamp": 1627422378383, "user_tz": 300, "elapsed": 8353, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh79SoXWdWnHQd17nkp3P-NbcrexOPu5iMhtyuaWg=s64", "userId": "06621486372133451647"}} outputId="26f78b4f-4bcd-4f36-f485-b96e264c038e"
wrist_accel.summary()
# + [markdown] id="WUsbhhrcKnzI"
# ## Print stream data
# Any datastream can be printed or visualized to the screen; however, it is important to limit, in this case to 3, the number of rows to show. Streams can contain millions to billions of samples depending on the size of the system and even for the case of a single individual wearing a motion-capture band, this number can exceed 30,000,000 samples for a short two week study. Cerebral Cortex defaults to settings that try to not load all the data unless needed.
#
# This example prints the first 3 rows of the loaded wrist accelerometer stream; each row contains the following kinds of columns.
# - __timestamp__: This is the time in UTC that the sample was recorded at
# - __localtime__: This is the time in the local timezone that the sample was recorded at
# - __value columns__ (here the wrist acceleration axes, e.g. __x__): The sensor readings carried by this stream
# - __version__: This is the Cerebral Cortex version code assigned to this stream.
# - __user__: This is the specific UUID that identifies the user that owns this data point
# + colab={"base_uri": "https://localhost:8080/"} id="UBXzuiatKnzI" executionInfo={"status": "ok", "timestamp": 1627422379081, "user_tz": 300, "elapsed": 701, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh79SoXWdWnHQd17nkp3P-NbcrexOPu5iMhtyuaWg=s64", "userId": "06621486372133451647"}} outputId="7df40727-3c05-4de3-b852-941537843500"
wrist_accel.show(3, truncate=False)
# + [markdown] id="DJtkE7xLKnzJ"
# ## Print stream metadata
# Each stream contains
#
# - __name__: The complete string name of this stream
# - __description__: A text description of this stream
# - __data_descriptor__: A list of objects that describe the data components of the stream (e.g. battery_level)
# - ...
# - __name__: data descriptor name
# - __type__: the object type (e.g. integer, float, string, ...)
# - __optional_fields__: any number of arbitrary fields can be added when creating a stream and will appear here
# - ...
# - __annotations__: Currently unused but designed to link streams together such as a **data quality** and the corresponding **raw** stream
# - __input_streams__: Currently unused but designed to specify which streams were utilized to generate this stream
# - __modules__: Metadata about the algorithm/code module the generated this data
# - __name__: The name of the code module
# - __version__: The version of the code module
# - __attributes__: Arbitrary attributes specified by _key-value_ pairs
# - __authors__: A set of author names and emails
#
# + colab={"base_uri": "https://localhost:8080/"} id="j5g9DJz5KnzJ" executionInfo={"status": "ok", "timestamp": 1627422379082, "user_tz": 300, "elapsed": 5, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh79SoXWdWnHQd17nkp3P-NbcrexOPu5iMhtyuaWg=s64", "userId": "06621486372133451647"}} outputId="ec939ec7-bfc2-495a-cc05-f86f47871c91"
metadata = wrist_accel.get_metadata()
print(metadata)
# + [markdown] id="zU4CG1MEKnzJ"
# ## Filter Data
#
# Cerebral Cortex returns all data associated with a stream name, which is convenient for initial exploration; however, these streams can also be filtered to isolate certain criteria such as value ranges, specific columns, or users.
# + [markdown] id="ROF4QN-1KnzJ"
# ### Filter data by data column
# The first major filtering capability allows for named columns to have logical operations applied to them. The `filter` method is applicable to the data stream object and accepts three parameters.
# - column name: (e.g. battery_level)
# - operation: (e.g. >, <, ==, >=, ...)
# - criteria: (e.g. 97)
#
#
# + colab={"base_uri": "https://localhost:8080/"} id="0KdbvvI3KnzK" executionInfo={"status": "ok", "timestamp": 1627422379857, "user_tz": 300, "elapsed": 777, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh79SoXWdWnHQd17nkp3P-NbcrexOPu5iMhtyuaWg=s64", "userId": "06621486372133451647"}} outputId="ca79eb18-33b2-4a16-97b0-ea30ee3d16d5"
filtered_data = wrist_accel.filter("x>62")
filtered_data.show(3,truncate=False)
# + [markdown] id="cmPSlEo-KnzK"
# ### Filter data by user
# User filtering is a special case due to the way Cerebral Cortex stores data and a dedicated method, `filter_user`, is provided which accepts a single `USER_ID` as input. This example illustrates filtering by the prior user id.
# + colab={"base_uri": "https://localhost:8080/"} id="1x3p8B6kKnzK" executionInfo={"status": "ok", "timestamp": 1627422380251, "user_tz": 300, "elapsed": 396, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh79SoXWdWnHQd17nkp3P-NbcrexOPu5iMhtyuaWg=s64", "userId": "06621486372133451647"}} outputId="39c65185-1e0c-459e-b66a-e1366a90df42"
filtered_user_data = wrist_accel.filter_user("s2")
filtered_user_data.show(3,truncate=False)
# + [markdown] id="DgSFhNAbKnzK"
# ### Filter data by version
# Version filtering is a special case due to the way Cerebral Cortex stores data. A dedicated method, `filter_version`, is provided which accepts a single version as input.
# + colab={"base_uri": "https://localhost:8080/"} id="fLJmC3dpKnzL" executionInfo={"status": "ok", "timestamp": 1627422380677, "user_tz": 300, "elapsed": 429, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh79SoXWdWnHQd17nkp3P-NbcrexOPu5iMhtyuaWg=s64", "userId": "06621486372133451647"}} outputId="86e58859-b205-4a6e-8372-f0d48a1dad9e"
filtered_version_data = wrist_accel.filter_version(1)
filtered_version_data.show(3,truncate=False)
# + [markdown] id="Sxvw8_kNKnzL"
# ## Convert datastream object into Pandas dataframe
# The data representations and visualizations shown so far provide basic data inspection; however, they are not directly suitable for more complex interaction or analysis. Cerebral Cortex provides a `toPandas` method to transform the datastream data into a [Pandas](https://pandas.pydata.org/) dataframe object. From this point, anything that Pandas can do is supported.
#
# + colab={"base_uri": "https://localhost:8080/", "height": 422} id="pI6DL4GzKnzL" executionInfo={"status": "ok", "timestamp": 1627422393935, "user_tz": 300, "elapsed": 13260, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh79SoXWdWnHQd17nkp3P-NbcrexOPu5iMhtyuaWg=s64", "userId": "06621486372133451647"}} outputId="503fa666-d088-499e-9538-85c41575c006"
pdf = wrist_accel.toPandas()
pdf
# + [markdown] id="L9-1vmu9KnzL"
# ## Perform windowing operation on data
# Many times it is preferable to group the data into windows before applying an algorithm or computation to the data. The basic windowing function groups data into non-overlapping chunks and returns a data stream with each cell containing all the data associated with that particular window.
# + id="BDX6ZwQsKnzM"
windowed_data = wrist_accel.window(windowDuration=60)
# + [markdown] id="nIXkoQg0KnzM"
# ### Sliding windows
# Another common windowing technique can be accomplished by adding a `slideDuration` parameter, which causes the windows to advance by a partial window size instead of the whole window.
# + id="VhYDBuPqKnzM"
windowed_data = wrist_accel.window(windowDuration=60, slideDuration=5)
# + [markdown] id="pab_P0z8KnzN"
# ## Compute some basic stats of windowed data
# Cerebral Cortex provides computationally efficient helper functions for generating basic statistics over the datastream. These functions include: _average, sqrt, sum, variance, stdev, min, max_
# + colab={"base_uri": "https://localhost:8080/"} id="Ojv2enUaKnzN" executionInfo={"status": "ok", "timestamp": 1627422406391, "user_tz": 300, "elapsed": 12459, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh79SoXWdWnHQd17nkp3P-NbcrexOPu5iMhtyuaWg=s64", "userId": "06621486372133451647"}} outputId="9fe82c07-ed21-43c5-fb2b-14e04098585d"
from cerebralcortex.algorithms.stats.features import statistical_features
stats_features = statistical_features(windowed_data)
stats_features.show(4, False)
# + [markdown] id="uDNSZrJE-RxN"
# ## Basic Plot examples
# Visualization is a key part of gaining an understanding of the data and performing data analysis. Cerebral Cortex contains a set of basic plotting operations that can be used for timeseries-based DataStream objects. You may pass a `CC DataStream` object or a `Pandas DataFrame` object to plot the data.
#
# These plots are interactive; try using your mouse to explore the data.
# + id="XeFuoL8CLqQQ"
from cerebralcortex.plotting.basic.plots import plot_timeseries, plot_histogram, plot_box
# + [markdown] id="sOMv5bT1-utg"
# ### Timeseries Line Plot
# + colab={"base_uri": "https://localhost:8080/", "height": 655, "output_embedded_package_id": "1F_9jK-wkz7B0TJuOaCn_dMheMJdeXpaF"} id="R6rdRDzO-sad" executionInfo={"status": "ok", "timestamp": 1627422430756, "user_tz": 300, "elapsed": 24367, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh79SoXWdWnHQd17nkp3P-NbcrexOPu5iMhtyuaWg=s64", "userId": "06621486372133451647"}} outputId="b866b5c6-8ec7-41f3-b98b-d2ea1fc141bf"
plot_timeseries(pdf)
# + [markdown] id="6NUM5P1q-5cT"
# ### Histogram Plot
# + colab={"base_uri": "https://localhost:8080/", "height": 517, "output_embedded_package_id": "1aVCdysoX45BiVIJPVSy-eBwjIbG_oiGy"} id="iYYf8Lat--hj" executionInfo={"status": "ok", "timestamp": 1627422430757, "user_tz": 300, "elapsed": 25, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh79SoXWdWnHQd17nkp3P-NbcrexOPu5iMhtyuaWg=s64", "userId": "06621486372133451647"}} outputId="bd51a8b6-7087-4882-b30f-4d22b762a578"
plot_histogram(pdf)
# + [markdown] id="wwdauF9D_HU0"
# ### Box Plot
# + colab={"base_uri": "https://localhost:8080/", "height": 517, "output_embedded_package_id": "18HFobzwlkOjz3ZNkRjGzcze9W6yIy4m6"} id="fBNM7J-t_IiY" executionInfo={"status": "ok", "timestamp": 1627422430757, "user_tz": 300, "elapsed": 15, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh79SoXWdWnHQd17nkp3P-NbcrexOPu5iMhtyuaWg=s64", "userId": "06621486372133451647"}} outputId="0f7bebe7-0678-401a-a96b-5fece1812af0"
plot_box(pdf)
| examples/datastream_operation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <b> <font size =5> Calculate Distance from Each Facility to Monitoring Locations - Facilities with 'High' Emissions </b></font>
# This IPython notebook calculates the distance from each facility in Oakland obtained from the National Emissions Inventory to the monitoring data points from the EDF dataset. Distances for facilities that were categorized as "high" emitting facilities are estimated here. Facilities of the same category located close to each other are combined into a single point based on their centroid to avoid multicollinearity issues. Grouping of facilities was done in ArcGIS.
#
# <b> Two Input Files: </b>
#
# PM_Facilities_High_Centroid.csv
# NO2_Facilities_High_Centroid.csv
#
# +
#Import basic python packages for data analysis and plotting
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import descartes
import geopandas as gpd
from shapely.geometry import Point, Polygon
from shapely.ops import nearest_points
import seaborn as sns
from mpl_toolkits.axes_grid1 import make_axes_locatable
import math
import time
from matplotlib import cm
import matplotlib.lines as mlines
# %matplotlib inline
# -
# # <b> <font size = 5> Load Air Pollution Monitoring Data </b> </font>
df = pd.read_csv('EDF_Data.csv', header = 1)
df.tail()
# ## <b> <font size = 4> Split dataset into BC and NO2 </b> </font>
BC_df = df[['Longitude', 'Latitude', 'BC Value']]
NO2_df = df[['Longitude', 'Latitude', 'NO2 Value']]
# # <b> <font size = 5> Load Facility Level Data </b> </font>
Facility_PM = pd.read_csv("Data/PM_Facilities_High_Centroid.csv")
Facility_PM.head()
Facility_NO2 = pd.read_csv("Data/NO2_Facilities_High_Centroid.csv")
Facility_NO2.head()
# # <b> <font size = 5> Calculate distance from monitoring location to each facility for PM </b> </font>
# ## <b> <font size = 4> Create a new dataframe to store the distance values </b> </font>
#Make a copy of the Facility_PM dataframe
Facility_PM_All = Facility_PM.copy()
Facility_PM.head()
### Create a column 'eis-source' by combining the source type and EIS ID for each facility
Facility_PM['eis-source'] = Facility_PM['source_typ'] + '-' + Facility_PM['EIS'].apply(str)
### Create a column 'eis-source-ems' by combining the emissions type with 'eis-source'
Facility_PM['eis-source-ems'] = Facility_PM['ems_type'] + '-' + Facility_PM['eis-source']
Facility_PM.head(100)
### Add an empty column for distance
Facility_PM['dist'] = 0
Facility_PM['dist'] = Facility_PM['dist'].astype(float)  # astype returns a new Series, so assign it back
# **Next, create a new dataframe with a set of columns for each industrial facility. The set of columns for each industrial facility will include one column each for latitude, longitude and the PM2.5 emissions value. The PM10 emissions column is dropped since Black Carbon tracks better with PM2.5 emissions.**
# Create individual dataframes containing the latitude, longitude and distance
Oak_PM_lat = Facility_PM[['eis-source-ems', 'Centroid_X']]
Oak_PM_long = Facility_PM[['eis-source-ems', 'Centroid_Y']]
Oak_PM_dist = Facility_PM[['eis-source-ems', 'dist']]
# Transpose all the dataframes
Oak_PM_lat = Oak_PM_lat.T
Oak_PM_long = Oak_PM_long.T
Oak_PM_dist = Oak_PM_dist.T
## Make the header as the first row in each transposed dataframe. The header will be the 'eis-source-ems' row
Oak_PM_lat = Oak_PM_lat.rename(columns=Oak_PM_lat.iloc[0]).drop(Oak_PM_lat.index[0])
Oak_PM_long = Oak_PM_long.rename(columns=Oak_PM_long.iloc[0]).drop(Oak_PM_long.index[0])
Oak_PM_dist = Oak_PM_dist.rename(columns=Oak_PM_dist.iloc[0]).drop(Oak_PM_dist.index[0])
## Add suffix to column header based on the dataframe type
Oak_PM_lat.columns = [str(col) + '_latitude' for col in Oak_PM_lat.columns]
Oak_PM_long.columns = [str(col) + '_longitude' for col in Oak_PM_long.columns]
Oak_PM_dist.columns = [str(col) + '_dist' for col in Oak_PM_dist.columns]
## Remove index for each dataframe
Oak_PM_lat.reset_index(drop=True, inplace=True)
Oak_PM_long.reset_index(drop=True, inplace=True)
Oak_PM_dist.reset_index(drop=True, inplace=True)
### Combine individual dataframes into one
Oak_PM_combined = Oak_PM_lat.join(Oak_PM_long).join(Oak_PM_dist)
### Sort based on column names
Oak_PM_combined = Oak_PM_combined.reindex(columns=sorted(Oak_PM_combined.columns))
Oak_PM_combined
# Repeat the facility columns once per monitoring observation (21,488 rows) so they can be joined with the BC measurements
Oak_PM_combined = Oak_PM_combined.loc[Oak_PM_combined.index.repeat(21488)].reset_index(drop=True)
combined_BC_Facility = BC_df.join(Oak_PM_combined)
combined_BC_Facility.head()
# +
# Convert distance or emissions distance column to float type
for idx, col in enumerate(combined_BC_Facility.columns):
if "_dist" in col:
combined_BC_Facility[col] = pd.to_numeric(combined_BC_Facility[col], downcast="float")
# -
# ## <b> <font size = 4> Calculate distance between point of measurement and each facility and add it to the '_dist' column </b> </font>
### Defining a function to calculate the distance between two GPS coordinates (latitude and longitude)
def distance(origin, destination):
lat1, lon1 = origin
lat2, lon2 = destination
radius = 6371 # km
dlat = math.radians(lat2-lat1)
dlon = math.radians(lon2-lon1)
a = math.sin(dlat/2) * math.sin(dlat/2) + math.cos(math.radians(lat1)) \
* math.cos(math.radians(lat2)) * math.sin(dlon/2) * math.sin(dlon/2)
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))
d = radius * c
return d
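# A quick sanity check (hedged example, not in the original notebook): the function
# returns the great-circle (haversine) distance in kilometres; the loop below then
# multiplies by 0.621 to convert to miles. Approximate coordinates for two points in
# Oakland, a few kilometres apart:
print(distance((37.804, -122.271), (37.754, -122.194)))  # roughly 9 km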
# +
time1 = time.time()
for index, row in combined_BC_Facility.iterrows():
for idx, col in enumerate(combined_BC_Facility.columns):
if "_dist" in col:
            combined_BC_Facility.at[index,col] = float(distance((row.iloc[1], row.iloc[0]), (row.iloc[idx+1], row.iloc[idx+2])))*0.621  # 0.621 converts km to miles
#BC_Facility.at[index,col] = float(row.iloc[idx])
time2 = time.time()
print(time2 - time1)
# -
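# A hedged, optional alternative (assumption, not part of the original notebook): the
# row-by-row loop above can be replaced with a vectorized haversine that computes the
# distances for an entire column of monitoring points in a single NumPy call.
def distance_vectorized(lat1, lon1, lat2, lon2, radius=6371):
    # Vectorized haversine: accepts scalars or array-likes, returns distances in km
    lat1, lon1, lat2, lon2 = map(np.radians, (lat1, lon1, lat2, lon2))
    a = np.sin((lat2 - lat1) / 2) ** 2 + np.cos(lat1) * np.cos(lat2) * np.sin((lon2 - lon1) / 2) ** 2
    return radius * 2 * np.arctan2(np.sqrt(a), np.sqrt(1 - a))
# Hypothetical usage (facility_lat / facility_lon stand in for one facility's coordinates):
# combined_BC_Facility[col] = distance_vectorized(combined_BC_Facility['Latitude'], combined_BC_Facility['Longitude'], facility_lat, facility_lon) * 0.621  # km -> miles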
# ## <b> <font size = 4> Write to a CSV file </b> </font>
# ##### Uncomment to write the combined dataframe to a CSV file
# combined_BC_Facility.to_csv("Data/BC_PM_Facilities_High_Dist.csv")
#
#
# # <b> <font size = 5> Calculate distance from monitoring location to each facility for NO2 </b> </font>
# ## <b> <font size = 4> Create a new dataframe to store the distance values </b> </font>
#Make a copy of the Facility_NO2 dataframe
Facility_NO2_All = Facility_NO2.copy()
Facility_NO2.head()
### Create a column 'eis-source' by combining the source type and EIS ID for each facility
Facility_NO2['eis-source'] = Facility_NO2['source-typ'] + '-' + Facility_NO2['eis facili'].apply(str)
### Create a column 'eis-source-ems' by combining the emissions type with 'eis-source'
Facility_NO2['eis-source-ems'] = Facility_NO2['ems-type'] + '-' + Facility_NO2['eis-source']
### Add an empty column for distance
Facility_NO2['dist'] = 0
Facility_NO2['dist'] = Facility_NO2['dist'].astype(float)  # astype returns a new Series, so assign it back
Facility_NO2.head(100)
# **Next, create a new dataframe with a set of columns for each industrial facility. The set of columns for each industrial facility will include one column each for latitude, longitude and the NO2 emissions value.**
# Create individual dataframes
Oak_NO2_lat = Facility_NO2[['eis-source-ems', 'Centroid_X']]
Oak_NO2_long = Facility_NO2[['eis-source-ems', 'Centroid_Y']]
Oak_NO2_dist = Facility_NO2[['eis-source-ems', 'dist']]
# Transpose all the dataframes
Oak_NO2_lat = Oak_NO2_lat.T
Oak_NO2_long = Oak_NO2_long.T
Oak_NO2_dist = Oak_NO2_dist.T
## Make the header as the first row in each transposed dataframe
Oak_NO2_lat = Oak_NO2_lat.rename(columns=Oak_NO2_lat.iloc[0]).drop(Oak_NO2_lat.index[0])
Oak_NO2_long = Oak_NO2_long.rename(columns=Oak_NO2_long.iloc[0]).drop(Oak_NO2_long.index[0])
Oak_NO2_dist = Oak_NO2_dist.rename(columns=Oak_NO2_dist.iloc[0]).drop(Oak_NO2_dist.index[0])
## Add suffix to column header based on the dataframe type
Oak_NO2_lat.columns = [str(col) + '_latitude' for col in Oak_NO2_lat.columns]
Oak_NO2_long.columns = [str(col) + '_longitude' for col in Oak_NO2_long.columns]
Oak_NO2_dist.columns = [str(col) + '_dist' for col in Oak_NO2_dist.columns]
## Remove index for each dataframe
Oak_NO2_lat.reset_index(drop=True, inplace=True)
Oak_NO2_long.reset_index(drop=True, inplace=True)
Oak_NO2_dist.reset_index(drop=True, inplace=True)
### Combine individual dataframes into one
Oak_NO2_combined = Oak_NO2_lat.join(Oak_NO2_long).join(Oak_NO2_dist)
### Sort based on column names
Oak_NO2_combined = Oak_NO2_combined.reindex(columns=sorted(Oak_NO2_combined.columns))
Oak_NO2_combined
# Repeat the facility columns once per monitoring observation (21,488 rows) so they can be joined with the NO2 measurements
Oak_NO2_combined = Oak_NO2_combined.loc[Oak_NO2_combined.index.repeat(21488)].reset_index(drop=True)
combined_NO2_Facility = NO2_df.join(Oak_NO2_combined)
combined_NO2_Facility.head()
# +
# Convert distance or emissions distance column to float type
for idx, col in enumerate(combined_NO2_Facility.columns):
if "_dist" in col:
combined_NO2_Facility[col] = pd.to_numeric(combined_NO2_Facility[col], downcast="float")
# -
# ## <b> <font size = 4> Calculate distance between point of measurement and each facility and add it to the '_dist' column </b> </font>
# +
time1 = time.time()
for index, row in combined_NO2_Facility.iterrows():
for idx, col in enumerate(combined_NO2_Facility.columns):
if "_dist" in col:
            combined_NO2_Facility.at[index,col] = float(distance((row.iloc[1], row.iloc[0]), (row.iloc[idx+1], row.iloc[idx+2])))*0.621  # 0.621 converts km to miles
#BC_Facility.at[index,col] = float(row.iloc[idx])
time2 = time.time()
print(time2 - time1)
# -
# ## <b> <font size = 4> Write to a CSV file </b> </font>
# ##### Uncomment to write the combined dataframe to a CSV file
# combined_NO2_Facility.to_csv("Data/NO2_NO2_Facilities_High_Dist.csv")
#
#
| Notebooks/Calculate-Distance-To-High-Facilities.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:ud_py_nd] *
# language: python
# name: conda-env-ud_py_nd-py
# ---
# +
# #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# */Aaipnd-project-master/train_commonfns.py
#
# PROGRAMMER: <NAME>
# DATE CREATED: 01/01/2020
# REVISED DATE:
# PURPOSE: common support needed for train program
#
# AND
# Common functions. The functions are described later in this file
##
# Imports python modules
import argparse
# Define get_train_input_args function to return with parser.parse_args() parsed argument
# collection that you created with this function
#
#
def get_train_input_args():
"""
    Retrieves and parses the command line arguments provided by the user when
    they run the program from a terminal window. This function uses Python's
    argparse module to create and define these command line arguments. If
    the user fails to provide some or all of the arguments, then the default
    values are used for the missing arguments.
Command Line Arguments:
      1. Training image folder as --data_dir with default value 'flowers'
      2. CNN model architecture as --arch with default value 'vgg16'
      3. Checkpoint save directory as --save_dir with an empty default value, which means the current folder
      4. Learning rate as --learning_rate with default value 0.001
      5. Number of epochs as --epoc with default value 1
      6. Whether to use the GPU as --gpu with default value 'cpu'
      7. Hidden units as --hidden_units; if not provided, [1000, 500] are used
This function returns these arguments as an ArgumentParser object.
Parameters:
None - simply using argparse module to create & store command line arguments
Returns:
parse_args() -data structure that stores the command line arguments object
Train a new network on a data set with train.py
Basic usage: python train.py data_directory
Prints out training loss, validation loss, and validation accuracy as the network trains
Options:
Set directory to save checkpoints: python train.py data_dir --save_dir save_directory
Choose architecture: python train.py data_dir --arch "vgg13"
Set hyperparameters: python train.py data_dir --learning_rate 0.01 --hidden_units 512 --epochs 20
Use GPU for training: python train.py data_dir --gpu
Example
train.py --data_dir flowers --arch vgg16 --learning_rate 0.001 --gpu cuda
"""
# Create Parse using ArgumentParser
chImagesParser = argparse.ArgumentParser()
chImagesParser.add_argument('--data_dir', type = str, default = 'flowers', help = 'Path to the folder of flower images')
chImagesParser.add_argument('--arch', type = str, default = 'vgg16', help = 'CNN Model Architecture')
chImagesParser.add_argument('--save_dir', type = str, default = '', help = 'The Checkpoint file folder to save the model')
chImagesParser.add_argument('--learning_rate', type = float, default = 0.001, help = 'The learning rate to be used for training the model')
    chImagesParser.add_argument('--epoc', type = int, default = 1, help = 'The number of epochs to use for training')
    chImagesParser.add_argument('--gpu', type = str, default = 'cpu', help = 'Whether to use CUDA. If not provided, the CPU is used. Even if a GPU is requested, the CPU is used when no GPU is available')
return chImagesParser.parse_args()
def check_command_line_arguments(in_arg):
"""
    Check whether valid command line arguments were provided and print them.
    See the documentation of the function "get_train_input_args" for details of the expected command line arguments.
"""
if in_arg is None:
        print("* Doesn't check the command line arguments because 'get_train_input_args' didn't return any arguments.")
return False
else:
        # prints command line args
        print("Command Line Arguments:\n data_dir =", in_arg.data_dir,
              "\n arch =", in_arg.arch, "\n save_dir =", in_arg.save_dir,
              "\n learning_rate =", in_arg.learning_rate,
              "\n epoc =", in_arg.epoc, "\n gpu =", in_arg.gpu)
return True
# -
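# A hypothetical notebook-side check (not part of the original module): argparse can
# parse an explicit argument list, which avoids reading Jupyter's own sys.argv.
demo_parser = argparse.ArgumentParser()
demo_parser.add_argument('--arch', type=str, default='vgg16')
demo_parser.add_argument('--learning_rate', type=float, default=0.001)
print(demo_parser.parse_args(['--arch', 'vgg13', '--learning_rate', '0.01']))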
| train_commonfns.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: ts
# language: python
# name: ts
# ---
#hide
# %load_ext autoreload
# %autoreload 2
# +
# default_exp model
# default_cls_lvl 3
# +
#export
import pandas as pd
import numpy as np
import string
import lightgbm as lgb
from sklearn.model_selection import KFold
from task_substitution.data import *
# -
# # Model
#
# > LightGBM model that helps solve the auxiliary task.
#hide
from nbdev.showdoc import *
# ### Model Class
#export
class Model:
def __init__(self, **model_kwargs):
self.params = model_kwargs
self.num_boost_round = self.params['num_boost_round']
# remove num estimators key from the model parameters
del self.params['num_boost_round']
def fit(self, X:pd.DataFrame, y:pd.Series):
ltrain = lgb.Dataset(X, y)
self.model = lgb.train(self.params, ltrain, self.num_boost_round)
return self.model
def cv(self, X:pd.DataFrame, y:pd.Series, perf_fn, **cv_params)->np.ndarray:
kf = KFold(**cv_params)
fold_perfs = []
for index, (itr, ite) in enumerate(kf.split(X)):
print(f'Fold: {index}')
Xtr, ytr = X.iloc[itr], y.iloc[itr]
Xval, yval = X.iloc[ite], y.iloc[ite]
ltrain = lgb.Dataset(Xtr, ytr)
model = lgb.train(self.params, ltrain, self.num_boost_round)
preds = model.predict(Xval)
fold_perf = perf_fn(yval, preds)
print(f'Performance: {fold_perf}')
fold_perfs.append(fold_perf)
print(f'Mean performance: {np.mean(fold_perfs)}, Std performance: {np.std(fold_perfs)}')
return np.array(fold_perfs)
def predict(self, Xtest)->np.ndarray:
preds = self.model.predict(Xtest)
return np.array(preds)
# ### Tests
SIZE = 10000
NUM_NANS = 500
example_df = pd.DataFrame({'c1': np.random.rand(SIZE, ),
'c2': [string.ascii_lowercase[np.random.randint(low=0, high=26)] for i in range(SIZE)],
'c3': np.random.permutation([np.nan] * NUM_NANS + list(np.random.rand(SIZE - NUM_NANS, )))
})
example_df.head()
# +
data = Dataset(example_df, target_fld='c3', cat_flds=['c2'], ignore_flds=None)
proc_example_df = data.preprocess()
train, test = Dataset.split_train_test_by_null(proc_example_df, target_fld='c3')
# +
from sklearn.metrics import mean_squared_error
params = {'num_boost_round': 100,
'objective': 'regression',
'num_leaves': 31,
'verbosity': -1,
'seed': 41
}
model = Model(**params)
y_train = train['c3']
X_train = train.drop('c3', axis=1)
X_valid = test.drop('c3', axis=1)
cv_params = {'n_splits': 5,
'shuffle': True,
'random_state': True
}
perf_fn = lambda tr, pe: np.sqrt(mean_squared_error(tr, pe))
model.cv(X_train, y_train, perf_fn, **cv_params)
# -
SIZE = 10000
NUM_NANS = 500
example_df = pd.DataFrame({'c1': np.random.rand(SIZE, ),
'c2': [string.ascii_lowercase[np.random.randint(low=0, high=26)] for i in range(SIZE)],
'c3': [string.ascii_lowercase[np.random.randint(low=0, high=26)] for i in range(SIZE)],
'c4': np.random.permutation([np.nan] * NUM_NANS + list(np.random.rand(SIZE - NUM_NANS, )))
})
example_df.head()
# +
data = Dataset(example_df, target_fld='c4', cat_flds=['c2', 'c3'], ignore_flds=None)
proc_example_df = data.preprocess()
train, test = Dataset.split_train_test_by_null(proc_example_df, target_fld='c4')
# +
params = {'num_boost_round': 100,
'objective': 'regression',
'num_leaves': 31,
'verbosity': -1,
'seed': 41
}
model = Model(**params)
y_train = train['c4']
X_train = train.drop('c4', axis=1)
X_valid = test.drop('c4', axis=1)
cv_params = {'n_splits': 5,
'shuffle': True,
'random_state': True
}
perf_fn = lambda tr, pe: np.sqrt(mean_squared_error(tr, pe))
model.cv(X_train, y_train, perf_fn, **cv_params)
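# A minimal usage sketch (assumption, not part of the original tests): fit the model on
# the rows where 'c4' is known and predict the missing values for the null rows.
model.fit(X_train, y_train)
c4_imputed = model.predict(X_valid)
c4_imputed[:5]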
| 02_model.ipynb |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown]
# # Hidden State Activation : Ungraded Lecture Notebook
#
# In this notebook you'll take another look at the hidden state activation function. It can be written in two different ways.
#
# I'll show you, step by step, how to implement each of them and then how to verify whether the results produced by each of them are the same or not.
#
# ## Background
#
# 
#
#
# This is the hidden state activation function for a vanilla RNN.
#
# $h^{<t>}=g(W_{h}[h^{<t-1>},x^{<t>}] + b_h)$
#
# Which is another way of writing this:
#
# $h^{<t>}=g(W_{hh}h^{<t-1>} \oplus W_{hx}x^{<t>} + b_h)$
#
# Where
#
# - $W_{h}$ in the first formula denotes the *horizontal* concatenation of $W_{hh}$ and $W_{hx}$ from the second formula.
#
# - $W_{h}$ in the first formula is then multiplied by $[h^{<t-1>},x^{<t>}]$, another concatenation of parameters from the second formula but this time in a different direction, i.e *vertical*!
#
# Let us see what this means computationally.
#
# ## Imports
# %%
import numpy as np
# %% [markdown]
# ## Joining (Concatenation)
#
# ### Weights
#
# A join along the vertical boundary is called a *horizontal concatenation* or *horizontal stack*.
#
# Visually, it looks like this:- $W_h = \left [ W_{hh} \ | \ W_{hx} \right ]$
#
# I'll show you two different ways to achieve this using numpy.
#
# __Note: The values used to populate the arrays below have been chosen to aid in visual illustration only. They are NOT what you'd expect to use when building a model, which would typically use random initialization instead.__
#
# * Try using random initializations for the weight arrays.
# %% tags=[]
# Create some dummy data
w_hh = np.full((3, 2), 1) # illustration purposes only, returns an array of size 3x2 filled with all 1s
w_hx = np.full((3, 3), 9) # illustration purposes only, returns an array of size 3x3 filled with all 9s
### START CODE HERE ###
# Try using some random initializations, though it will obfuscate the join. eg: uncomment these lines
w_hh = np.random.standard_normal((3,2))
w_hx = np.random.standard_normal((3,3))
### END CODE HERE ###
print("-- Data --\n")
print("w_hh :")
print(w_hh)
print("w_hh shape :", w_hh.shape, "\n")
print("w_hx :")
print(w_hx)
print("w_hx shape :", w_hx.shape, "\n")
# Joining the arrays
print("-- Joining --\n")
# Option 1: concatenate - horizontal
w_h1 = np.concatenate((w_hh, w_hx), axis=1)
print("option 1 : concatenate\n")
print("w_h :")
print(w_h1)
print("w_h shape :", w_h1.shape, "\n")
# Option 2: hstack
w_h2 = np.hstack((w_hh, w_hx))
print("option 2 : hstack\n")
print("w_h :")
print(w_h2)
print("w_h shape :", w_h2.shape)
# %% [markdown]
# ### Hidden State & Inputs
# Joining along a horizontal boundary is called a vertical concatenation or vertical stack. Visually it looks like this:
#
# $[h^{<t-1>},x^{<t>}] = \left[ \frac{h^{<t-1>}}{x^{<t>}} \right]$
#
#
# I'll show you two different ways to achieve this using numpy.
#
# *Try using random initializations for the hidden state and input matrices.*
#
# %% tags=[]
# Create some more dummy data
h_t_prev = np.full((2, 1), 1) # illustration purposes only, returns an array of size 2x1 filled with all 1s
x_t = np.full((3, 1), 9) # illustration purposes only, returns an array of size 3x1 filled with all 9s
# Try using some random initializations, though it will obfuscate the join. eg: uncomment these lines
### START CODE HERE ###
h_t_prev = np.random.standard_normal((2,1))
x_t = np.random.standard_normal((3,1))
### END CODE HERE ###
print("-- Data --\n")
print("h_t_prev :")
print(h_t_prev)
print("h_t_prev shape :", h_t_prev.shape, "\n")
print("x_t :")
print(x_t)
print("x_t shape :", x_t.shape, "\n")
# Joining the arrays
print("-- Joining --\n")
# Option 1: concatenate - vertical
ax_1 = np.concatenate(
(h_t_prev, x_t), axis=0
) # note the difference in axis parameter vs earlier
print("option 1 : concatenate\n")
print("ax_1 :")
print(ax_1)
print("ax_1 shape :", ax_1.shape, "\n")
# Option 2: vstack
ax_2 = np.vstack((h_t_prev, x_t))
print("option 2 : vstack\n")
print("ax_2 :")
print(ax_2)
print("ax_2 shape :", ax_2.shape)
# %% [markdown]
# ## Verify Formulas
# Now that you know how to do the concatenations, horizontal and vertical, let's verify whether the two formulas produce the same result.
#
# __Formula 1:__ $h^{<t>}=g(W_{h}[h^{<t-1>},x^{<t>}] + b_h)$
#
# __Formula 2:__ $h^{<t>}=g(W_{hh}h^{<t-1>} \oplus W_{hx}x^{<t>} + b_h)$
#
#
# To prove:- __Formula 1__ $\Leftrightarrow$ __Formula 2__
#
# We will ignore the bias term $b_h$ and the activation function $g(\ )$ because the transformation will be identical for each formula. So what we really want to compare is the result of the following parameters inside each formula:
#
# $W_{h}[h^{<t-1>},x^{<t>}] \quad \Leftrightarrow \quad W_{hh}h^{<t-1>} \oplus W_{hx}x^{<t>} $
#
# We'll see how to do this using matrix multiplication combined with the data and techniques (stacking/concatenating) from above.
#
# * Try adding a sigmoid activation function and bias term to the checks for completeness.
#
# %% tags=[]
# Data
w_hh = np.full((3, 2), 1) # returns an array of size 3x2 filled with all 1s
w_hx = np.full((3, 3), 9) # returns an array of size 3x3 filled with all 9s
h_t_prev = np.full((2, 1), 1) # returns an array of size 2x1 filled with all 1s
x_t = np.full((3, 1), 9) # returns an array of size 3x1 filled with all 9s
# If you want to randomize the values, uncomment the next 4 lines
w_hh = np.random.standard_normal((3,2))
w_hx = np.random.standard_normal((3,3))
h_t_prev = np.random.standard_normal((2,1))
x_t = np.random.standard_normal((3,1))
# Results
print("-- Results --")
# Formula 1
stack_1 = np.hstack((w_hh, w_hx))
stack_2 = np.vstack((h_t_prev, x_t))
print("\nFormula 1")
print("Term1:\n",stack_1)
print("Term2:\n",stack_2)
formula_1 = np.matmul(np.hstack((w_hh, w_hx)), np.vstack((h_t_prev, x_t)))
print("Output:")
print(formula_1)
# Formula 2
mul_1 = np.matmul(w_hh, h_t_prev)
mul_2 = np.matmul(w_hx, x_t)
print("\nFormula 2")
print("Term1:\n",mul_1)
print("Term2:\n",mul_2)
formula_2 = np.matmul(w_hh, h_t_prev) + np.matmul(w_hx, x_t)
print("\nOutput:")
print(formula_2, "\n")
# Verification
# np.allclose - checks whether two arrays are elementwise equal up to a certain tolerance, see:
# https://numpy.org/doc/stable/reference/generated/numpy.allclose.html
print("-- Verify --")
print("Results are the same :", np.allclose(formula_1, formula_2))
### START CODE HERE ###
# Try adding a sigmoid activation function and bias term as a final check
# Activation
def sigmoid(x):
return 1 / (1 + np.exp(-x))
# Bias and check
b = np.random.standard_normal((formula_1.shape[0],1))
print("Formula 1 Output:\n",sigmoid(formula_1+b))
print("Formula 2 Output:\n",sigmoid(formula_2+b))
all_close = np.allclose(sigmoid(formula_1+b), sigmoid(formula_2+b))
print("Results after activation are the same :",all_close)
### END CODE HERE ###
# %% [markdown]
# ## Summary
# That's it! We've verified that the two formulas produce the same results, and seen how to combine matrices vertically and horizontally to make that happen. We now have all the intuition needed to understand the math notation of RNNs.
| 3. Natural Language Processing with Sequence Models/Week 2 Recurrent Neural Networks for Language Modeling/Lab_1_Hidden State Activation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.7 64-bit (conda)
# name: python3
# ---
# + [markdown] colab_type="text" id="view-in-github"
# <a href="https://colab.research.google.com/github/mrdbourke/pytorch-deep-learning/blob/v0/00_pytorch_fundamentals.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="jSNK7duj5SeU"
# # 00. PyTorch Fundamentals
#
# # TK - intro to PyTorch
#
# ## TK - What is PyTorch?
#
# ## TK - What can PyTorch be used for?
#
# ## TK - Who uses PyTorch?
#
# ## TK - Why use PyTorch?
#
# Researchers love using PyTorch -- https://paperswithcode.com/trends
#
# ## TK - What we're going to cover in this module...
#
# * Introduction to Tensors
# * Creating tensors
# * Getting information from tensors
# * Manipulating tensors
# * Tensor operations (neural networks involve manipulating tensors...)
# * Tensors and NumPy
# * Running tensors on GPU
#
#
#
#
# + colab={"base_uri": "https://localhost:8080/"} id="1VxEOik46Y4i" outputId="8b131929-d8f9-4d1e-ea1b-f2cb0d5447a4"
import torch
print(torch.__version__)
# + [markdown] id="i-33BKR16iWc"
# ## TK - Introduction to tensors
#
# + [markdown] id="gFF0N2TU7S7Q"
# ### TK - Creating tensors
#
# All you need to know - https://pytorch.org/docs/stable/generated/torch.tensor.html#torch.tensor
#
# Many different ops for PyTorch tensors - https://pytorch.org/docs/stable/torch.html
# + colab={"base_uri": "https://localhost:8080/"} id="YUDgG2zk7Us5" outputId="b53cc251-6458-47a2-ae77-3006c045a2da"
# Scalar
scalar = torch.tensor(7)
scalar
# + colab={"base_uri": "https://localhost:8080/"} id="lV98Yz868bav" outputId="a571d544-d615-48ef-db49-80021dbe8efb"
scalar.ndim
# + colab={"base_uri": "https://localhost:8080/"} id="-k4cyKumPfbE" outputId="0756a55c-9b9b-4f52-a832-9abbafe689e0"
# Get the Python number within a tensor (only works with one-element tensors)
scalar.item()
# + colab={"base_uri": "https://localhost:8080/"} id="-IZF6ASs8QH9" outputId="43f9e4b0-e836-4b13-9ebd-94c689a12e86"
# Vector
vector = torch.tensor([7, 7])
vector
# + colab={"base_uri": "https://localhost:8080/"} id="03hm3VVv8kr4" outputId="c0b58040-8c82-49b3-a46f-02a80859f4dc"
vector.ndim
# + colab={"base_uri": "https://localhost:8080/"} id="D5iNwCYL8QO9" outputId="24e6d331-943c-42dc-e258-6a4780ee97f9"
# Matrix
matrix = torch.tensor([[7, 8],
[9, 10]])
matrix
# + colab={"base_uri": "https://localhost:8080/"} id="8LREUbeb8r8j" outputId="575a9f1c-d552-469f-9678-71c17f35794e"
matrix.ndim
# + colab={"base_uri": "https://localhost:8080/"} id="wEMDQr188QWW" outputId="0394c6bf-4dde-41e0-80ed-3b0d4ca7c85d"
# Tensor
tensor = torch.tensor([[[3, 6, 9],
[3, 6, 9],
[3, 6, 9]]])
tensor
# + colab={"base_uri": "https://localhost:8080/"} id="8dhuEsjS8QcT" outputId="2a08eb90-3625-4b03-b004-bc8f3556669b"
tensor.ndim
# + colab={"base_uri": "https://localhost:8080/"} id="xRnzgN2F9K0B" outputId="40a5889e-a82e-4037-d7fb-b7ceb0b1710e"
# Note: torch.tensor() always makes a copy
tensor_A = torch.tensor([1, 2, 3])
tensor_B = torch.tensor(tensor_A) # Creating a tensor like this produces a warning
tensor_A, tensor_B
# + colab={"base_uri": "https://localhost:8080/"} id="bK4B-xpT9op8" outputId="8d43c184-de2d-497a-8b93-8192053b31fd"
# Change tensor_B, doesn't change tensor_A
tensor_B[0] = 4
tensor_A, tensor_B
# + colab={"base_uri": "https://localhost:8080/"} id="GCbBkdKa96aO" outputId="5ba5a1eb-5a4e-438e-8271-6ca886738901"
# Change tensor_A, doesn't change tensor_B
tensor_A[1] = 4
tensor_A, tensor_B
# + colab={"base_uri": "https://localhost:8080/"} id="EOJEtDx--GnK" outputId="a717476d-4199-4ba9-859c-0fd788c456c5"
# Random
random_tensor = torch.rand(size=(3, 4))
random_tensor, random_tensor.dtype
# + colab={"base_uri": "https://localhost:8080/"} id="oCzhd0hl9Vp6" outputId="ffcbcc86-ef8d-4284-d6ca-c1c6df7c5bff"
# Zeros
zeros = torch.zeros(size=(3, 4))
zeros, zeros.dtype
# + colab={"base_uri": "https://localhost:8080/"} id="1IqUs81d9W4W" outputId="20c17956-9d58-4757-c786-cb089d2a9551"
# Use torch.arange() - torch.range() is deprecated
zero_to_ten = torch.arange(0, 10)
zero_to_ten
# + colab={"base_uri": "https://localhost:8080/"} id="ZvXwUut5BhHq" outputId="516baed2-790a-440a-92c0-c9201d5675e2"
# Can also create a tensor of zeros similar to another tensor
ten_zeros = torch.zeros_like(zero_to_ten) # will have same shape
ten_zeros
# + colab={"base_uri": "https://localhost:8080/"} id="q3MoGnpw9XaF" outputId="82963d1f-92ee-484b-cc1c-e4b7a4dda135"
# Default datatype for tensors is float32
float_32_tensor = torch.tensor([3.0, 6.0, 9.0],
dtype=None, # defaults to None, which is torch.float32 or whatever datatype is passed
device=None, # defaults to None, which uses the default tensor type
                               requires_grad=False) # if True, operations performed on the tensor are recorded
float_32_tensor.shape, float_32_tensor.dtype, float_32_tensor.device
# + colab={"base_uri": "https://localhost:8080/"} id="PKSuajld_09s" outputId="00a90334-bd19-4759-eea9-6f92ec278e92"
float_16_tensor = torch.tensor([3.0, 6.0, 9.0],
dtype=torch.float16)
float_16_tensor.dtype
# + [markdown] id="gUjkB2AX7Upz"
# ## TK - Getting information from tensors
# + colab={"base_uri": "https://localhost:8080/"} id="hd_X4D0j7Umq" outputId="368cde26-906b-4682-a401-e59d06e6952a"
some_tensor = torch.rand(3, 4)
print(some_tensor)
print(f"Shape of tensor: {some_tensor.shape}")
print(f"Datatype of tensor: {some_tensor.dtype}")
print(f"Device tensor is stored on: {some_tensor.device}") # will default to CPU
# + [markdown] id="BdiWvoAi7UjL"
# ## TK - Manipulating tensors (tensor operations)
# + [markdown] id="Sk_6Dd7L7Uce"
# ### Basic operations
# + colab={"base_uri": "https://localhost:8080/"} id="X71WpQoPD7a4" outputId="f7eab2bf-ba9b-4612-9cfa-e8d80093cc20"
tensor = torch.tensor([1, 2, 3])
tensor + 10
# + colab={"base_uri": "https://localhost:8080/"} id="Sp4TlTWWEFeO" outputId="db58e2e0-fe7e-4670-f61a-a6ff00f33250"
tensor * 10
# + colab={"base_uri": "https://localhost:8080/"} id="XuB1UjCIEJIA" outputId="063b6c17-8d2f-4627-8e7d-932af58b0cd5"
# Tensors don't change unless reassigned
tensor
# + colab={"base_uri": "https://localhost:8080/"} id="U4iWKoLsENry" outputId="c8751a66-fa34-40bd-db94-2a3c81ebc99a"
tensor = tensor - 10
tensor
# + colab={"base_uri": "https://localhost:8080/"} id="tFgZY-PaFNXa" outputId="160484d4-768b-4a30-f3a9-c5ae4ded8735"
tensor = tensor + 10
tensor
# + colab={"base_uri": "https://localhost:8080/"} id="uVysdk3kFWbY" outputId="0c5f56c5-869b-4a05-819c-4467772ee62f"
# Can also use torch functions
torch.multiply(tensor, 10)
# + colab={"base_uri": "https://localhost:8080/"} id="IxuPJIpNFbqO" outputId="8c6df0c4-dc15-4540-8dde-1c54a867b986"
# Original tensor is still unchanged
tensor
# + colab={"base_uri": "https://localhost:8080/"} id="S5v3RkR0F2Jq" outputId="9d7a6757-29c3-43e5-8389-f21907bcd89e"
# Element-wise multiplication
tensor * tensor
# + [markdown] id="TT5fVuyu7q5z"
# ### Matrix multiplication
#
# Neural networks are mostly matrix multiplication - https://marksaroufim.substack.com/p/working-class-deep-learner
#
#
# + colab={"base_uri": "https://localhost:8080/"} id="ZE7loucmDlEM" outputId="49dd001e-f472-43c9-cead-8fdb3d380689"
import torch
tensor = torch.tensor([1, 2, 3])
tensor
# + colab={"base_uri": "https://localhost:8080/"} id="PvCBiiTTDk8y" outputId="6f4633ca-41af-48f7-a664-fd033a3633c1"
torch.matmul(tensor, tensor)
# + colab={"base_uri": "https://localhost:8080/"} id="m4E_pROBDk2r" outputId="80bdc547-e6c6-4047-d9f2-e88dc0a26f18"
tensor @ tensor
# + colab={"base_uri": "https://localhost:8080/"} id="2u4IZnFSFxKQ" outputId="e11ee2aa-f22e-4c0d-bb82-7ec83dfd1878"
sum(tensor * tensor.T)
# + colab={"base_uri": "https://localhost:8080/"} id="6qMSaLOoJscL" outputId="debdac67-169b-4217-f654-f9b0fdfc2c87"
# By hand (avoid doing operations with for loops at all cost, they are computationally expensive)
value = 0
for i in range(len(tensor)):
value += tensor[i] * tensor[i]
value
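# -
# A small, hedged timing sketch (assumption, not in the original notebook): compare the
# Python loop with a vectorized dot product on a larger tensor to see why loops are avoided.
# +
import timeit
big = torch.rand(10_000)
loop_time = timeit.timeit(lambda: sum(big[i] * big[i] for i in range(len(big))), number=3)
vec_time = timeit.timeit(lambda: torch.matmul(big, big), number=3)
print(f"Python loop: {loop_time:.4f} s | torch.matmul: {vec_time:.6f} s")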
# + colab={"base_uri": "https://localhost:8080/", "height": 196} id="rN5RcoD4Jo6y" outputId="459e3ab8-1233-43ac-d097-aa7ad10d8f58"
# Shapes need to be in the right way (this will error)
tensor_A = torch.tensor([[1, 2],
[3, 4],
[5, 6]], dtype=torch.float32)
tensor_B = torch.tensor([[7, 8],
[9, 10],
[11, 12]], dtype=torch.float32)
torch.matmul(tensor_A, tensor_B)
# + colab={"base_uri": "https://localhost:8080/"} id="35rEIu-NKtVE" outputId="b1672c80-9b01-4f97-db34-74403b8a9f40"
torch.matmul(tensor_A, tensor_B.T)
# -
# torch.mm is a shortcut for matmul
torch.mm(tensor_A, tensor_B.T)
# + colab={"base_uri": "https://localhost:8080/"} id="mC_MjKW1LX7T" outputId="78a3c4f6-ddb3-4bf9-f25f-241f6b436192"
# This uses matrix multiplication...
linear = torch.nn.Linear(in_features=3, out_features=2)
input = tensor_A
output = linear(input.T)
output, output.shape
# + colab={"base_uri": "https://localhost:8080/"} id="qATzlPYsNgMD" outputId="ea5c7d99-213e-4308-a7e0-50d086ec3a28"
tensor_A.T.shape
# + [markdown] id="pjMmrJOOPv5e"
# ### TK - Finding the min, max, mean, sum, etc (aggregation)
#
# See here for more operations - https://pytorch.org/docs/stable/tensors.html
#
#
# + colab={"base_uri": "https://localhost:8080/"} id="jrFQbe5fP1Rk" outputId="166a15a9-7f55-42f4-f51f-06f3b0f35c78"
x = torch.arange(0, 100, 10)
x
# + colab={"base_uri": "https://localhost:8080/"} id="e5wSP9YKP3Lb" outputId="68b7b5da-2fd4-49a9-9298-1e685a4b0257"
print(f"Minimum: {x.min()}")
print(f"Maximum: {x.max()}")
print(f"Mean: {x.type(torch.float32).mean()}") # won't work without float datatype
print(f"Sum: {x.sum()}")
# + colab={"base_uri": "https://localhost:8080/"} id="0Cr23Y9uP3HO" outputId="92dd559f-c8bf-4a15-c8b0-503a2303ad7c"
torch.max(x), torch.min(x), torch.mean(x.type(torch.float32)), torch.sum(x)
# + [markdown] id="i7ApCaZjDkvp"
# ### TK - Positional min/max
# + colab={"base_uri": "https://localhost:8080/"} id="FzNBl9JSGlHi" outputId="42b84dc7-15f2-4b19-8c4d-cb161343e52e"
# Returns index of max value
tensor = torch.arange(10, 100, 10)
tensor, tensor.argmax()
# + colab={"base_uri": "https://localhost:8080/"} id="QsnWtzWuOIPe" outputId="24d76bd3-1671-43b8-d6ad-768b830bdfaa"
# Returns index of min value
tensor = torch.arange(10, 100, 10)
tensor, tensor.argmin()
# + [markdown] id="QBu33WihOXBk"
# ### TK - Change tensor datatype
#
# See torch datatypes here - https://pytorch.org/docs/stable/tensors.html
# + colab={"base_uri": "https://localhost:8080/"} id="rY2FEsCAOaLu" outputId="fa6b90bf-60a4-4769-b6a9-b29c080a6df6"
tensor = torch.arange(10., 100., 10.)
tensor.dtype
# + colab={"base_uri": "https://localhost:8080/"} id="Cac8gRYjOeab" outputId="e160bb02-9961-4c34-9ef3-d0acfcd4ca4d"
tensor_float16 = tensor.type(torch.float16)
tensor_float16
# + colab={"base_uri": "https://localhost:8080/"} id="8Yqovld2Oj6s" outputId="f501e583-feed-447c-f8ee-9fe56bf6c2e5"
tensor_int32 = tensor.type(torch.int32)
tensor_int32
# + [markdown] id="7CkCtAYmGsHY"
# ### TK - Reshaping, stacking, squeezing and unsqueezing
#
# Why do any of these?
#
# Avoid for loops in your tensor code.
#
# For loops slow things down.
# + colab={"base_uri": "https://localhost:8080/"} id="EYjRTLOzG4Ev" outputId="207416f6-9862-4a20-c216-290f04c84408"
import torch
x = torch.arange(1., 8.)
x
# + colab={"base_uri": "https://localhost:8080/"} id="US4WjpQ3SG-8" outputId="44e3fb5c-2cbf-439e-e293-ddc7475a3660"
# Add an extra dimension
x_reshaped = x.reshape(1, 7)
x_reshaped, x_reshaped.shape
# -
# Change view (keeps same data as original but changes view)
# See more: https://stackoverflow.com/a/54507446/7900723
z = x.view(1, 7)
z, z.shape
# Changing z changes x
z[:, 0] = 5
z, x
# + colab={"base_uri": "https://localhost:8080/"} id="pX5Adf3ORiTK" outputId="60b3ae0d-b819-4765-9987-70a71ec4bc78"
# Stack tensors on top of each other
x_stacked = torch.stack([x, x, x, x], dim=0)
x_stacked
# + colab={"base_uri": "https://localhost:8080/"} id="w2Y2HEoDRxJZ" outputId="7fdf9ae2-250f-463c-9b93-38246e992fbf"
# Remove extra dimension from x_reshaped
x_squeezed = x_reshaped.squeeze()
x_squeezed, x_squeezed.shape
# + colab={"base_uri": "https://localhost:8080/"} id="CUC-DEEwSYv7" outputId="adc36fde-58ce-472f-af8f-2e6362f0a360"
# Add an extra dimension with unsqueeze
x_unsqueezed = x_squeezed.unsqueeze(dim=0)
x_unsqueezed, x_unsqueezed.shape
# -
# ## TK - Indexing (selecting data from tensors)
import torch
tensor = torch.arange(0, 9, 1).view(1, 3, 3)
tensor
tensor[0]
tensor[:, 0]
tensor[:, :, 1]
tensor[:, 1, 1]
tensor[:, :, 0]
# ## TK - Changing values
#
# Use `scatter()` to change values for things like one-hot encoding.
#
# https://pytorch.org/docs/stable/generated/torch.Tensor.scatter_.html#torch.Tensor.scatter_
#
# +
# TODO - add in example of scatter
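# A minimal sketch (assumption, not the author's example): use scatter_ to build a
# one-hot encoding of integer class labels.
labels = torch.tensor([0, 2, 1])               # class indices, shape (3,)
one_hot = torch.zeros(3, 3)                    # (num_samples, num_classes)
one_hot.scatter_(1, labels.unsqueeze(1), 1.0)  # write 1.0 at each row's label column
one_hot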
# + [markdown] id="h8ZaW0Bq7rCm"
# ## TK - PyTorch tensors & NumPy
# + colab={"base_uri": "https://localhost:8080/"} id="yDrDCnvY7rKS" outputId="0344f8be-632b-40d9-9d48-afdeebf1b804"
# NumPy array to tensor
import numpy as np
array = np.arange(1, 8)
tensor = torch.from_numpy(array)
array, tensor
# + colab={"base_uri": "https://localhost:8080/"} id="ovwl7VCREv8L" outputId="5c91968e-a9f9-4434-d950-a22a78d5a12e"
# Change the array, keep the tensor
array = array + 1
array, tensor
# + colab={"base_uri": "https://localhost:8080/"} id="xw_7ZyVaTKxQ" outputId="340fc9a4-2a46-48af-fd9e-1bc1d6d6c7e5"
# Tensor to NumPy array
tensor = torch.ones(7)
numpy_tensor = tensor.numpy()
tensor, numpy_tensor
# + colab={"base_uri": "https://localhost:8080/"} id="mMp6ZSkET4_Y" outputId="a9d5e467-b35e-4aca-b1d7-3fc7034ebd20"
# Change the tensor, keep the array the same
tensor = tensor + 1
tensor, numpy_tensor
# + [markdown] id="7gU3ubCrUkI-"
# ## TK - Reproducibility
#
# * Create tensors with same values...
# * Show PyTorch random seed so we can all use similar values
#
# See: https://pytorch.org/docs/stable/notes/randomness.html
# +
# No seed
import torch
random_tensor_A = torch.rand(3, 4)
random_tensor_B = torch.rand(3, 4)
random_tensor_A == random_tensor_B
# +
# With seed
import torch
# Set the random seed
torch.random.manual_seed(42)
random_tensor_C = torch.rand(3, 4)
# Have to reset the seed every time a new rand() is called
torch.random.manual_seed(42) # Without this, tensor_D would be different from tensor_C
random_tensor_D = torch.rand(3, 4)
random_tensor_C == random_tensor_D
# + [markdown] id="hxIIM7t27rQ-"
# ## TK - Running tensors on GPUs
#
# The cuda module is what you'll want:
#
# * https://pytorch.org/docs/stable/cuda.html
# * Best practices for running device-agnostic PyTorch - https://pytorch.org/docs/master/notes/cuda.html#device-agnostic-code
# + colab={"base_uri": "https://localhost:8080/"} id="OweDLgwjEvZ2" outputId="959a6350-9eee-4a02-d66c-1a870f9f1d9b"
# Check for GPU
import torch
torch.cuda.is_available()
# + id="j92HBCKB7rYa"
# Set device type
device = "cuda" if torch.cuda.is_available() else "cpu"
device
# -
# Count number of devices
torch.cuda.device_count()
# + [markdown] id="XqQLcuj68OA-"
# You can put tensors on the GPU using [`to()`](https://pytorch.org/docs/stable/generated/torch.Tensor.to.html).
#
# GPUs offer far faster numerical computing than CPUs do.
#
# > **Note:** Putting a tensor on GPU using `to(device)` (e.g. `some_tensor.to(device)`) returns a copy of that tensor, i.e. the same data will exist on both the CPU and the GPU. To overwrite tensors, reassign them:
# >
# > `some_tensor = some_tensor.to(device)`
# + id="FhI3srFXEHfP"
# Create tensor (default on CPU)
tensor = torch.tensor([1, 2, 3])
# Tensor not on GPU
print(tensor, tensor.device)
# Move tensor to GPU
tensor_on_gpu = tensor.to(device)
tensor_on_gpu
# -
# What if we wanted to move the tensor back to CPU?
#
# For example, you'll want to do this if you want to interact with your tensors with NumPy.
#
# Let's try using the `numpy()` method on our `tensor_on_gpu`.
# If tensor is on GPU, can't transform it to NumPy (this will error)
tensor_on_gpu.numpy()
# Instead, to get a tensor back to CPU and usable with NumPy we can use [`Tensor.cpu()`](https://pytorch.org/docs/stable/generated/torch.Tensor.cpu.html).
#
# This copies the tensor to CPU memory so it's usable with CPUs.
# Instead, copy the tensor back to cpu
tensor_back_on_cpu = tensor_on_gpu.cpu().numpy()
tensor_back_on_cpu
# The above returns a copy of the GPU tensor in CPU memory so the original tensor is still on GPU.
tensor_on_gpu
# + [markdown] id="_QTHdTruUUy4"
# UPTOHERE:
# * add in scatter (for changing values)
# * go back through headings and make sure cells are complete
#
| 00_pytorch_fundamentals.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # K-Nearest Neighbors Classification Demo
#
# K-nearest neighbors classification uses the labels of neighborhoods around data samples to classify unseen data samples.
# The model can take array-like objects, either in host as NumPy arrays or in device (as Numba or cuda_array_interface-compliant), as well as cuDF DataFrames as the input.
#
# For information on converting your dataset to cuDF format, refer to the cuDF documentation: https://rapidsai.github.io/projects/cudf/en/0.11.0/
#
# For additional information on cuML's Nearest Neighbors implementation: https://rapidsai.github.io/projects/cuml/en/0.11.0/api.html#cuml.dask.decomposition.TruncatedSVD
# +
import os
import numpy as np
from sklearn.datasets import make_blobs
import pandas as pd
import cudf as gd
from sklearn.neighbors import KNeighborsClassifier as skKNC
from cuml.neighbors import KNeighborsClassifier as cumlKNC
# -
# ## Define Parameters
# +
n_samples = 2**17
n_features = 40
n_query = 5000
n_neighbors = 4
# -
# ## Generate Data
#
# ### Host
# +
# %%time
X_host_train, y_host_train = make_blobs(
n_samples=n_samples, n_features=n_features, centers=5, random_state=0)
X_host_train = pd.DataFrame(X_host_train)
y_host_train = pd.DataFrame(y_host_train)
# +
# %%time
X_host_test, y_host_test = make_blobs(
n_samples=n_query, n_features=n_features, centers=5, random_state=0)
X_host_test = pd.DataFrame(X_host_test)
y_host_test = pd.DataFrame(y_host_test)
# -
# ### Device
X_device_train = gd.DataFrame.from_pandas(X_host_train)
y_device_train = gd.DataFrame.from_pandas(y_host_train)
X_device_test = gd.DataFrame.from_pandas(X_host_test)
y_device_test = gd.DataFrame.from_pandas(y_host_test)
# ## Scikit-learn Model
# +
# %%time
knn_sk = skKNC(algorithm="brute", n_neighbors=n_neighbors, n_jobs=-1)
knn_sk.fit(X_host_train, y_host_train)
sk_result = knn_sk.predict(X_host_test)
# -
# ## cuML Model
# +
# %%time
knn_cuml = cumlKNC(n_neighbors=n_neighbors)
knn_cuml.fit(X_device_train, y_device_train)
cuml_result = knn_cuml.predict(X_device_test)
# -
# ## Compare Results
passed = np.array_equal(np.asarray(cuml_result.as_gpu_matrix())[:,0], sk_result)
print('compare knn: cuml vs sklearn classes %s' % ('equal' if passed else 'NOT equal'))
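# A hedged follow-up sketch (assumption, not part of the original demo): since the test
# labels are available, the two models can also be compared on classification accuracy.
from sklearn.metrics import accuracy_score
sk_acc = accuracy_score(y_host_test.values.ravel(), sk_result)
cuml_acc = accuracy_score(y_host_test.values.ravel(), np.asarray(cuml_result.as_gpu_matrix())[:, 0])
print(f"sklearn accuracy: {sk_acc:.4f} | cuML accuracy: {cuml_acc:.4f}")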
| cuml/kneighbors_classifier_demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # RedGrease Demos
#
# Quick demonstration of how to create and run Redis Gears functions, using RedGrease.
#
# ## [Demos](#Demos):
# 1. [The Basics](#1.-The-Basics)
# 2. [Complex Query](#2.-Complex-Query)
# 3. [Transaction Stream Processing](#3.-Transaction-Stream-Processing)
# 4. [Custom Command](#4.-Custom-Command)
#
# + [markdown] tags=[]
# # Preparations
# Before running the demos, make sure that the prerequisites are met and that the preparation steps have successfully been executed.
# Some preparation steps, particularly the downloads, may take quite some time.
# -
# ## 1. Prerequisites
# - Python3.7
# - Pip
# - Docker
# - Jupyter
#
# Run the cell below to validate your prerequisites.
# +
# Run cell to test your environment requirements
import sys
import re
pyver = !{sys.executable} --version # type: ignore
pipver = !{sys.executable} -m pip --version # type: ignore
# dockver = !docker --version # type: ignore
if not re.match("Python 3.7", pyver[0]):
raise SystemExit(f"This demo only supports Python 3.7. You are running {pyver[0]}.")
if not re.match(".*\(python 3.7\)", pipver[0]):
    raise SystemExit("Please install Pip for your Python 3.7 environment.")
if not re.match("Docker version", dockver[0]):
raise SystemExit("Please install Docker")
print("Requirements all look good!")
# -
# ## 2. Python Requirements
#
# Install the Python packages required for the demo:
#
# - `redgrease[client]` - The RedGrease client library for Redis Gears. This is what is being demonstrated.
#
# - `ipywidgets` - Jupyter notebook extension, for displaying widgets, e.g. buttons, in this notebook.
# - `requests` - For downloading content.
#
# Run the cell below to install the requirements.
# # %%capture reqs_install_output
# !{sys.executable} -m pip install redgrease[client] ipywidgets requests
# !jupyter nbextension enable --py widgetsnbextension
# ## 3. Download Datasets
# Some of the demos require a portion of the [COCO Dataset](https://cocodataset.org) to be uploaded into the Redis Gears Cluster.
# The COCO Dataset (Common Objects in Context) is a fairly large set of (~247,000) images and corresponding annotations of what they are depicting.
#
# ### Example:
# <img src="coco_example.jpg" > [COCO Example](coco_example.jpg)
#
# ```
# a man riding a snowboard down a ski slope.
# a snowboarder sailing down a snowy hillside on a mountain.
# a man is snowboarding past blue markers on a mountain.
# a man on a snowboard in the snow.
# a man snow boarding in the snow on a slope.
# ```
#
#
# For the demo we will only pre-download the annotations (json), not the images (jpeg), but it is still between 250 - 500 MB of data, depending on which portions you choose.
#
# There are two annotation packages to choose from.
# - **COCO Train/Val 2014** - Annotations for 124,000 images (241 MB)
# - **COCO Train/Val 2017** - Annotations for 123,000 images (241 MB)
#
# Either or both may be used.
# Run the cell below and select using the buttons which dataset(s) to download.
# +
# This code is just for preparation of the demo.
# It is NOT part of the demo itself
#
# Download COCO Annotations
# Run the cell, then:
# - Validate or modify the Download directory
# - Click the button, or buttons for the annotations to download
import ipywidgets as widgets
import os
import requests
coco_annotations_url = "http://images.cocodataset.org/annotations"
annotations_file_pattern = "annotations_trainval{}.zip"
layout = widgets.Layout(width="30%")
output = widgets.Output()
def get_download_path():
download_dir = "."
if os.name == 'nt':
import winreg
sub_key = r'SOFTWARE\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders'
downloads_guid = '{374DE290-123F-4565-9164-39C4925E467B}'
with winreg.OpenKey(winreg.HKEY_CURRENT_USER, sub_key) as key:
download_dir = winreg.QueryValueEx(key, downloads_guid)[0]
else:
download_dir = os.path.join(os.path.expanduser('~'), 'Downloads')
return os.path.join(download_dir, "COCO")
download_location = widgets.Text(
value=get_download_path(),
placeholder="Download directory",
description="Directory to download annotations to.",
layout=layout,
)
display(download_location)
def dl_state(button, downloading=None):
year = button.value
annotations_file_name = annotations_file_pattern.format(year)
destination = os.path.join(download_location.value, annotations_file_name)
is_downloaded = os.path.isfile(destination)
button.disabled = is_downloaded or downloading is not None
if downloading:
button.description=f"Downloading COCO {year} annotations (241 MB): {downloading}%. Please wait!"
elif is_downloaded:
button.description=f"Congrats! COCO {year} annotataions is downloaded!"
else:
button.description=f"Download COCO {year} annotations (241 MB)"
return is_downloaded, annotations_file_name, destination
def download_button_pressed(btn):
downloaded, file_name, destination = dl_state(btn)
if downloaded:
return
if not os.path.isdir(download_location.value):
os.mkdir(download_location.value)
try:
response = requests.get(
f"{coco_annotations_url}/{file_name}",
stream=True
)
total_length = response.headers.get('content-length')
with open(destination, "wb") as f:
if total_length is None: # no content length header
dl_state(btn, "???")
f.write(response.content)
return
total_length = int(total_length)
dl = 0
for data in response.iter_content(chunk_size=4096):
dl += len(data)
f.write(data)
dl_state(btn, int(100*(dl/total_length)))
except Exception:
try:
os.remove(destination)
except Exception:
pass
finally:
dl_state(btn)
for year in ["2014", "2017"]:
download_button = widgets.Button(
tooltip='Start download of selected datasets into the selected download directory.',
layout=layout
)
download_button.value = year
dl_state(download_button)
download_button.on_click(download_button_pressed)
display(download_button)
display(output)
# -
# ## 4. Download and run Redis Gears Cluster Docker image
# Run the cell below to download a Redis Gears Cluster Docker image (~605 MB), if not already present, and run it.
# +
redis_gears_cluster_image = "redislabs/rgcluster:1.2.1"
redis_gears_cluster_container_name = "demo_gears_cluster"
redis_gears_single_image = "redislabs/redisgears:1.2.1"
redis_gears_single_container_name = "demo_gears_single"
# Get the correct Redis Gears Images
# !docker pull {redis_gears_single_image}
# !docker pull {redis_gears_cluster_image}
# Check if the single container is already running.
# container_info = !docker container inspect {redis_gears_single_container_name}
if container_info[0] == "[]":
print("Starting Redis Gears single instance")
# !docker run --name {redis_gears_single_container_name} --rm -d -p 6379:6379 {redis_gears_single_image}
# Check if the cluster container is already running.
# container_info = !docker container inspect {redis_gears_cluster_container_name}
if container_info[0] == "[]":
print("Starting Redis Gears cluster instance")
# !docker run --name {redis_gears_cluster_container_name} --rm -d -p 30001:30001 -p 30002:30002 -p 30003:30003 {redis_gears_cluster_image}
print("Redis Gears containers are running!")
# -
# ## 5. Load Annotation Data into Redis cluster
# By running the cell below, the COCO annotations downloaded above will be loaded into the Redis Cluster.
# +
import glob
import itertools
import json
import os
import re
import redgrease
import zipfile
annotation_archive_files = os.path.join(download_location.value, "annotations_trainval*.zip")
annotation_archives = glob.glob(annotation_archive_files)
if not annotation_archives:
print("no archives")
raise SystemExit("Please download either or both COCO annotations as per instructions above.")
r = redgrease.RedisGears(host="localhost", port=30001)
annotation_json_pattern = re.compile("annotations/(\w+)_([a-zA-Z]+)([0-9]+).json")
annotation_types = ["instances"] #, "person_keypoints", "captions"]
years = ["2014", "2017"]
purpose = ["val", "train"]
output = widgets.Output()
progress = widgets.Text("", layout=layout)
def load_annotation_info(base_key, info):
annotation_info_key = f"{base_key}/info"
r.hset(annotation_info_key, mapping=info)
return annotation_info_key
def load_license_info(base_key, license):
license_key = f"{base_key}/license/{license['id']}"
if not r.exists(license_key):
r.hset(license_key, mapping=license)
return license_key
def load_image_info(base_key, image_info):
img_info_key = f"{base_key}/image/{image_info['id']}/info"
if not r.exists(img_info_key):
r.hset(img_info_key, mapping=image_info)
return img_info_key
def load_keypoint_names(base_key, keypoints):
keypoints_key = f"{base_key}/keypoints"
r.lpush(keypoints_key, *keypoints)
return keypoints_key
def load_list_of_str(base_key, sequence):
list_key = f"{base_key}/skeleton"
r.lpush(list_key, *map(str, sequence))
return list_key
def load_category(base_key, category):
category_key = f"{base_key}/category/{category['id']}"
if "keypoints" in category:
category["keypoints"] = load_keypoint_names(category_key, category["keypoints"])
if "skeleton" in category:
category["skeleton"] = load_list_of_str(category_key, category["skeleton"])
r.hset(category_key, mapping=category)
return category_key
def load_segmentation(base_key, segmentation):
segmentation_key = f"{base_key}/segmentation"
if not r.exists(segmentation_key):
for i, segment in enumerate(segmentation):
segment_key = f"{segmentation_key}/{i}"
r.lpush(segment_key, *segment)
r.rpush(segmentation_key, segment_key)
return segmentation_key
def load_annotation(base_key, annotation):
annotation_key = f"{base_key}/image/{annotation['image_id']}/annotation/{annotation['id']}"
if not r.exists(annotation_key):
if "segmentation" in annotation:
            # Replace the 'segmentation' list-of-lists with a key pointing to a list of keys, which in turn point to the inner lists :)
annotation["segmentation"] = load_segmentation(annotation_key, annotation["segmentation"])
if "bbox" in annotation:
            # Replace the 'bbox' list with its string representation
annotation["bbox"] = str(annotation["bbox"])
if "keypoints" in annotation:
annotation["keypoints"] = load_list_of_str(annotation_key, annotation["keypoints"])
r.hset(annotation_key, mapping=annotation)
return annotation_key
def load_annotation_jsons_from_zip(zip_file):
with zipfile.ZipFile(zip_file) as archive:
for file_name in archive.namelist():
is_annotation_file = annotation_json_pattern.match(file_name)
if not is_annotation_file:
continue
annotation_type = is_annotation_file.group(1)
dataset_purpose = is_annotation_file.group(2)
dataset_year = is_annotation_file.group(3)
if not annotation_type in annotation_types:
continue
if not dataset_purpose in purpose:
continue
if not dataset_year in years:
continue
with archive.open(file_name) as json_file:
contents = json.load(json_file)
base_key = f"/dataset/coco/{dataset_year}"
info_key = f"{base_key}/general/{annotation_type}/{dataset_purpose}"
# info
if "info" in contents:
progress.value = f"Loading info for {dataset_purpose} {dataset_year} {annotation_type}"
load_annotation_info(info_key, contents["info"])
# licenses
if "licenses" in contents:
progress.value = f"Loading licenses for {dataset_purpose} {dataset_year} {annotation_type}"
for lic in contents["licenses"]:
load_license_info(info_key, lic)
# images
if "images" in contents:
progress.value = f"Loading images for {dataset_purpose} {dataset_year} {annotation_type}"
for image_info in contents["images"]:
load_image_info(base_key, image_info)
# annotations
if "annotations" in contents:
progress.value = f"Loading annotations for {dataset_purpose} {dataset_year} {annotation_type}"
for annotation in contents["annotations"]:
load_annotation(base_key, annotation)
# categories (for "instances" and "person_keypoints")
if "categories" in contents:
progress.value = f"Loading categories for {dataset_purpose} {dataset_year} {annotation_type}"
for category in contents["categories"]:
load_category(base_key, category)
display(progress)
for archive in annotation_archives:
progress.value = f"Unzipping {archive}"
load_annotation_jsons_from_zip(archive)
progress.value = "Done!"
# -
# # Demos
# This is the actual Demo section. Everything above is just preparations.
#
# 1. [The Basics](#1.-The-Basics)
# 2. [Complex Query](#2.-Complex-Query)
# 3. [Transaction Stream Processing](#3.-Transaction-Stream-Processing)
# 4. [Custom Command](#4.-Custom-Command)
#
# <a id="demm-basics"></a>
# ## 1. The Basics
# Showcasing some of the basic features and commands of the redgrease package.
# Instantiation of client / connection to Redis Gears engines
# +
import redgrease
import redgrease.utils
from IPython.display import Image
from IPython.core.display import HTML
# Create connection / client for single instance Redis
single = redgrease.RedisGears()
# Create connection / client for Redis cluster
cluster = redgrease.RedisGears(port=30001)
# Create using existing Redis Connection
import redis
r = redis.Redis()
gears = redgrease.Gears(r)
print(f"single: {single.ping()}")
print(f"cluster:\n{cluster.ping()}")
print(f"gears: {gears.pystats()}")
# -
# Redis v6 commands are accessible
# +
a = single.flushall()
b = single.set("Foo", 21)
c = single.hset("Bar", mapping={"spam":"eggs", "meaning":8})
d = single.hincrby("Bar", "meaning", 34)
e = single.xadd("clogs::0", {"msg":"START", "from":0, "to":0, "amount":0})
a, b, c, d, e
# -
# Gears-specific commands can be accessed through the `gears` property.
#
# Examples:
# +
cluster_pystats = cluster.gears.pystats()
print(f"Cluster Redis - Python Stats:\n{cluster_pystats}\n")
# +
cluster_info = cluster.gears.infocluster()
print(f"Cluster Redis - Cluster Info:\n{cluster_info}\n")
print(f"Number of shards: {len(cluster_info.shards)}")
# +
cluster_refreshed = cluster.gears.refreshcluster()
print(f"Cluster Redis - Cluster Refresh Response:\n{cluster_refreshed}\n")
# -
# Gear functions can be invoked as strings
# +
#> Iterate through all Redis key-value records, and return all record data.
all_records_gear = single.gears.pyexecute("GearsBuilder().run()")
print("Single-node Redis - All-records gear:")
for result in all_records_gear:
print(f" {result}")
# +
#> Iterate through all Redis key-value records, and return just the key and type
key_type_gear = single.gears.pyexecute(
"GearsBuilder().map(lambda record:(record['key'], record['type'])).run()"
)
print("Single-node Redis - Key-types gear:")
for result in key_type_gear:
print(f" {result}")
# +
#> Count the total number of keys / records
single_record_count = single.gears.pyexecute("GearsBuilder().count().run()")
print(f"Single-node Redis - Record count: {int(single_record_count)}")
# -
# ### GearFunction objects
# RedGrease allows for the construction of GearFunction objects instead of function strings.
# +
### Programatic / dynamic definition of Gears functions
record_count = redgrease.KeysOnlyReader().count().run()
cluster_record_count = cluster.gears.pyexecute(record_count)
print(f"Cluster Redis - Total records: {cluster_record_count}")
# -
# Open RedGrease GearFunction objects can be composed and reused
# +
#>
images = redgrease.KeysReader("/dataset/coco/*/image/*/info").values()
image_count = images.count()
square_images = images.filter(lambda img: img['height'] == img['width'])
some_square_image_urls = (
square_images
.collect()
.limit(4)
.map(lambda record: record['coco_url'])
)
type(images), type(image_count), type(square_images), type(some_square_image_urls)
# -
# You can dynamically create parameterized open RedGrease GearFunctions in normal functions.
#> Normal functions can create parameterized Gear Functions
def instance_annotations(year="*"):
return redgrease.KeysReader(
f"/dataset/coco/{year}/image/*/annotation/*"
).values(type="hash")
# Gear functions can be executed in a number of different ways
# +
### The "textbook" way
img_cnt = cluster.gears.pyexecute(image_count.run())
print(f"Total number of images: {img_cnt}\n")
# +
### As an open function, i.e. without a closing 'run' or 'register' (Run is inferred)
annotation_cnt = cluster.gears.pyexecute(instance_annotations().count())
print(f"Total number of annotations: {annotation_cnt}\n")
# +
#> Directly in the closing `run` or `register` operation, using the `on` argument
img_urls = some_square_image_urls.run(on=cluster)
print(f"Some square images")
for img_url in img_urls:
display(Image(url=img_url))
# -
# <a id="demo-query"></a>
# ## 2. Complex Query
#
# Let's construct a query GearFunction that can take a number of annotation category names and for each an optional min and max count,
# and then finds images that fit those constraints.
# First, notice how annotations are stored:
a1 = cluster.hgetall("/dataset/coco/2017/image/22222/annotation/2027787")
a2 = cluster.hgetall("/dataset/coco/2017/image/22222/annotation/1727529")
a1, a2
# Let's create some lookup tables for the annotation categories and their IDs
# +
#> Merging dictionaries
def dict_merge(d1, d2):
return {**d1, **d2}
# Lookup from category name to category id
category_id_lookup = (
redgrease.KeysReader("/dataset/*/category/*")
.values(type="hash")
.map(lambda annotation: {annotation['name']:annotation['id']})
.aggregate({},dict_merge, dict_merge)
.run(on=cluster)
)
# Lookup from category id to category name
category_name_lookup = {cat_id:cat_name for cat_name, cat_id in category_id_lookup.items() }
print(f"Number of categories: {len(category_id_lookup)}")
print(f"Errors: {category_id_lookup.errors}")
print()
print(f"Lookup id by name:\n{category_id_lookup}")
# -
# Collect the number of annotations of each category per image
# +
### for each annotation we add one to the accumulator for the image, under the category of the annotation
def accumulate_categories(image_id, accumulator, annotation):
if 'category_id' in annotation:
annotation_category_id = annotation['category_id']
accumulator[annotation_category_id] = accumulator.get(annotation_category_id, 0) + 1
return accumulator
# add the previously accumulated counts from each shard to a global accumulator for the image
def accumulate_category_counts(image_id, accumulator, category_count):
for category, count in category_count.items():
accumulator[category] = accumulator.get(category, 0) + count
return accumulator
# Just renaming fields so it's clearer what they contain
def format_img_stats(img_stats):
return {
'image_id': img_stats['key'],
'instances': img_stats['value']
}
# GearFunction that counts the number of annotations of each category in each image
category_count_by_image = instance_annotations(2017).aggregateby(
    extractor = lambda annotation : annotation.get('image_id', -1), # Group the annotations by image_id
    zero = {}, # For each group we use a dict to accumulate the counts of each category of annotation
seqOp = accumulate_categories, # Accumulate/reduce the category counts locally on each shard
combOp = accumulate_category_counts # Accumulate/reduce the local results globally
).map(format_img_stats)
# Run the GearFunction, but limit to 10 results per shard (for sanity)
cats_by_img = category_count_by_image.limit(10).run(on=cluster)
# -
#>
print(f"Number of results: {len(cats_by_img)}")
print(f"Errors: {cats_by_img.errors}")
print()
for img in cats_by_img:
inst = { category_name_lookup.get(cid, cid):cnt for cid, cnt in img['instances'].items() }
print(f"Image #{img['image_id']} instances: {inst}")
print("...")
print("and so on")
# +
### Querying
def constrain(constraints):
    # Return a predicate that checks, for each image's instance counts, whether the counts
    # of the given categories satisfy their constraints.
    # 'constraints' is a dict from category name to a (min_count, max_count) tuple.
id_constraints = { category_id_lookup[cat_name]:x for cat_name, x in constraints.items()}
def predicate(record):
instances = record['instances']
# iterate through each of the constraints, to check if any fails
for cat_id, constraint in id_constraints.items():
min_count, max_count = constraint
if cat_id not in instances:
if min_count is not ... and min_count > 0:
return False
continue
if min_count is not ... and instances[cat_id] < min_count:
return False
if max_count is not ... and instances[cat_id] > max_count:
return False
return True
return predicate
# Our query params are a dict from category name to a (min_count, max_count) tuple (... meaning 'any')
query_params = {
'truck': (1, 2),
'banana': (5, ...),
'person': (1, 1),
'bottle': (..., 0),
}
query = category_count_by_image.filter(constrain(query_params))
query_result = query.limit(30).run(on=cluster)
query_result
# -
#> Show the resulting images
for image in query_result:
image_url = cluster.hget(
f"/dataset/coco/2017/image/{image['image_id']}/info",
"coco_url",
)
display(Image(url=image_url.decode()))
# <a id="demo-stream"></a>
# ## 3. Transaction Stream Processing
# +
import random
import datetime
user_count = 5
min_start_balance = 100
max_start_balance = 1000
# Create some 'user' accounts with some existing balance
for user_id in range(user_count):
start_balance = random.randint(min_start_balance, max_start_balance)
single.hset(
f"/user/{user_id}",
mapping={
"id": user_id,
"balance": start_balance,
"start_balance": start_balance,
}
)
# Helper function for sending transaction requests.
def attempt_random_transaction(channel, max_amount=100, message="This is a random transaction",):
single.xadd(
f"transactions:{channel}",
{
"msg": message,
"from": random.randint(0, user_count-1),
"to": random.randint(0, user_count-1),
"amount": random.randint(1, max_amount),
}
)
# Print a summary balance sheet for all users
def balance_sheet():
sum_balance = 0
for user_id in range(user_count):
current_balance, start_balance = map(
int,
single.hmget(f"/user/{user_id}", "balance", "start_balance")
)
print(f"User {user_id} balance: {current_balance} ({current_balance-start_balance})")
sum_balance += current_balance
print("----------------------------")
print(f"Total balance : {sum_balance}")
return sum_balance
start_total_balance = balance_sheet()
# +
# Transform a key-space event to a transaction
def initialize_transaction(event):
transaction = event['value']
transaction['timestamp'] = datetime.datetime.utcnow().isoformat()
transaction['channel'] = event['key']
transaction['id'] = event['id']
transaction['status'] = "pending"
return transaction
# Handle the transaction safely
def handle_transaction(transaction):
# Log the transaction event to the Redis engine log
    redgrease.log(f"Processing transaction {transaction['id']}: {transaction}")
sender = transaction['from']
recipient = transaction['to']
# Perform a sequence of commands atomically
with redgrease.atomic():
# Check if the 'sender' has sufficient balance
sender_balance = redgrease.cmd.hget(
f"/user/{sender}",
"balance"
)
amount = int(transaction.get('amount', 0))
if not sender_balance or amount > int(sender_balance):
# If balance is not sufficient, the transaction is marked as failed.
            transaction['status'] = f"FAILED: Missing {amount - int(sender_balance or 0)}"
else:
# If there is sufficient balance,
# remove the amount from sender and add it to the recipient
# and mark as successful
redgrease.cmd.hincrby(
f"/user/{sender}",
"balance",
-amount
)
redgrease.cmd.hincrby(
f"/user/{recipient}",
"balance",
amount
)
transaction['status'] = "successful"
# If successful, add the transaction to the statement of the recipient
redgrease.cmd.xadd(f"/user/{recipient}/statement", transaction)
# Regardless of status, add the transaction to the statement of the sender
redgrease.cmd.xadd(f"/user/{sender}/statement", transaction)
redgrease.log(f"Done processing transaction {transaction['id']}: {transaction['status']}")
return transaction
# Transaction processing pipeline
transaction_pipe = (
redgrease.StreamReader() # Listen to streams
.map(initialize_transaction) # Map stream events to a 'transaction' dict, and adds default.
.map(handle_transaction) # Execute the transaction
.register(prefix="transactions:*", batch=10, duration=30) # Listen to transaction stream and use batching
)
# Register the processing pipeline
transaction_pipe.on(single)
# -
for registration in single.gears.dumpregistrations():
print(
f"Registered Gear function {registration.id} has been "
f"triggered {registration.RegistrationData.numTriggered} times."
)
attempt_random_transaction("sample")
balance_sheet()
# + tags=[]
from concurrent.futures import ThreadPoolExecutor
from itertools import repeat
# Run a bunch of transactions in parallel
parallel_transaction_job_count = 100
sequential_transactions_count = 100
max_transaction_amount = 500
def sequential_transactions(channel="foo"):
def attempt_transactions():
for transaction_id in range(sequential_transactions_count):
attempt_random_transaction(
channel,
max_amount=max_transaction_amount,
message=f"This is a transaction #{transaction_id} on channel {channel}",
)
return attempt_transactions
def run_in_parallel(jobs):
with ThreadPoolExecutor() as worker:
tasks = [worker.submit(job) for job in jobs]
run_in_parallel(
    [sequential_transactions(nm) for nm in range(parallel_transaction_job_count)]
)
# + tags=[]
end_total_balance = balance_sheet()
print(f"Total difference: {start_total_balance - end_total_balance}")
print()
for registration in single.gears.dumpregistrations():
print(
f"Registered Gear function {registration.id} has been "
f"triggered {registration.RegistrationData.numTriggered} times."
)
# -
statement = single.xrange("/user/3/statement","-", "+", 5)
statement
# ### Cleanup
# +
# Unregister all registrations
for reg in single.gears.dumpregistrations():
single.gears.unregister(reg.id)
# Remove all executions
for exe in single.gears.dumpexecutions():
single.gears.dropexecution(str(exe.executionId))
# Clear all keys
single.flushall()
# Check that there are no keys
single.keys()
# -
# <a id="demo-command"></a>
# ## 4. Custom Command
#
# A simple image cache
# +
import requests
def cache_get(url):
if redgrease.cmd.exists(url):
return bytes(redgrease.cmd.get(url))
response = requests.get(url)
if response.status_code != 200:
return bytes()
response_data = bytes(response.content)
redgrease.cmd.set(url, response_data)
return response_data
get_image = (
redgrease.CommandReader()
.map(lambda trigger: trigger[1])
.map(cache_get, requirements=["requests"])
.register(trigger="cache_get", on=single, convertToStr=False)
)
# +
# %%time
image_urls_1 = [
"http://images.cocodataset.org/train2017/000000246070.jpg",
"http://images.cocodataset.org/train2017/000000167133.jpg",
"http://images.cocodataset.org/train2017/000000559366.jpg",
"http://images.cocodataset.org/train2017/000000156242.jpg",
"http://images.cocodataset.org/train2017/000000169188.jpg",
"http://images.cocodataset.org/train2017/000000135016.jpg",
"http://images.cocodataset.org/train2017/000000248334.jpg",
"http://images.cocodataset.org/train2017/000000445906.jpg",
"http://images.cocodataset.org/train2017/000000318733.jpg",
"http://images.cocodataset.org/train2017/000000316672.jpg",
]
for image_url in image_urls_1:
image_data = single.gears.trigger("cache_get", image_url)
display(Image(data=image_data.value))
for registration in single.gears.dumpregistrations():
print(
f"Registered Gear function {registration.id} has been "
f"triggered {registration.RegistrationData.numTriggered} times."
)
# -
# ### An even shorter version
# Only the command function with a function decorator
@redgrease.trigger(on=single, convertToStr=False, requirements=["requests"], replace=True)
def cache_get(url):
if redgrease.cmd.exists(url):
return bytes(redgrease.cmd.get(url))
response = requests.get(url)
if response.status_code != 200:
return bytes()
response_data = bytes(response.content)
redgrease.cmd.set(url, response_data)
return response_data
# +
# %%time
image_urls_2 = [
"http://images.cocodataset.org/train2017/000000483381.jpg",
"http://images.cocodataset.org/train2017/000000237137.jpg",
"http://images.cocodataset.org/train2017/000000017267.jpg",
"http://images.cocodataset.org/train2017/000000197756.jpg",
"http://images.cocodataset.org/train2017/000000451278.jpg",
"http://images.cocodataset.org/train2017/000000193332.jpg",
"http://images.cocodataset.org/train2017/000000475564.jpg",
"http://images.cocodataset.org/train2017/000000247368.jpg",
]
for image_url in image_urls_2:
image_data = cache_get(image_url)
display(Image(data=image_data.value))
for registration in single.gears.dumpregistrations():
print(
f"Registered Gear function {registration.id} has been "
f"triggered {registration.RegistrationData.numTriggered} times."
)
# -
| examples/Demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import sys ; sys.path.append('../')
import torchdyn; from torchdyn.models import *; from torchdyn.datasets import *
import torch ; import torch.utils.data as data
import pytorch_lightning as pl
from src import *
import matplotlib.pyplot as plt
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# -
# In this notebook, we train a `ffjord` model using the `torchdyn` library
# +
dataset = ToyDataset()
n_samples = 1 << 8
n_gaussians = 5
def target_sample(n_samples):
X, _ = dataset.generate(n_samples // n_gaussians, 'gaussians', n_gaussians=n_gaussians, std_gaussians=0.5, radius=4, dim=2)
X = (X - X.mean())/X.std()
return X
x = target_sample(n_samples)
plt.figure(figsize=(5, 5))
plt.scatter(x[:,0], x[:,1], c='black', alpha=0.2, s=1.)
# -
# ### Define ffjord
# +
hdim = 128
f = nn.Sequential(
nn.Linear(2, hdim),
nn.Softplus(),
nn.Linear(hdim, hdim),
nn.Softplus(),
nn.Linear(hdim, hdim),
nn.Softplus(),
nn.Linear(hdim, hdim),
nn.Tanh(),
nn.Linear(hdim, 2))
cnf = CNF(f, trace_estimator=autograd_trace)
nde = NeuralDE(cnf, solver='dopri5', s_span=torch.linspace(1, 0, 2), sensitivity='adjoint', atol=1e-5, rtol=1e-5)
model = nn.Sequential(Augmenter(augment_idx=1, augment_dims=1), nde).to(device)
# -
from torch.distributions import MultivariateNormal, Uniform, TransformedDistribution, SigmoidTransform, Categorical
prior = MultivariateNormal(torch.zeros(2).to(device), torch.eye(2).to(device))
# ### Define Learner
# +
# dummy trainloader
trainloader = data.DataLoader(data.TensorDataset(torch.Tensor(1).to(device), torch.Tensor(1).to(device)), batch_size=1024, shuffle=True)
# learner
class Learner(pl.LightningModule):
def __init__(self, model:nn.Module):
super().__init__()
self.model = model
self.iters = 0
def forward(self, x):
return self.model(x)
def training_step(self, batch, batch_idx):
self.iters += 1
xT = target_sample(1024).to(device)
x0_trJ = self.model(xT)
logprob = prior.log_prob(x0_trJ[:,1:]).to(xT) - x0_trJ[:,0]
reg_loss = 0.01*torch.norm(self.model[1].defunc.m.net(x0_trJ[:,1:]), p=2, dim=1).mean()
loss = -torch.mean(logprob)
nde.nfe = 0
if not self.iters%100:
plot(model=self.model, target=xT, prior=prior, step=self.iters)
self.model = self.model.to(device)
return {'loss': loss}
def configure_optimizers(self):
return torch.optim.AdamW(self.model.parameters(), lr=1e-3, weight_decay=1e-6)
def train_dataloader(self):
return trainloader
# -
# ### Train model
learn = Learner(model)
trainer = pl.Trainer(max_epochs=3000, gradient_clip_val=0.5)
trainer.fit(learn);
plot(model=model, target=x, prior=prior, step=0, show=True)
model = model.to(device)
torch.save(model.state_dict(), '../pretrained_models/ffjord_gaussians')
| hypersolver/density_estimation/train_ffjord.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # `TensorFlow`/`Keras`
#
# [Keras](https://keras.io/) is a high-level neural networks API, written in Python and capable of running on top of [TensorFlow](https://www.tensorflow.org/), [CNTK](https://docs.microsoft.com/de-de/cognitive-toolkit/), or [Theano](http://www.deeplearning.net/software/theano/). It was developed with a focus on enabling fast experimentation. *Being able to go from idea to result with the least possible delay is key to doing good research.*
#
# **Note 1:** This is not an introduction to deep neural networks, as that would exceed the scope of this notebook. But we want to show you how you can implement a convolutional neural network to classify neuroimages, in our case fMRI images.
# **Note 2:** We want to thank [<NAME>](https://github.com/akeshavan), as a lot of the content in this notebook comes from their [introduction notebook](http://nbviewer.jupyter.org/github/brainhack101/IntroDL/blob/master/IntroToKeras.ipynb) about Keras.
# ## Setup
from nilearn import plotting
# %matplotlib inline
import numpy as np
import nibabel as nb
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style="darkgrid")
# ## Load machine learning dataset
#
# Let's again load the dataset we prepared in the machine learning preparation notebook, plus the anatomical template image (we will need this for visualization).
anat = nb.load('/home/neuro/workshop/notebooks/data/templates/MNI152_T1_1mm.nii.gz')
func = nb.load('/home/neuro/workshop/notebooks/data/dataset_ML.nii.gz')
from nilearn.image import mean_img
from nilearn.plotting import plot_anat
plot_anat(mean_img(func), cmap='magma', colorbar=False, display_mode='x', vmax=2, annotate=False,
cut_coords=range(0, 49, 12), title='Mean value of machine learning dataset');
# As a reminder, the shape of our machine learning dataset is as follows:
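# A minimal check of this, assuming `func` is the 4D NIfTI image loaded above (the exact spatial dimensions depend on your copy of the dataset, but the last dimension should hold the 384 volumes):
func.shape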
# # Specifying labels and chunks
#
# As in the `nilearn` and `PyMVPA` notebook, we need some chunks and label variables to train the neural network. The labels are important so that we can predict what we want to classify. And the chunks are just an easy way to make sure that the training and test dataset are split in an equal/balanced way.
#
# So, as before, we specify again which volumes of the dataset were recorded during eyes **closed** resting state and which ones were recorded during eyes **open** resting state recording.
#
# From the [Machine Learning Preparation](machine_learning_preparation.ipynb) notebook, we know that we have a total of 384 volumes in our `dataset_ML.nii.gz` file and that it's always 4 volumes of the condition `eyes closed`, followed by 4 volumes of the condition `eyes open`, etc. Therefore our labels should be as follows:
labels = np.ravel([[['closed'] * 4, ['open'] * 4] for i in range(48)])
labels[:20]
# ***Second***, the `chunks` variable should not switch between subjects. So, as before, we can again specify 6 chunks of 64 volumes (8 subjects), each:
chunks = np.ravel([[i] * 64 for i in range(6)])
chunks[:150]
# # Keras - 2D Example
#
# Convolutional neural networks are very powerful (as you will see), but the computational power needed to train them can be incredibly demanding. For this reason, it's sometimes recommended to try to reduce the input space if possible.
#
# In our case, we can train the neural network on only a very thin slab (a few slices) of the brain. So, instead of taking the data matrix of the whole brain, we just take a few slices in the region that we think is most likely to be predictive for the question at hand.
#
# We know (or suspect) that the regions with the most predictive power are probably somewhere around the eyes and in the visual cortex. So let's try to specify a few slices that cover those regions.
#
# So, let's try to just take a few slices around the eyes:
plot_anat(mean_img(func).slicer[...,5:-25], cmap='magma', colorbar=False,
display_mode='x', vmax=2, annotate=False, cut_coords=range(0, 49, 12),
title='Slab of the machine learning mean image');
# Hmm... That doesn't seem to work. We want to cover the eyes and the visual cortex. Like this, we're too far down in the back of the head (at the Cerebellum). One solution to this is to rotate the volume.
#
# So let's do that:
# +
# Rotation parameters
phi = 0.35
cos = np.cos(phi)
sin = np.sin(phi)
# Compute rotation matrix around x-axis
rotation_affine = np.array([[1, 0, 0, 0],
[0, cos, -sin, 0],
[0, sin, cos, 0],
[0, 0, 0, 1]])
new_affine = rotation_affine.dot(func.affine)
# -
# Rotate and resample image to new orientation
from nilearn.image import resample_img
new_img = nb.Nifti1Image(func.get_fdata(), new_affine)
img_rot = resample_img(new_img, func.affine, interpolation='continuous')
# Delete zero-only rows and columns
from nilearn.image import crop_img
img_crop = crop_img(img_rot)
# Let's check if the rotation worked.
plot_anat(mean_img(img_crop), cmap='magma', colorbar=False, display_mode='x', vmax=2, annotate=False,
cut_coords=range(-20, 30, 12), title='Rotated machine learning dataset');
# Perfect! And which slab should we take? Let's try the slices 12, 13 and 14.
from nilearn.plotting import plot_stat_map
img_slab = img_crop.slicer[..., 12:15, :]
plot_stat_map(mean_img(img_slab), cmap='magma', bg_img=mean_img(img_crop), colorbar=False,
display_mode='x', vmax=2, annotate=False, cut_coords=range(-20, 30, 12),
title='Slab of rotated machine learning dataset');
# Perfect, the slab seems to contain exactly what we want. Now that the data is ready we can continue with the actual machine learning part.
# ## Split data into a training and testing set
#
# First things first, we need to define a training and testing set. This is *really* important because we need to make sure that our model can generalize to new, unseen data. Here, we randomly shuffle our data, and reserve 80% of it for our training data, and the remaining 20% for testing.
#
# So let's first get the data in the right structure for keras. For this, we need to swap some of the dimensions of our data matrix.
data = np.rollaxis(img_slab.get_fdata(), 3, 0)
data.shape
# As you can see, the goal is to have the different volumes in the first dimension, followed by the volume itself. Keep in mind that the last dimension (here of size 3) is treated as the `channels` dimension in the Keras model that we will be using below.
# **Note:** To make this notebook reproducible, i.e. always leading to the "same" results, let's set a seed for the random split of the dataset. This should only be done for teaching purposes, not for real research, as randomness and chance are a crucial part of machine learning.
from numpy.random import seed
seed(0)
# As a next step, let's create a index list that we can use to split the data and labels into training and test sets:
# +
# Create list of indices and shuffle them
N = data.shape[0]
indices = np.arange(N)
np.random.shuffle(indices)
# Cut the dataset at 80% to create the training and test set
N_80p = int(0.8 * N)
indices_train = indices[:N_80p]
indices_test = indices[N_80p:]
# Split the data into training and test sets
X_train = data[indices_train, ...]
X_test = data[indices_test, ...]
print(X_train.shape, X_test.shape)
# -
# ## Create outcome variable
#
# We need to define a variable that holds the outcome variable (1 or 0) that indicates whether or not the resting-state images were recorded with eyes opened or closed. Luckily we have this information already stored in the `labels` variable above. So let's split these labels in training and test set:
y_train = labels[indices_train] == 'open'
y_test = labels[indices_test] == 'open'
# ## Data Scaling
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
scaler = StandardScaler()
pca = PCA()
tsne = TSNE()
X_scaled = scaler.fit_transform(X_train.reshape(len(X_train), -1))
X_pca = pca.fit_transform(X_scaled)
plt.plot(pca.explained_variance_ratio_.cumsum())
y_train
plt.scatter(X_pca[:, 0], X_pca[:, 1], c=y_train, cmap='bwr')
X_tsne = tsne.fit_transform(X_pca)
plt.scatter(X_tsne[:, 0], X_tsne[:, 1], c=y_train, cmap='bwr')
# Voxel-wise mean and standard deviation of the training set
mean = X_train.mean(axis=0)
mean.shape
std = X_train.std(axis=0)
std.shape
# Inspect the distribution of voxel standard deviations to pick a cutoff
plt.hist(np.ravel(std), bins=100);
plt.vlines(0.05, 0, 1000, colors='red')
# Zero out voxels with (almost) no variance
std[std<0.05] = 0
# Inspect the distribution of voxel means to pick a cutoff
plt.hist(np.ravel(mean), bins=100);
plt.vlines(0.25, 0, 1000, colors='red')
mean[mean<0.05] = 0
# Mask of voxels that survive both cutoffs
mask = (mean*std)!=0
# Z-score training and test set using the training set statistics
X_zscore_tr = (X_train-mean)/std
X_zscore_te = (X_test-mean)/std
X_zscore_tr.shape
X_zscore_tr[np.isnan(X_zscore_tr)]=0
X_zscore_te[np.isnan(X_zscore_te)]=0
X_zscore_tr[np.isinf(X_zscore_tr)]=0
X_zscore_te[np.isinf(X_zscore_te)]=0
# And now we're good to go.
# ## Creating a Sequential Model
#
# Now comes the fun and tricky part. We need to specify the structure of our convolutional neural network. As a quick reminder, a convolutional neural network consists of convolution layers, pooling layers, a flattening layer and fully connected layers:
#
# <img src="data/deep_neural_networks.png"/>
#
# Taken from: https://www.mathworks.com/videos/introduction-to-deep-learning-what-are-convolutional-neural-networks--1489512765771.html
# So as a first step, let's import all modules that we need to create the keras model:
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, AvgPool2D, BatchNormalization
from tensorflow.keras.layers import Activation, Dropout, Flatten, Dense
from tensorflow.keras.optimizers import Adam, SGD
# As a next step, we should specify some of the model parameters that we want to be identical throughout the model:
# +
# Get shape of input data
data_shape = tuple(X_train.shape[1:])
# Specify shape of convolution kernel
kernel_size = (3, 3)
# Specify number of output categories
n_classes = 2
# -
# Now comes the big part... the model, i.e. the structure of the neural network! We want to make clear that we're no experts in deep neural networks, and therefore the model below might not necessarily be a good model. But we chose it because it can be trained rather quickly and has rather few parameters to estimate.
# +
# Specify number of filters per layer
filters = 32
model = Sequential()
model.add(Conv2D(filters, kernel_size, activation='relu', input_shape=data_shape))
model.add(BatchNormalization())
model.add(MaxPooling2D())
filters *= 2
model.add(Conv2D(filters, kernel_size, activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D())
filters *= 2
model.add(Conv2D(filters, kernel_size, activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D())
filters *= 2
model.add(Flatten())
model.add(Dropout(0.5))
model.add(Dense(1024, activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(256, activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(n_classes, activation='softmax'))
model.compile(loss='sparse_categorical_crossentropy',
optimizer='adam', # swap out for sgd
metrics=['accuracy'])
model.summary()
# -
# That's what our model looks like! Cool!
# ## Fitting the Model
#
# The next step is now, of course, to fit our model to the training data. In our case we have two parameters that we can work with:
#
# *First*: How many iterations of the model fitting should be computed
nEpochs = 125 # Increase this value for better results (i.e., more training)
# *Second*: How many elements (volumes) should be considered at once for the updating of the weights?
batch_size = 32 # Increasing this value might speed up fitting
# So let's test the model:
# %time fit = model.fit(X_zscore_tr, y_train, epochs=nEpochs, batch_size=batch_size, validation_split=0.2)
# ## Performance during model fitting
#
# Let's take a look at the loss and accuracy values during the different epochs:
fig = plt.figure(figsize=(10, 4))
epoch = np.arange(nEpochs) + 1
fontsize = 16
plt.plot(epoch, fit.history['accuracy'], marker="o", linewidth=2,
color="steelblue", label="accuracy")
plt.plot(epoch, fit.history['val_accuracy'], marker="o", linewidth=2,
color="orange", label="val_accuracy")
plt.xlabel('epoch', fontsize=fontsize)
plt.xticks(fontsize=fontsize)
plt.yticks(fontsize=fontsize)
plt.legend(frameon=False, fontsize=16);
fig = plt.figure(figsize=(10, 4))
epoch = np.arange(nEpochs) + 1
fontsize = 16
plt.plot(epoch, fit.history['loss'], marker="o", linewidth=2,
color="steelblue", label="loss")
plt.plot(epoch, fit.history['val_loss'], marker="o", linewidth=2,
color="orange", label="val_loss")
plt.xlabel('epoch', fontsize=fontsize)
plt.xticks(fontsize=fontsize)
plt.yticks(fontsize=fontsize)
plt.legend(frameon=False, fontsize=16);
# Great, it seems that accuracy is constantly increasing and the loss is continuing to drop. But how well is our model doing on the test data?
# ## Evaluating the model
evaluation = model.evaluate(X_zscore_te, y_test)
print('Loss in Test set: %.02f' % (evaluation[0]))
print('Accuracy in Test set: %.02f' % (evaluation[1] * 100))
# # Confusion Matrix
y_pred = np.argmax(model.predict(X_zscore_te), axis=1)
y_pred
y_true = y_test * 1
y_true
from sklearn.metrics import confusion_matrix
import pandas as pd
class_labels = ['closed', 'open']
cm = pd.DataFrame(confusion_matrix(y_true, y_pred), index=class_labels, columns=class_labels)
sns.heatmap(cm.div(cm.sum(axis=1), axis=0), square=True, annot=True);  # normalize each row (true class) by its total
# ## Analyze prediction values
# What are the predicted values of the test set?
y_pred = model.predict(X_zscore_te)
y_pred[:10,:]
# As you can see, those values can be between 0 and 1.
fig = plt.figure(figsize=(6, 4))
fontsize = 16
plt.hist(y_pred[:,0], bins=16, label='eyes closed')
plt.hist(y_pred[:,1], bins=16, label='eyes open');
plt.xticks(fontsize=fontsize)
plt.yticks(fontsize=fontsize)
plt.legend(frameon=False, fontsize=16);
# The more both distributions are concentrated around chance level (i.e. 0.5), the weaker your model is.
# **Note:** Keep in mind that we trained the whole model only on one split of test and training data. Ideally, you would repeat this process many times so that your results become less dependent on what kind of split you did.
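# Below is a minimal sketch of what such a repetition could look like, assuming the `data`, `labels`, `model`, `nEpochs` and `batch_size` objects defined above, and that refitting the network a handful of times is computationally affordable. `clone_model` gives a fresh copy of the architecture with newly initialized weights for every split.
# +
from sklearn.model_selection import StratifiedShuffleSplit
from tensorflow.keras.models import clone_model

splitter = StratifiedShuffleSplit(n_splits=5, test_size=0.2, random_state=0)
split_accuracies = []

for train_idx, test_idx in splitter.split(data, labels):
    # Re-compute the scaling statistics on the current training split only
    split_mean = data[train_idx].mean(axis=0)
    split_std = data[train_idx].std(axis=0)
    split_std[split_std == 0] = 1  # avoid division by zero
    X_tr = (data[train_idx] - split_mean) / split_std
    X_te = (data[test_idx] - split_mean) / split_std
    y_tr = labels[train_idx] == 'open'
    y_te = labels[test_idx] == 'open'

    # Fresh, untrained copy of the architecture defined above
    m = clone_model(model)
    m.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    m.fit(X_tr, y_tr, epochs=nEpochs, batch_size=batch_size, verbose=0)
    split_accuracies.append(m.evaluate(X_te, y_te, verbose=0)[1])

print('Test accuracy per split:', np.round(split_accuracies, 2))
print('Mean test accuracy: %.02f' % np.mean(split_accuracies))
# -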
# ## Visualizing Hidden Layers
#
# Finally, as a cool additional feature: We can now visualize the individual filters of the hidden layers. So let's get to it:
# Aggregate the layers
layer_dict = dict([(layer.name, layer) for layer in model.layers])
# +
from tensorflow.keras import backend as K
# Specify a function that visualized the layers
def show_activation(layer_name):
layer_output = layer_dict[layer_name].output
fn = K.function([model.input], [layer_output])
inp = X_train[0:1]
this_hidden = fn([inp])[0]
# plot the activations, 8 filters per row
plt.figure(figsize=(16,8))
nFilters = this_hidden.shape[-1]
nColumn = 8 if nFilters >= 8 else nFilters
for i in range(nFilters):
        plt.subplot(nFilters // nColumn, nColumn, i+1)
plt.imshow(this_hidden[0,:,:,i], cmap='magma', interpolation='nearest')
plt.axis('off')
return
# -
# Now we can plot the filters of the hidden layers:
show_activation('conv2d')
show_activation('conv2d_1')
show_activation('conv2d_2')
# ## Conclusion of 2D example
#
# The classification accuracy on the training set gets incredibly high, while the validation set also reaches a reasonable accuracy level above 80%. Nonetheless, by only investigating a slab of our fMRI dataset, we might have missed out on some important additional information.
#
# An alternative solution might be to use 3D convolutional neural networks. But keep in mind that they will have even more parameters and will probably take much longer to fit to the training data. Having said so, let's get to it.
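# Below is only a rough sketch of what such a 3D model could look like, not a tuned architecture. It assumes inputs of shape `(x, y, z, 1)`, i.e. whole volumes with a single channel; the spatial size used in the example call is made up purely for illustration.
# +
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv3D, MaxPooling3D, BatchNormalization, Flatten, Dropout, Dense

def build_3d_cnn(input_shape, n_classes=2):
    # Same overall pattern as the 2D model above, but with 3D convolutions and pooling
    model_3d = Sequential([
        Conv3D(16, kernel_size=(3, 3, 3), activation='relu', input_shape=input_shape),
        BatchNormalization(),
        MaxPooling3D(),
        Conv3D(32, kernel_size=(3, 3, 3), activation='relu'),
        BatchNormalization(),
        MaxPooling3D(),
        Flatten(),
        Dropout(0.5),
        Dense(64, activation='relu'),
        Dense(n_classes, activation='softmax'),
    ])
    model_3d.compile(loss='sparse_categorical_crossentropy',
                     optimizer='adam',
                     metrics=['accuracy'])
    return model_3d

# Example with a hypothetical volume size of 40 x 48 x 40 voxels and one channel
build_3d_cnn((40, 48, 40, 1)).summary()
# -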
| notebooks/05c_machine_learning_keras.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (Spyder)
# language: python3
# name: python3
# ---
# # Module 2.8 Dictionaries, Summarized
#
# Created By: <NAME>
#
# Each code block is designed to be an independent program for ease of use!
#
# ---
#
# ***Disclaimer***
#
# > Copyright (c) 2020 <NAME>
#
# > Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# > The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# > THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# ## Part 1: Dictionaries Explained
#
# In Python, dictionaries are the built-in structure for storing `key: value` pairs!
#
# Key-value pairs are a different data structure from the tabular data we usually run into in data science!
#
# It's really difficult to represent key-value pairs in tables, so there are specialized databases that store these types of relationships!
#
# In Python, these are functionally similar to JSON (JavaScript Object Notation). In fact, if you make an API call, the result you get back is most likely a dictionary parsed from a JSON response!
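# For example, parsing a JSON string (like the body of an API response) gives you a plain Python dictionary. The response content below is made up purely for illustration:
# +
import json

json_response = '{"id": 7, "name": "Matt", "courses": ["Python", "Statistics"]}'
parsed = json.loads(json_response)

print(type(parsed))
print(parsed["courses"])
# -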
# ## Part 2: Declaring Dictionaries
# ### Simple Dictionary Storing Student Names & ID Numbers
# +
student_info = {1: "Matt", 2: "Alex", 3: "Mike", 4: "Lisa"}
print(student_info)
# -
# ### Complex Dictionary Storing Subdictionary of Student Information
#
# Notice how each key can have its own independent data structure!
# +
complex_info = {1: {"name": "Matt", "age": 23, "school": "Simon"},
2: {"name": "Alex", "age": 25, "school": "NYU"},
3: {"name": "Lisa", "age": 20, "school": "NYU", "notes": "Habitually truant."},
4: {"name": "Stacy", "age": 21, "school": "Unknown", "notes": "Excellent student!"}}
print(complex_info)
# -
# ## Part 3: Accessing Entries of Dictionary by Key
#
# Dictionaries in Python aren't accessed by a numeric position like lists are; we access values through their keys! Keys have to be unique.
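# For example, if you accidentally repeat a key when building a dictionary, only the last value for that key is kept (a tiny illustration):
# +
duplicate_keys = {1: "Matt", 2: "Alex", 1: "Mike"}
print(duplicate_keys)
# -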
# ### Accessing Entries of Simple Dictionary
#
# Entries can be accessed generally by `dictionary[key]`
# +
student_info = {1: "Matt", 2: "Alex", 3: "Mike", 4: "Lisa"}
print(student_info[2])
# -
# ### Accessing Entries of Complex Dictionary
#
# #### Highest Level in Dictionary
#
# `dictionary[key]` returns the value associated with the key!
# +
complex_info = {1: {"name": "Matt", "age": 23, "school": "Simon"},
2: {"name": "Alex", "age": 25, "school": "NYU"},
3: {"name": "Lisa", "age": 20, "school": "NYU", "notes": "Habitually truant."},
4: {"name": "Stacy", "age": 21, "school": "Unknown", "notes": "Excellent student!"}}
print(complex_info[4])
# -
# #### Accessing Information Further in Dictionary
#
# To get a subset of the information, use the syntax `dictionary[key1][key2]`
#
# In the following example, we're grabbing student with `id 4` and the value in the following level of `notes`
#
# Remember that `4` is a key, not an index! Our keys can be anything as long as it's unique!
# +
complex_info = {1: {"name": "Matt", "age": 23, "school": "Simon"},
2: {"name": "Alex", "age": 25, "school": "NYU"},
3: {"name": "Lisa", "age": 20, "school": "NYU", "notes": "Habitually truant."},
4: {"name": "Stacy", "age": 21, "school": "Unknown", "notes": "Excellent student!"}}
print(complex_info[4]["notes"])
# -
# ## Part 4: Some Essential Dictionary Methods
#
# | Method | Description |
# | --- | --- |
# | `.keys()` | Returns list of keys |
# | `.pop()` | Removes entry of key provided |
# | `.update()` | Updates value of given key |
# ### Fetch Keys in Dictionary Using `.keys()`
# +
student_info = {1: "Matt", 2: "Alex", 3: "Mike", 4: "Lisa"}
print(student_info.keys())
# -
# ### Remove Specified Entry Using `.pop()`
#
# Follows general syntax `dictionary.pop(key)`
# +
student_info = {1: "Matt", 2: "Alex", 3: "Mike", 4: "Lisa"}
student_info.pop(3)
print(student_info)
# -
# ### Update Value in Key with `.update()`
#
# Follows general syntax `dictionary.update({key: value})`
# +
student_info = {1: "Matt", 2: "Alex", 3: "Mike", 4: "Lisa"}
student_info.update({2: "Alexandria"})
print(student_info)
# -
# ### Create New Key-Value Pair with `.update()`
#
# Follows general syntax `dictionary.update({key: value})`
# +
student_info = {1: "Matt", 2: "Alex", 3: "Mike", 4: "Lisa"}
student_info.update({5: "John"})
print(student_info)
# -
# ## There is a lot more to dictionaries! They are very powerful!
#
# However, this is just to serve as an introduction!
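# As a small teaser, here are a few more everyday tools: `.get()` for lookups with a default instead of a `KeyError`, `.items()` for looping over key-value pairs, and dictionary comprehensions for building new dictionaries.
# +
student_info = {1: "Matt", 2: "Alex", 3: "Mike", 4: "Lisa"}

# Safe lookup: returns the default instead of raising a KeyError
print(student_info.get(99, "Unknown student"))

# Loop over key-value pairs
for student_id, name in student_info.items():
    print(student_id, name)

# Dictionary comprehension: build a new dictionary from an existing one
name_lengths = {name: len(name) for name in student_info.values()}
print(name_lengths)
# -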
| modules/module-02/module2-dictionaries.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### install & docs
pip install qgrid
# +
# solving problems: https://github.com/quantopian/qgrid/issues/253
# jupyter nbextension enable --py --sys-prefix qgrid
# jupyter nbextension enable --py --sys-prefix widgetsnbextension
"""
qgrid method 'on':
https://qgrid.readthedocs.io/en/v1.1.0/#qgrid.QgridWidget.on
events:
[
'cell_edited',
'selection_changed',
'viewport_changed',
'row_added',
'row_removed',
'filter_dropdown_shown',
'filter_changed',
'sort_changed',
'text_filter_viewport_changed',
'json_updated'
]
"""
# -
# ### window & df table setup
# +
import pandas as pd
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:100% !important; }</style>"))
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('max_colwidth', 100)
# -
# ### qgrid with 'on' method example
# +
import qgrid
import pandas as pd
from traitlets import All
def make_action(row, new_state):
print('[*] some action with row data')
return None
def handle_column_update(event, qgrid_widget):
    # the handler is registered for 'cell_edited' events only; here we additionally react only to edits in the 'thing' column
if event['column'] == 'thing':
print(event)
changed_df = qgrid_widget.get_changed_df()
row = changed_df.iloc[event['index']]
new_state = event['new']
make_action(row, new_state)
# think of making some changes in qgrid programmatically
# after action was taken e.g. changing cell bg color
# to signalize to the user
return None
if __name__ == "__main__":
# ********* create or read data frame *********
data_set = [
['some', 'thing', 'here'],
[11, True, 'text'],
[22, False, 'new'],
[33, True, 'next'],
[44, False, 'line'],
[55, False, 'here'],
[66, True, 'text'],
]
headers = data_set.pop(0)
df = pd.DataFrame(data_set, columns=headers)
# ********* make qgrid object with event handler *********
widget = qgrid.show_grid(df)
widget.on('cell_edited', handle_column_update)
# ********* display qgrid *********
display(widget)
# -
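# Since `All` from `traitlets` is imported above, a handler can also be attached to every event type at once. Below is a minimal sketch of that (the exact event payload may vary between qgrid versions):
# +
def handle_any_event(event, qgrid_widget):
    # Each event is a dict whose 'name' key holds the event type, e.g. 'cell_edited' or 'sort_changed'
    print(event['name'])

widget_all_events = qgrid.show_grid(df)
widget_all_events.on(All, handle_any_event)
display(widget_all_events)
# -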
| qgrid_on_method/qgrid_on_method.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + pycharm={"name": "#%%\n"}
import os
import numpy as np
import pandas as pd
from typing import List, Set, Dict, Optional
from kbc_pul.project_info import data_dir
from artificial_bias_experiments.evaluation.confidence_comparison.df_utils import ColumnNamesInfo
from kbc_pul.experiments_utils.load_df_ground_truth import get_df_ground_truth
from artificial_bias_experiments.noisy_prop_scores.scar.experiment_info import \
NoisyPropScoresSCARExperimentInfo
from artificial_bias_experiments.noisy_prop_scores.scar.image_generation.load_rule_wrappers import \
NoisyPropScoresDataFrames, load_noisy_prop_scores_scar_rule_wrapper_dfs
from artificial_bias_experiments.noisy_prop_scores.scar.noisy_prop_scores_scar_file_naming import \
NoisyPropScoresSCARFileNamer
from kbc_pul.project_info import project_dir as kbc_e_metrics_project_dir
from kbc_pul.confidence_naming import ConfidenceEnum
from pylo.language.lp import Clause as PyloClause
# -
# # Noisy SCAR fancy table generation
# + pycharm={"name": "#%%\n"}
dataset_name: str = "yago3_10"
true_label_frequency_list = [0.3, .7]
# true_prop_scores = PropScoresTwoSARGroups(
# in_filter=true_prop_score_in_filter,
# other=true_prop_score_other
# )
available_label_frequency_list: List[float] = [0.1, 0.2, .3, .4, .5, .6, .7, .8, .9, 1]
filename_ground_truth_dataset: str = os.path.join(
data_dir, dataset_name, 'cleaned_csv', 'train.csv'
)
separator_ground_truth_dataset = "\t"
df_ground_truth: pd.DataFrame = get_df_ground_truth(filename_ground_truth_dataset, separator_ground_truth_dataset)
target_relation_list: List[str] = list(sorted(df_ground_truth["Rel"].unique()))
is_pca_version: bool = False
# + pycharm={"name": "#%%\n"}
df_rule_wrapper_list: List[pd.DataFrame] = []
for true_label_frequency in true_label_frequency_list:
for target_relation in target_relation_list:
try:
experiment_info=NoisyPropScoresSCARExperimentInfo(
dataset_name=dataset_name,
target_relation=target_relation,
true_label_frequency=true_label_frequency,
available_label_frequency_list=available_label_frequency_list,
is_pca_version=is_pca_version
)
root_dir_specific_experiment_settings: str = NoisyPropScoresSCARFileNamer.get_dir_experiment_specific(
experiment_info=experiment_info
)
noisy_prop_scores_dfs: NoisyPropScoresDataFrames = load_noisy_prop_scores_scar_rule_wrapper_dfs(
experiment_dir=root_dir_specific_experiment_settings
)
df_rule_wrappers: pd.DataFrame = noisy_prop_scores_dfs.rule_wrappers
df_rule_wrappers["target_relation"] = pd.Series(
data=[target_relation] * len(df_rule_wrappers), index=df_rule_wrappers.index
)
df_noisy_prop_scores_to_metrics_map: pd.DataFrame = noisy_prop_scores_dfs.noisy_prop_scores_to_pu_metrics_map
df_rule_wrappers:pd.DataFrame = df_rule_wrappers.merge(
df_noisy_prop_scores_to_metrics_map,
left_on=["Rule", "random_trial_index", "true_label_frequency"],
right_on=["Rule", "random_trial_index", "true_label_frequency"],
)
df_rule_wrapper_list.append(df_rule_wrappers)
except Exception as err:
print(target_relation)
print(err)
df_rule_wrappers_all_targets: pd.DataFrame = pd.concat(df_rule_wrapper_list, axis=0)
df_rule_wrappers_all_targets.head()
# + pycharm={"name": "#%%\n"}
df_rule_wrappers_all_targets.columns
# + pycharm={"name": "#%%\n"}
all_columns: List[str] = [
'target_relation',
'true_label_frequency',
'noisy_label_frequency',
'Rule',
'random_trial_index',
'Nb supported predictions',
'Body size',
'$conf$',
'CWA',
'$f^{*} \cdot conf$ (S)', 'PCA(S)', '$f^{*} \cdot conf$ (O)', 'PCA(O)',
'IPW', 'IPW-PCA(S)', 'IPW-PCA(O)', 'ICW'
]
column_names_logistics = [
'target_relation',
'true_label_frequency',
'noisy_label_frequency',
'Rule',
'random_trial_index',
]
columns_to_select = column_names_logistics + [
col for col in set([conf.value for conf in ConfidenceEnum])
]
# -
# ## Drop body size & nb of predictions + reorder
# + pycharm={"name": "#%%\n"}
df_rule_wrappers_all_targets = df_rule_wrappers_all_targets[columns_to_select]
df_rule_wrappers_all_targets.head()
# -
# ## 2. Only keep a subset of rules
# ### 2.1. Only keep the non-recursive rules; drop recursive rules
# + pycharm={"name": "#%%\n"}
from kbc_pul.data_structures.rule_wrapper import get_pylo_rule_from_string, is_pylo_rule_recursive
def is_rule_recursive(rule_string: str) -> bool:
pylo_rule: PyloClause = get_pylo_rule_from_string(rule_string)
is_rule_recursive = is_pylo_rule_recursive(pylo_rule)
return is_rule_recursive
mask_recursive_rules = df_rule_wrappers_all_targets.apply(
lambda row: is_rule_recursive(row["Rule"]),
axis=1
)
# + pycharm={"name": "#%%\n"}
print(len(df_rule_wrappers_all_targets))
df_rule_wrappers_all_targets: pd.DataFrame = df_rule_wrappers_all_targets[~mask_recursive_rules]
print(len(df_rule_wrappers_all_targets))
# -
# ### 2.2 Drop the Pair-positive columns (both directions)
# + pycharm={"name": "#%%\n"}
df_rule_wrappers_all_targets.drop(
[ConfidenceEnum.TRUE_CONF_BIAS_YS_ZERO_S_TO_O.value,
ConfidenceEnum.TRUE_CONF_BIAS_YS_ZERO_O_TO_S.value],
axis=1,
inplace=True,
errors='ignore'
)
df_rule_wrappers_all_targets.head()
# -
# ### 2.3 Drop the IPW-PCA columns (both directions)
# + pycharm={"name": "#%%\n"}
df_rule_wrappers_all_targets.drop(
[ConfidenceEnum.IPW_PCA_CONF_S_TO_O.value,
ConfidenceEnum.IPW_PCA_CONF_O_TO_S.value],
axis=1,
inplace=True,
errors='ignore'
)
df_rule_wrappers_all_targets.head()
# -
# ### 2.4 Drop the ICW column
# + pycharm={"name": "#%%\n"}
df_rule_wrappers_all_targets.drop(
[ConfidenceEnum.ICW_CONF.value],
axis=1,
inplace=True,
errors='ignore'
)
df_rule_wrappers_all_targets.head()
# -
# ### 2.5 Only use rules with 10 trials
# + pycharm={"name": "#%%\n"}
df_count_trials: pd.DataFrame = df_rule_wrappers_all_targets[
[
"target_relation",
'true_label_frequency',
'noisy_label_frequency',
"Rule",
"random_trial_index"
]
].groupby(
[
"target_relation",
'true_label_frequency',
'noisy_label_frequency',
"Rule",
]
).count().reset_index()
# + pycharm={"name": "#%%\n"}
df_less_than_ten_trials: pd.DataFrame = df_count_trials[df_count_trials["random_trial_index"].values != 10]
df_less_than_ten_trials
# + pycharm={"name": "#%%\n"}
df_less_than_ten_trials_filter_info = df_less_than_ten_trials[["target_relation", "Rule"]].drop_duplicates()
df_less_than_ten_trials_filter_info.head()
# + pycharm={"name": "#%%\n"}
for row_index, row in df_less_than_ten_trials_filter_info.iterrows():
target_relation = row["target_relation"]
rule = row["Rule"]
df_rule_wrappers_all_targets = df_rule_wrappers_all_targets[
~(
(df_rule_wrappers_all_targets["target_relation"] == target_relation)
&
(df_rule_wrappers_all_targets["Rule"]==rule)
)
]
df_rule_wrappers_all_targets.head()
# + pycharm={"name": "#%%\n"}
# -
# **Now, we have the full dataframe**
#
# ****
# ## Calculate $[conf(R) - \widehat{conf}(R)]^2$
# + pycharm={"name": "#%%\n"}
true_conf: ConfidenceEnum = ConfidenceEnum.TRUE_CONF
conf_estimators_list: List[ConfidenceEnum] = [
ConfidenceEnum.CWA_CONF,
# ConfidenceEnum.ICW_CONF,
ConfidenceEnum.PCA_CONF_S_TO_O,
ConfidenceEnum.PCA_CONF_O_TO_S,
ConfidenceEnum.IPW_CONF,
]
all_confs_list: List[ConfidenceEnum] = [ConfidenceEnum.TRUE_CONF ] + conf_estimators_list
column_names_all_confs: List[str] = [
conf.get_name()
for conf in all_confs_list
]
# + pycharm={"name": "#%%\n"}
df_rule_wrappers_all_targets = df_rule_wrappers_all_targets[
column_names_logistics + column_names_all_confs
]
df_rule_wrappers_all_targets.head()
# + pycharm={"name": "#%%\n"}
df_conf_estimators_true_other = df_rule_wrappers_all_targets[
df_rule_wrappers_all_targets["true_label_frequency"]
== df_rule_wrappers_all_targets["noisy_label_frequency"]
]
df_conf_estimators_true_other.head()
# + pycharm={"name": "#%%\n"}
column_names_info = ColumnNamesInfo(
true_conf=true_conf,
column_name_true_conf=true_conf.get_name(),
conf_estimators=conf_estimators_list,
column_names_conf_estimators=[
col.get_name()
for col in conf_estimators_list
],
column_names_logistics=column_names_logistics
)
# + pycharm={"name": "#%%\n"}
def get_df_rulewise_squared_diffs_between_true_conf_and_conf_estimator(
df_rule_wrappers: pd.DataFrame,
column_names_info: ColumnNamesInfo
) -> pd.DataFrame:
df_rulewise_diffs_between_true_conf_and_conf_estimator: pd.DataFrame = df_rule_wrappers[
column_names_info.column_names_logistics
]
col_name_estimator: str
for col_name_estimator in column_names_info.column_names_conf_estimators:
df_rulewise_diffs_between_true_conf_and_conf_estimator \
= df_rulewise_diffs_between_true_conf_and_conf_estimator.assign(
**{
col_name_estimator: (
(df_rule_wrappers[column_names_info.column_name_true_conf]
- df_rule_wrappers[col_name_estimator]) ** 2
)
}
)
return df_rulewise_diffs_between_true_conf_and_conf_estimator
df_conf_squared_errors: pd.DataFrame = get_df_rulewise_squared_diffs_between_true_conf_and_conf_estimator(
df_rule_wrappers=df_rule_wrappers_all_targets,
column_names_info = column_names_info
)
df_conf_squared_errors.head()
# + [markdown] pycharm={"name": "#%% md\n"}
# ## AVERAGE the PCA(S) and PCA(O)
# + pycharm={"name": "#%%\n"}
df_conf_squared_errors.columns
# + pycharm={"name": "#%%\n"}
df_conf_squared_errors["PCA"] = (
(
df_conf_squared_errors[ConfidenceEnum.PCA_CONF_S_TO_O.value]
+
df_conf_squared_errors[ConfidenceEnum.PCA_CONF_O_TO_S.value]
) / 2
)
df_conf_squared_errors.head()
# + pycharm={"name": "#%%\n"}
df_conf_squared_errors = df_conf_squared_errors.drop(
columns=[
ConfidenceEnum.PCA_CONF_S_TO_O.value,
ConfidenceEnum.PCA_CONF_O_TO_S.value
],
axis=1,
errors='ignore'
)
df_conf_squared_errors.head()
# + [markdown] pycharm={"name": "#%% md\n"}
# # Now start averaging
# + pycharm={"name": "#%%\n"}
df_conf_squared_errors_avg_over_trials: pd.DataFrame = df_conf_squared_errors.groupby(
by=["target_relation", 'true_label_frequency', "noisy_label_frequency", "Rule"],
sort=True,
as_index=False
).mean()
df_conf_squared_errors_avg_over_trials.head()
# + pycharm={"name": "#%%\n"}
df_conf_squared_errors_avg_over_trials_and_rules: pd.DataFrame = df_conf_squared_errors_avg_over_trials.groupby(
by=["target_relation", 'true_label_frequency', "noisy_label_frequency",],
sort=True,
as_index=False
).mean()
df_conf_squared_errors_avg_over_trials_and_rules.head()
# + pycharm={"name": "#%%\n"}
len(df_conf_squared_errors_avg_over_trials_and_rules)
# -
# ## Subset of noisy_other
# + pycharm={"name": "#%%\n"}
first_true_label_freq_to_include = 0.3
second_true_label_freq_to_include = 0.7
true_label_frequencies_set: Set[float] = {
first_true_label_freq_to_include, second_true_label_freq_to_include,
}
true_label_frequency_to_estimate_map: Dict[float, Set[float]] = dict()
label_frequency_est_diff: float = 0.1
label_frequencies_to_keep: Set[float] = set(true_label_frequencies_set)
for true_label_freq in true_label_frequencies_set:
true_label_frequency_to_estimate_map[true_label_freq] = {
round(true_label_freq - label_frequency_est_diff, 1),
round(true_label_freq + label_frequency_est_diff, 1)
}
label_frequencies_to_keep.update(true_label_frequency_to_estimate_map[true_label_freq])
# + pycharm={"name": "#%%\n"}
df_conf_errors_avg_over_trials_and_rules_subset = df_conf_squared_errors_avg_over_trials_and_rules[
df_conf_squared_errors_avg_over_trials_and_rules["noisy_label_frequency"].isin(label_frequencies_to_keep)
]
df_conf_errors_avg_over_trials_and_rules_subset.head()
# + pycharm={"name": "#%%\n"}
len(df_conf_errors_avg_over_trials_and_rules_subset)
# -
# ## Count the rules per $p$
# + pycharm={"name": "#%%\n"}
df_n_rules_per_target = df_rule_wrappers_all_targets[["target_relation", "Rule"]].groupby(
by=['target_relation'],
# sort=True,
# as_index=False
)["Rule"].nunique().to_frame().reset_index().rename(
columns={"Rule" : "# rules"}
)
df_n_rules_per_target.head()
# -
# ****
# # Format pretty table
#
# Goal:
# * put the smallest value per row in bold (see the sketch further below)
# * per target: mean_value 0.3 / 0.4
# + pycharm={"name": "#%%\n"}
true_label_freq_to_noisy_to_df_map: Dict[float, Dict[float, pd.DataFrame]] = dict()
for true_label_freq in true_label_frequencies_set:
df_true_tmp: pd.DataFrame = df_conf_errors_avg_over_trials_and_rules_subset[
df_conf_errors_avg_over_trials_and_rules_subset["true_label_frequency"] == true_label_freq
]
noisy_label_freq_to_df_map = dict()
true_label_freq_to_noisy_to_df_map[true_label_freq] = noisy_label_freq_to_df_map
df_true_and_noisy_tmp = df_true_tmp[
df_true_tmp["noisy_label_frequency"] == true_label_freq
]
noisy_label_freq_to_df_map[true_label_freq] = df_true_and_noisy_tmp[
[col for col in df_true_and_noisy_tmp.columns if col != "noisy_label_frequency" and col != "true_label_frequency"]
]
for noisy_label_freq in true_label_frequency_to_estimate_map[true_label_freq]:
df_true_and_noisy_tmp = df_true_tmp[
df_true_tmp["noisy_label_frequency"] == noisy_label_freq
]
noisy_label_freq_to_df_map[noisy_label_freq] = df_true_and_noisy_tmp[
[col for col in df_true_and_noisy_tmp.columns if col != "noisy_label_frequency" and col != "true_label_frequency"]
]
true_label_freq_to_noisy_to_df_map[first_true_label_freq_to_include][0.2].head()
# + pycharm={"name": "#%%\n"}
from typing import Iterator
true_label_freq_to_df_map = dict()
label_freq_estimators: Iterator[float]
for true_label_freq in true_label_frequencies_set:
noisy_to_df_map: Dict[float, pd.DataFrame] = true_label_freq_to_noisy_to_df_map[true_label_freq]
df_true_label_freq: pd.DataFrame = noisy_to_df_map[true_label_freq]
lower_est: float = round(true_label_freq - label_frequency_est_diff, 1)
higher_est: float = round(true_label_freq + label_frequency_est_diff, 1)
df_lower: pd.DataFrame = noisy_to_df_map[lower_est][
['target_relation', ConfidenceEnum.IPW_CONF.value]
].rename(
columns={
ConfidenceEnum.IPW_CONF.value: f"{ConfidenceEnum.IPW_CONF.value}_lower"
}
)
df_true_label_freq = pd.merge(
left=df_true_label_freq,
right=df_lower,
on="target_relation"
)
df_higher = noisy_to_df_map[higher_est][
['target_relation', ConfidenceEnum.IPW_CONF.value]
].rename(
columns={
ConfidenceEnum.IPW_CONF.value: f"{ConfidenceEnum.IPW_CONF.value}_higher"
}
)
df_true_label_freq = pd.merge(
left=df_true_label_freq,
right=df_higher,
on="target_relation"
)
true_label_freq_to_df_map[true_label_freq] = df_true_label_freq
true_label_freq_to_df_map[0.3].head()
# + pycharm={"name": "#%%\n"}
for key, df in true_label_freq_to_df_map.items():
true_label_freq_to_df_map[key] = df.drop(
columns=["random_trial_index"],
axis=1,
errors='ignore'
)
# + pycharm={"name": "#%%\n"}
df_one_row_per_target = pd.merge(
left=true_label_freq_to_df_map[first_true_label_freq_to_include],
right=true_label_freq_to_df_map[second_true_label_freq_to_include],
on="target_relation",
suffixes=(f"_{first_true_label_freq_to_include}", f"_{second_true_label_freq_to_include}")
)
df_one_row_per_target.head()
# -
# ## What is the smallest value?
# + pycharm={"name": "#%%\n"}
all_values: np.ndarray = df_one_row_per_target[
[ col
for col in df_one_row_per_target.columns
if col != "target_relation"
]
].values
min_val = np.amin(all_values)
min_val
# + pycharm={"name": "#%%\n"}
min_val * 10000
# + pycharm={"name": "#%%\n"}
max_val = np.amax(all_values)
max_val
# + pycharm={"name": "#%%\n"}
max_val * 10000
# + pycharm={"name": "#%%\n"}
df_one_row_per_target.head() * 10000
# + pycharm={"name": "#%%\n"}
df_one_row_per_target.dtypes
# + pycharm={"name": "#%%\n"}
exponent = 4
multiplication_factor = 10 ** exponent
multiplication_factor
# + pycharm={"name": "#%%\n"}
df_one_row_per_target[
df_one_row_per_target.select_dtypes(include=['number']).columns
] *= multiplication_factor
df_one_row_per_target
# -
# ## Output files definitions
# + pycharm={"name": "#%%\n"}
df_one_row_per_target.head()
# + pycharm={"name": "#%%\n"}
dir_latex_table: str = os.path.join(
kbc_e_metrics_project_dir,
"paper_latex_tables",
'known_prop_scores',
'scar'
)
if not os.path.exists(dir_latex_table):
os.makedirs(dir_latex_table)
filename_tsv_rule_stats = os.path.join(
dir_latex_table,
"conf_error_stats_v4.tsv"
)
filename_tsv_single_row_summary = os.path.join(
dir_latex_table,
"noisy_scar_single_row_summary.tsv"
)
print(filename_tsv_single_row_summary)
# -
# ## Create single-row summary
# + pycharm={"name": "#%%\n"}
df_one_row_in_total: pd.Series = df_one_row_per_target.mean(
)
df_one_row_in_total
# + pycharm={"name": "#%%\n"}
df_n_rules_per_target.head()
# + pycharm={"name": "#%%\n"}
df_one_row_in_total["# rules"] = int(df_n_rules_per_target["# rules"].sum())
df_one_row_in_total
# + pycharm={"name": "#%%\n"}
type(df_one_row_in_total)
# + pycharm={"name": "#%%\n"}
df_one_row_in_total.to_csv(
filename_tsv_single_row_summary,
sep = "\t",
header=None
)
# -
# ### Now create a pretty table
# + pycharm={"name": "#%%\n"}
column_names_info.column_names_conf_estimators
# + pycharm={"name": "#%%\n"}
simplified_column_names_conf_estimators = ['CWA', 'PCA', 'IPW',]
# + pycharm={"name": "#%%\n"}
multi_index_columns = [
("$p$", ""),
("\# R", "")
]
from itertools import product
# conf_upper_cols = column_names_info.column_names_conf_estimators + [
# f"{ConfidenceEnum.IPW_CONF.value} " + "($\Delta c=-" + f"{label_frequency_est_diff}" + "$)",
# f"{ConfidenceEnum.IPW_CONF.value} " + "($\Delta c=" + f"{label_frequency_est_diff}" + "$)",
# ]
conf_upper_cols = simplified_column_names_conf_estimators + [
f"{ConfidenceEnum.IPW_CONF.value} " + "($-\Delta$)",
f"{ConfidenceEnum.IPW_CONF.value} " + "($+\Delta$)",
]
c_subcols = ["$c=0.3$", "$c=0.7$"]
multi_index_columns = multi_index_columns + list(product(c_subcols, conf_upper_cols))
# multi_index_list
multi_index_columns = pd.MultiIndex.from_tuples(multi_index_columns)
multi_index_columns
# + pycharm={"name": "#%%\n"}
rule_counter: int = 1
rule_str_to_rule_id_map: Dict[str, int] = {}
float_precision: int = 1
col_name_conf_estimator: str
pretty_rows: List[List] = []
row_index: int
row: pd.Series
# columns_to_use = [
# "$p$",
# "\# rules"
# ] + column_names_info.column_names_conf_estimators + [
# f"{ConfidenceEnum.IPW_CONF.value} " + "($\Delta c=-" + f"{label_frequency_est_diff}" + "$)",
# f"{ConfidenceEnum.IPW_CONF.value} " + "($\Delta c=" + f"{label_frequency_est_diff}" + "$)",
# ]
LabelFreq = float
def get_dict_with_smallest_estimator_per_label_freq(row: pd.Series) -> Dict[LabelFreq, Set[str]]:
# Find estimator with smallest mean value for label frequency###################
label_freq_to_set_of_smallest_est_map: Dict[LabelFreq, Set[str]] = dict()
for label_freq in [first_true_label_freq_to_include, second_true_label_freq_to_include]:
o_set_of_col_names_with_min_value: Optional[Set[str]] = None
o_current_smallest_value: Optional[float] = None
# Find smallest squared error
for col_name_conf_estimator in simplified_column_names_conf_estimators:
current_val: float = row[f"{col_name_conf_estimator}_{label_freq}"]
# print(current_val)
if o_set_of_col_names_with_min_value is None or o_current_smallest_value > current_val:
o_set_of_col_names_with_min_value = {col_name_conf_estimator}
o_current_smallest_value = current_val
            elif current_val == o_current_smallest_value:
                o_set_of_col_names_with_min_value.add(col_name_conf_estimator)
label_freq_to_set_of_smallest_est_map[label_freq] = o_set_of_col_names_with_min_value
return label_freq_to_set_of_smallest_est_map
def format_value_depending_on_whether_it_is_smallest(
value: float,
is_smallest: bool,
float_precision: float,
use_si: bool = False
)-> str:
if is_smallest:
if not use_si:
formatted_value = "$\\bm{" + f"{value:0.{float_precision}f}" + "}$"
# formatted_value = "$\\bm{" + f"{value:0.{float_precision}e}" + "}$"
else:
formatted_value = "\\textbf{$" + f"\\num[round-precision={float_precision},round-mode=figures,scientific-notation=true]"+\
"{"+ str(value) + "}"+ "$}"
else:
if not use_si:
formatted_value = f"${value:0.{float_precision}f}$"
# formatted_value = f"${value:0.{float_precision}e}$"
else:
formatted_value = "$" + f"\\num[round-precision={float_precision},round-mode=figures,scientific-notation=true]"+\
"{"+ str(value) + "}"+ "$"
return formatted_value
estimator_columns = simplified_column_names_conf_estimators + [
f"{ConfidenceEnum.IPW_CONF.value}_lower",
f"{ConfidenceEnum.IPW_CONF.value}_higher"
]
# For each row, i.e. for each target relation
for row_index, row in df_one_row_per_target.iterrows():
# Find estimator with smallest mean value for label frequency###################
label_freq_to_set_of_smallest_est_map: Dict[float, Set[str]] = get_dict_with_smallest_estimator_per_label_freq(
row=row
)
##################################################################################
# Construct the new row
######################
target_relation = row["target_relation"]
nb_of_rules = df_n_rules_per_target[df_n_rules_per_target['target_relation'] == target_relation][
"# rules"
].iloc[0]
new_row: List[str] = [
target_relation,
nb_of_rules
]
# For each Confidence estimator, get the value at c 0.3 and 0.7
# for col_name_conf_estimator in estimator_columns:
# mean_val_03:float = row[f"{col_name_conf_estimator}_0.3"]
# mean_val_07:float = row[f"{col_name_conf_estimator}_0.7"]
#
# new_row_value = (
# format_value_depending_on_whether_it_is_smallest(
# value=mean_val_03,
# is_smallest=col_name_conf_estimator == label_freq_to_smallest_est_map[0.3],
# float_precision=float_precision
# )
# + " / "
# + format_value_depending_on_whether_it_is_smallest(
# value=mean_val_07,
# is_smallest=col_name_conf_estimator == label_freq_to_smallest_est_map[0.7],
# float_precision=float_precision
# )
# )
# new_row.append(new_row_value)
for col_name_conf_estimator in estimator_columns:
mean_val_03:float = row[f"{col_name_conf_estimator}_0.3"]
new_row_value_03 = format_value_depending_on_whether_it_is_smallest(
value=mean_val_03,
is_smallest=(
col_name_conf_estimator in label_freq_to_set_of_smallest_est_map[first_true_label_freq_to_include]
),
float_precision=float_precision
)
new_row.append(new_row_value_03)
for col_name_conf_estimator in estimator_columns:
mean_val_07:float = row[f"{col_name_conf_estimator}_0.7"]
new_row_value_07 = format_value_depending_on_whether_it_is_smallest(
value=mean_val_07,
is_smallest=(
col_name_conf_estimator in label_freq_to_set_of_smallest_est_map[second_true_label_freq_to_include]
),
float_precision=float_precision
)
new_row.append(new_row_value_07)
pretty_rows.append(new_row)
df_pretty: pd.DataFrame = pd.DataFrame(
data=pretty_rows,
columns=multi_index_columns
)
df_pretty.head()
# + pycharm={"name": "#%%\n"}
df_pretty: pd.DataFrame = df_pretty.sort_values(
by=["$p$"]
)
df_pretty.head()
# -
# # To file
# + pycharm={"name": "#%%\n"}
# dir_latex_table: str = os.path.join(
# kbc_e_metrics_project_dir,
# "paper_latex_tables",
# 'known_prop_scores',
# 'scar'
# )
#
# if not os.path.exists(dir_latex_table):
# os.makedirs(dir_latex_table)
filename_latex_table: str = os.path.join(
dir_latex_table,
"confidence-error-table-scar-rerun-agg-per-p.tex"
)
filename_tsv_table: str = os.path.join(
dir_latex_table,
"confidence-error-table-scar-rerun-agg-per-p.tsv"
)
with open(filename_latex_table, "w") as latex_ofile:
with pd.option_context("max_colwidth", 1000):
latex_ofile.write(
df_pretty.to_latex(
                column_format="lr|lllll|lllll",
index=False,
float_format="{:0.3f}".format,
escape=False,
# caption="$[widehat{conf}-conf]^2$ for SCAR. "
# "std=standard confidence, "
# "PCA (S) = PCA confidence with $s$ as domain, "
# "PCA (O) = PCA confidence with $o$ as domain, "
# "IPW = PCA confidence with $\hat{e}=e$, "
# "IPW +/- $" + f"{label_frequency_est_diff:0.1}" + "$ = IPW confidence with $\hat{e}=e+/-" + f"{label_frequency_est_diff:0.1}" + "$."
)
)
with open(filename_tsv_table, "w") as tsv_ofile:
tsv_ofile.write(df_pretty.to_csv(
index=False,
sep="\t"
))
print(filename_latex_table)
# + pycharm={"name": "#%%\n"}
# + pycharm={"name": "#%%\n"}
| notebooks/artificial_bias_experiments/noisy_prop_scores/scar/table/noisy_prop_scores_scar.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python (pytorchenv)
# language: python
# name: pytorch
# ---
### This is a demo of how to run inference with the model on a new line of text
# +
#General Imports
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
from torch.utils.data import DataLoader
import random
import matplotlib.pyplot as plt
import ctcdecode
from tensorboardX import SummaryWriter
#Load fake, non handwritten generator
from fake_texts.pytorch_dataset_fake_2 import Dataset
#Import the loss from baidu
from torch_baidu_ctc import CTCLoss
#Import the model
from fully_conv_model import cnn_attention_ocr
#Helper to count params
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
from evaluation import wer_eval,preds_to_integer,show,my_collate,AverageMeter
ds=Dataset()
batch_size=1
width=-1
alignment=1
elem=ds[0]
plt.figure(figsize=(10,10))
plt.imshow(elem[0][0,:,:,:])
plt.show()
print("".join([ds.decode_dict[x] for x in elem[1][0:elem[3]]]))
# -
'''plt.figure(figsize=(10,10))
elem=ds[0]
plt.imshow(elem[0][0,:,:,:])
plt.show()
print("".join([ds.decode_dict[j] for j in elem[1]] ))
ds.decode_dict'''
###Set up model.
cnn=cnn_attention_ocr(model_dim=128,nclasses=67,n_layers=8)
cnn=cnn.eval().cpu()#.cuda()
cnn.load_state_dict(torch.load("400ksteps_augment_new_gen_e15.pt"))
#count_parameters(cnn)
from PIL import Image
import numpy as np
from glob import glob
input_folder="/home/leander/AI/data/test_seg/"
files=glob(input_folder+"*.png")
from skimage.color import gray2rgb
from skimage.transform import resize
import cv2
imglist=[]
for j in files:
    # load as grayscale, then convert to a 3-channel RGB array
    img = Image.open(j).convert('L')
    img=cv2.cvtColor(np.array(img),cv2.COLOR_GRAY2RGB)
    # scale pixel values to [0, 1]
    img=img/255
    # resize to height 32 while preserving the aspect ratio
    resize_shape=(32,int(32*img.shape[1]/img.shape[0]))
    img = resize(img,resize_shape,mode="constant")
    # add a batch dimension and reorder to NCHW for the model
    img=np.expand_dims(img,0)
    img=torch.tensor(img).cuda().float().permute((0,3,1,2))
    imglist.append(img)
# +
img = Image.open("/home/leander/Pictures/test.png").convert('L')
img=cv2.cvtColor(np.array(img),cv2.COLOR_GRAY2RGB)
#img=np.array(img)
#img=img.astype(float)
img=img/255
resize_shape=(32,int(32*img.shape[1]/img.shape[0]))
img = resize(img,resize_shape,mode="constant")
img=np.expand_dims(img,0)
img=torch.tensor(img).cuda().float().permute((0,3,1,2))
#imglist.append(img)
# +
#"".join(
# -
len(list(ds.decode_dict.values()))
import sys
sys.path.append("/home/leander/AI/repos/CTCDecoder/src")
from BestPath import ctcBestPath
from BeamSearch import ctcBeamSearch
example_image=img
#example_image=example_image[:,:,:,0:750]
show(example_image.detach().cpu()[0,:,:,:])
plt.show()
log_probs = cnn(example_image.cpu()).permute((2,0,1))[:,0,:]
#preds_to_integer(log_probs)
"".join([ds.decode_dict[j] for j in preds_to_integer(log_probs)])
mat = np.array([[0.4, 0, 0.6], [0.4, 0, 0.6]])
log_probs=log_probs[:,0:67]
log_probs=torch.nn.Softmax(dim=1)(log_probs)
log_probs=log_probs.cpu().detach().numpy()
"".join(list(ds.decode_dict.values())[:-1])
"".join(list(ds.dictionary.keys()))
mat=log_probs
maxT, maxC = mat.shape
label = ''
classes = "".join([" "]+list(ds.dictionary.keys()))
"".join([" "]+list(ds.dictionary.keys())+[" "])
ctcBeamSearch(log_probs,"".join([" "]+list(ds.dictionary.keys())+[" "]),None)
log_probs.shape
blankIdx = 0
lastMaxIdx = maxC
# +
lastMaxIdx = maxC  # init with an invalid label index
for t in range(maxT):
    # greedy best-path decoding: pick the most likely class at each time step
    maxIdx = np.argmax(mat[t, :])
    # emit a character only when the class changes and is not the CTC blank
    if maxIdx != lastMaxIdx and maxIdx != blankIdx:
        label += classes[maxIdx]
    lastMaxIdx = maxIdx
# -
label
classes[66]
label
ctcBestPath(log_probs,"".join(list(ds.decode_dict.values())))
log_probs = cnn(example_image.cpu()).permute((2,0,1))[:,0,:]
#preds_to_integer(log_probs)
"".join([ds.decode_dict[j] for j in preds_to_integer(log_probs)])
show(example_image.detach().cpu()[0,:,:,:])
plt.show()
log_probs = cnn(example_image).permute((2,0,1))[:,0,:]
#preds_to_integer(log_probs)
"".join([ds.decode_dict[j] for j in preds_to_integer(log_probs)])
| inference_demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="UKxnCTGBNAmz"
# ### Load and clean subtitles text
# + colab={"base_uri": "https://localhost:8080/"} id="-cddBQxOOikp" outputId="9ee8a33f-ac44-47ac-e113-39d701849ace"
import nltk
nltk.download('stopwords')
# + id="aFy9-jz7NAm0"
import glob
ROOT_PATH = "../data/tratamento-precoce-top-200"
data = glob.glob(ROOT_PATH + "/relatedVideoSubtitles/*.pt.txt" )
len(data)
# +
import text
subtitles = []
for f in data:
with open(f) as file:
cleaned = text.c(file.read())
subtitles.append(cleaned)
# + [markdown] id="EJZVjy95NAm2"
# ### Subtitle example
# + colab={"base_uri": "https://localhost:8080/"} id="NdVvLGAuNAm3" outputId="c0211fe4-b4e8-44db-dd0b-fc5a9b8c75a2"
print(subtitles[8])
# + [markdown] id="R9UvJkFVNAm7"
# ## Kmeans Clustering
# + id="nG1EUq2mNAm8"
import numpy as np
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
# + id="5HNUkPckNAm9"
tfidf = TfidfVectorizer(max_df=1.0, min_df=2, use_idf=True)
count = CountVectorizer(binary=True)
X_tfidf = tfidf.fit_transform(subtitles)
X_count = count.fit_transform(subtitles)
# + id="7pcIgSVMNAm9"
svd = TruncatedSVD(100)
normalizer = Normalizer(copy=False)
lsa = make_pipeline(svd, normalizer)
# + id="4xiMJoKIUec_"
L = lsa.fit_transform(X_tfidf)
# + id="T1FLU4a3PeGg"
from sklearn.cluster import KMeans
# + id="8RkMQjfpUY2h"
# elbow method: fit k-means for increasing k and record the inertia
x = []
y = []
for k in [2, 4, 8, 16, 32, 64, 128, 256, 512]:
    km = KMeans(n_clusters=k, init='k-means++')
    km.fit(L)
    x.append(k)
    y.append(km.inertia_)
# + colab={"base_uri": "https://localhost:8080/", "height": 282} id="WY7hVptDPpz0" outputId="dd06124e-7eb3-4de4-cc18-2c871a66ee1d"
import matplotlib.pyplot as plt
plt.plot(x, y)
# + colab={"base_uri": "https://localhost:8080/"} id="zDVyVnSjPwkj" outputId="0fc6143a-2597-4c15-a015-8634571faece"
k = 8
km = KMeans(n_clusters=k, init='k-means++')
km.fit(L)
# + id="mG3CBFS0P1z4"
original_space_centroids = svd.inverse_transform(km.cluster_centers_)
order_centroids = original_space_centroids.argsort()[:, ::-1]
# + colab={"base_uri": "https://localhost:8080/"} id="NXYLdD3qULGi" outputId="cb1d65b5-dbaf-46c4-839a-6f028d6142a2"
import pandas as pd
f = open(ROOT_PATH + "/notebooks/clustering.csv", "w")
terms = tfidf.get_feature_names()
for i in range(k):
print("Cluster %d:" % i, end='')
for ind in order_centroids[i, :10]:
print(' %s' % terms[ind], end='')
where = np.where(km.labels_ == i)[0]
print(len(where))
for d in where:
videoId = data[d].split("/")[-1].split(".")[-3]
print(videoId)
f.write(f"{videoId}, {i+1}\n")
#f.write(videoId + " ," + str(i))
print()
f.close()
# -
| notebooks/kmeans.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <img src="slide1.png" width="600" height="600">
# <img src="slide2.png" width="600" height="600">
# ## Learning Objectives
#
# - Learn all of the methods in pandas for data-frame manipulation
# - The dataset we use is Titanic dataset
# - Apply visualization to data-frame
# ### Lets make Pandas dataframe from titanic csv file
# +
import numpy as np
import pandas as pd
df = pd.read_csv('titanic.csv')
# -
# ### Lets look at the first 5 rows of dataframe
df.head()
df.shape
# ### Titanic Dataset Description
"""
VARIABLE DESCRIPTIONS:
survival Survival
(0 = No; 1 = Yes)
pclass Passenger Class
(1 = 1st; 2 = 2nd; 3 = 3rd)
name Name
sex Sex
age Age
sibsp Number of Siblings/Spouses Aboard
parch Number of Parents/Children Aboard
ticket Ticket Number
fare Passenger Fare
cabin Cabin
embarked Port of Embarkation
(C = Cherbourg; Q = Queenstown; S = Southampton)
"""
# ### Plot how many of the passengers were children, youth, middle age and old?
# +
import matplotlib.pyplot as plt
df['Age'].hist(bins=16)
plt.show()
# -
# ### How many of Age values are empty (or null)?
# how many of Age values are null
df['Age'].isna().sum()
# +
# Parch = number of parents or children on board
# SibSp = number of siblings or spouses
# -
# ### Create a new column, Gender: 0 when Sex is female, 1 when Sex is male
# create a new column, Gender: 0 when Sex is female, 1 when Sex is male
df['Gender'] = df['Sex'].map( {'female': 0, 'male': 1} ).astype(int)
df.head()
# ### We have one more column (check it)
df.shape
# ### Show the majority of Age range
df['Age'].plot.box()
# ### List all of the Ages that are not null
df['Age'].dropna().values
# ### Slice the dataframe for those whose Embarked section was 'C'
df[df['Embarked'] == 'C'].head()
# ### Plot the Age range for those whose Embarked was 'C'
# Activity:
# the age range of passengers whose Embarked was 'C'
df[df['Embarked'] == 'C']['Age'].hist(bins=16)
# ### Apply couple of Normal Distributions to Histogram obtained above
df[df['Embarked'] == 'C']['Age'].plot(kind='kde')
# ### Describe a specific column
df['Embarked'].describe()
# ### How many unique values does 'Embarked' have?
df['Embarked'].nunique()
# ### Count the different 'Embarked' values the dataframe has
df['Embarked'].value_counts().plot(kind='bar')
# ### Count the different 'Embarked' values the dataframe has and plot horizontaly
# +
df['Embarked'].value_counts().plot('barh').invert_yaxis()
# Check df['Embarked'].value_counts().plot('barh')
# -
# ### Another way to do the count and plot it
# +
import seaborn as sns
# Bar Chart Example #1 (Simple): Categorical Variables Showing Counts
sns.countplot(x="Embarked", palette="spring", data=df)
# -
df['Embarked'].value_counts()
df['Sex'].value_counts().to_json()
df['Sex'].value_counts().plot(kind='bar')
df['Sex'].value_counts().plot(kind='pie')
# ### Plot how many of the passengers were children, youth, middle-aged and old, based on their Sex, for those who 'Embarked' in section 'C'?
for i in df[df['Embarked'] == 'C'].groupby('Sex')['Age']:
print(i)
df[df['Embarked'] == 'C'].groupby('Sex')['Age'].hist(bins=16, alpha=0.5)
df[df['Embarked'] == 'C'].groupby('Sex')['Age'].plot(bins=16, kind='hist', legend=True, alpha=0.5)
df[df['Embarked'] == 'C'].groupby('Sex')['Age'].value_counts()
# +
# # import the pandas library
# import pandas as pd
# import numpy as np
# ipl_data = {'Team': ['Riders', 'Riders', 'Devils', 'Devils', 'Kings',
# 'kings', 'Kings', 'Kings', 'Riders', 'Royals', 'Royals', 'Riders'],
# 'Rank': [1, 2, 2, 3, 3,4 ,1 ,1,2 , 4,1,2],
# 'Year': [2014,2015,2014,2015,2014,2015,2016,2017,2016,2014,2015,2017],
# 'Points':[876,789,863,673,741,812,756,788,694,701,804,690]}
# df = pd.DataFrame(ipl_data)
# grouped = df.groupby('Year')
# df.groupby('Year')['Points'].agg(np.mean)
# https://www.tutorialspoint.com/python_pandas/python_pandas_groupby.htm
# -
# ### What is the average Age for female and male (based on sex) for those who have 'Embarked' on section 'C'?
df[df['Embarked'] == 'C'].groupby('Sex')['Age'].agg(np.mean)
# ### Another way we can do the above task
df[df['Embarked'] == 'C'].groupby('Sex')['Age'].apply(lambda x:np.mean(x))
# ### Which Age is the oldest for female and male (based on sex) for those who have 'Embarked' on section 'C'?
df[df['Embarked'] == 'C'].groupby('Sex')['Age'].agg(np.max)
# ### For different Ages, plot the Fare they have paid?
sns.regplot(x="Age", y="Fare", fit_reg=False, data=df)
df.plot.scatter(x="Age", y="Fare")
# ### Plot the percentage Survived for the two Sex groups based on the passenger class
sns.barplot(x="Sex", y="Survived", hue="Pclass", data=df)
# ### Plot how many male or female were in different Passenger classes
sns.countplot(x="Sex", hue="Pclass", data=df)
import seaborn as sns
sns.countplot(x="Sex", hue="Survived", data=df)
pd.crosstab(df['Sex'], df['Survived']).to_json()
# ### Verify values obtained for percentage
df[(df['Sex'] == 'female') & (df['Pclass'] == 1)]['Survived'].value_counts()
91/(91 + 3)
dict(df[(df['Sex'] == 'female') & (df['Pclass'] == 1)]['Survived'].value_counts())
# ### Stack plot of count based on Sex for different Passenger Class
df.groupby(['Sex'])['Pclass'].value_counts().unstack().plot(kind='bar',stacked=True)
# ### Stack plot of count based on Sex and Survival for different Passenger Class
df.groupby(['Sex', 'Survived'])['Pclass'].value_counts().unstack().plot(kind='bar',stacked=True)
# ### Sometimes it is hard to read values from a plot; what is the number of females and males in each Passenger Class?
# df.groupby(['Sex'])['Pclass'].value_counts().unstack()
# the above and crosstab are the same
pd.crosstab(df['Sex'], df['Pclass'])
pd.crosstab(df['Sex'], df['Survived'])
pd.crosstab(df['Sex'], df['Embarked'])
# ### How to represent the above crosstab as percentages and present it graphically
sns.heatmap(pd.crosstab(df['Sex'], df['Embarked'], normalize='index'), cmap="YlGnBu", annot=True)
# ## Question:
#
# What percent of passengers embarked at C?
# +
# Answer:
print(dict(df['Embarked'].value_counts()))
dict(df['Embarked'].value_counts())['C']
# -
sum(dict(df['Embarked'].value_counts()).values())
dict(df['Embarked'].value_counts())['C']/sum(dict(df['Embarked'].value_counts()).values())
# #### OR
len(df[df['Embarked'] == 'C'])/len(df['Embarked'].dropna())
# What percent of female passengers embarked at C?
pd.crosstab(df['Sex'], df['Embarked'])
len(df[(df['Sex'] == 'female') & (df['Embarked'] == 'C')])
len(df[df['Sex'] == 'female'])
73/ 314
len(df[(df['Sex'] == 'female') & (df['Embarked'] == 'C')])/len(df[df['Sex'] == 'female'])
# This question is different from above:
# What percent of passengers embarked at C were female?
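# A possible answer sketch (hedged illustration, not part of the original notebook):
# restrict to passengers who embarked at C, then compute the share that is female
embarked_c = df[df['Embarked'] == 'C']
len(embarked_c[embarked_c['Sex'] == 'female']) / len(embarked_c)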
| Assignments/HW_3/Pandas_Titanic.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# This notebook was prepared by [<NAME>](http://donnemartin.com). Source and license info is on [GitHub](https://github.com/donnemartin/interactive-coding-challenges).
# # Challenge Notebook
# ## Problem: Implement selection sort.
#
# * [Constraints](#Constraints)
# * [Test Cases](#Test-Cases)
# * [Algorithm](#Algorithm)
# * [Code](#Code)
# * [Unit Test](#Unit-Test)
# * [Solution Notebook](#Solution-Notebook)
# ## Constraints
#
# * Is a naive solution sufficient (ie not stable, not based on a heap)?
# * Yes
# * Are duplicates allowed?
# * Yes
# * Can we assume the input is valid?
# * No
# * Can we assume this fits memory?
# * Yes
# ## Test Cases
#
# * None -> Exception
# * Empty input -> []
# * One element -> [element]
# * Two or more elements
# ## Algorithm
#
# Refer to the [Solution Notebook](http://nbviewer.ipython.org/github/donnemartin/interactive-coding-challenges/blob/master/sorting_searching/selection_sort/selection_sort_solution.ipynb). If you are stuck and need a hint, the solution notebook's algorithm discussion might be a good place to start.
# ## Code
class SelectionSort(object):
def sort(self, data):
# TODO: Implement me (recursive)
pass
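# The challenge intends for you to fill in `sort` yourself. Purely as a hedged
# illustration (under a different class name, so the challenge above stays
# intact), one possible in-place selection sort might look like this:
class SelectionSortSketch(object):

    def sort(self, data):
        # guard against invalid input, as required by the test cases
        if data is None:
            raise TypeError('data cannot be None')
        # repeatedly select the minimum of the unsorted suffix and swap it into place
        for i in range(len(data) - 1):
            min_index = i
            for j in range(i + 1, len(data)):
                if data[j] < data[min_index]:
                    min_index = j
            data[i], data[min_index] = data[min_index], data[i]
        return data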
# ## Unit Test
#
#
#
# **The following unit test is expected to fail until you solve the challenge.**
# +
# # %load test_selection_sort.py
import unittest
class TestSelectionSort(unittest.TestCase):
def test_selection_sort(self, func):
print('None input')
self.assertRaises(TypeError, func, None)
print('Empty input')
self.assertEqual(func([]), [])
print('One element')
self.assertEqual(func([5]), [5])
print('Two or more elements')
data = [5, 1, 7, 2, 6, -3, 5, 7, -10]
self.assertEqual(func(data), sorted(data))
print('Success: test_selection_sort\n')
def main():
test = TestSelectionSort()
selection_sort = SelectionSort()
test.test_selection_sort(selection_sort.sort)
    try:
        test.test_selection_sort(selection_sort.sort_recursive)
        test.test_selection_sort(selection_sort.sort_iterative_alt)
    except (AttributeError, NameError):
# Alternate solutions are only defined
# in the solutions file
pass
if __name__ == '__main__':
main()
# -
# ## Solution Notebook
#
# Review the [Solution Notebook](http://nbviewer.ipython.org/github/donnemartin/interactive-coding-challenges/blob/master/sorting_searching/selection_sort/selection_sort_solution.ipynb) for a discussion on algorithms and code solutions.
| sorting_searching/selection_sort/selection_sort_challenge.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # Introduction to Python
#
# Before we get into the nitty-gritty of Python, let us first discuss the actual need for a programming language.
# + [markdown] slideshow={"slide_type": "slide"}
# ### What is a programming language? Why do we need it?
#
# A programming language is just a set of rules that allows us to control machines. We humans are inherently lazy people who want to work less.
#
# That is where machines come in: we instruct them to perform work so that we can focus on some other important work (sleep :-p).
#
# We need a language so that we can communicate. It helps to have some sort of rules that are shared beforehand, so that communicators can understand each other.
#
# The two images below try to highlight why we need a common set of rules (which we call a language) to communicate.
# + [markdown] slideshow={"slide_type": "slide"}
# ### Normal Conversation
#
# <img src="../images/normal-conversation.jpg" alt="Normal Conversation" style="width:300px;display:block;margin-left:auto;margin-right:auto;"/>
# + [markdown] slideshow={"slide_type": "slide"}
# ### Cryptic Conversation
#
# <img alt="Cryptic-conversation" src="../images/cryptic-conversation.jpg" style="width:300px;display:block;margin-left:auto;margin-right:auto;"/>
# + [markdown] slideshow={"slide_type": "slide"}
# ### Why learn Python?
#
# There are so many programming languages out there. The first obvious question that may come to your mind is: why should you learn Python?
#
# Python is one of the most user-friendly languages. In many places, you may feel the syntax is pretty similar to the English language.
#
# In addition to that, it is completely free, with an ever-growing community that works to improve it and add features to it.
#
# Beyond this, Python can be used in the following fields.
# + [markdown] slideshow={"slide_type": "fragment"}
# - Web sites
#
# Some of the most popular websites use Python in some way or the other to power them. Instagram, Netflix, the Washington Post, etc. are some examples.
# + [markdown] slideshow={"slide_type": "fragment"}
# - Scripting
#
# Suppose you want to do some customization on your machine. You want the LED light on your keyboard to flicker when you receive your next email. No issues, Python is there for you to use.
# + [markdown] slideshow={"slide_type": "fragment"}
# - Artificial Intelligence
#
# Python is one of the most famous languages used in this domain. It has a rich vein of libraries like `pandas`, `scikit-learn`, `numpy` etc that are used in this domain.
# + [markdown] slideshow={"slide_type": "fragment"}
# - Games
#
# Although Python is generally not the preferred choice for games, there are a couple of popular libraries like `PyGame` and `kivy` that you can use to make some enjoyable stuff.
# + [markdown] slideshow={"slide_type": "fragment"}
# - Video/Music Editors
#
# Traditionally, Python is not the preferred choice for things related to graphics, but there are libraries like `moviepy` and `Pydub` that you may use for video/audio editing.
# + [markdown] slideshow={"slide_type": "slide"}
# Now that we know what kind of stuff we can build with Python, let us get to know what exactly is a program and how we can use them in Python.
#
# ### What is a program?
#
# A program is a set of instructions that we give to the machine.
#
# The image below is one of the recommended ways (:-p) to eat a chocolate.
# + [markdown] slideshow={"slide_type": "fragment"}
# <img src="../images/eating-a-chocolate.jpg" alt="Possible instructions to eat a chocolate">
# + [markdown] slideshow={"slide_type": "slide"}
# Now that we know what programs are, let us start learning to write some basic stuff.
#
# ### Lets solve basic Mathematics
# + slideshow={"slide_type": "fragment"}
2 ** 8
# + slideshow={"slide_type": "fragment"}
# perimeter of a circle of radius 8
2 * 3.141 * 8
# + slideshow={"slide_type": "fragment"}
# area of a circle of radius 8
3.141 * 8 ** 2
# + slideshow={"slide_type": "slide"}
# simple multiplication
7 * 8
# + [markdown] slideshow={"slide_type": "slide"}
# ### Variables
#
# If you observe closely, we have used the value of `pi` as `3.14` in the above calculations. What if we decide that we want a more precise value of area? We would use `pi` as `3.1415`. This would mean that we now need to make changes at all the places, where we had earlier used `3.14`. This is where variables come in.
#
# They help us assign a name to the values that we can now use in our programs. When using variables, we only need to make changes at the point of assignment.
# + slideshow={"slide_type": "fragment"}
pie = 3.14
# + slideshow={"slide_type": "fragment"}
radius = 9
# + slideshow={"slide_type": "fragment"}
# area
pie * radius ** 2
# + slideshow={"slide_type": "slide"}
# perimeter/circumference
2 * pie * radius
# + [markdown] slideshow={"slide_type": "slide"}
# ### Rules for naming Variables
#
# There are some rules that you need to keep in mind, when naming variables.
# + [markdown] slideshow={"slide_type": "fragment"}
# - Must start with a letter or the underscore*(_)* character.
# + slideshow={"slide_type": "fragment"}
# this is allowed
pie = 3.14
# + slideshow={"slide_type": "fragment"}
# this is allowed
_radius = 3
# + slideshow={"slide_type": "slide"}
# although this works, it is not generally recommended
_8 = 1
# + [markdown] slideshow={"slide_type": "slide"}
# - Cannot start with a number.
# + slideshow={"slide_type": "fragment"}
# this fails
9i = 8
# + [markdown] slideshow={"slide_type": "slide"}
# - Can only contain letters, numbers and underscores *(A-z, 0-9, and _ )*
# + slideshow={"slide_type": "fragment"}
# this doesn't work
u* = 12
# + slideshow={"slide_type": "slide"}
# this works
u8 = 12
# + [markdown] slideshow={"slide_type": "slide"}
# - Are case-sensitive (name, Name and NAME are three different variables).
# + slideshow={"slide_type": "fragment"}
radius = 9
# + slideshow={"slide_type": "fragment"}
radius
# + slideshow={"slide_type": "fragment"}
# this is not defined
RADIUS
# + [markdown] slideshow={"slide_type": "slide"}
# ## DataTypes
#
# Now that we know about variables, we should also try to understand the different types that Python assigns to these values (data). These types give us the superpower of using different utility functions with the data.
#
# Python dynamically assigns the type (`int`, `float`, `string`, etc.) depending upon the data. You can use the `type` function to test the type of a value. If you are coming from a different programming language, this might seem a bit odd.
#
# Let us get to know some of the different data types in Python.
#
# ### Numbers
# + [markdown] slideshow={"slide_type": "slide"}
# - Int
#
# This is used for integral values.
# + slideshow={"slide_type": "fragment"}
# type function gives us the type of the variable
type(radius)
# + slideshow={"slide_type": "slide"}
type(45)
# + [markdown] slideshow={"slide_type": "slide"}
# - Float
#
# This is useful for holding decimal values e.g 1.14, 3.13 etc.
# + slideshow={"slide_type": "fragment"}
type(pi)
# + slideshow={"slide_type": "slide"}
type(1.03)
# + [markdown] slideshow={"slide_type": "slide"}
# ### Booleans
#
# These are useful for decision making. Since you have reached here, you have already made the decision to come this far!
#
# In upcoming posts, we'll also learn more about how we may use decision making in our programs.
# + slideshow={"slide_type": "fragment"}
decision = True
# + slideshow={"slide_type": "slide"}
type(decision)
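# + slideshow={"slide_type": "fragment"}
# A small peek ahead (illustrative example, not from the original lesson):
# booleans drive `if`/`else` decisions, which we will cover in detail later.
if decision:
    print('You decided to keep learning Python!')
else:
    print('Maybe some other time.')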
# + [markdown] slideshow={"slide_type": "slide"}
# ### Strings
#
# These are useful for storing alpha-numeric values (containing symbols, numbers, strings, etc.), e.g. names, addresses, etc.
# + slideshow={"slide_type": "fragment"}
'name'
# + slideshow={"slide_type": "slide"}
name = 'Tom'
# -
name = "Sita"
sentence = "I can't call you now"
sentence
# + [markdown] slideshow={"slide_type": "slide"}
# - Concatenation
#
# This implies adding of strings.
# + slideshow={"slide_type": "fragment"}
full_name = 'lady' + 'gaga'
# -
"lady" + ' gaga'
'8' + ' street'
# + [markdown] slideshow={"slide_type": "fragment"}
# ##### We will learn more about strings in upcoming lessons
# + [markdown] slideshow={"slide_type": "slide"}
# ### Print function
#
# This is useful to display the output on the screen.
# + slideshow={"slide_type": "fragment"}
print('hello')
# -
area = pie * radius ** 2
# + slideshow={"slide_type": "slide"}
print('The area of circle is:', area)
# + [markdown] slideshow={"slide_type": "slide"}
# ### Script Mode v/s Interactive Mode
#
# When you execute expressions using Python IDLE (Integrated Development and Learning Environment), you are actually using Interactive Mode.
#
# When writing instructions using a code editor and executing them through an IDE (Visual Studio, PyCharm, etc.) or a terminal, we are using script mode.
# + [markdown] slideshow={"slide_type": "slide"}
# To use the script mode, you may follow the following instruction:
#
# ```python
# # save this file as test.py
# name = 'lady'
# print(name)
# ```
# Now run using the command line (inside the directory to which the file has been saved)
#
# ```sh
# python test.py
# ```
# + [markdown] slideshow={"slide_type": "slide"}
# ### Comments
#
# Any line that starts with a `#` is considered a comment. It is only useful for the understanding of programmers (those who write programs) and is ignored by the machine.
#
# Comments are extremely useful for reminding people of their thought process when they return to their code after some time.
# + slideshow={"slide_type": "slide"}
# initialization
x = 2
# + slideshow={"slide_type": "fragment"}
# for multiline-line
# start every line with a #(pound) symbol
# + [markdown] slideshow={"slide_type": "slide"}
# ##### When you don't write comments
#
# <img src="https://img-9gag-fun.9cache.com/photo/agYKEEq_700bwp.webp" style="heigh:200px;;display:block;margin-left:auto;margin-right:auto;" alt="Code comment meme"/>
# + [markdown] slideshow={"slide_type": "slide"}
# ### User Input
#
# Till now, we have been mostly giving values to our programs. In most real scenarios, you would want to take inputs from the user.
#
# For example, consider this sort of form used by websites for taking feedback.
# + [markdown] slideshow={"slide_type": "fragment"}
# 
# + slideshow={"slide_type": "slide"}
name = input()
# -
name
# +
username = input('Enter username:')
password = input('Enter password:')
print('Your username is:', username)
print('Your password is:', password)
# + [markdown] slideshow={"slide_type": "slide"}
# ### Integer Input
#
# Till now, we have been taking `strings` as input. What do we do when we want numerical inputs?
#
# We may use the `int` function in the following manner.
# + slideshow={"slide_type": "fragment"}
radius = int(input('Enter radius:'))
pie = 3.14
area = pie * radius ** 2
print('The area of the circle is:', area)
# + slideshow={"slide_type": "slide"}
int(input('Enter a number:'))
# + [markdown] slideshow={"slide_type": "slide"}
# ### Some Practice exercises
# + [markdown] slideshow={"slide_type": "fragment"}
# - Write a program that asks users their name and greets them with a message.
# + slideshow={"slide_type": "slide"}
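# One possible solution sketch (hedged; the variable name is just an assumption):
user_name = input('What is your name? ')
print('Hello,', user_name, '- welcome to Python!')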
# + [markdown] slideshow={"slide_type": "slide"}
# - Build a simple calculator that asks users to input two numbers and prints their sum
# + slideshow={"slide_type": "fragment"}
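# One possible solution sketch for the calculator (hedged; assumes integer inputs):
first_number = int(input('Enter the first number: '))
second_number = int(input('Enter the second number: '))
print('The sum is:', first_number + second_number)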
| chapter_01/introduction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "c469c9ee-3832-4f99-96ee-679bf24df825", "showTitle": false, "title": ""}
# 
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "8a104cee-cb11-4845-86f6-17fbe57a4840", "showTitle": false, "title": ""}
# <H1> 6. Context Spell Checker - Medical v3.0 </H1>
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "daf19bd8-d6fa-4bd7-ac3a-3db6c7a82c5c", "showTitle": false, "title": ""}
import os
import json
import string
import numpy as np
import pandas as pd
import sparknlp
import sparknlp_jsl
from sparknlp.util import *
from sparknlp.base import *
from sparknlp.annotator import *
from sparknlp_jsl.annotator import *
from sparknlp.pretrained import ResourceDownloader
from pyspark.sql import functions as F
from pyspark.ml import Pipeline, PipelineModel
pd.set_option('max_colwidth', 100)
pd.set_option('display.max_columns', None)
pd.set_option('display.expand_frame_repr', False)
print('sparknlp_jsl.version : ',sparknlp_jsl.version())
spark
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "d260ffe0-e641-4035-a530-0f100eae8e12", "showTitle": false, "title": ""}
documentAssembler = DocumentAssembler()\
.setInputCol("text")\
.setOutputCol("document")
tokenizer = RecursiveTokenizer()\
.setInputCols(["document"])\
.setOutputCol("token")\
.setPrefixes(["\"", "(", "[", "\n"])\
.setSuffixes([".", ",", "?", ")","!", "'s"])
spellModel = ContextSpellCheckerModel.pretrained('spellcheck_clinical', 'en', 'clinical/models')\
.setInputCols("token")\
.setOutputCol("checked")
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "833444d3-5a30-47dc-a00d-19061e93f585", "showTitle": false, "title": ""}
pipeline = Pipeline(
stages = [
documentAssembler,
tokenizer,
spellModel
])
empty_ds = spark.createDataFrame([[""]]).toDF("text")
lp = LightPipeline(pipeline.fit(empty_ds))
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "f8466d0a-cd9c-4829-a383-157fe8562838", "showTitle": false, "title": ""}
# OK! At this point we have our spell checking pipeline, as expected. Let's see what we can do with it. Consider these errors:
#
# _
# __Witth__ the __hell__ of __phisical__ __terapy__ the patient was __imbulated__ and on posoperative, the __impatient__ tolerating a post __curgical__ soft diet._
#
# _With __paint__ __wel__ controlled on __orall__ pain medications, she was discharged __too__ __reihabilitation__ __facilitay__._
#
# _She is to also call the __ofice__ if she has any __ever__ greater than 101, or __leeding__ __form__ the surgical wounds._
#
# _Abdomen is __sort__, nontender, and __nonintended__._
#
# _Patient not showing pain or any __wealth__ problems._
#
# _No __cute__ distress_
#
# Note that some of the errors are valid English words; only by considering the context can the right choice be made.
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "47569d82-239e-43e0-a485-2e685951045c", "showTitle": false, "title": ""}
example = ["Witth the hell of phisical terapy the patient was imbulated and on posoperative, the impatient tolerating a post curgical soft diet.",
"With paint wel controlled on orall pain medications, she was discharged too reihabilitation facilitay.",
"She is to also call the ofice if she has any ever greater than 101, or leeding form the surgical wounds.",
"Abdomen is sort, nontender, and nonintended.",
"Patient not showing pain or any wealth problems.",
"No cute distress"
]
for pairs in lp.annotate(example):
print (list(zip(pairs['token'],pairs['checked'])))
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "97ac90f2-d3fb-4c3d-a2e5-c5226fc0535a", "showTitle": false, "title": ""}
# End of Notebook # 6
| tutorials/Certification_Trainings/Healthcare/databricks_notebooks/6.Clinical_Context_Spell_Checker_v3.0.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <img src="../../images/qiskit-heading.gif" alt="Note: In order for images to show up in this jupyter notebook you need to select File => Trusted Notebook" width="500 px" align="left">
# # Getting Started with Qiskit
#
# Here, we provide an overview of working with Qiskit. Qiskit provides the basic building blocks necessary to program quantum computers. The fundamental unit of Qiskit is the **quantum circuit**. A workflow using Qiskit consists of two stages: **Build** and **Execute**. **Build** allows you to make different quantum circuits that represent the problem you are solving, and **Execute** allows you to run them on different backends. After the jobs have been run, the data is collected. There are methods for putting this data together, depending on the program. This either gives you the answer you wanted, or allows you to make a better program for the next instance.
import numpy as np
from qiskit import *
# %matplotlib inline
# ## Circuit Basics <a id='circuit_basics'></a>
#
#
# ### Building the circuit
#
# The basic elements needed for your first program are the QuantumCircuit, and QuantumRegister.
# +
# Create a Quantum Register with 3 qubits.
q = QuantumRegister(3, 'q')
# Create a Quantum Circuit acting on the q register
circ = QuantumCircuit(q)
# -
# <div class="alert alert-block alert-info">
# <b>Note:</b> Naming the QuantumRegister is optional and not required.
# </div>
#
# After you create the circuit with its registers, you can add gates ("operations") to manipulate the registers. As you proceed through the tutorials you will find more gates and circuits; the below is an example of a quantum circuit that makes a three-qubit GHZ state
#
# $$|\psi\rangle = \left(|000\rangle+|111\rangle\right)/\sqrt{2}.$$
#
# To create such a state, we start with a 3-qubit quantum register. By default, each qubit in the register is initialized to $|0\rangle$. To make the GHZ state, we apply the following gates:
# * A Hadamard gate $H$ on qubit 0, which puts it into a superposition state.
# * A controlled-Not operation ($C_{X}$) between qubit 0 and qubit 1.
# * A controlled-Not operation between qubit 0 and qubit 2.
#
# On an ideal quantum computer, the state produced by running this circuit would be the GHZ state above.
#
# In Qiskit, operations can be added to the circuit one-by-one, as shown below.
# Add a H gate on qubit 0, putting this qubit in superposition.
circ.h(q[0])
# Add a CX (CNOT) gate on control qubit 0 and target qubit 1, putting
# the qubits in a Bell state.
circ.cx(q[0], q[1])
# Add a CX (CNOT) gate on control qubit 0 and target qubit 2, putting
# the qubits in a GHZ state.
circ.cx(q[0], q[2])
# ## Visualize Circuit
#
# You can visualize your circuit using Qiskit `QuantumCircuit.draw()`, which plots circuit in the form found in many textbooks.
circ.draw()
# In this circuit, the qubits are put in order with qubit zero at the top and qubit two at the bottom. The circuit is read left-to-right (meaning that gates which are applied earlier in the circuit show up further to the left).
#
# <div class="alert alert-block alert-info">
# <b>Note:</b> If you don't have matplotlib set up as your default in '~/.qiskit/settings.conf', it will use a text-based drawer instead of matplotlib. To set the default to matplotlib, use the following in the settings.conf
#
# [default]
# circuit_drawer = mpl
#
# For those who want the full LaTeX experience, you can also set circuit_drawer = latex.
#
# </div>
#
#
# ## Simulating circuits using Qiskit Aer <a id='aer_simulation'></a>
#
# Qiskit Aer is our package for simulating quantum circuits. It provides many different backends for doing a simulation. Here we use the basic python version.
#
# ### Statevector backend
#
# The most common backend in Qiskit Aer is the `statevector_simulator`. This simulator returns the quantum
# state which is a complex vector of dimensions $2^n$ where $n$ is the number of qubits
# (so be careful using this as it will quickly get too large to run on your machine).
# <div class="alert alert-block alert-info">
#
#
# When representing the state of a multi-qubit system, the tensor order used in qiskit is different than that use in most physics textbooks. Suppose there are $n$ qubits, and qubit $j$ is labeled as $Q_{j}$. Qiskit uses an ordering in which the $n^{\mathrm{th}}$ qubit is on the <em><strong>left</strong></em> side of the tensor product, so that the basis vectors are labeled as $Q_n\otimes \cdots \otimes Q_1\otimes Q_0$.
#
# For example, if qubit zero is in state 0, qubit 1 is in state 0, and qubit 2 is in state 1, qiskit would represent this state as $|100\rangle$, whereas many physics textbooks would represent it as $|001\rangle$.
#
# This difference in labeling affects the way multi-qubit operations are represented as matrices. For example, qiskit represents a controlled-X ($C_{X}$) operation with qubit 0 being the control and qubit 1 being the target as
#
# $$C_X = \begin{pmatrix} 1 & 0 & 0 & 0 \\ 0 & 0 & 0 & 1 \\ 0 & 0 & 1 & 0 \\ 0 & 1 & 0 & 0 \\\end{pmatrix}.$$
#
# </div>
#
# To run the above circuit using the statevector simulator, first you need to import Aer and then set the backend to `statevector_simulator`.
# +
# Import Aer
from qiskit import BasicAer
# Run the quantum circuit on a statevector simulator backend
backend = BasicAer.get_backend('statevector_simulator')
# -
# Now we have chosen the backend it's time to compile and run the quantum circuit. In Qiskit we provide the `execute` function for this. ``execute`` returns a ``job`` object that encapsulates information about the job submitted to the backend.
#
#
# <div class="alert alert-block alert-info">
# <b>Tip:</b> You can obtain the above parameters in Jupyter. Simply place the text cursor on a function and press Shift+Tab.
# </div>
# Create a Quantum Program for execution
job = execute(circ, backend)
# When you run a program, a job object is made that has the following two useful methods:
# `job.status()` and `job.result()` which return the status of the job and a result object respectively.
#
# <div class="alert alert-block alert-info">
# <b>Note:</b> Jobs run asynchronously but when the result method is called it switches to synchronous and waits for it to finish before moving on to another task.
# </div>
result = job.result()
# The results object contains the data and Qiskit provides the method
# `result.get_statevector(circ)` to return the state vector for the quantum circuit.
outputstate = result.get_statevector(circ, decimals=3)
print(outputstate)
# Qiskit also provides a visualization toolbox to allow you to view these results.
#
# Below, we use the visualization function to plot the real and imaginary components of the state vector.
from qiskit.visualization import plot_state_city
plot_state_city(outputstate)
# ### Unitary backend
# Qiskit Aer also includes a `unitary_simulator` that works _provided all the elements in the circuit are unitary operations_. This backend calculates the $2^n \times 2^n$ matrix representing the gates in the quantum circuit.
# +
# Run the quantum circuit on a unitary simulator backend
backend = BasicAer.get_backend('unitary_simulator')
job = execute(circ, backend)
result = job.result()
# Show the results
print(result.get_unitary(circ, decimals=3))
# -
# ### OpenQASM backend
# The simulators above are useful because they provide information about the state output by the ideal circuit and the matrix representation of the circuit. However, a real experiment terminates by _measuring_ each qubit (usually in the computational $|0\rangle, |1\rangle$ basis). Without measurement, we cannot gain information about the state. Measurements cause the quantum system to collapse into classical bits.
#
# For example, suppose we make independent measurements on each qubit of the three-qubit GHZ state
# $$|\psi\rangle = \left(|000\rangle+|111\rangle\right)/\sqrt{2},$$
# and let $xyz$ denote the bitstring that results. Recall that, under the qubit labeling used by Qiskit, $x$ would correspond to the outcome on qubit 2, $y$ to the outcome on qubit 1, and $z$ to the outcome on qubit 0.
#
# <div class="alert alert-block alert-info">
# <b>Note:</b> This representation of the bitstring puts the most significant bit (MSB) on the left, and the least significant bit (LSB) on the right. This is the standard ordering of binary bitstrings. We order the qubits in the same way, which is why Qiskit uses a non-standard tensor product order.
# </div>
#
# Recall the probability of obtaining outcome $xyz$ is given by
# $$\mathrm{Pr}(xyz) = |\langle xyz | \psi \rangle |^{2},$$ and as such for the GHZ state the probabilities of obtaining 000 or 111 are both 1/2.
#
# To simulate a circuit that includes measurement, we need to add measurements to the original circuit above, and use a different Aer backend.
# +
# Create a Classical Register with 3 bits.
c = ClassicalRegister(3, 'c')
# Create a Quantum Circuit
meas = QuantumCircuit(q, c)
meas.barrier(q)
# map the quantum measurement to the classical bits
meas.measure(q,c)
# The Qiskit circuit object supports composition using
# the addition operator.
qc = circ+meas
#drawing the circuit
qc.draw()
# -
# This circuit adds a classical register, and three measurements that are used to map the outcome of qubits to the classical bits.
#
# To simulate this circuit, we use the ``qasm_simulator`` in Qiskit Aer. Each run of this circuit will yield either the bitstring 000 or 111. To build up statistics about the distribution of the bitstrings (to, e.g., estimate $\mathrm{Pr}(000)$), we need to repeat the circuit many times. The number of times the circuit is repeated can be specified in the ``execute`` function, via the ``shots`` keyword.
# +
# Use Aer's qasm_simulator
backend_sim = BasicAer.get_backend('qasm_simulator')
# Execute the circuit on the qasm simulator.
# We've set the number of repeats of the circuit
# to be 1024, which is the default.
job_sim = execute(qc, backend_sim, shots=1024)
# Grab the results from the job.
result_sim = job_sim.result()
# -
# Once you have a result object, you can access the counts via the function `get_counts(circuit)`. This gives you the _aggregated_ binary outcomes of the circuit you submitted.
counts = result_sim.get_counts(qc)
print(counts)
# Approximately 50 percent of the time the output bitstring is 000. Qiskit also provides a function `plot_histogram` which allows you to view the outcomes.
from qiskit.visualization import plot_histogram
plot_histogram(counts)
# The estimated outcome probabilities $\mathrm{Pr}(000)$ and $\mathrm{Pr}(111)$ are computed by taking the aggregate counts and dividing by the number of shots (times the circuit was repeated). Try changing the ``shots`` keyword in the ``execute`` function and see how the estimated probabilities change.
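# As a quick illustrative sketch (assuming the 1024 shots used above), the
# estimated probabilities can be computed directly from the counts dictionary:
shots_used = 1024
estimated_probs = {bitstring: count / shots_used for bitstring, count in counts.items()}
print(estimated_probs)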
# ## Running circuits using the IBMQ provider <a id='ibmq_provider'></a>
#
# To facilitate access to real quantum computing hardware, we have provided a simple API interface.
# To access IBMQ devices, you'll need an API token. For the public IBM Q devices, you can generate an API token [here](https://quantumexperience.ng.bluemix.net/qx/account/advanced) (create an account if you don't already have one). For Q Network devices, login to the q-console, click your hub, group, and project, and expand "Get Access" to generate your API token and access url.
#
# Our IBMQ provider lets you run your circuit on real devices or on our HPC simulator. Currently, this provider exists within Qiskit, and can be imported as shown below. For details on the provider, see [The IBMQ Provider](the_ibmq_provider.ipynb).
from qiskit import IBMQ
# After generating your API token, call: `IBMQ.save_account('MY_TOKEN')`. For Q Network users, you'll also need to include your access url: `IBMQ.save_account('MY_TOKEN', 'URL')`
#
# This will store your IBMQ credentials in a local file. Unless your registration information has changed, you only need to do this once. You may now load your accounts by calling,
IBMQ.load_accounts(hub=None)
# Once your account has been loaded, you can view the list of backends available to you.
print("Available backends:")
IBMQ.backends()
# ### Running circuits on real devices
#
# Today's quantum information processors are small and noisy, but are advancing at a fast pace. They provide a great opportunity to explore what [noisy, intermediate-scale quantum (NISQ)](https://arxiv.org/abs/1801.00862) computers can do.
# The IBMQ provider uses a queue to allocate the devices to users. We now choose a device with the least busy queue which can support our program (has at least 3 qubits).
# +
from qiskit.providers.ibmq import least_busy
large_enough_devices = IBMQ.backends(filters=lambda x: x.configuration().n_qubits < 10 and
not x.configuration().simulator)
backend = least_busy(large_enough_devices)
print("The best backend is " + backend.name())
# -
# To run the circuit on the backend, we need to specify the number of shots and the number of credits we are willing to spend to run the circuit. Then, we execute the circuit on the backend using the ``execute`` function.
# +
from qiskit.tools.monitor import job_monitor
shots = 1024 # Number of shots to run the program (experiment); maximum is 8192 shots.
max_credits = 3 # Maximum number of credits to spend on executions.
job_exp = execute(qc, backend=backend, shots=shots, max_credits=max_credits)
job_monitor(job_exp)
# -
# ``job_exp`` has a ``.result()`` method that lets us get the results from running our circuit.
#
# <div class="alert alert-block alert-info">
# <b>Note:</b> When the .result() method is called, the code block will wait until the job has finished before releasing the cell.
# </div>
result_exp = job_exp.result()
# Like before, the counts from the execution can be obtained using `get_counts(qc)`
counts_exp = result_exp.get_counts(qc)
plot_histogram([counts_exp,counts])
# ### Simulating circuits using a HPC simulator
#
# The IBMQ provider also comes with a remote optimized simulator called ``ibmq_qasm_simulator``. This remote simulator is capable of simulating up to 32 qubits. It can be used the
# same way as the remote real backends.
simulator_backend = IBMQ.get_backend('ibmq_qasm_simulator', hub=None)
# +
shots = 1024 # Number of shots to run the program (experiment); maximum is 8192 shots.
max_credits = 3 # Maximum number of credits to spend on executions.
job_hpc = execute(qc, backend=simulator_backend, shots=shots, max_credits=max_credits)
# -
result_hpc = job_hpc.result()
counts_hpc = result_hpc.get_counts(qc)
plot_histogram(counts_hpc)
# ### Retrieving a previously run job
#
# If your experiment takes longer to run than you have time to wait around, or if you simply want to retrieve old jobs, the IBMQ backends allow you to do that.
# First you would need to note your job's ID:
# +
jobID = job_exp.job_id()
print('JOB ID: {}'.format(jobID))
# -
# Given a job ID, that job object can later be reconstructed from the backend using `retrieve_job`:
job_get=backend.retrieve_job(jobID)
# and then the results can be obtained from the new job object.
job_get.result().get_counts(qc)
| qiskit/basics/1_getting_started_with_qiskit.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="T3tnX9Y7QmzK" colab_type="code" colab={}
"""Code based on R code, available at:
https://github.com/icaroagostino/fun/blob/master/Monty_Hall.R
"""
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import random
# + id="frQZ9vpyQsip" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 326} outputId="846dfd65-9116-41db-da03-294eddaf81c1"
# Illustration of Monty Hall problem
n = 10000 # number of repetitions
victories = [] # vector of victories of strategy 1
doors = np.array([1, 2, 3]) # vector of doors
######################################
# Strategy 1 - Don't change the door #
######################################
np.random.seed(1234) # set random seed
for i in range(n):
prize_door = np.random.choice(doors, 1) # door with the prize
choice = np.random.choice(doors, 1) # player's choice
    # if the choice is equal to the prize door, win [1]
    if prize_door == choice:
        victories.append(1)
    # if the choice is different from the prize door, loss [0]
    else:
        victories.append(0)
# Compute results
rounds = np.array(range(1, n+1, 1))
win_perc = np.cumsum(victories)/rounds
data = pd.DataFrame({'rounds': rounds, 'win_perc': win_perc})
# Plot
plt.figure(figsize=(8, 4.5))
plt.plot('rounds', 'win_perc', data=data,
label='Cumulative winning percentage')
plt.hlines(y=1/3, xmin=0, xmax=n, colors='red', linestyles='dashed',
label='Reference Line (33%)')
plt.ylim(0, 1)
plt.title('Strategy 1', fontsize=12)
plt.ylabel('Winning Percentage', fontsize=12)
plt.xlabel('Rounds', fontsize=12)
plt.legend(loc='best', fontsize=12)
plt.show()
# + id="cOndTB-9Qu2d" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 326} outputId="8fa99aee-edb8-4dba-88af-53ff7f85c4ed"
################################
# Strategy 2 - Door is changed #
################################
victories = [] # vector of victories
np.random.seed(1234) # set random seed
for i in range(n):
prize_door = np.random.choice(doors, 1) # door with the prize
choice1 = np.random.choice(doors, 1) # player's choice
x_aux = np.setdiff1d(doors,
                           [prize_door, choice1])  # auxiliary variable
if len(x_aux) > 1:
excluded_door = np.random.choice(x_aux, 1)
else:
excluded_door = x_aux # exclude one door
choice2 = np.setdiff1d(doors,
[choice1, excluded_door]) # player changes door
    # if the choice 2 is equal to the prize door, win [1]
    if prize_door == choice2:
        victories.append(1)
    # if the choice 2 is different from the prize door, loss [0]
    else:
        victories.append(0)
# Compute results
rounds = np.array(range(1, n+1, 1))
win_perc = np.cumsum(victories)/rounds
data = pd.DataFrame({'rounds': rounds, 'win_perc': win_perc})
# Plot
plt.figure(figsize=(8, 4.5))
plt.plot('rounds', 'win_perc', data=data,
label='Cumulative winning percentage')
plt.hlines(y=2/3, xmin=0, xmax=n, colors='red', linestyles='dashed',
label='Reference Line (66%)')
plt.ylim(0, 1)
plt.title('Strategy 2', fontsize=12)
plt.ylabel('Winning Percentage', fontsize=12)
plt.xlabel('Rounds', fontsize=12)
plt.legend(loc='best', fontsize=12)
plt.show()
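
# The simulation agrees with the analytic result: switching wins exactly when the first pick misses the prize, which happens with probability $2/3$, while staying wins only when the first pick is already correct, which happens with probability $1/3$.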
| Monty_Hall/Monty_Hall.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Write a function to identify whether the sub list [1, 1, 5] appears in the given list in the same order;
# if yes, print "it's a match", otherwise print "it's gone"
lst1=[1,1,5]
print(lst1)
# +
lst2=[]
n=int(input("enter numberof elements:"))
for i in range(0,n):
ele=int(input())
lst2.append(ele)
print("the list is",lst2)
def sublist(lst1, lst2):
    # check whether the elements of lst1 appear in lst2 in the same order
    pos = 0
    for item in lst2:
        if pos < len(lst1) and item == lst1[pos]:
            pos += 1
    if pos == len(lst1):
        print("it's a match")
    else:
        print("it's gone")
sublist(lst1, lst2)
# -
# # Filter Function
# +
# Make a function for prime numbers and use Filter to filter out all the prime numbers from 1-2500
# -
def isprime(x):
    # a prime is greater than 1 and has no divisor between 2 and x - 1
    if x < 2:
        return False
    for n in range(2, x):
        if x % n == 0:
            return False
    return True
fltrob = filter(isprime, range(1, 2501))
print("prime numbers between 1-2500 are :", list(fltrob))
# # Lambda and Mapping
# +
# Make a lambda function for capitalizing the whole sentence passed as an argument,
# and map all the sentences in the list with the lambda function
# -
lst=["hey this is me","i am in gurgaon","i love black colour","this is my page"]
print(lst)
# +
capitl=map(lambda name: name.title(),lst)
list(capitl)
# -
| day_5assignment.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# # Create a gene family network and Entrez Gene mapping
import pandas
import networkx
hgnc_df = (pandas.read_table('download/hgnc_complete_set.txt', low_memory=False)
.dropna(subset=['entrez_id', 'gene_family_id'])
.astype({'entrez_id': int})
[['entrez_id', 'symbol', 'name', 'locus_group', 'gene_family_id']]
)
entrez_id_to_symbol = dict(zip(hgnc_df.entrez_id, hgnc_df.symbol))
hgnc_df.head()
hgnc_df.columns
family_df = (
pandas.read_csv('download/genefamily_db_tables/family.csv')
[['id', 'abbreviation', 'name']]
)
family_df.head()
# +
families = networkx.DiGraph()
# Nodes
for row in family_df.itertuples():
families.add_node(row.id, name=row.name, entrez_gene_ids=set())
# Edges (from superfamily to subfamily)
df = pandas.read_csv('download/genefamily_db_tables/hierarchy.csv')
for row in df.itertuples():
families.add_edge(row.parent_fam_id, row.child_fam_id)
networkx.is_directed_acyclic_graph(families)
# -
for node, data in families.nodes(data=True):
data['root'] = families.in_degree(node) == 0
data['leaf'] = families.out_degree(node) == 0
root_nodes = [n for n, d in families.in_degree().items() if d == 0]
root_df = family_df.query("id in @root_nodes")
# Add propagated Entrez gene IDs
for row in hgnc_df.itertuples():
for family_id in row.gene_family_id.split('|'):
family_id = int(family_id)
for family_id in {family_id} | networkx.ancestors(families, family_id):
node_data = families.node[family_id]
entrez_id = int(row.entrez_id)
node_data['entrez_gene_ids'].add(entrez_id)
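
# A quick way to see what `networkx.ancestors` contributes above: it returns every superfamily from which a family is reachable, so each gene gets attached to its own family and to all enclosing families. A toy check of that semantics (illustrative only, not part of the HGNC data):
toy = networkx.DiGraph([(1, 2), (2, 3)])
assert networkx.ancestors(toy, 3) == {1, 2}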
rows = list()
for family_id, data in families.nodes(data=True):
entrez_gene_ids = sorted(data['entrez_gene_ids'])
for entrez_gene_id in entrez_gene_ids:
rows.append([family_id, data['name'], int(data['root']), int(data['leaf']), entrez_gene_id, entrez_id_to_symbol[entrez_gene_id]])
# Convert entrez_gene_ids to a string for GraphML export
data['entrez_gene_ids'] = '|'.join(map(str, entrez_gene_ids))
columns = ['family_id', 'family_name', 'root', 'leaf', 'entrez_gene_id', 'gene_symbol']
mapping_df = pandas.DataFrame(rows, columns=columns).sort_values(['family_id', 'entrez_gene_id'])
mapping_df.head(3)
# Write family graph to GraphML (XML format)
networkx.write_graphml(families, 'data/families.graphml')
# Export family to gene mapping to TSV
mapping_df.to_csv('data/gene-families.tsv', sep='\t', index=False)
| 2.families.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# <div class='bar_title'></div>
#
# *Practical Data Science*
#
# # Introduction to Jupyter Notebooks
#
# <NAME><br>
# Chair of Information Systems and Management
#
# Winter Semester 19/20
# + [markdown] slideshow={"slide_type": "subslide"} toc=true
# <h1>Table of Contents<span class="tocSkip"></span></h1>
# <div class="toc"><ul class="toc-item"><li><span><a href="#Jupyter" data-toc-modified-id="Jupyter-1"><span class="toc-item-num">1 </span>Jupyter</a></span><ul class="toc-item"><li><span><a href="#What-is-the-Jupyter-Notebook?" data-toc-modified-id="What-is-the-Jupyter-Notebook?-1.1"><span class="toc-item-num">1.1 </span>What is the Jupyter Notebook?</a></span></li><li><span><a href="#Components" data-toc-modified-id="Components-1.2"><span class="toc-item-num">1.2 </span>Components</a></span></li><li><span><a href="#Running-Code" data-toc-modified-id="Running-Code-1.3"><span class="toc-item-num">1.3 </span>Running Code</a></span></li><li><span><a href="#Help-and-Documentation" data-toc-modified-id="Help-and-Documentation-1.4"><span class="toc-item-num">1.4 </span>Help and Documentation</a></span></li><li><span><a href="#Markdown-Cells" data-toc-modified-id="Markdown-Cells-1.5"><span class="toc-item-num">1.5 </span>Markdown Cells</a></span></li><li><span><a href="#Colab" data-toc-modified-id="Colab-1.6"><span class="toc-item-num">1.6 </span>Colab</a></span></li></ul></li></ul></div>
# + [markdown] slideshow={"slide_type": "slide"}
# ## Jupyter
# + [markdown] slideshow={"slide_type": "subslide"}
#
# <img src="https://github.com/matjesg/AIS_2019/raw/master/notebooks/images/01/jupyter_nature.png" style="width:40%; float:right">
#
# Project Jupyter exists to develop open-source software, open-standards, and services for interactive computing across dozens of programming languages (https://jupyter.org/).
#
# <NAME>. (2018). __Why Jupyter is data scientists' computational notebook of choice__. Nature, 563(7729), 145.
# [Link](https://www.nature.com/articles/d41586-018-07196-1)
#
#
#
#
#
#
#
#
# + [markdown] slideshow={"slide_type": "subslide"}
#
# ### What is the Jupyter Notebook?
# + [markdown] slideshow={"slide_type": "-"}
# <img src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/250px-Jupyter_logo.svg.png" style="width:20%; float:right">
#
# The Jupyter Notebook is an **interactive computing environment** that enables users to author notebook documents that include:
# - Live code
# - Interactive widgets
# - Plots
# - Narrative text
# - Equations
# - Images
# - Video
#
# These documents provide a **complete and self-contained record of a computation** that can be converted to various formats and shared with others using email, [Dropbox](https://www.dropbox.com/), version control systems (like git/[GitHub](https://github.com)) or [nbviewer.jupyter.org](https://nbviewer.jupyter.org).
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Components
# -
# The Jupyter Notebook combines three components:
#
# * **The notebook web application**: An interactive web application for writing and running code interactively and authoring notebook documents.
# * **Kernels**: Separate processes started by the notebook web application that runs users' code in a given language and returns output back to the notebook web application. The kernel also handles things like computations for interactive widgets, tab completion and introspection.
# * **Notebook documents**: Self-contained documents that contain a representation of all content visible in the notebook web application, including inputs and outputs of the computations, narrative
# text, equations, images, and rich media representations of objects. Each notebook document has its own kernel.
# + [markdown] slideshow={"slide_type": "subslide"}
# #### Notebook web application
# -
# The notebook web application enables users to:
#
# * **Edit code in the browser**, with automatic syntax highlighting, indentation, and tab completion/introspection.
# * **Run code from the browser**, with the results of computations attached to the code which generated them.
# * See the results of computations with **rich media representations**, such as HTML, LaTeX, PNG, SVG, PDF, etc.
# * Create and use **interactive JavaScript widgets**, which bind interactive user interface controls and visualizations to reactive kernel side computations.
# * Author **narrative text** using the [Markdown](https://daringfireball.net/projects/markdown/) markup language.
# * Include mathematical equations using **LaTeX syntax in Markdown**, which are rendered in-browser by [MathJax](https://www.mathjax.org/).
# + [markdown] slideshow={"slide_type": "subslide"}
# #### Kernels
# -
# Through Jupyter's kernel and messaging architecture, the Notebook allows code to be run in a range of different programming languages. For each notebook document that a user opens, the web application starts a kernel that runs the code for that notebook. Each kernel is capable of running code in a single programming language and there are kernels available in the following languages:
#
# * Python (https://github.com/ipython/ipython)
# * Julia (https://github.com/JuliaLang/IJulia.jl)
# * R (https://github.com/IRkernel/IRkernel)
# * Ruby (https://github.com/minrk/iruby)
# * Haskell (https://github.com/gibiansky/IHaskell)
# * Scala (https://github.com/Bridgewater/scala-notebook)
# * node.js (https://gist.github.com/Carreau/4279371)
# * Go (https://github.com/takluyver/igo)
# + [markdown] slideshow={"slide_type": "subslide"}
# #### Notebook documents
# -
# Notebook documents contain the **inputs and outputs** of an interactive session as well as **narrative text** that accompanies the code but is not meant for execution. **Rich output** generated by running code, including HTML, images, video, and plots, is embedded in the notebook, which makes it a complete and self-contained record of a computation.
# When you run the notebook web application on your computer, notebook documents are just **files on your local filesystem with a `.ipynb` extension**. This allows you to use familiar workflows for organizing your notebooks into folders and sharing them with others.
# + [markdown] slideshow={"slide_type": "slide"}
# ### Running Code
# -
# First and foremost, the Jupyter Notebook is an interactive environment for writing and running code. The notebook is capable of running code in a wide range of languages. However, each notebook is associated with a single kernel. This notebook is associated with the IPython kernel, therefore runs Python code.
# + [markdown] slideshow={"slide_type": "subslide"}
# #### Code cells allow you to enter and run code
# -
# Run a code cell using `Shift-Enter` or pressing the <button class='btn btn-default btn-xs'><i class="icon-step-forward fa fa-step-forward"></i></button> button in the toolbar above:
a = 10
a
# There are two other keyboard shortcuts for running code:
#
# * `Alt-Enter` runs the current cell and inserts a new one below.
# * `Ctrl-Enter` run the current cell and enters command mode.
# + [markdown] slideshow={"slide_type": "subslide"}
# #### Managing the Kernel
# -
# __Stop__
#
# Code is run in a separate process called the Kernel. The Kernel can be interrupted or restarted. Try running the following cell and then hit the <button class='btn btn-default btn-xs'><i class='icon-stop fa fa-stop'></i></button> button in the toolbar above.
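# For example, running a deliberately slow cell (a minimal illustrative example) leaves time to press the interrupt button while it is executing:
import time
time.sleep(10)  # long enough to try interrupting the kernel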
# __Restart__
# The kernel maintains the state of a notebook's computations. You can reset this state by restarting the kernel. This is done by clicking on the <button class='btn btn-default btn-xs'><i class='fa fa-repeat icon-repeat'></i></button> in the toolbar above.
# + [markdown] slideshow={"slide_type": "subslide"}
# #### Cell menu
# -
# The "Cell" menu has a number of menu items for running code in different ways. These includes:
#
# * Run and Select Below
# * Run and Insert Below
# * Run All
# * Run All Above
# * Run All Below
# + [markdown] slideshow={"slide_type": "subslide"}
# #### (Large) outputs
# -
# All output is displayed as it is generated in the Kernel
for i in range(100):
print(i)
i
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Help and Documentation
# + [markdown] slideshow={"slide_type": "fragment"}
# #### Accessing the documentation with `?`
#
# The question mark is a simple shortcut to get help:
#
# ```Python
# ?print
# ```
#
# Or `Shift-Tab` to see the docstring
# -
# ?print
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Markdown Cells
# + [markdown] slideshow={"slide_type": "-"}
# Text can be added to Jupyter Notebooks using Markdown cells. You can change the cell type to Markdown by using the `Cell` menu, the toolbar, or the key shortcut `m`. Markdown is a popular markup language that is a superset of HTML. Its specification can be found here:
#
# <https://daringfireball.net/projects/markdown/>
# + [markdown] slideshow={"slide_type": "subslide"}
# #### Markdown basics
# + [markdown] slideshow={"slide_type": "-"}
# You can make text _italic_ or **bold** by surrounding a block of text with a single or double * respectively
# + [markdown] slideshow={"slide_type": "subslide"}
# You can build nested itemized or enumerated lists:
#
# * One
# - Sublist
# - This
# - Sublist
# - That
# - The other thing
# * Two
# - Sublist
# * Three
# - Sublist
# + [markdown] slideshow={"slide_type": "subslide"}
# Now another list:
#
# 1. Here we go
# 1. Sublist
# 2. Sublist
# 2. There we go
# 3. Now this
# + [markdown] slideshow={"slide_type": "subslide"}
# Here is a blockquote:
#
# > Beautiful is better than ugly.
# > Explicit is better than implicit.
# > Simple is better than complex.
# > Complex is better than complicated.
# + [markdown] slideshow={"slide_type": "subslide"}
# And shorthand for links:
#
# [Jupyter's website](https://jupyter.org)
# + [markdown] slideshow={"slide_type": "subslide"}
# You can use backslash \ to generate literal characters which would otherwise have special meaning in the Markdown syntax.
#
# ```
# \*literal asterisks\*
# *literal asterisks*
# ```
#
# Use double backslash \ \ to generate the literal $ symbol.
# + [markdown] slideshow={"slide_type": "subslide"}
# #### Headings
# -
# You can add headings by starting a line with one (or multiple) `#` followed by a space, as in the following example:
#
# ```
# # Heading 1
# # Heading 2
# ## Heading 2.1
# ## Heading 2.2
# ```
# + [markdown] slideshow={"slide_type": "subslide"}
# #### LaTeX equations
# -
# You can include mathematical expressions both inline:
# $e^{i\pi} + 1 = 0$ and displayed:
#
# $$e^x=\sum_{i=0}^\infty \frac{1}{i!}x^i$$
#
# Inline expressions can be added by surrounding the latex code with `$`:
#
# ```
# $e^{i\pi} + 1 = 0$
# ```
#
# Expressions on their own line are surrounded by `$$`:
#
# ```latex
# $$e^x=\sum_{i=0}^\infty \frac{1}{i!}x^i$$
# ```
# + [markdown] slideshow={"slide_type": "subslide"}
# #### GitHub flavored markdown
# -
# The Notebook webapp supports Github flavored markdown meaning that you can use triple backticks for code blocks:
#
# ```python
# print "Hello World"
# ```
#
# ```javascript
# console.log("Hello World")
# ```
#
# Gives:
#
# ```python
# print "Hello World"
# ```
#
# ```javascript
# console.log("Hello World")
# ```
# + [markdown] slideshow={"slide_type": "subslide"}
# And a table like this:
#
# | This | is |
# |------|------|
# | a | table|
#
# A nice HTML Table:
#
# | This | is |
# |------|------|
# | a | table|
# + [markdown] slideshow={"slide_type": "subslide"}
# #### Display Images
# + [markdown] slideshow={"slide_type": "-"}
# You can display images this way:
#
# 
#
# For example, we have the *Universität Würzburg* logo:
#
# 
#
# 
#
# + [markdown] slideshow={"slide_type": "subslide"}
# If you have local files in your Notebook directory, you can refer to these files in Markdown cells directly:
#
# [subdirectory/]<filename>
#
# For example, in the images folder, we have the *Universität Würzburg* logo:
#
# 
# or using HTML
#
# <img src="images/unilogo.png" />
#
# <img src="images/unilogo.png" style="width:20%"/>
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Colab
#
# <img src="https://colab.research.google.com/img/colab_favicon.ico" style="width:20%; float:right">
#
# [Colaboratory](https://colab.research.google.com/) is a free Jupyter notebook environment that requires no setup and runs entirely in the cloud.
#
# With Colaboratory you can
# - write and execute code,
# - save and share your analyses, and
# - access powerful computing resources (GPU and TPU),
#
# all for free from your browser. [More information](https://colab.research.google.com/notebooks/welcome.ipynb)
| Lecture/01_Jupyter.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # AWS STAC RTC
#
# Start exploring this dataset with Xarray
# !pip -q install odc.stac
import yaml
import odc.stac
import pystac
import hvplot.xarray
# Paste /proxy/localhost:8787 for cluster diagnostics
from dask.distributed import Client
client = Client()
client
# GDAL environment variables for better performance
import os
os.environ['AWS_REGION']='us-west-2'
os.environ['GDAL_DISABLE_READDIR_ON_OPEN']='EMPTY_DIR'
os.environ['AWS_NO_SIGN_REQUEST']='YES'
# temporary fix https://github.com/opendatacube/odc-stac/issues/9#issuecomment-952363783
cfg = """---
"*":
warnings: ignore # Disable warnings about duplicate common names
sentinel1-rtc-aws:
assets:
'*':
data_type: float32
nodata: 0
'incidence':
data_type: uint16
nodata: 0
# scale: 0.01 #need to do this manually
"""
cfg = yaml.load(cfg, Loader=yaml.CSafeLoader)
cat = pystac.read_file('catalog.json')
items = list(cat.get_all_items())
ds = odc.stac.load(items,
#bands=["gamma0_vv", "gamma0_vh"],
stac_cfg=cfg,
chunks=dict(x=512, y=512, time=1),
)
print('Total dataset size (GB) =', ds.nbytes/1e9)
ds
# +
# Subset around Grand Mesa
xmin,xmax,ymin,ymax = [739186, 742748, 4.325443e+06, 4.327356e+06]
daT = ds['gamma0_vv'].sel(x=slice(xmin, xmax),
y=slice(ymax, ymin))
daT
# +
# %%time
# Our area of interest is now small, and will easily fit in-memory
daT = daT.compute()
all_points = daT.where(daT!=0).hvplot.scatter('time', groupby=[], dynspread=True, datashade=True)
mean_trend = daT.where(daT!=0, drop=True).mean(dim=['x','y']).hvplot.line(title='North Grand Mesa', color='red')
(all_points * mean_trend)
# -
# ## Spatial visualizations
# +
i = 0
title=ds.time.values[0].astype('str')
ds['incidence'].isel(time=i).hvplot.image(rasterize=True,
data_aspect=1,
title=title,
cmap='viridis',
clabel='incidence (degrees)')
# -
ds['gamma0_vv'].isel(time=i).hvplot.image(rasterize=True,
data_aspect=1,
title=title,
clim=(0,0.5),
cmap='gray',
clabel='gamma0_vv (watts)')
ds['gamma0_vh'].isel(time=i).hvplot.image(rasterize=True,
data_aspect=1,
title=title,
clim=(0,0.2), # not equal to vv scale
cmap='gray',
clabel='gamma0_vh (watts)')
| odc-stac.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Amazon Fine Food Reviews Analysis
#
#
# Data Source: https://www.kaggle.com/snap/amazon-fine-food-reviews <br>
#
# EDA: https://nycdatascience.com/blog/student-works/amazon-fine-foods-visualization/
#
#
# The Amazon Fine Food Reviews dataset consists of reviews of fine foods from Amazon.<br>
#
# Number of reviews: 568,454<br>
# Number of users: 256,059<br>
# Number of products: 74,258<br>
# Timespan: Oct 1999 - Oct 2012<br>
# Number of Attributes/Columns in data: 10
#
# Attribute Information:
#
# 1. Id
# 2. ProductId - unique identifier for the product
# 3. UserId - unqiue identifier for the user
# 4. ProfileName
# 5. HelpfulnessNumerator - number of users who found the review helpful
# 6. HelpfulnessDenominator - number of users who indicated whether they found the review helpful or not
# 7. Score - rating between 1 and 5
# 8. Time - timestamp for the review
# 9. Summary - brief summary of the review
# 10. Text - text of the review
#
#
# #### Objective:
# Given a review, determine whether the review is positive (rating of 4 or 5) or negative (rating of 1 or 2).
#
# <br>
# [Q] How to determine if a review is positive or negative?<br>
# <br>
# [Ans] We could use the Score/Rating. A rating of 4 or 5 can be considered a positive review, and a rating of 1 or 2 a negative one. A review with a rating of 3 is considered neutral and such reviews are ignored in our analysis. This is an approximate, proxy way of determining the polarity (positivity/negativity) of a review.
#
#
#
# # [1]. Reading Data
# ## [1.1] Loading the data
#
# The dataset is available in two forms
# 1. .csv file
# 2. SQLite Database
#
# In order to load the data, we have used the SQLite database as it is easier to query and visualise the data efficiently.
# <br>
#
# Here, as we only want to get the global sentiment of the recommendations (positive or negative), we will purposefully ignore all Scores equal to 3. If the score is above 3, then the recommendation will be set to "positive". Otherwise, it will be set to "negative".
# +
# %matplotlib inline
import warnings
warnings.filterwarnings("ignore")
import sqlite3
import pandas as pd
import numpy as np
import nltk
import string
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics import confusion_matrix
from sklearn import metrics
from sklearn.metrics import roc_curve, auc
from nltk.stem.porter import PorterStemmer
import re
# Tutorial about Python regular expressions: https://pymotw.com/2/re/
import string
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from nltk.stem.wordnet import WordNetLemmatizer
from gensim.models import Word2Vec
from gensim.models import KeyedVectors
import pickle
from tqdm import tqdm
import os
# +
# using SQLite Table to read data.
con = sqlite3.connect('D:/Work/database.sqlite')
# filtering only positive and negative reviews i.e.
# not taking into consideration those reviews with Score=3
# SELECT * FROM Reviews WHERE Score != 3 LIMIT 500000, will give top 500000 data points
# you can change the number to any other number based on your computing power
# filtered_data = pd.read_sql_query(""" SELECT * FROM Reviews WHERE Score != 3 LIMIT 500000""", con)
# for tsne assignment you can take 5k data points
filtered_data = pd.read_sql_query(""" SELECT * FROM Reviews WHERE Score != 3 """, con)
# Give reviews with Score>3 a positive rating(1), and reviews with a score<3 a negative rating(0).
def partition(x):
if x < 3:
return 0
return 1
# mapping reviews with score greater than 3 to positive (1) and those with score less than 3 to negative (0)
actualScore = filtered_data['Score']
positiveNegative = actualScore.map(partition)
filtered_data['Score'] = positiveNegative
print("Number of data points in our data", filtered_data.shape)
filtered_data.head(3)
# -
display = pd.read_sql_query("""
SELECT UserId, ProductId, ProfileName, Time, Score, Text, COUNT(*)
FROM Reviews
GROUP BY UserId
HAVING COUNT(*)>1
""", con)
print(display.shape)
display.head()
display[display['UserId']=='AZY10LLTJ71NX']
display['COUNT(*)'].sum()
# # [2] Exploratory Data Analysis
# ## [2.1] Data Cleaning: Deduplication
#
# It is observed (as shown in the table below) that the reviews data had many duplicate entries. Hence it was necessary to remove duplicates in order to get unbiased results for the analysis of the data. Following is an example:
display= pd.read_sql_query("""
SELECT *
FROM Reviews
WHERE Score != 3 AND UserId="AR5J8UI46CURR"
ORDER BY ProductID
""", con)
display.head()
# As can be seen above, the same user has multiple reviews with the same values for HelpfulnessNumerator, HelpfulnessDenominator, Score, Time, Summary and Text, and on doing the analysis it was found that <br>
# <br>
# ProductId=B000HDOPZG was Loacker Quadratini Vanilla Wafer Cookies, 8.82-Ounce Packages (Pack of 8)<br>
# <br>
# ProductId=B000HDL1RQ was Loacker Quadratini Lemon Wafer Cookies, 8.82-Ounce Packages (Pack of 8) and so on<br>
#
# It was inferred after analysis that reviews with the same parameters other than ProductId belonged to the same product, just with a different flavour or quantity. Hence, in order to reduce redundancy, it was decided to eliminate the rows having the same parameters.<br>
#
# The method used was to first sort the data according to ProductId, then keep only the first review among duplicates and delete the others; e.g., in the example above only the review for ProductId=B000HDL1RQ remains. This ensures that there is only one representative for each product, whereas deduplication without sorting could leave different representatives for the same product.
#Sorting data according to ProductId in ascending order
sorted_data=filtered_data.sort_values('ProductId', axis=0, ascending=True, inplace=False, kind='quicksort', na_position='last')
#Deduplication of entries
final=sorted_data.drop_duplicates(subset={"UserId","ProfileName","Time","Text"}, keep='first', inplace=False)
final.shape
#Checking to see how much % of data still remains
(final['Id'].size*1.0)/(filtered_data['Id'].size*1.0)*100
# <b>Observation:-</b> It was also seen that in the two rows given below the value of HelpfulnessNumerator is greater than HelpfulnessDenominator, which is not practically possible; hence these two rows are also removed from the calculations.
# +
display= pd.read_sql_query("""
SELECT *
FROM Reviews
WHERE Score != 3 AND Id=44737 OR Id=64422
ORDER BY ProductID
""", con)
display.head()
# -
final=final[final.HelpfulnessNumerator<=final.HelpfulnessDenominator]
# +
#Before starting the next phase of preprocessing lets see the number of entries left
print(final.shape)
#How many positive and negative reviews are present in our dataset?
#final['Score'].value_counts()
#sampling 100k points
pos = final[final['Score']==1].sample(n = 50000)
neg = final[final['Score']==0].sample(n = 50000)
final_ = pd.concat([pos,neg])
print("Sampled data: ", final_.shape)
#sampling 40k points for rbf kernel
pos_ = final[final['Score']==1].sample(n=20000)
neg_ = final[final['Score']==0].sample(n=20000)
final_rbf = pd.concat([pos_,neg_])
print("Data for rbf kernel: ", final_rbf.shape)
# -
# # [3] Preprocessing
# ## [3.1]. Preprocessing Review Text
#
# Now that we have finished deduplication our data requires some preprocessing before we go on further with analysis and making the prediction model.
#
# Hence in the Preprocessing phase we do the following in the order below:-
#
# 1. Begin by removing the html tags
# 2. Remove any punctuations or limited set of special characters like , or . or # etc.
# 3. Check if the word is made up of english letters and is not alpha-numeric
# 4. Check to see if the length of the word is greater than 2 (there are no useful adjectives of fewer than 3 letters)
# 5. Convert the word to lowercase
# 6. Remove Stopwords
# 7. Finally Snowball Stemming the word (it was observed to be better than Porter Stemming)<br>
#
# After which we collect the words used to describe positive and negative reviews
# +
# printing some random reviews
sent_0 = final_['Text'].values[0]
print(sent_0)
print("="*50)
sent_1000 = final_['Text'].values[1000]
print(sent_1000)
print("="*50)
sent_1500 = final_['Text'].values[1500]
print(sent_1500)
print("="*50)
sent_4900 = final_['Text'].values[4900]
print(sent_4900)
print("="*50)
# +
# remove urls from text python: https://stackoverflow.com/a/40823105/4084039
sent_0 = re.sub(r"http\S+", "", sent_0)
sent_1000 = re.sub(r"http\S+", "", sent_1000)
sent_1500 = re.sub(r"http\S+", "", sent_1500)
sent_4900 = re.sub(r"http\S+", "", sent_4900)
print(sent_0)
# +
# https://stackoverflow.com/questions/16206380/python-beautifulsoup-how-to-remove-all-tags-from-an-element
from bs4 import BeautifulSoup
soup = BeautifulSoup(sent_0, 'lxml')
text = soup.get_text()
print(text)
print("="*50)
soup = BeautifulSoup(sent_1000, 'lxml')
text = soup.get_text()
print(text)
print("="*50)
soup = BeautifulSoup(sent_1500, 'lxml')
text = soup.get_text()
print(text)
print("="*50)
soup = BeautifulSoup(sent_4900, 'lxml')
text = soup.get_text()
print(text)
# +
# https://stackoverflow.com/a/47091490/4084039
import re
def decontracted(phrase):
# specific
phrase = re.sub(r"won't", "will not", phrase)
phrase = re.sub(r"can\'t", "can not", phrase)
# general
phrase = re.sub(r"n\'t", " not", phrase)
phrase = re.sub(r"\'re", " are", phrase)
phrase = re.sub(r"\'s", " is", phrase)
phrase = re.sub(r"\'d", " would", phrase)
phrase = re.sub(r"\'ll", " will", phrase)
phrase = re.sub(r"\'t", " not", phrase)
phrase = re.sub(r"\'ve", " have", phrase)
phrase = re.sub(r"\'m", " am", phrase)
return phrase
# -
sent_1500 = decontracted(sent_1500)
print(sent_1500)
print("="*50)
#remove words with numbers python: https://stackoverflow.com/a/18082370/4084039
sent_0 = re.sub("\S*\d\S*", "", sent_0).strip()
print(sent_0)
# remove special characters: https://stackoverflow.com/a/5843547/4084039
sent_1500 = re.sub('[^A-Za-z0-9]+', ' ', sent_1500)
print(sent_1500)
# +
# https://gist.github.com/sebleier/554280
# we are removing the words 'no', 'nor', 'not' from the stop words list
# <br /><br /> ==> after the above steps, we are left with "br br"
# so we include 'br' in the stop words list
# (if the tags had been <br/> instead of <br />, they would already have been removed in the 1st step)
stopwords= set(['br', 'the', 'i', 'me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves', 'you', "you're", "you've",\
"you'll", "you'd", 'your', 'yours', 'yourself', 'yourselves', 'he', 'him', 'his', 'himself', \
'she', "she's", 'her', 'hers', 'herself', 'it', "it's", 'its', 'itself', 'they', 'them', 'their',\
'theirs', 'themselves', 'what', 'which', 'who', 'whom', 'this', 'that', "that'll", 'these', 'those', \
'am', 'is', 'are', 'was', 'were', 'be', 'been', 'being', 'have', 'has', 'had', 'having', 'do', 'does', \
'did', 'doing', 'a', 'an', 'the', 'and', 'but', 'if', 'or', 'because', 'as', 'until', 'while', 'of', \
'at', 'by', 'for', 'with', 'about', 'against', 'between', 'into', 'through', 'during', 'before', 'after',\
'above', 'below', 'to', 'from', 'up', 'down', 'in', 'out', 'on', 'off', 'over', 'under', 'again', 'further',\
'then', 'once', 'here', 'there', 'when', 'where', 'why', 'how', 'all', 'any', 'both', 'each', 'few', 'more',\
'most', 'other', 'some', 'such', 'only', 'own', 'same', 'so', 'than', 'too', 'very', \
's', 't', 'can', 'will', 'just', 'don', "don't", 'should', "should've", 'now', 'd', 'll', 'm', 'o', 're', \
've', 'y', 'ain', 'aren', "aren't", 'couldn', "couldn't", 'didn', "didn't", 'doesn', "doesn't", 'hadn',\
"hadn't", 'hasn', "hasn't", 'haven', "haven't", 'isn', "isn't", 'ma', 'mightn', "mightn't", 'mustn',\
"mustn't", 'needn', "needn't", 'shan', "shan't", 'shouldn', "shouldn't", 'wasn', "wasn't", 'weren', "weren't", \
'won', "won't", 'wouldn', "wouldn't"])
# +
# Combining all the above steps
from tqdm import tqdm
preprocessed_reviews = []
# tqdm is for printing the status bar
for sentance in tqdm(final_['Text'].values):
sentance = re.sub(r"http\S+", "", sentance)
sentance = BeautifulSoup(sentance, 'lxml').get_text()
sentance = decontracted(sentance)
sentance = re.sub("\S*\d\S*", "", sentance).strip()
sentance = re.sub('[^A-Za-z]+', ' ', sentance)
# https://gist.github.com/sebleier/554280
sentance = ' '.join(e.lower() for e in sentance.split() if e.lower() not in stopwords)
preprocessed_reviews.append(sentance.strip())
preprocessed_reviews_rbf = []
# tqdm is for printing the status bar
for sentance in tqdm(final_rbf['Text'].values):
sentance = re.sub(r"http\S+", "", sentance)
sentance = BeautifulSoup(sentance, 'lxml').get_text()
sentance = decontracted(sentance)
sentance = re.sub("\S*\d\S*", "", sentance).strip()
sentance = re.sub('[^A-Za-z]+', ' ', sentance)
# https://gist.github.com/sebleier/554280
sentance = ' '.join(e.lower() for e in sentance.split() if e.lower() not in stopwords)
preprocessed_reviews_rbf.append(sentance.strip())
# -
preprocessed_reviews[1500]
# +
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(preprocessed_reviews, final_['Score'], test_size=0.3)
X_train_rbf, X_test_rbf, Y_train_rbf, Y_test_rbf = train_test_split(preprocessed_reviews_rbf, final_rbf['Score'], test_size=0.3)
# -
# # [4] Featurization
# ## [4.1] BAG OF WORDS
# +
#BoW
count_vect = CountVectorizer(min_df=10, max_features=5000) #in scikit-learn
X_train_bow = count_vect.fit_transform(X_train)
print("some feature names ", count_vect.get_feature_names()[:10])
print('='*50)
X_test_bow = count_vect.transform(X_test)
final_counts = count_vect.transform(preprocessed_reviews)
print("the type of count vectorizer ",type(final_counts))
print("the shape of out text BOW vectorizer ",final_counts.get_shape())
print("the number of unique words ", final_counts.get_shape()[1])
#BoW
count_vect_rbf = CountVectorizer(min_df=10, max_features=500)
X_train_bow_rbf = count_vect_rbf.fit_transform(X_train_rbf)
print("some feature names ", count_vect_rbf.get_feature_names()[:10])
print('='*50)
X_test_bow_rbf = count_vect_rbf.transform(X_test_rbf)
final_counts_rbf = count_vect_rbf.transform(preprocessed_reviews_rbf)
print("the type of count vectorizer ",type(final_counts_rbf))
print("the shape of out text BOW vectorizer ",final_counts_rbf.get_shape())
print("the number of unique words ", final_counts_rbf.get_shape()[1])
# -
# ## [4.2] Bi-Grams and n-Grams.
# +
#bi-gram, tri-gram and n-gram
#removing stop words like "not" should be avoided before building n-grams
# count_vect = CountVectorizer(ngram_range=(1,2))
# please do read the CountVectorizer documentation http://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.CountVectorizer.html
# you can choose these numbers, e.g. min_df=10, max_features=5000, as per your choice
count_vect = CountVectorizer(ngram_range=(1,2), min_df=10, max_features=5000)
final_bigram_counts = count_vect.fit_transform(preprocessed_reviews)
print("the type of count vectorizer ",type(final_bigram_counts))
print("the shape of out text BOW vectorizer ",final_bigram_counts.get_shape())
print("the number of unique words including both unigrams and bigrams ", final_bigram_counts.get_shape()[1])
# -
# ## [4.3] TF-IDF
# +
tf_idf_vect = TfidfVectorizer(ngram_range=(1,2), min_df=10)
X_train_tfidf = tf_idf_vect.fit_transform(X_train)
print("some sample features(unique words in the corpus)",tf_idf_vect.get_feature_names()[0:10])
print('='*50)
X_test_tfidf = tf_idf_vect.transform(X_test)
final_tf_idf = tf_idf_vect.transform(preprocessed_reviews)
print("the type of count vectorizer ",type(final_tf_idf))
print("the shape of out text TFIDF vectorizer ",final_tf_idf.get_shape())
print("the number of unique words including both unigrams and bigrams ", final_tf_idf.get_shape()[1])
tf_idf_vect_rbf = TfidfVectorizer(ngram_range=(1,2), min_df=10, max_features=500)
X_train_tfidf_rbf = tf_idf_vect_rbf.fit_transform(X_train_rbf)
print("some sample features(unique words in the corpus)",tf_idf_vect_rbf.get_feature_names()[0:10])
print('='*50)
X_test_tfidf_rbf = tf_idf_vect_rbf.transform(X_test_rbf)
final_tf_idf_rbf = tf_idf_vect_rbf.transform(preprocessed_reviews_rbf)
print("the type of count vectorizer ",type(final_tf_idf_rbf))
print("the shape of out text TFIDF vectorizer ",final_tf_idf_rbf.get_shape())
print("the number of unique words including both unigrams and bigrams ", final_tf_idf_rbf.get_shape()[1])
# -
# ## [4.4] Word2Vec
# +
# Train your own Word2Vec model using your own text corpus
i=0
list_of_sentance = []
for sentance in X_train:
list_of_sentance.append(sentance.split())
list_of_sent_test = []
for sentance in X_test:
list_of_sent_test.append(sentance.split())
list_of_sentance_rbf = []
for sentance in X_train_rbf:
list_of_sentance_rbf.append(sentance.split())
list_of_sent_test_rbf = []
for sentance in X_test_rbf:
list_of_sent_test_rbf.append(sentance.split())
# +
# Using Google News Word2Vectors
# in this project we are using a pretrained model by google
# it's a 3.3GB file; once you load it into memory
# it occupies ~9GB, so please do this step only if you have >12GB of RAM
# we will provide a pickle file which contains a dict,
# with all our corpus words as keys and model[word] as values
# To use this code-snippet, download "GoogleNews-vectors-negative300.bin"
# from https://drive.google.com/file/d/0B7XkCwpI5KDYNlNUTTlSS21pQmM/edit
# it's 1.9GB in size.
# http://kavita-ganesan.com/gensim-word2vec-tutorial-starter-code/#.W17SRFAzZPY
# you can comment this whole cell
# or change these varible according to your need
is_your_ram_gt_16g=False
want_to_use_google_w2v = False
want_to_train_w2v = True
if want_to_train_w2v:
    # min_count = 5 considers only words that occurred at least 5 times
w2v_model=Word2Vec(list_of_sentance,min_count=5,size=50, workers=4)
print(w2v_model.wv.most_similar('great'))
print('='*50)
print(w2v_model.wv.most_similar('worst'))
elif want_to_use_google_w2v and is_your_ram_gt_16g:
if os.path.isfile('GoogleNews-vectors-negative300.bin'):
w2v_model=KeyedVectors.load_word2vec_format('GoogleNews-vectors-negative300.bin', binary=True)
print(w2v_model.wv.most_similar('great'))
print(w2v_model.wv.most_similar('worst'))
else:
print("you don't have gogole's word2vec file, keep want_to_train_w2v = True, to train your own w2v ")
# -
w2v_words = list(w2v_model.wv.vocab)
print("number of words that occured minimum 5 times ",len(w2v_words))
print("sample words ", w2v_words[0:50])
# ## [4.4.1] Converting text into vectors using Avg W2V, TFIDF-W2V
# #### [4.4.1.1] Avg W2v
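# In the cell below each review is represented by the plain average of the Word2Vec vectors of its words, $v_{\text{review}} = \frac{1}{n}\sum_{i=1}^{n} \vec{w}_i$, where $n$ counts only the words that have a vector in the trained model.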
# +
# average Word2Vec
# compute average word2vec for each review.
sent_vectors_TRAIN_aw2v = []; # the avg-w2v for each sentence/review is stored in this list
for sent in tqdm(list_of_sentance): # for each review/sentence
    sent_vec = np.zeros(50)  # initialize a zero vector of length 50 (the w2v dimensionality); change this to 300 if you use Google's w2v
cnt_words =0; # num of words with a valid vector in the sentence/review
for word in sent: # for each word in a review/sentence
if word in w2v_words:
vec = w2v_model.wv[word]
sent_vec += vec
cnt_words += 1
if cnt_words != 0:
sent_vec /= cnt_words
sent_vectors_TRAIN_aw2v.append(sent_vec)
print(len(sent_vectors_TRAIN_aw2v))
print(len(sent_vectors_TRAIN_aw2v[0]))
sent_vectors_TEST_aw2v = [];
for sent in tqdm(list_of_sent_test):
sent_vec = np.zeros(50)
cnt_words =0;
for word in sent:
if word in w2v_words:
vec = w2v_model.wv[word]
sent_vec += vec
cnt_words += 1
if cnt_words != 0:
sent_vec /= cnt_words
sent_vectors_TEST_aw2v.append(sent_vec)
print(len(sent_vectors_TEST_aw2v))
print(len(sent_vectors_TEST_aw2v[0]))
# average Word2Vec
# compute average word2vec for each review.
sent_vectors_TRAIN_rbf_aw2v = []; # the avg-w2v for each sentence/review is stored in this list
for sent in tqdm(list_of_sentance_rbf): # for each review/sentence
    sent_vec = np.zeros(50)  # initialize a zero vector of length 50 (the w2v dimensionality); change this to 300 if you use Google's w2v
cnt_words =0; # num of words with a valid vector in the sentence/review
for word in sent: # for each word in a review/sentence
if word in w2v_words:
vec = w2v_model.wv[word]
sent_vec += vec
cnt_words += 1
if cnt_words != 0:
sent_vec /= cnt_words
sent_vectors_TRAIN_rbf_aw2v.append(sent_vec)
print(len(sent_vectors_TRAIN_rbf_aw2v))
print(len(sent_vectors_TRAIN_rbf_aw2v[0]))
sent_vectors_TEST_rbf_aw2v = [];
for sent in tqdm(list_of_sent_test_rbf):
sent_vec = np.zeros(50)
cnt_words =0;
for word in sent:
if word in w2v_words:
vec = w2v_model.wv[word]
sent_vec += vec
cnt_words += 1
if cnt_words != 0:
sent_vec /= cnt_words
sent_vectors_TEST_rbf_aw2v.append(sent_vec)
print(len(sent_vectors_TEST_rbf_aw2v))
print(len(sent_vectors_TEST_rbf_aw2v[0]))
# -
# #### [4.4.1.2] TFIDF weighted W2v
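# In the cells below each review is represented by a TF-IDF weighted average of its word vectors, $v_{\text{review}} = \dfrac{\sum_{w} \text{tfidf}(w)\,\vec{w}}{\sum_{w} \text{tfidf}(w)}$, where the sum runs over the words of the review that have both a Word2Vec vector and a TF-IDF feature, and $\text{tfidf}(w)$ is the word's idf times its term frequency in the review.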
# +
# S = ["abc def pqr", "def def def abc", "pqr pqr def"]
model = TfidfVectorizer(max_features=500)
tf_idf_matrix = model.fit_transform(preprocessed_reviews)
# we are converting to a dictionary with word as a key, and the idf as a value
dictionary1 = dict(zip(model.get_feature_names(), list(model.idf_)))
model2 = TfidfVectorizer(max_features=500)
tf_idf_matrix_2 = model2.fit_transform(preprocessed_reviews_rbf)
# we are converting to a dictionary with word as a key, and the idf as a value
dictionary2 = dict(zip(model2.get_feature_names(), list(model2.idf_)))
# +
# TF-IDF weighted Word2Vec
tfidf_feat = model.get_feature_names() # tfidf words/col-names
# final_tf_idf is the sparse matrix with row= sentence, col=word and cell_val = tfidf
tfidf_sent_vectors_TRAIN_tfidfw2v = []; # the tfidf-w2v for each sentence/review is stored in this list
row=0;
for sent in tqdm(list_of_sentance): # for each review/sentence
    sent_vec = np.zeros(50)  # initialize a zero vector of length 50 (the w2v dimensionality)
weight_sum =0; # num of words with a valid vector in the sentence/review
for word in sent: # for each word in a review/sentence
if word in w2v_words and word in tfidf_feat:
vec = w2v_model.wv[word]
            # tf_idf = tf_idf_matrix[row, tfidf_feat.index(word)]
            # to reduce the computation we use:
            # dictionary[word] = idf value of word in the whole corpus
            # sent.count(word)/len(sent) = tf value of word in this review
tf_idf = dictionary1[word]*(sent.count(word)/len(sent))
sent_vec += (vec * tf_idf)
weight_sum += tf_idf
if weight_sum != 0:
sent_vec /= weight_sum
tfidf_sent_vectors_TRAIN_tfidfw2v.append(sent_vec)
row += 1
tfidf_sent_vectors_TEST_tfidfw2v = [];
row=0
for sent in tqdm(list_of_sent_test):
sent_vec = np.zeros(50)
weight_sum =0
for word in sent:
if word in w2v_words and word in tfidf_feat:
vec = w2v_model.wv[word]
tf_idf = dictionary1[word]*(sent.count(word)/len(sent))
sent_vec += (vec * tf_idf)
weight_sum += tf_idf
if weight_sum != 0:
sent_vec /= weight_sum
tfidf_sent_vectors_TEST_tfidfw2v.append(sent_vec)
row += 1
# TF-IDF weighted Word2Vec
tfidf_feat = model2.get_feature_names() # tfidf words/col-names
# final_tf_idf is the sparse matrix with row= sentence, col=word and cell_val = tfidf
tfidf_sent_vectors_TRAIN_rbf_tfidfw2v = []; # the tfidf-w2v for each sentence/review is stored in this list
row=0;
for sent in tqdm(list_of_sentance_rbf): # for each review/sentence
    sent_vec = np.zeros(50)  # initialize a zero vector of length 50 (the w2v dimensionality)
weight_sum =0; # num of words with a valid vector in the sentence/review
for word in sent: # for each word in a review/sentence
if word in w2v_words and word in tfidf_feat:
vec = w2v_model.wv[word]
            # tf_idf = tf_idf_matrix[row, tfidf_feat.index(word)]
            # to reduce the computation we use:
            # dictionary[word] = idf value of word in the whole corpus
            # sent.count(word)/len(sent) = tf value of word in this review
tf_idf = dictionary2[word]*(sent.count(word)/len(sent))
sent_vec += (vec * tf_idf)
weight_sum += tf_idf
if weight_sum != 0:
sent_vec /= weight_sum
tfidf_sent_vectors_TRAIN_rbf_tfidfw2v.append(sent_vec)
row += 1
tfidf_sent_vectors_TEST_rbf_tfidfw2v = [];
row=0
for sent in tqdm(list_of_sent_test_rbf):
sent_vec = np.zeros(50)
weight_sum =0
for word in sent:
if word in w2v_words and word in tfidf_feat:
vec = w2v_model.wv[word]
tf_idf = dictionary2[word]*(sent.count(word)/len(sent))
sent_vec += (vec * tf_idf)
weight_sum += tf_idf
if weight_sum != 0:
sent_vec /= weight_sum
tfidf_sent_vectors_TEST_rbf_tfidfw2v.append(sent_vec)
row += 1
# -
# # SVM
# <ol>
# <li><strong>Apply SVM on these feature sets</strong>
# <ul>
# <li><font color='red'>SET 1:</font>Review text, preprocessed one converted into vectors using (BOW)</li>
# <li><font color='red'>SET 2:</font>Review text, preprocessed one converted into vectors using (TFIDF)</li>
# <li><font color='red'>SET 3:</font>Review text, preprocessed one converted into vectors using (AVG W2v)</li>
# <li><font color='red'>SET 4:</font>Review text, preprocessed one converted into vectors using (TFIDF W2v)</li>
# </ul>
# </li>
# <br>
# <li><strong>Procedure</strong>
# <ul>
# <li>You need to work with 2 versions of SVM
# <ul><li>Linear kernel</li>
# <li>RBF kernel</li></ul>
# <li>Use SGDClassifier with hinge loss because it is computationally less expensive and behaves like a linear SVM</li>
# <li>Just as a kd-tree helps with k-NN, when working with the RBF kernel it is better to reduce the number of dimensions: min_df = 10, max_features = 500, and 40k points are considered.</li>
# </ul>
# </li>
# <br>
# <li><strong>Hyper parameter tuning (best alpha in the range [10^-4, 10^4], and the best penalty among 'l1', 'l2')</strong>
# <ul>
# <li>Finding the best hyper parameter using 3-fold cross validation.</li>
# <li>Used GridSearchCV for hyper parameter tuning in the case of the RBF kernel SVM (see the sketch after this list).</li>
# </ul>
# </li>
# <br>
# <li><strong>Feature importance</strong>
# <ul>
# <li>While working with the linear kernel on BOW or TFIDF, the top 10 features are printed for each of the positive and negative classes.</li>
# </ul>
# </li>
# </ol>
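#
# The following is a minimal sketch of the GridSearchCV setup mentioned above for the RBF kernel, using the reduced 500-dimensional BOW features and labels defined earlier; the C/gamma grid and the roc_auc scoring are illustrative assumptions, since this notebook does not fix them.
# +
# Illustrative sketch only: 3-fold grid search over C and gamma for an RBF-kernel SVC.
# The grid values and scoring choice are assumptions, not results from this notebook.
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV

param_grid = {'C': [0.01, 0.1, 1, 10, 100],
              'gamma': [0.001, 0.01, 0.1, 1]}
grid = GridSearchCV(SVC(kernel='rbf'), param_grid, cv=3, scoring='roc_auc', n_jobs=-1)
grid.fit(X_train_bow_rbf, Y_train_rbf)
print("best params:", grid.best_params_, "best CV AUC:", grid.best_score_)
# -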
# # Applying SVM
# ## [5.1] Linear SVM
# ### [5.1.1] Applying Linear SVM on BOW,<font color='red'> SET 1</font>
# +
# standardization
from sklearn.preprocessing import StandardScaler
std = StandardScaler(copy=True, with_mean=False, with_std=True)
X_tra = std.fit_transform(X_train_bow)
X_tes = std.transform(X_test_bow)
from sklearn.linear_model import SGDClassifier
from sklearn.calibration import CalibratedClassifierCV
from sklearn.metrics import roc_auc_score
alpha = [0.0001,0.001,0.01,0.1,1,10,100,1000]
pen = ['l2','l1']
d = {}
for i in alpha:
for j in pen:
clf = SGDClassifier(alpha=i, penalty=j, loss='hinge', random_state=10)
clf.fit(X_tra, Y_train)
cccv = CalibratedClassifierCV(clf, method="sigmoid")
cccv.fit(X_tra, Y_train)
predict_y = cccv.predict_proba(X_tes)[:,1]
temp = roc_auc_score(Y_test, predict_y)
d[i,j] = (temp)
# +
l1 = []
l2 = []
for i in alpha:
l1.append(d[i,'l1'])
l2.append(d[i,'l2'])
plt.plot(np.log(alpha),l1, color = 'darkorange', label='L1')
plt.plot(np.log(alpha),l2, color = 'blue', label='L2')
plt.legend()
plt.xlabel('Hyperparameter')
plt.ylabel('AUC')
plt.title(' Hyperparameter v/s AUC')
plt.show()
# -
import operator
c_ = max(d.items(), key=operator.itemgetter(1))[0]
max_auc = d[c_]
print('The best value of hyper parameter is ', c_)
print('The max auc score is ', max_auc)
# +
#training the model with best value of hyper parameter
from sklearn.linear_model import SGDClassifier
from sklearn.calibration import CalibratedClassifierCV
from sklearn.metrics import accuracy_score, f1_score, recall_score, precision_score, roc_curve, auc
clf = SGDClassifier(alpha=c_[0], penalty=c_[1], loss='hinge', random_state=10)
clf.fit(X_tra, Y_train)
cccv = CalibratedClassifierCV(clf, method="sigmoid")
cccv.fit(X_tra, Y_train)
predict_y_tr = cccv.predict_proba(X_tra)
predict_y = cccv.predict_proba(X_tes)
y_pred = cccv.predict(X_tes)
fpr, tpr, thresholds = roc_curve(Y_test, predict_y[:,1])
fpr_tr, tpr_tr, thresholds_tr = roc_curve(Y_train, predict_y_tr[:,1])
roc_auc_te = auc(fpr, tpr)
roc_auc_tr = auc(fpr_tr, tpr_tr)
plt.figure()
plt.plot(fpr, tpr, color='darkorange', lw=1, label='Test curve (area = %0.2f)' % roc_auc_te)
plt.plot(fpr_tr, tpr_tr, color='blue', lw=1, label='Train (area = %0.2f)' % roc_auc_tr)
plt.plot([0, 1], [0, 1], color='navy', linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic')
plt.legend(loc="lower right")
plt.show()
import seaborn as sns
ax = sns.heatmap(confusion_matrix(Y_test, y_pred), annot=True, fmt='d')
print('Accuracy on test set: %0.2f%%'%(accuracy_score(Y_test, y_pred)*100))
print('Precision on test set: %0.2f%%'%(precision_score(Y_test, y_pred)*100))
print('Recall on test set: %0.2f%%'%(recall_score(Y_test, y_pred)*100))
print('F1 score on test set: %0.2f%%'%(f1_score(Y_test, y_pred)*100))
# +
def show_most_informative_features(vectorizer, clf, n=10):
feature_names = vectorizer.get_feature_names()
coefs_with_fns = sorted(zip(clf.coef_[0], feature_names))
top = zip(coefs_with_fns[:n], coefs_with_fns[:-(n + 1):-1])
print("\t\t\tNegative\t\t\t\t\t\tPositive")
print("________________________________________________________________________________________________")
for (coef_1, fn_1), (coef_2, fn_2) in top:
print("\t%.4f\t%-15s\t\t\t\t%.4f\t%-15s" % (coef_1, fn_1, coef_2, fn_2))
show_most_informative_features(count_vect,clf)
#Code Reference:https://stackoverflow.com/questions/11116697/how-to-get-most-informative-features-for-scikit-learn-classifiers
# -
# ### [5.1.2] Applying Linear SVM on TFIDF,<font color='red'> SET 2</font>
# +
# standardization
from sklearn.preprocessing import StandardScaler
std = StandardScaler(copy=True, with_mean=False, with_std=True)
X_tra = std.fit_transform(X_train_tfidf)
X_tes = std.transform(X_test_tfidf)
from sklearn.linear_model import SGDClassifier
from sklearn.calibration import CalibratedClassifierCV
from sklearn.metrics import hinge_loss
from sklearn.metrics import log_loss
from sklearn.metrics import roc_auc_score
alpha = [0.0001,0.001,0.01,0.1,1,10,100,1000]
pen = ['l2','l1']
d = {}
for i in alpha:
for j in pen:
clf = SGDClassifier(alpha=i, penalty=j, loss='hinge', random_state=10, class_weight='balanced')
clf.fit(X_tra, Y_train)
cccv = CalibratedClassifierCV(clf, cv=10, method="sigmoid")
cccv.fit(X_tra, Y_train)
predict_y = cccv.predict_proba(X_tes)[:,1]
temp = roc_auc_score(Y_test,predict_y)
d[i,j] = (temp)
# +
l1 = []
l2 = []
for i in alpha:
l1.append(d[i,'l1'])
l2.append(d[i,'l2'])
plt.plot(np.log(alpha),l1, color = 'darkorange', label='L1')
plt.plot(np.log(alpha),l2, color = 'blue', label='L2')
plt.legend()
plt.xlabel('Hyperparameter')
plt.ylabel('AUC')
plt.title(' Hyperparameter v/s AUC')
plt.show()
# -
import operator
c_ = max(d.items(), key=operator.itemgetter(1))[0]
max_auc = d[c_]
print('The best value of hyper parameter is ', c_)
print('The max auc score ', max_auc)
# +
#training the model with best value of hyper parameter
from sklearn.linear_model import SGDClassifier
from sklearn.calibration import CalibratedClassifierCV
from sklearn.metrics import accuracy_score, f1_score, recall_score, precision_score, roc_curve, auc
clf = SGDClassifier(alpha=c_[0], penalty=c_[1], loss='hinge', random_state=10)
clf.fit(X_tra, Y_train)
cccv = CalibratedClassifierCV(clf, method="sigmoid")
cccv.fit(X_tra, Y_train)
predict_y_tr = cccv.predict_proba(X_tra)[:,1]
predict_y = cccv.predict_proba(X_tes)[:,1]
y_pred = cccv.predict(X_tes)
fpr, tpr, thresholds = roc_curve(Y_test, predict_y)
fpr_tr, tpr_tr, thresholds_tr = roc_curve(Y_train, predict_y_tr)
roc_auc_te = auc(fpr, tpr)
roc_auc_tr = auc(fpr_tr, tpr_tr)
plt.figure()
plt.plot(fpr, tpr, color='darkorange', lw=1, label='Test curve (area = %0.2f)' % roc_auc_te)
plt.plot(fpr_tr, tpr_tr, color='blue', lw=1, label='Train (area = %0.2f)' % roc_auc_tr)
plt.plot([0, 1], [0, 1], color='navy', linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic')
plt.legend(loc="lower right")
plt.show()
import seaborn as sns
ax = sns.heatmap(confusion_matrix(Y_test, y_pred), annot=True, fmt='d')
print('Accuracy on test set: %0.2f%%'%(accuracy_score(Y_test, y_pred)*100))
print('Precision on test set: %0.2f%%'%(precision_score(Y_test, y_pred)*100))
print('Recall on test set: %0.2f%%'%(recall_score(Y_test, y_pred)*100))
print('F1 score on test set: %0.2f%%'%(f1_score(Y_test, y_pred)*100))
# +
def show_most_informative_features(vectorizer, clf, n=10):
feature_names = vectorizer.get_feature_names()
coefs_with_fns = sorted(zip(clf.coef_[0], feature_names))
top = zip(coefs_with_fns[:n], coefs_with_fns[:-(n + 1):-1])
print("\t\t\tNegative\t\t\t\t\t\tPositive")
print("________________________________________________________________________________________________")
for (coef_1, fn_1), (coef_2, fn_2) in top:
print("\t%.4f\t%-15s\t\t\t\t%.4f\t%-15s" % (coef_1, fn_1, coef_2, fn_2))
show_most_informative_features(tf_idf_vect,clf)
#Code Reference:https://stackoverflow.com/questions/11116697/how-to-get-most-informative-features-for-scikit-learn-classifiers
# -
# ### [5.1.3] Applying Linear SVM on AVG W2V,<font color='red'> SET 3</font>
# +
# standardization
from sklearn.preprocessing import StandardScaler
std = StandardScaler(copy=True, with_mean=False, with_std=True)
X_tra = std.fit_transform(sent_vectors_TRAIN_aw2v)
X_tes = std.transform(sent_vectors_TEST_aw2v)
from sklearn.linear_model import SGDClassifier
from sklearn.calibration import CalibratedClassifierCV
from sklearn.metrics import hinge_loss
from sklearn.metrics import log_loss
from sklearn.metrics import roc_auc_score
alpha = [0.0001,0.001,0.01,0.1,1,10,100,1000]
pen = ['l2','l1']
d = {}
for i in alpha:
for j in pen:
clf = SGDClassifier(alpha=i, penalty=j, loss='hinge', random_state=10, class_weight='balanced')
clf.fit(X_tra, Y_train)
cccv = CalibratedClassifierCV(clf, cv=10, method="sigmoid")
cccv.fit(X_tra, Y_train)
predict_y = cccv.predict_proba(X_tes)[:,1]
temp = roc_auc_score(Y_test,predict_y)
d[i,j] = (temp)
# +
l1 = []
l2 = []
for i in alpha:
l1.append(d[i,'l1'])
l2.append(d[i,'l2'])
plt.plot(np.log(alpha),l1, color = 'darkorange', label='L1')
plt.plot(np.log(alpha),l2, color = 'blue', label='L2')
plt.legend()
plt.xlabel('Hyperparameter')
plt.ylabel('AUC')
plt.title(' Hyperparameter v/s AUC')
plt.show()
# -
import operator
c_ = max(d.items(), key=operator.itemgetter(1))[0]
max_auc = d[c_]
print('The best value of hyper parameter is ', c_)
print('The max auc score ', max_auc)
# +
#training the model with best value of hyper parameter
from sklearn.linear_model import SGDClassifier
from sklearn.calibration import CalibratedClassifierCV
from sklearn.metrics import accuracy_score, f1_score, recall_score, precision_score, roc_curve, auc, confusion_matrix
clf = SGDClassifier(alpha=c_[0], penalty=c_[1], loss='hinge', random_state=10)
clf.fit(X_tra, Y_train)
cccv = CalibratedClassifierCV(clf, method="sigmoid")
cccv.fit(X_tra, Y_train)
predict_y_tr = cccv.predict_proba(X_tra)[:,1]
predict_y = cccv.predict_proba(X_tes)[:,1]
y_pred = cccv.predict(X_tes)
fpr, tpr, thresholds = roc_curve(Y_test, predict_y)
fpr_tr, tpr_tr, thresholds_tr = roc_curve(Y_train, predict_y_tr)
roc_auc_te = auc(fpr, tpr)
roc_auc_tr = auc(fpr_tr, tpr_tr)
plt.figure()
plt.plot(fpr, tpr, color='darkorange', lw=1, label='Test curve (area = %0.2f)' % roc_auc_te)
plt.plot(fpr_tr, tpr_tr, color='blue', lw=1, label='Train (area = %0.2f)' % roc_auc_tr)
plt.plot([0, 1], [0, 1], color='navy', linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic')
plt.legend(loc="lower right")
plt.show()
import seaborn as sns
ax = sns.heatmap(confusion_matrix(Y_test, y_pred), annot=True, fmt='d')
print('Accuracy on test set: %0.2f%%'%(100*accuracy_score(Y_test, y_pred)))
print('Precision on test set: %0.2f%%'%(100*precision_score(Y_test, y_pred)))
print('Recall on test set: %0.2f%%'%(100*recall_score(Y_test, y_pred)))
print('F1 score on test set: %0.2f%%'%(100*f1_score(Y_test, y_pred)))
# -
# ### [5.1.4] Applying Linear SVM on TFIDF W2V,<font color='red'> SET 4</font>
# +
# standardization
from sklearn.preprocessing import StandardScaler
std = StandardScaler(copy=True, with_mean=False, with_std=True)
X_tra = std.fit_transform(tfidf_sent_vectors_TRAIN_tfidfw2v)
X_tes = std.transform(tfidf_sent_vectors_TEST_tfidfw2v)
from sklearn.linear_model import SGDClassifier
from sklearn.calibration import CalibratedClassifierCV
from sklearn.metrics import hinge_loss
from sklearn.metrics import log_loss
from sklearn.metrics import roc_auc_score
alpha = [0.0001,0.001,0.01,0.1,1,10,100,1000]
pen = ['l2','l1']
d = {}
for i in alpha:
for j in pen:
clf = SGDClassifier(alpha=i, penalty=j, loss='hinge', random_state=10, class_weight='balanced')
clf.fit(X_tra, Y_train)
cccv = CalibratedClassifierCV(clf, cv=10, method="sigmoid")
cccv.fit(X_tra, Y_train)
predict_y = cccv.predict_proba(X_tes)[:,1]
temp = roc_auc_score(Y_test,predict_y)
d[i,j] = (temp)
# +
l1 = []
l2 = []
for i in alpha:
l1.append(d[i,'l1'])
l2.append(d[i,'l2'])
plt.plot(np.log(alpha),l1, color = 'darkorange', label='L1')
plt.plot(np.log(alpha),l2, color = 'blue', label='L2')
plt.legend()
plt.xlabel('Hyperparameter')
plt.ylabel('AUC')
plt.title(' Hyperparameter v/s AUC')
plt.show()
# -
import operator
c_ = max(d.items(), key=operator.itemgetter(1))[0]
max_auc = d[c_]
print('The best value of hyper parameter is ', c_)
print('The max auc score ', max_auc)
# +
#training the model with best value of hyper parameter
from sklearn.linear_model import SGDClassifier
from sklearn.calibration import CalibratedClassifierCV
from sklearn.metrics import accuracy_score, f1_score, recall_score, precision_score, roc_curve, auc, confusion_matrix
clf = SGDClassifier(alpha=c_[0], penalty=c_[1], loss='hinge', random_state=10)
clf.fit(X_tra, Y_train)
cccv = CalibratedClassifierCV(clf, method="sigmoid")
cccv.fit(X_tra, Y_train)
predict_y_tr = cccv.predict_proba(X_tra)[:,1]
predict_y = cccv.predict_proba(X_tes)[:,1]
y_pred = cccv.predict(X_tes)
fpr, tpr, thresholds = roc_curve(Y_test, predict_y)
fpr_tr, tpr_tr, thresholds_tr = roc_curve(Y_train, predict_y_tr)
roc_auc_te = auc(fpr, tpr)
roc_auc_tr = auc(fpr_tr, tpr_tr)
plt.figure()
plt.plot(fpr, tpr, color='darkorange', lw=1, label='Test curve (area = %0.2f)' % roc_auc_te)
plt.plot(fpr_tr, tpr_tr, color='blue', lw=1, label='Train (area = %0.2f)' % roc_auc_tr)
plt.plot([0, 1], [0, 1], color='navy', linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic')
plt.legend(loc="lower right")
plt.show()
import seaborn as sns
ax = sns.heatmap(confusion_matrix(Y_test, y_pred), annot=True, fmt='d')
print('Accuracy on test set: %0.2f%%'%(100*accuracy_score(Y_test, y_pred)))
print('Precision on test set: %0.2f%%'%(100*precision_score(Y_test, y_pred)))
print('Recall on test set: %0.2f%%'%(100*recall_score(Y_test, y_pred)))
print('F1 score on test set: %0.2f%%'%(100*f1_score(Y_test, y_pred)))
# -
# ## [5.2] RBF SVM
# ### [5.2.1] Applying RBF SVM on BOW,<font color='red'> SET 1</font>
# +
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC
from sklearn.metrics import roc_curve, auc, make_scorer
def custom_auc(ground_truth, predictions):
fpr, tpr, _ = roc_curve(ground_truth, predictions[:, 1])
return auc(fpr, tpr)
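# needs_proba=True makes GridSearchCV pass predicted probabilities to the scorer so it can optimise ROC-AUC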
auc_ = make_scorer(custom_auc, greater_is_better=True, needs_proba=True)
clf = SVC(kernel='rbf', probability=True)
param_grid = {'C':[0.001,0.01,0.1,1,10,100,1000]} #params
gsv = GridSearchCV(clf, param_grid, verbose=1, scoring=auc_, return_train_score=True)  # train scores needed for the plot below
gsv.fit(X_train_bow_rbf,Y_train_rbf)
print("Best HyperParameter: ",gsv.best_params_)
print("Best AUC score: ", gsv.best_score_)
# +
c = gsv.best_params_['C']
# the old grid_scores_ attribute was removed from GridSearchCV; read the C values and mean CV scores from cv_results_ instead
x = list(gsv.cv_results_['param_C'])
y_test = gsv.cv_results_['mean_test_score']
y_train = gsv.cv_results_['mean_train_score']
plt.xlabel('log(C)')
plt.ylabel('AUC score')
plt.plot(np.log(x), y_test, color='darkorange', label='CV test')
plt.plot(np.log(x), y_train, color='blue', label='CV train')
plt.legend()
plt.title(' C v/s auc score')
plt.show()
# +
clf = SVC(C=c, probability=True)
clf.fit(X_train_bow_rbf,Y_train_rbf)
y_pred = clf.predict_proba(X_test_bow_rbf)
y_pred_tr = clf.predict_proba(X_train_bow_rbf)
y_pr = clf.predict(X_test_bow_rbf)
from sklearn.metrics import roc_curve, auc, accuracy_score, precision_score, recall_score, f1_score, confusion_matrix
fpr, tpr, thresholds = roc_curve(Y_test_rbf, y_pred[:,1])
fpr_tr, tpr_tr, thresholds_tr = roc_curve(Y_train_rbf, y_pred_tr[:,1])
roc_auc_te = auc(fpr, tpr)
roc_auc_tr = auc(fpr_tr, tpr_tr)
plt.figure()
plt.plot(fpr, tpr, color='darkorange', lw=1, label='Test curve (area = %0.2f)' % roc_auc_te)
plt.plot(fpr_tr, tpr_tr, color='blue', lw=1, label='Train (area = %0.2f)' % roc_auc_tr)
plt.plot([0, 1], [0, 1], color='navy', linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic')
plt.legend(loc="lower right")
plt.show()
print('Accuracy on test set: %0.2f%%'%(100*accuracy_score(Y_test_rbf, y_pr)))
print('Precision on test set: %0.2f%%'%(100*precision_score(Y_test_rbf, y_pr)))
print('Recall on test set: %0.2f%%'%(100*recall_score(Y_test_rbf, y_pr)))
print('F1 score on test set: %0.2f%%'%(100*f1_score(Y_test_rbf, y_pr)))
ax = sns.heatmap(confusion_matrix(Y_test_rbf, y_pr), annot=True, fmt='d')
# -
# ### [5.2.2] Applying RBF SVM on TFIDF,<font color='red'> SET 2</font>
# +
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC
from sklearn.metrics import roc_curve, auc, make_scorer
def custom_auc(ground_truth, predictions):
fpr, tpr, _ = roc_curve(ground_truth, predictions[:, 1])
return auc(fpr, tpr)
auc_ = make_scorer(custom_auc, greater_is_better=True, needs_proba=True)
clf = SVC(kernel='rbf', probability=True)
param_grid = {'C':[0.001,0.01,0.1,1,10,100,1000]} #params
gsv = GridSearchCV(clf, param_grid, verbose=1, scoring=auc_, return_train_score=True)  # train scores needed for the plot below
gsv.fit(X_train_tfidf_rbf,Y_train_rbf)
print("Best HyperParameter: ",gsv.best_params_)
print("Best AUC score: ", gsv.best_score_)
# +
c = gsv.best_params_['C']
# the old grid_scores_ attribute was removed from GridSearchCV; read the C values and mean CV scores from cv_results_ instead
x = list(gsv.cv_results_['param_C'])
y_test = gsv.cv_results_['mean_test_score']
y_train = gsv.cv_results_['mean_train_score']
plt.xlabel('log(C)')
plt.ylabel('AUC score')
plt.plot(np.log(x), y_test, color='darkorange', label='CV test')
plt.plot(np.log(x), y_train, color='blue', label='CV train')
plt.legend()
plt.title(' C v/s auc score')
plt.show()
# +
clf = SVC(C=c, probability=True)
clf.fit(X_train_tfidf_rbf,Y_train_rbf)
y_pred = clf.predict_proba(X_test_tfidf_rbf)
y_pred_tr = clf.predict_proba(X_train_tfidf_rbf)
y_pr = clf.predict(X_test_tfidf_rbf)
from sklearn.metrics import roc_curve, auc, accuracy_score, precision_score, recall_score, f1_score, confusion_matrix
fpr, tpr, thresholds = roc_curve(Y_test_rbf, y_pred[:,1])
fpr_tr, tpr_tr, thresholds_tr = roc_curve(Y_train_rbf, y_pred_tr[:,1])
roc_auc_te = auc(fpr, tpr)
roc_auc_tr = auc(fpr_tr, tpr_tr)
plt.figure()
plt.plot(fpr, tpr, color='darkorange', lw=1, label='Test curve (area = %0.2f)' % roc_auc_te)
plt.plot(fpr_tr, tpr_tr, color='blue', lw=1, label='Train (area = %0.2f)' % roc_auc_tr)
plt.plot([0, 1], [0, 1], color='navy', linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic')
plt.legend(loc="lower right")
plt.show()
print('Accuracy on test set: %0.2f%%'%(100*accuracy_score(Y_test_rbf, y_pr)))
print('Precision on test set: %0.2f%%'%(100*precision_score(Y_test_rbf, y_pr)))
print('Recall on test set: %0.2f%%'%(100*recall_score(Y_test_rbf, y_pr)))
print('F1 score on test set: %0.2f%%'%(100*f1_score(Y_test_rbf, y_pr)))
ax = sns.heatmap(confusion_matrix(Y_test_rbf, y_pr), annot=True, fmt='d')
# -
# ### [5.2.3] Applying RBF SVM on AVG W2V,<font color='red'> SET 3</font>
# +
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC
from sklearn.metrics import roc_curve, auc, make_scorer
def custom_auc(ground_truth, predictions):
fpr, tpr, _ = roc_curve(ground_truth, predictions[:, 1])
return auc(fpr, tpr)
auc_ = make_scorer(custom_auc, greater_is_better=True, needs_proba=True)
clf = SVC(kernel='rbf', probability=True)
param_grid = {'C':[0.001,0.01,0.1,1,10,100,1000]} #params
gsv = GridSearchCV(clf, param_grid, verbose=1, scoring=auc_, return_train_score=True)  # train scores needed for the plot below
gsv.fit(sent_vectors_TRAIN_rbf_aw2v,Y_train_rbf)
print("Best HyperParameter: ",gsv.best_params_)
print("Best AUC score: ", gsv.best_score_)
# +
c = gsv.best_params_['C']
# the old grid_scores_ attribute was removed from GridSearchCV; read the C values and mean CV scores from cv_results_ instead
x = list(gsv.cv_results_['param_C'])
y_test = gsv.cv_results_['mean_test_score']
y_train = gsv.cv_results_['mean_train_score']
plt.xlabel('log(C)')
plt.ylabel('AUC score')
plt.plot(np.log(x), y_test, color='darkorange', label='CV test')
plt.plot(np.log(x), y_train, color='blue', label='CV train')
plt.legend()
plt.title(' C v/s auc score')
plt.show()
# +
clf = SVC(C=c, probability=True)
clf.fit(sent_vectors_TRAIN_rbf_aw2v,Y_train_rbf)
y_pred = clf.predict_proba(sent_vectors_TEST_rbf_aw2v)
y_pred_tr = clf.predict_proba(sent_vectors_TRAIN_rbf_aw2v)
y_pr = clf.predict(sent_vectors_TEST_rbf_aw2v)
from sklearn.metrics import roc_curve, auc, accuracy_score, precision_score, recall_score, f1_score, confusion_matrix
fpr, tpr, thresholds = roc_curve(Y_test_rbf, y_pred[:,1])
fpr_tr, tpr_tr, thresholds_tr = roc_curve(Y_train_rbf, y_pred_tr[:,1])
roc_auc_te = auc(fpr, tpr)
roc_auc_tr = auc(fpr_tr, tpr_tr)
plt.figure()
plt.plot(fpr, tpr, color='darkorange', lw=1, label='Test curve (area = %0.2f)' % roc_auc_te)
plt.plot(fpr_tr, tpr_tr, color='blue', lw=1, label='Train (area = %0.2f)' % roc_auc_tr)
plt.plot([0, 1], [0, 1], color='navy', linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic')
plt.legend(loc="lower right")
plt.show()
print('Accuracy on test set: %0.2f%%'%(100*accuracy_score(Y_test_rbf, y_pr)))
print('Precision on test set: %0.2f%%'%(100*precision_score(Y_test_rbf, y_pr)))
print('Recall on test set: %0.2f%%'%(100*recall_score(Y_test_rbf, y_pr)))
print('F1 score on test set: %0.2f%%'%(100*f1_score(Y_test_rbf, y_pr)))
ax = sns.heatmap(confusion_matrix(Y_test_rbf, y_pr), annot=True, fmt='d')
# -
# ### [5.2.4] Applying RBF SVM on TFIDF W2V,<font color='red'> SET 4</font>
# +
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC
from sklearn.metrics import roc_curve, auc, make_scorer
def custom_auc(ground_truth, predictions):
fpr, tpr, _ = roc_curve(ground_truth, predictions[:, 1])
return auc(fpr, tpr)
auc_ = make_scorer(custom_auc, greater_is_better=True, needs_proba=True)
clf = SVC(kernel='rbf', probability=True)
param_grid = {'C':[0.001,0.01,0.1,1,10,100,1000]} #params
gsv = GridSearchCV(clf, param_grid, verbose=1, scoring=auc_, return_train_score=True)  # train scores needed for the plot below
gsv.fit(tfidf_sent_vectors_TRAIN_rbf_tfidfw2v,Y_train_rbf)
print("Best HyperParameter: ",gsv.best_params_)
print("Best AUC score: ", gsv.best_score_)
# +
c = gsv.best_params_['C']
# the old grid_scores_ attribute was removed from GridSearchCV; read the C values and mean CV scores from cv_results_ instead
x = list(gsv.cv_results_['param_C'])
y_test = gsv.cv_results_['mean_test_score']
y_train = gsv.cv_results_['mean_train_score']
plt.xlabel('log(C)')
plt.ylabel('AUC score')
plt.plot(np.log(x), y_test, color='darkorange', label='CV test')
plt.plot(np.log(x), y_train, color='blue', label='CV train')
plt.legend()
plt.title(' C v/s auc score')
plt.show()
# +
clf = SVC(C=c, probability=True)
clf.fit(tfidf_sent_vectors_TRAIN_rbf_tfidfw2v,Y_train_rbf)
y_pred = clf.predict_proba(tfidf_sent_vectors_TEST_rbf_tfidfw2v)
y_pred_tr = clf.predict_proba(tfidf_sent_vectors_TRAIN_rbf_tfidfw2v)
y_pr = clf.predict(tfidf_sent_vectors_TEST_rbf_tfidfw2v)
from sklearn.metrics import roc_curve, auc, accuracy_score, precision_score, recall_score, f1_score, confusion_matrix
fpr, tpr, thresholds = roc_curve(Y_test_rbf, y_pred[:,1])
fpr_tr, tpr_tr, thresholds_tr = roc_curve(Y_train_rbf, y_pred_tr[:,1])
roc_auc_te = auc(fpr, tpr)
roc_auc_tr = auc(fpr_tr, tpr_tr)
plt.figure()
plt.plot(fpr, tpr, color='darkorange', lw=1, label='Test curve (area = %0.2f)' % roc_auc_te)
plt.plot(fpr_tr, tpr_tr, color='blue', lw=1, label='Train (area = %0.2f)' % roc_auc_tr)
plt.plot([0, 1], [0, 1], color='navy', linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic')
plt.legend(loc="lower right")
plt.show()
print('Accuracy on test set: %0.2f%%'%(100*accuracy_score(Y_test_rbf, y_pr)))
print('Precision on test set: %0.2f%%'%(100*precision_score(Y_test_rbf, y_pr)))
print('Recall on test set: %0.2f%%'%(100*recall_score(Y_test_rbf, y_pr)))
print('F1 score on test set: %0.2f%%'%(100*f1_score(Y_test_rbf, y_pr)))
ax = sns.heatmap(confusion_matrix(Y_test_rbf, y_pr), annot=True, fmt='d')
# -
# # [6] Conclusions
# +
from prettytable import PrettyTable
x = PrettyTable()
y = PrettyTable()
print('============================Linear Kernel (SGDClassifier with hinge loss)=================================')
x.field_names = ["Vectorizer", "alpha", "regularization", "AUC Score"]
x.add_row(["BoW", 0.1, "l2", 0.95])
x.add_row(["Tf-idf", 1, "l2", 0.96])
x.add_row(["Avg W2V", 0.01, "l2", 0.92])
x.add_row(["Tf-idf W2V", 0.01, "l2", 0.86])
print(x)
print('==============================Radial basis function kernel (RBF kernel)===================================')
y.field_names = ["Vectorizer", "C", "AUC Score"]
y.add_row(["BoW", 100, 0.92])
y.add_row(["Tf-idf", 1000, 0.96])
y.add_row(["Avg W2V", 10, 0.92])
y.add_row(["Tf-idf W2V", 1, 0.93])
print(y)
| Support Vector Classification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="6uQP3ZbC8J5o"
# ##### Copyright 2018 The TensorFlow Authors.
# + cellView="form" colab={} colab_type="code" id="_ckMIh7O7s6D"
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + cellView="both" colab={} colab_type="code" id="vasWnqRgy1H4"
#@title MIT License
#
# Copyright (c) 2017 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# + [markdown] colab_type="text" id="jYysdyb-CaWM"
# # Lab 03: Image Classification with Convolutional Neural Networks
# + [markdown] colab_type="text" id="S5Uhzt6vVIB2"
# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/rses-dl-course/rses-dl-course.github.io/blob/master/notebooks/python/L03_image_classification_with_cnn.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
# </td>
# <td>
# <a target="_blank" href="https://github.com/tensorflow/rses-dl-course/rses-dl-course.github.io/blob/master/notebooks/python/L03_image_classification_with_cnn.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
# </td>
# </table>
# + [markdown] colab_type="text" id="FbVhjPpzn6BM"
# In this tutorial, we'll build and train a neural network to classify images of clothing, like sneakers and shirts.
# + [markdown] colab_type="text" id="H0tMfX2vR0uD"
# ## Install and import dependencies
#
# We'll need [TensorFlow Datasets](https://www.tensorflow.org/datasets/), an API that simplifies downloading and accessing datasets, and provides several sample datasets to work with. We're also using a few helper libraries.
# + colab={} colab_type="code" id="5HDhfftMGc_i"
import tensorflow as tf
# + colab={} colab_type="code" id="uusvhUp9Gg37"
# Import TensorFlow Datasets
import tensorflow_datasets as tfds
tfds.disable_progress_bar()
# Helper libraries
import math
import numpy as np
import matplotlib.pyplot as plt
# + colab={} colab_type="code" id="UXZ44qIaG0Ru"
import logging
logger = tf.get_logger()
logger.setLevel(logging.ERROR)
# + [markdown] colab_type="text" id="yR0EdgrLCaWR"
# ## Import the Fashion MNIST dataset
# + [markdown] colab_type="text" id="DLdCchMdCaWQ"
# This guide uses the [Fashion MNIST](https://github.com/zalandoresearch/fashion-mnist) dataset, which contains 70,000 grayscale images in 10 categories. The images show individual articles of clothing at low resolution (28 $\times$ 28 pixels), as seen here:
#
# <table>
# <tr><td>
# <img src="https://tensorflow.org/images/fashion-mnist-sprite.png"
# alt="Fashion MNIST sprite" width="600">
# </td></tr>
# <tr><td align="center">
# <b>Figure 1.</b> <a href="https://github.com/zalandoresearch/fashion-mnist">Fashion-MNIST samples</a> (by Zalando, MIT License).<br/>
# </td></tr>
# </table>
#
# Fashion MNIST is intended as a drop-in replacement for the classic [MNIST](http://yann.lecun.com/exdb/mnist/) dataset—often used as the "Hello, World" of machine learning programs for computer vision. The MNIST dataset contains images of handwritten digits (0, 1, 2, etc) in an identical format to the articles of clothing we'll use here.
#
# This guide uses Fashion MNIST for variety, and because it's a slightly more challenging problem than regular MNIST. Both datasets are relatively small and are used to verify that an algorithm works as expected. They're good starting points to test and debug code.
#
# We will use 60,000 images to train the network and 10,000 images to evaluate how accurately the network learned to classify images. You can access the Fashion MNIST directly from TensorFlow, using the [Datasets](https://www.tensorflow.org/datasets) API:
# + colab={} colab_type="code" id="7MqDQO0KCaWS"
dataset, metadata = tfds.load('fashion_mnist', as_supervised=True, with_info=True)
train_dataset, test_dataset = dataset['train'], dataset['test']
# + [markdown] colab_type="text" id="t9FDsUlxCaWW"
# Loading the dataset returns metadata as well as a *training dataset* and *test dataset*.
#
# * The model is trained using `train_dataset`.
# * The model is tested against `test_dataset`.
#
# The images are 28 $\times$ 28 arrays, with pixel values in the range `[0, 255]`. The *labels* are an array of integers, in the range `[0, 9]`. These correspond to the *class* of clothing the image represents:
#
# <table>
# <tr>
# <th>Label</th>
# <th>Class</th>
# </tr>
# <tr>
# <td>0</td>
# <td>T-shirt/top</td>
# </tr>
# <tr>
# <td>1</td>
# <td>Trouser</td>
# </tr>
# <tr>
# <td>2</td>
# <td>Pullover</td>
# </tr>
# <tr>
# <td>3</td>
# <td>Dress</td>
# </tr>
# <tr>
# <td>4</td>
# <td>Coat</td>
# </tr>
# <tr>
# <td>5</td>
# <td>Sandal</td>
# </tr>
# <tr>
# <td>6</td>
# <td>Shirt</td>
# </tr>
# <tr>
# <td>7</td>
# <td>Sneaker</td>
# </tr>
# <tr>
# <td>8</td>
# <td>Bag</td>
# </tr>
# <tr>
# <td>9</td>
# <td>Ankle boot</td>
# </tr>
# </table>
#
# Each image is mapped to a single label. Since the *class names* are not included with the dataset, store them here to use later when plotting the images:
# + colab={} colab_type="code" id="IjnLH5S2CaWx"
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
# + [markdown] colab_type="text" id="Brm0b_KACaWX"
# ### Explore the data
#
# Let's explore the format of the dataset before training the model. The following shows there are 60,000 images in the training set, and 10,000 images in the test set:
# + colab={} colab_type="code" id="MaOTZxFzi48X"
num_train_examples = metadata.splits['train'].num_examples
num_test_examples = metadata.splits['test'].num_examples
print("Number of training examples: {}".format(num_train_examples))
print("Number of test examples: {}".format(num_test_examples))
# + [markdown] colab_type="text" id="ES6uQoLKCaWr"
# ## Preprocess the data
#
# The value of each pixel in the image data is an integer in the range `[0,255]`. For the model to work properly, these values need to be normalized to the range `[0,1]`. So here we create a normalization function, and then apply it to each image in the test and train datasets.
# + colab={} colab_type="code" id="nAsH3Zm-76pB"
def normalize(images, labels):
images = tf.cast(images, tf.float32)
images /= 255
return images, labels
# The map function applies the normalize function to each element in the train
# and test datasets
train_dataset = train_dataset.map(normalize)
test_dataset = test_dataset.map(normalize)
# The first time you use the dataset, the images will be loaded from disk
# Caching will keep them in memory, making training faster
train_dataset = train_dataset.cache()
test_dataset = test_dataset.cache()
# + [markdown] colab_type="text" id="lIQbEiJGXM-q"
# ### Explore the processed data
#
# Let's plot an image to see what it looks like.
# + colab={} colab_type="code" id="oSzE9l7PjHx0"
# Take a single image, and remove the color dimension by reshaping
for image, label in test_dataset.take(1):
break
image = image.numpy().reshape((28,28))
# Plot the image - voila a piece of fashion clothing
plt.figure()
plt.imshow(image, cmap=plt.cm.binary)
plt.colorbar()
plt.grid(False)
plt.show()
# + [markdown] colab_type="text" id="Ee638AlnCaWz"
# Display the first 25 images from the *training set* and display the class name below each image. Verify that the data is in the correct format and we're ready to build and train the network.
# + colab={} colab_type="code" id="oZTImqg_CaW1"
plt.figure(figsize=(10,10))
i = 0
for (image, label) in test_dataset.take(25):
image = image.numpy().reshape((28,28))
plt.subplot(5,5,i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(image, cmap=plt.cm.binary)
plt.xlabel(class_names[label])
i += 1
plt.show()
# + [markdown] colab_type="text" id="59veuiEZCaW4"
# ## Build the model
#
# Building the neural network requires configuring the layers of the model, then compiling the model.
# + [markdown] colab_type="text" id="Gxg1XGm0eOBy"
# ### Exercise 3.1 Setup the layers
#
# The basic building block of a neural network is the *layer*. A layer extracts a representation from the data fed into it. Hopefully, a series of connected layers results in a representation that is meaningful for the problem at hand.
#
# Much of deep learning consists of chaining together simple layers. Most layers, like `tf.keras.layers.Dense`, have internal parameters which are adjusted ("learned") during training.
#
# For this exercise, we'll be using two new layers, the Convolution layer (`tf.keras.layers.Conv2D`) and the Max Pooling layer (`tf.keras.layers.MaxPool2D`). Refer to the slides and official documentation on how to use these layers:
#
# * [tf.keras.layers.Conv2D reference](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Conv2D)
# * [tf.keras.layers.MaxPool2D reference](https://www.tensorflow.org/api_docs/python/tf/keras/layers/MaxPool2D)
#
# **Our network layers are:**
# * 2D Convolution layer - 32 filters, 3x3 kernel, ReLU activation, padding with same values
# * Max pooling layer - 2x2 kernel, 2 stride
# * 2D Convolution layer - 64 filters, 3x3 kernel, ReLU activation, padding with same values
# * Max pooling layer - 2x2 kernel, 2 stride
# * Flatten layer
# * Dense layer - 128 nodes output, ReLU activation
# * Dense layer - 10 nodes output
# -
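# Before filling in the exercise below, here is a minimal, hedged illustration of the `Conv2D`/`MaxPool2D` API on a 28x28x1 input (a sketch only, with arbitrary layer sizes, not the exercise solution):
# +
example_cnn = tf.keras.Sequential([
    # 3x3 convolution with 8 filters; 'same' padding keeps the 28x28 spatial size
    tf.keras.layers.Conv2D(8, (3, 3), padding='same', activation=tf.nn.relu, input_shape=(28, 28, 1)),
    # 2x2 max pooling with a stride of 2 halves the spatial size to 14x14
    tf.keras.layers.MaxPool2D((2, 2), strides=2)
])
example_cnn.summary()
# -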
model = tf.keras.Sequential([
#TODO - Add model layers as described above
])
# #### Exercise 3.1 Solution
#
# The solution for the exercise can be found [here](https://colab.research.google.com/github/rses-dl-course/rses-dl-course.github.io/blob/master/notebooks/python/solutions/E3.1.ipynb)
# + [markdown] colab_type="text" id="gut8A_7rCaW6"
# ### Exercise 3.2 Compile the model with `Model.compile`
#
# Before the model is ready for training, it needs a few more settings. These are added during the model's *compile* step:
#
# **Compile the model below with the following settings**
# * *Loss function* — SparseCategoricalCrossentropy
# * *Optimizer* — Adam
# * *Metrics* — accuracy
#
# Refer to the [official documentation](https://www.tensorflow.org/api_docs/python/tf/keras/Model#compile) if you've forgotten the function.
# +
# TODO - Compile the model
# -
# #### Exercise 3.2 Solution
#
# The solution for the exercise can be found [here](https://colab.research.google.com/github/rses-dl-course/rses-dl-course.github.io/blob/master/notebooks/python/solutions/E3.2.ipynb)
# + [markdown] colab_type="text" id="qKF6uW-BCaW-"
# ## Exercise 3.3 Train the model with `Model.fit`
#
# First, we define the iteration behavior for the train dataset:
# 1. Repeat forever by specifying `dataset.repeat()` (the `epochs` parameter described below limits how long we perform training).
# 2. The `dataset.shuffle(60000)` randomizes the order so our model cannot learn anything from the order of the examples.
# 3. And `dataset.batch(32)` tells `model.fit` to use batches of 32 images and labels when updating the model variables.
#
# Training is performed by calling the `model.fit` method:
# 1. Feed the training data to the model using `train_dataset`.
# 2. The model learns to associate images and labels.
# 3. The `epochs=5` parameter limits training to 5 full iterations of the training dataset, so a total of 5 * 60000 = 300000 examples.
#
# (Don't worry about `steps_per_epoch`, the requirement to have this flag will soon be removed.)
# + colab={} colab_type="code" id="o_Dp8971McQ1"
BATCH_SIZE = 32
train_dataset = train_dataset.cache().repeat().shuffle(num_train_examples).batch(BATCH_SIZE)
test_dataset = test_dataset.cache().batch(BATCH_SIZE)
# -
# Start training the model in the code box below for **10 epochs**. Don't forget to add the parameter `steps_per_epoch=math.ceil(num_train_examples/BATCH_SIZE)`.
#
# Refer to the [documentation](https://www.tensorflow.org/api_docs/python/tf/keras/Model) if you've forgotten the function.
# +
# TODO - Train the model
# + [markdown] colab_type="text" id="W3ZVOhugCaXA"
# As the model trains, the loss and accuracy metrics are displayed. This model reaches an accuracy of about 0.97 (or 97%) on the training data.
# -
# ### Exercise 3.3 Solution
#
# The solution for the exercise can be found [here](https://colab.research.google.com/github/rses-dl-course/rses-dl-course.github.io/blob/master/notebooks/python/solutions/E3.3.ipynb)
# + [markdown] colab_type="text" id="oEw4bZgGCaXB"
# ## Exercise 3.4 Evaluate accuracy with `Model.evaluate`
#
# Next, compare how the model performs on the test dataset. Use all examples we have in the test dataset to assess accuracy.
#
# Refer to the [documentation](https://www.tensorflow.org/api_docs/python/tf/keras/Model) on how to use the function.
# +
# TODO - Evaluate the model
# + [markdown] colab_type="text" id="yWfgsmVXCaXG"
# As it turns out, the accuracy on the test dataset is smaller than the accuracy on the training dataset. This is completely normal, since the model was trained on the `train_dataset`. When the model sees images it has never seen during training, (that is, from the `test_dataset`), we can expect performance to go down.
# -
# ### Exercise 3.4 Solution
#
# The solution for the exercise can be found [here](https://colab.research.google.com/github/rses-dl-course/rses-dl-course.github.io/blob/master/notebooks/python/solutions/E3.4.ipynb)
# + [markdown] colab_type="text" id="xsoS7CPDCaXH"
# ## Make predictions and explore
#
# With the model trained, we can use it to make predictions about some images.
# + colab={} colab_type="code" id="Ccoz4conNCpl"
for test_images, test_labels in test_dataset.take(1):
test_images = test_images.numpy()
test_labels = test_labels.numpy()
predictions = model.predict(test_images)
# + colab={} colab_type="code" id="Gl91RPhdCaXI"
predictions.shape
# + [markdown] colab_type="text" id="x9Kk1voUCaXJ"
# Here, the model has predicted the label for each image in the testing set. Let's take a look at the first prediction:
# + colab={} colab_type="code" id="3DmJEUinCaXK"
predictions[0]
# + [markdown] colab_type="text" id="-hw1hgeSCaXN"
# A prediction is an array of 10 numbers. These describe the "confidence" of the model that the image corresponds to each of the 10 different articles of clothing. We can see which label has the highest confidence value:
# + colab={} colab_type="code" id="qsqenuPnCaXO"
np.argmax(predictions[0])
# + [markdown] colab_type="text" id="E51yS7iCCaXO"
# So the model is usually most confident that this image is a Shirt, or `class_names[6]`. Let's check the label:
# + colab={} colab_type="code" id="Sd7Pgsu6CaXP"
test_labels[0]
# + [markdown] colab_type="text" id="ygh2yYC972ne"
# We can graph this to look at the full set of 10 class predictions
# + colab={} colab_type="code" id="DvYmmrpIy6Y1"
def plot_image(i, predictions_array, true_labels, images):
predictions_array, true_label, img = predictions_array[i], true_labels[i], images[i]
plt.grid(False)
plt.xticks([])
plt.yticks([])
plt.imshow(img[...,0], cmap=plt.cm.binary)
predicted_label = np.argmax(predictions_array)
if predicted_label == true_label:
color = 'blue'
else:
color = 'red'
plt.xlabel("{} {:2.0f}% ({})".format(class_names[predicted_label],
100*np.max(predictions_array),
class_names[true_label]),
color=color)
def plot_value_array(i, predictions_array, true_label):
predictions_array, true_label = predictions_array[i], true_label[i]
plt.grid(False)
plt.xticks([])
plt.yticks([])
thisplot = plt.bar(range(10), predictions_array, color="#777777")
plt.ylim([0, 1])
predicted_label = np.argmax(predictions_array)
thisplot[predicted_label].set_color('red')
thisplot[true_label].set_color('blue')
# + [markdown] colab_type="text" id="d4Ov9OFDMmOD"
# Let's look at the 0th image, predictions, and prediction array.
# + colab={} colab_type="code" id="HV5jw-5HwSmO"
i = 0
plt.figure(figsize=(6,3))
plt.subplot(1,2,1)
plot_image(i, predictions, test_labels, test_images)
plt.subplot(1,2,2)
plot_value_array(i, predictions, test_labels)
# + colab={} colab_type="code" id="Ko-uzOufSCSe"
i = 12
plt.figure(figsize=(6,3))
plt.subplot(1,2,1)
plot_image(i, predictions, test_labels, test_images)
plt.subplot(1,2,2)
plot_value_array(i, predictions, test_labels)
# + [markdown] colab_type="text" id="kgdvGD52CaXR"
# Let's plot several images with their predictions. Correct prediction labels are blue and incorrect prediction labels are red. The number gives the percent (out of 100) for the predicted label. Note that it can be wrong even when very confident.
# + colab={} colab_type="code" id="hQlnbqaw2Qu_"
# Plot the first X test images, their predicted label, and the true label
# Color correct predictions in blue, incorrect predictions in red
num_rows = 5
num_cols = 3
num_images = num_rows*num_cols
plt.figure(figsize=(2*2*num_cols, 2*num_rows))
for i in range(num_images):
plt.subplot(num_rows, 2*num_cols, 2*i+1)
plot_image(i, predictions, test_labels, test_images)
plt.subplot(num_rows, 2*num_cols, 2*i+2)
plot_value_array(i, predictions, test_labels)
# + [markdown] colab_type="text" id="R32zteKHCaXT"
# Finally, use the trained model to make a prediction about a single image.
# + colab={} colab_type="code" id="yRJ7JU7JCaXT"
# Grab an image from the test dataset
img = test_images[0]
print(img.shape)
# + [markdown] colab_type="text" id="vz3bVp21CaXV"
# `tf.keras` models are optimized to make predictions on a *batch*, or collection, of examples at once. So even though we're using a single image, we need to add it to a list:
# + colab={} colab_type="code" id="lDFh5yF_CaXW"
# Add the image to a batch where it's the only member.
img = np.array([img])
print(img.shape)
# + [markdown] colab_type="text" id="EQ5wLTkcCaXY"
# Now predict the image:
# + colab={} colab_type="code" id="o_rzNSdrCaXY"
predictions_single = model.predict(img)
print(predictions_single)
# + colab={} colab_type="code" id="6Ai-cpLjO-3A"
plot_value_array(0, predictions_single, test_labels)
_ = plt.xticks(range(10), class_names, rotation=45)
# + [markdown] colab_type="text" id="cU1Y2OAMCaXb"
# `model.predict` returns a list of lists, one for each image in the batch of data. Grab the predictions for our (only) image in the batch:
# + colab={} colab_type="code" id="2tRmdq_8CaXb"
np.argmax(predictions_single[0])
# + [markdown] colab_type="text" id="YFc2HbEVCaXd"
# And, as before, the model predicts a label of 6 (shirt).
# + [markdown] colab_type="text" id="-KtnHECKZni_"
# # Exercise 3.5
#
# Experiment with different models and see how the accuracy results differ. In particular change the following parameters:
# * Set training epochs set to 1
# * Number of neurons in the Dense layer following the Flatten one. For example, go really low (e.g. 10) in ranges up to 512 and see how accuracy changes
# * Add additional Dense layers between the Flatten and the final Dense(10), experiment with different units in these layers
# * Don't normalize the pixel values, and see the effect that has
# -
#
# # Exercise 3.6 - CIFAR-10 Dataset with CNNs
#
# Let's apply what we've learned to another dataset. The [CIFAR-10](https://www.cs.toronto.edu/~kriz/cifar.html) dataset consists of 60000 32x32 colour images in 10 classes, with 6000 images per class. There are 50000 training images and 10000 test images.
#
# As our input is a colour image, we now have 3 values per pixel. When flattened, our input array is 3072 values long ($32\times32\times3$).
#
# * What happens when you use the same network as above?
# * What is the best accuracy that you can achieve?
# Like in the previous lab, download, extract and load the dataset.
#
# The extracted folder `cifar-10-batches-py` contains (in Python's pickle format):
# * Training dataset: `data_batch_1 - 5`
# * Test dataset: `test_batch`
# * Dataset metadata: `batches.meta`
# +
import os
import glob
# Download the data
_URL = 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'
zip_dir = tf.keras.utils.get_file('cifar-10-python.tar.gz', origin=_URL, extract=True)
# Get the data and meta file names
data_dir = os.path.join(os.path.dirname(zip_dir), 'cifar-10-batches-py')
train_files = glob.glob(os.path.join(data_dir,"data_batch_*"))
test_file = os.path.join(data_dir,"test_batch")
meta_file = os.path.join(data_dir,"batches.meta")
def unpickle(file):
import pickle
with open(file, 'rb') as fo:
dict = pickle.load(fo, encoding='bytes')
return dict
def build_dataset(files):
x = []
y = []
for file in files:
dict = unpickle(file)
for image in dict[b'data']:
# Image in the dataset is stored as a 3072 length 1D array
x.append(image)
for label in dict[b'labels']:
y.append(label)
return tf.data.Dataset.from_tensor_slices((x,y))
# Build the training dataset
train_dataset = build_dataset(train_files)
# Build the testing dataset
test_dataset = build_dataset([test_file])
# Get the metadata
meta = unpickle(meta_file)
# -
# **Now that we've got a dataset, use what you've learned in this lab to build a CNN model for classifying these images.**
# * Don't forget to pre-process your data
# * You'll want to change the shape of the input image from a 1D to a 3D array inside your mapping function (hint: [use the reshape function](https://www.tensorflow.org/api_docs/python/tf/reshape))
# * The image is stored as `[colour channel, width, height]`, you'll need to change this ordering to `[width, height, colour channel]` (hint: [use the transpose function](https://www.tensorflow.org/api_docs/python/tf/transpose)); see the sketch after this list
# * Remember to check your input shape as it's different from the Fashion MNIST dataset
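# As a starting point, here is a hedged sketch of one possible mapping function for the records built above (`example_preprocess` is a name introduced here purely for illustration; the model itself is left as the exercise):
# +
def example_preprocess(image, label):
    # Rescale pixel values from [0, 255] to [0, 1]
    image = tf.cast(image, tf.float32) / 255.0
    # The flat 3072-value vector is stored channels-first: [colour channel, width, height]
    image = tf.reshape(image, (3, 32, 32))
    # Reorder to [width, height, colour channel], the layout Conv2D expects by default
    image = tf.transpose(image, (1, 2, 0))
    return image, label
# e.g. train_dataset = train_dataset.map(example_preprocess)
# -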
# +
# TODO - Create a CNN model and train it using the CIFAR-10 dataset
# -
# ### Exercise 3.6 Solution
#
# The solution for the exercise can be found [here](https://colab.research.google.com/github/rses-dl-course/rses-dl-course.github.io/blob/master/notebooks/python/solutions/E3.6.ipynb)
# # Notice
#
# Remember to enable GPU to make everything run faster (Runtime -> Change runtime type -> Hardware accelerator -> GPU).
# Also, if you run into trouble, simply reset the entire environment and start from the beginning:
# * Edit -> Clear all outputs
# * Runtime -> Reset all runtimes
| notebooks/python/L03_image_classification_with_cnn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import os
from os import path
# Setting where our data is sitting
data_folder = path.join(
path.abspath('..'), # '..' means the directory above this one
'data')
data_folder
# +
raw_postcodes = \
pd.read_csv(path.join(data_folder, 'australian_postcodes.csv')) \
.dropna(how='any')
# We have to remove duplicate records (there's one for each suburb) so we aggregate them up
postcodes = pd.DataFrame.from_records([
(postcode, df.long.mean(), df.lat.mean(), ', '.join(df.locality))
for postcode, df in raw_postcodes.groupby('postcode')
],
columns=('postcode', 'longitude', 'latitude', 'locality'),
index='postcode'
)
postcodes.head()
# +
fault_data = \
pd.read_csv(path.join(data_folder, 'faults.csv')) \
.dropna(how='any')
# rename columns
new_names = {
'City': 'suburb',
'Notif.Date': 'date',
'PriorityType': 'type',
'Pipe Material': 'material',
'Pipe Size': 'size',
'Notification': 'notif',
}
fault_data.rename(columns=new_names, inplace=True)
fault_data.head()
# -
fault_data = fault_data.join(postcodes, how='inner')
fault_data.head()
fault_data=fault_data.fillna(0)
# Splits from longitude
split_points = [
('longitude', 140, [False, True])
]
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# flag a record only when it satisfies every split condition (here just the single longitude split)
fault_data['is_corr'] = np.vstack([
    pd.cut(fault_data[longitude], bins=[0, split, 200], labels=is_corr)
    for longitude, split, is_corr in split_points
]).sum(axis=0) == len(split_points)
sns.countplot('is_corr', data=fault_data)
sns.pairplot(fault_data.iloc[::5], hue='is_corr', plot_kws={'alpha': 0.5})
sns.jointplot('longitude', 'size', fault_data, joint_kws={'alpha': 0.4, 'marker': '.'})
# +
def logistic(z):
return 1 / (1 + np.exp(-z))
zs = np.linspace(-10, 10)
logistic_data = pd.DataFrame(
{'z': zs, 'logistic': logistic(zs)}
)
logistic_data.plot('z', 'logistic')
# -
sns.distplot(fault_data['longitude'])
# +
from sklearn import preprocessing, pipeline, linear_model
# Make up our pipeline where we transform the longitude first to make it more gaussian!
regressor = pipeline.Pipeline([
('transform', preprocessing.FunctionTransformer(np.log, validate=True)),
('model', linear_model.LogisticRegression())
])
# -
X = fault_data[['longitude']]
y = fault_data['is_corr']
X
regressor.fit(X, y)
predict_df = pd.DataFrame(
    {'test_long_values': [110, 120, 130, 140, 150]}  # which of these longitudes fall in the is_corr region?
)
regressor.predict(predict_df)
model = regressor.named_steps.model
model.intercept_, model.coef_
def logistic(regressor, X):
"""
Plot our logistic model given input values x
We're doing this so that we can see the output of the logistic function - normally
you'd just do `regressor.predict(x)` to get actual 1, 0 labels for your data.
Parameters:
regressor - a fitted logistic regression pipeline
x - the values to evaulate the function at
"""
# We can pull the model and transforms from our pipeline
model = regressor.named_steps.model
tf = regressor.named_steps.transform
# Next we replay the steps in the pipeline to make a prediction
z = model.intercept_ + model.coef_[0][0] * tf.transform(X)
return 1 / (1 + np.exp(-z)).ravel()
logistic(regressor, predict_df)
# +
f, ax = plt.subplots(1, 1)
# Some longitude values to predict from
longs = pd.DataFrame(
    {'test_long_values': np.linspace(fault_data['longitude'].min(), fault_data['longitude'].max())}
)
# An offset to stop everything plotting on top of everything else
offset = 0.02
# shows predictions given contents
predictions = regressor.predict(longs)
ax.plot(longs, predictions + offset, '.', alpha=0.7, label='predicted (+ offset)')
# shows measured values plus jitter
jitter = np.random.normal(scale=0.01, size=len(fault_data))
ax.plot(fault_data['longitude'], fault_data['is_corr'] + jitter - offset, '.', alpha=0.1, label='measured (+ jitter - offset)')
# shows logistic function fitted from regressor
ax.plot(longs, logistic(regressor, longs), '--', label='fitted logistic function')
# Generate the logistic curve showing the location of
ax.set_ylim(-0.1, 1.1)
ax.legend()
ax.set_title('Logistic regression with scikit-learn')
f.tight_layout()
# plot below: where the y axis = 0.5, move across to the logistic function; this shows the cut-off between is_corr and not is_corr.
| notebooks/Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Regression diagnostics
# This example file shows how to use a few of the ``statsmodels`` regression diagnostic tests in a real-life context. You can learn about more tests and find out more information about the tests here on the [Regression Diagnostics page.](https://www.statsmodels.org/stable/diagnostic.html)
#
# Note that most of the tests described here only return a tuple of numbers, without any annotation. A full description of outputs is always included in the docstring and in the online ``statsmodels`` documentation. For presentation purposes, we use the ``zip(name,test)`` construct to pretty-print short descriptions in the examples below.
# ## Estimate a regression model
# %matplotlib inline
# +
from statsmodels.compat import lzip
import numpy as np
import pandas as pd
import statsmodels.formula.api as smf
import statsmodels.stats.api as sms
import matplotlib.pyplot as plt
# Load data
url = 'https://raw.githubusercontent.com/vincentarelbundock/Rdatasets/master/csv/HistData/Guerry.csv'
dat = pd.read_csv(url)
# Fit regression model (using the natural log of one of the regressors)
results = smf.ols('Lottery ~ Literacy + np.log(Pop1831)', data=dat).fit()
# Inspect the results
print(results.summary())
# -
# ## Normality of the residuals
# Jarque-Bera test:
name = ['Jarque-Bera', 'Chi^2 two-tail prob.', 'Skew', 'Kurtosis']
test = sms.jarque_bera(results.resid)
lzip(name, test)
# Omni test:
name = ['Chi^2', 'Two-tail probability']
test = sms.omni_normtest(results.resid)
lzip(name, test)
# ## Influence tests
#
# Once created, an object of class ``OLSInfluence`` holds attributes and methods that allow users to assess the influence of each observation. For example, we can compute and extract the first few rows of DFbetas by:
from statsmodels.stats.outliers_influence import OLSInfluence
test_class = OLSInfluence(results)
test_class.dfbetas[:5,:]
# Explore other options by typing ``dir(test_class)``
#
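# For instance, here is a further hedged example of the same object: ``summary_frame()`` gathers several influence measures (Cook's distance, studentized residuals, hat values, DFFITS) into one table:
test_class.summary_frame().head()
#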
# Useful information on leverage can also be plotted:
from statsmodels.graphics.regressionplots import plot_leverage_resid2
fig, ax = plt.subplots(figsize=(8,6))
fig = plot_leverage_resid2(results, ax = ax)
# Other plotting options can be found on the [Graphics page.](https://www.statsmodels.org/stable/graphics.html)
# ## Multicollinearity
#
# Condition number:
np.linalg.cond(results.model.exog)
# ## Heteroskedasticity tests
#
# Breusch-Pagan test:
name = ['Lagrange multiplier statistic', 'p-value',
'f-value', 'f p-value']
test = sms.het_breuschpagan(results.resid, results.model.exog)
lzip(name, test)
# Goldfeld-Quandt test
name = ['F statistic', 'p-value']
test = sms.het_goldfeldquandt(results.resid, results.model.exog)
lzip(name, test)
# ## Linearity
#
# Harvey-Collier multiplier test for the null hypothesis that the linear specification is correct:
name = ['t value', 'p value']
test = sms.linear_harvey_collier(results)
lzip(name, test)
| examples/notebooks/regression_diagnostics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/ChristianBenedict-Gomez/CPEN-21A-ECE-2-1/blob/main/Control_Structure.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="iIMgt31S9KpG"
# ##If Statement
# + colab={"base_uri": "https://localhost:8080/"} id="WVwEkz7J7tRp" outputId="ff722fb0-86ca-46fe-bd5e-76be7024e40f"
a = 12
b = 100
if b>a:
print("b is greater than a")
# + [markdown] id="bUVuqK0Y9o3A"
# ##Elif Statement
# + colab={"base_uri": "https://localhost:8080/"} id="WTFzJR0f9rNM" outputId="5e3816d6-3987-4b69-8ed7-f80dcea4f450"
a = 12
b = 13
if b>a:
print("b is greater than a")
elif b==a:
print("b is equal to a")
# + [markdown] id="_hHYwPh7-I4y"
# ##Else Statement
# + colab={"base_uri": "https://localhost:8080/"} id="W1n1xQlL-K9B" outputId="6975d12c-d75a-4474-ae71-f9e3384bd0be"
a = 30
b = 30
if a>b:
print("a is greater than b")
elif b>a:
print("b is greater than a")
else:
print("a is equal to b")
# + [markdown] id="aFW0oYsk--PD"
# ##Short Hand If Statement
# + colab={"base_uri": "https://localhost:8080/"} id="VGGKiA9o_Bcy" outputId="4676337a-16ab-42f4-8bdd-d29daf84350a"
a = 12
b = 6
if a>b: print("a is greater than b")
# + [markdown] id="lWMzmKQf_N5x"
# ##Short Hand If Else Statement
# + colab={"base_uri": "https://localhost:8080/"} id="9GrcngMk_NJj" outputId="f6b01fcb-ddf3-4d33-a626-a79662c722e1"
a = 7
b = 14
print("a is greater than b") if a>b else print("b is greater than a")
# + [markdown] id="1fGclsFg_4TR"
# ##And Logical Condition
# + colab={"base_uri": "https://localhost:8080/"} id="xCKprMBh_4sM" outputId="e3afd934-ebb2-451d-a09b-d6048a408e52"
a = 200
b = 300
c = 500
if a>b and c>a:
print ("Both conditions are True")
else:
print("Evaluated as False")
# + [markdown] id="m5S8YhdpAtjx"
# ##Or Logical Condition
# + colab={"base_uri": "https://localhost:8080/"} id="xQH2tfTAA6Jg" outputId="e61e980f-862f-48e5-c46a-676b3ba10d84"
a = 200
b = 300
c = 500
if a>b or c>a:
print ("Evaluated as True")
else:
print("Evaluated as False")
# + [markdown] id="2JJkLjy6BHBl"
# ##Nested If...Else Statement
# + colab={"base_uri": "https://localhost:8080/"} id="8dUSS9WgBMvB" outputId="501ab369-716a-4ffb-8582-175498ce9f63"
x = 20
if x>10:
print("Above ten")
if x>20:
print ("Above twenty")
else:
print("Above ten but Not above twenty")
else:
print("Not above ten")
# + [markdown] id="5uPzZmXvCQuZ"
# Example 1
# + colab={"base_uri": "https://localhost:8080/"} id="4CD-r9GOCZGg" outputId="6560ce4a-2a20-4e2f-a419-565d57cb4780"
# The qualifying age to vote
age = int(input("Enter your age:"))
if age>=18:
print("You are qualified to vote")
else:
print("You are not qualified to vote")
# + [markdown] id="n3spj4piD1sQ"
# Example 2
# + colab={"base_uri": "https://localhost:8080/"} id="scVlYDnqD3C2" outputId="581f8d1e-0986-4585-ef9e-c9592b180586"
num = int(input("Enter the number:"))
if num==0:
print("Zero")
elif num>0:
print("Positive")
else:
print("Negative")
# + [markdown] id="mIOxIJXHEeR3"
# Example 3
# + colab={"base_uri": "https://localhost:8080/"} id="UtkNmWZkEuB6" outputId="a52a0817-715b-447c-e736-33210af3c83f"
grade = float(input("Enter your grade:"))
if grade>=75:
print("Passed")
elif grade==74:
print("Remedial")
else:
print("Failed")
| Control_Structure.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# License: BSD
# Author: <NAME>
from __future__ import print_function, division
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import os
import copy
plt.ion() # interactive mode
# +
# Data augmentation and normalization for training
# Just normalization for validation
data_transforms = {
'train': transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
'val': transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
}
data_dir = 'data/hymenoptera_data'
image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
data_transforms[x])
for x in ['train', 'val']}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=4,
shuffle=True, num_workers=4)
for x in ['train', 'val']}
dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}
class_names = image_datasets['train'].classes
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# +
def imshow(inp, title=None):
"""Imshow for Tensor."""
inp = inp.numpy().transpose((1, 2, 0))
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
inp = std * inp + mean
inp = np.clip(inp, 0, 1)
plt.imshow(inp)
if title is not None:
plt.title(title)
plt.pause(0.001) # pause a bit so that plots are updated
# Get a batch of training data
inputs, classes = next(iter(dataloaders['train']))
# Make a grid from batch
out = torchvision.utils.make_grid(inputs)
imshow(out, title=[class_names[x] for x in classes])
# -
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
since = time.time()
best_model_wts = copy.deepcopy(model.state_dict())
best_acc = 0.0
for epoch in range(num_epochs):
print('Epoch {}/{}'.format(epoch, num_epochs - 1))
print('-' * 10)
# Each epoch has a training and validation phase
for phase in ['train', 'val']:
if phase == 'train':
model.train() # Set model to training mode
else:
model.eval() # Set model to evaluate mode
running_loss = 0.0
running_corrects = 0
# Iterate over data.
for inputs, labels in dataloaders[phase]:
inputs = inputs.to(device)
labels = labels.to(device)
# zero the parameter gradients
optimizer.zero_grad()
# forward
# track history if only in train
with torch.set_grad_enabled(phase == 'train'):
outputs = model(inputs)
_, preds = torch.max(outputs, 1)
loss = criterion(outputs, labels)
# backward + optimize only if in training phase
if phase == 'train':
loss.backward()
optimizer.step()
# statistics
running_loss += loss.item() * inputs.size(0)
running_corrects += torch.sum(preds == labels.data)
if phase == 'train':
scheduler.step()
epoch_loss = running_loss / dataset_sizes[phase]
epoch_acc = running_corrects.double() / dataset_sizes[phase]
print('{} Loss: {:.4f} Acc: {:.4f}'.format(
phase, epoch_loss, epoch_acc))
# deep copy the model
if phase == 'val' and epoch_acc > best_acc:
best_acc = epoch_acc
best_model_wts = copy.deepcopy(model.state_dict())
print()
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(
time_elapsed // 60, time_elapsed % 60))
print('Best val Acc: {:4f}'.format(best_acc))
# load best model weights
model.load_state_dict(best_model_wts)
return model
def visualize_model(model, num_images=6):
was_training = model.training
model.eval()
images_so_far = 0
fig = plt.figure()
with torch.no_grad():
for i, (inputs, labels) in enumerate(dataloaders['val']):
inputs = inputs.to(device)
labels = labels.to(device)
outputs = model(inputs)
_, preds = torch.max(outputs, 1)
for j in range(inputs.size()[0]):
images_so_far += 1
ax = plt.subplot(num_images//2, 2, images_so_far)
ax.axis('off')
ax.set_title('predicted: {}'.format(class_names[preds[j]]))
imshow(inputs.cpu().data[j])
if images_so_far == num_images:
model.train(mode=was_training)
return
model.train(mode=was_training)
# +
model_ft = models.resnet18(pretrained=True)
num_ftrs = model_ft.fc.in_features
# Here the size of each output sample is set to 2.
# Alternatively, it can be generalized to nn.Linear(num_ftrs, len(class_names)).
model_ft.fc = nn.Linear(num_ftrs, 2)
model_ft = model_ft.to(device)
criterion = nn.CrossEntropyLoss()
# Observe that all parameters are being optimized
optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)
# Decay LR by a factor of 0.1 every 7 epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)
# -
model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler,
num_epochs=25)
visualize_model(model_ft)
# +
model_conv = torchvision.models.resnet18(pretrained=True)
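# Freeze all the pretrained weights so that only the newly added final layer is trained (feature extraction)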
for param in model_conv.parameters():
param.requires_grad = False
# Parameters of newly constructed modules have requires_grad=True by default
num_ftrs = model_conv.fc.in_features
model_conv.fc = nn.Linear(num_ftrs, 2)
model_conv = model_conv.to(device)
criterion = nn.CrossEntropyLoss()
# Observe that only parameters of final layer are being optimized as
# opposed to before.
optimizer_conv = optim.SGD(model_conv.fc.parameters(), lr=0.001, momentum=0.9)
# Decay LR by a factor of 0.1 every 7 epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_conv, step_size=7, gamma=0.1)
# -
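
# A quick sanity check (an addition, not part of the original tutorial): only
# the new `fc` layer of `model_conv` should be trainable, whereas the
# fine-tuned `model_ft` above updates every parameter.

# +
def count_trainable(model):
    # Count the elements of all parameters that still require gradients.
    return sum(p.numel() for p in model.parameters() if p.requires_grad)


print('model_ft trainable parameters:  ', count_trainable(model_ft))
print('model_conv trainable parameters:', count_trainable(model_conv))
# -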
model_conv = train_model(model_conv, criterion, optimizer_conv,
exp_lr_scheduler, num_epochs=25)
# +
visualize_model(model_conv)
plt.ioff()
plt.show()
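
# As a follow-up sketch (not in the original tutorial), the trained weights can
# be saved and reloaded with the standard `state_dict` API; the file name used
# here is only an example.

# +
torch.save(model_conv.state_dict(), 'model_conv_weights.pth')

# Rebuild the same architecture and load the saved weights for later inference.
model_restored = torchvision.models.resnet18()
model_restored.fc = nn.Linear(model_restored.fc.in_features, 2)
model_restored.load_state_dict(torch.load('model_conv_weights.pth'))
model_restored = model_restored.to(device).eval()
# -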
| transfer_learning_tutorial.ipynb |
# ---
# jupyter:
# jupyterbook:
# run: false
# jupytext:
# notebook_metadata_filter: all,-language_info
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# For background, please read the [functions as
# values]({{ site.baseurl }}/chapters/07/functions_as_values) page.
#
# Consider these two functions:
def add(a, b):
return a + b
def sub(a, b):
return a - b
# Here's `add` in action:
add(4, 1)
# Here's `sub` in action:
sub(4, 1)
# There's some code below that does not give the result we need, because the
# assignment statement does not set `func` to the right function. Set `func`
# correctly so the result equals 2:
func = add
func(10, 8)
# Set `my_func1` and `my_func2` in the code fragment below, so that the
# result is 12:
my_func1 = # Your code here
my_func2 = # Your code here
my_func1(8, 2) + my_func2(3, 3)
# Here is a function that takes three arguments. The first, called `f`,
# should be set to a function value - that is, a value that is the internal
# representation of a function. The second and third arguments, called `x`
# and `y`, should be set to number values.
def do_it(f, x, y):
return f(x, y)
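
# For example (an illustration added here, not part of the original exercise),
# we can pass `sub` as the function value, along with two numbers:

do_it(sub, 10, 4)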
# Set `another_func` so the result returned is 4:
another_func = # Your code here
do_it(another_func, 1, 3)
| notebooks/exercises/functions_values_exercises.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.8 64-bit (''loftr'': conda)'
# name: python388jvsc74a57bd0e2d1507a0fcefcbd70c2e8d5c2edae879585b2c8df0be6cdbe280bf251175c7f
# ---
# +
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
import os
os.chdir("..")
import numpy as np
import matplotlib.pyplot as plt
import cv2
from pathlib import Path
from src.utils.plotting import make_matching_figure, error_colormap
# -
def make_prediction_and_evaluation_plot(root_dir, pe, path=None, source='ScanNet'):
img0 = cv2.imread(str(root_dir / pe['pair_names'][0]), cv2.IMREAD_GRAYSCALE)
img1 = cv2.imread(str(root_dir / pe['pair_names'][1]), cv2.IMREAD_GRAYSCALE)
if source == 'ScanNet':
img0 = cv2.resize(img0, (640, 480))
img1 = cv2.resize(img1, (640, 480))
thr = 5e-4
mkpts0 = pe['mkpts0_f']
mkpts1 = pe['mkpts1_f']
color = error_colormap(pe['epi_errs'], thr, alpha=0.3)
text = [
f"LoFTR",
f"#Matches: {len(mkpts0)}",
f"$\\Delta$R:{pe['R_errs']:.2f}°, $\\Delta$t:{pe['t_errs']:.2f}°",
]
if path:
make_matching_figure(img0, img1, mkpts0, mkpts1, color, text=text, path=path)
else:
return make_matching_figure(img0, img1, mkpts0, mkpts1, color, text=text)
# ## Visualization on ScanNet
# - Prediction and Evaluation
root_dir = Path("data/scannet/test") # Scannet
npy_path = "dump/loftr_ds_indoor/LoFTR_pred_eval.npy"
dumps = np.load(npy_path, allow_pickle=True)
fig = make_prediction_and_evaluation_plot(root_dir, dumps[2], source='ScanNet')
# ## Visualization on MegaDepth
# - Prediction and Evaluation
root_dir = Path("data/megadepth/test") # MegaDepth
npy_path = "dump/loftr_ds_outdoor/LoFTR_pred_eval.npy"
dumps = np.load(npy_path, allow_pickle=True)
fig = make_prediction_and_evaluation_plot(root_dir, dumps[51], source='MegaDepth')
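
# The `path` argument of `make_prediction_and_evaluation_plot` can also be used
# to write a figure straight to disk instead of returning it; the output file
# name below is only a placeholder.

make_prediction_and_evaluation_plot(root_dir, dumps[51], path="megadepth_match_vis.png", source='MegaDepth')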
| notebooks/visualize_dump_results.ipynb |