path | concatenated_notebook
---|---|
optimization/rosenbrock/optimize_rosenbrock.ipynb | ###Markdown
Testing out Rosenbrock function optimization with gradient descent. Inspiration: https://www.indusmic.com/post/rosenbrock-function
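For reference, the two-dimensional Rosenbrock variant implemented in the next cell (with scale parameter $b$, set to 10 below rather than the conventional 100) and the partial derivatives used by the gradient descent loop are: $$f(w_1, w_2) = b\,(w_2 - w_1^2)^2 + (w_1 - 1)^2$$ $$\frac{\partial f}{\partial w_1} = -4b\,w_1\,(w_2 - w_1^2) + 2\,(w_1 - 1), \qquad \frac{\partial f}{\partial w_2} = 2b\,(w_2 - w_1^2)$$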
###Code
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
from matplotlib import cm
# 2 dim rosenbrock function and its partial derivatives
b = 10
f = lambda w1, w2: b * (w2 - w1**2)**2 + (w1 - 1)**2
f_w1 = lambda w1, w2: -4 * b * w1 * (w2 - w1**2) + 2 * (w1 - 1)
f_w2 = lambda w1, w2: 2 * b * (w2 - w1**2)
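# Optional sanity check (added here for illustration, not part of the original
# notebook): verify the analytic partial derivatives above against central
# finite differences at an arbitrary point.
_w1, _w2, _h = 0.3, -0.7, 1e-6
assert abs(f_w1(_w1, _w2) - (f(_w1 + _h, _w2) - f(_w1 - _h, _w2)) / (2 * _h)) < 1e-4
assert abs(f_w2(_w1, _w2) - (f(_w1, _w2 + _h) - f(_w1, _w2 - _h)) / (2 * _h)) < 1e-4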
# hyperparameters
eta = .01 # learning rate
iterations = 1000
sc = 1.5 # scale of plot
plot_function = 1 # plot surface of function
# initial values
w1, w2 = np.random.uniform(-1, 1, 2)
# the history of minimizing (for plotting)
hw1, hw2, hf = [w1], [w2], [f(w1, w2)]
# gradient descent
for i in range(iterations):
    w1b = w1
    w1 -= eta * f_w1(w1, w2)
    w2 -= eta * f_w2(w1b, w2)
    hw1.append(w1)
    hw2.append(w2)
    hf.append(f(w1, w2))
# Initialize figure
fig = plt.figure(figsize=(36, 21))
ax = plt.axes(projection='3d')
# Evaluate function
X1 = np.arange(sc*-2, sc*2, sc*0.15)
X2 = np.arange(sc*-1, sc*3, sc*0.15)
X1, X2 = np.meshgrid(X1, X2)
Z = f(X1, X2)
# Plot the surface
if plot_function:
    # ax.plot_surface(X1, X2, Z, cmap=cm.gist_heat_r, linewidth=0, antialiased=True)
    ax.scatter(X1, X2, Z, c='g')
points = [hw1, hw2, hf]
ax.scatter(*points, c='r')
plt.show()
# create a 'movie' of the graph
ax.elev = 10
for i in range(0, 360):
    ax.azim = i
    fig.savefig("./optimize_rosenbrock/%d.png" % i)
# make images to movie
!ffmpeg -r 60 -f image2 -s 1920x1080 -i ./optimize_rosenbrock/%d.png -vcodec libx264 -crf 25 -pix_fmt yuv420p out.mp4
###Output
ffmpeg version 4.4-6ubuntu5 Copyright (c) 2000-2021 the FFmpeg developers
built with gcc 11 (Ubuntu 11.2.0-7ubuntu1)
configuration: --prefix=/usr --extra-version=6ubuntu5 --toolchain=hardened --libdir=/usr/lib/x86_64-linux-gnu --incdir=/usr/include/x86_64-linux-gnu --arch=amd64 --enable-gpl --disable-stripping --enable-gnutls --enable-ladspa --enable-libaom --enable-libass --enable-libbluray --enable-libbs2b --enable-libcaca --enable-libcdio --enable-libcodec2 --enable-libdav1d --enable-libflite --enable-libfontconfig --enable-libfreetype --enable-libfribidi --enable-libgme --enable-libgsm --enable-libjack --enable-libmp3lame --enable-libmysofa --enable-libopenjpeg --enable-libopenmpt --enable-libopus --enable-libpulse --enable-librabbitmq --enable-librubberband --enable-libshine --enable-libsnappy --enable-libsoxr --enable-libspeex --enable-libsrt --enable-libssh --enable-libtheora --enable-libtwolame --enable-libvidstab --enable-libvorbis --enable-libvpx --enable-libwebp --enable-libx265 --enable-libxml2 --enable-libxvid --enable-libzimg --enable-libzmq --enable-libzvbi --enable-lv2 --enable-omx --enable-openal --enable-opencl --enable-opengl --enable-sdl2 --enable-pocketsphinx --enable-librsvg --enable-libmfx --enable-libdc1394 --enable-libdrm --enable-libiec61883 --enable-nvenc --enable-chromaprint --enable-frei0r --enable-libx264 --enable-shared
libavutil 56. 70.100 / 56. 70.100
libavcodec 58.134.100 / 58.134.100
libavformat 58. 76.100 / 58. 76.100
libavdevice 58. 13.100 / 58. 13.100
libavfilter 7.110.100 / 7.110.100
libswscale 5. 9.100 / 5. 9.100
libswresample 3. 9.100 / 3. 9.100
libpostproc 55. 9.100 / 55. 9.100
Input #0, image2, from './optimize_rosenbrock/%d.png':
Duration: 00:00:06.00, start: 0.000000, bitrate: N/A
Stream #0:0: Video: png, rgba(pc), 2592x1512 [SAR 2835:2835 DAR 12:7], 60 fps, 60 tbr, 60 tbn, 60 tbc
Stream mapping:
Stream #0:0 -> #0:0 (png (native) -> h264 (libx264))
Press [q] to stop, [?] for help
[libx264 @ 0x558574067000] using SAR=1/1
[libx264 @ 0x558574067000] using cpu capabilities: MMX2 SSE2Fast SSSE3 SSE4.2 AVX FMA3 BMI2 AVX2
[libx264 @ 0x558574067000] profile High, level 5.1, 4:2:0, 8-bit
[libx264 @ 0x558574067000] 264 - core 160 r3011 cde9a93 - H.264/MPEG-4 AVC codec - Copyleft 2003-2020 - http://www.videolan.org/x264.html - options: cabac=1 ref=3 deblock=1:0:0 analyse=0x3:0x113 me=hex subme=7 psy=1 psy_rd=1.00:0.00 mixed_ref=1 me_range=16 chroma_me=1 trellis=1 8x8dct=1 cqm=0 deadzone=21,11 fast_pskip=1 chroma_qp_offset=-2 threads=12 lookahead_threads=2 sliced_threads=0 nr=0 decimate=1 interlaced=0 bluray_compat=0 constrained_intra=0 bframes=3 b_pyramid=2 b_adapt=1 b_bias=0 direct=1 weightb=1 open_gop=0 weightp=2 keyint=250 keyint_min=25 scenecut=40 intra_refresh=0 rc_lookahead=40 rc=crf mbtree=1 crf=25.0 qcomp=0.60 qpmin=0 qpmax=69 qpstep=4 ip_ratio=1.40 aq=1:1.00
Output #0, mp4, to 'out.mp4':
Metadata:
encoder : Lavf58.76.100
Stream #0:0: Video: h264 (avc1 / 0x31637661), yuv420p(tv, progressive), 2592x1512 [SAR 1:1 DAR 12:7], q=2-31, 60 fps, 15360 tbn
Metadata:
encoder : Lavc58.134.100 libx264
Side data:
cpb: bitrate max/min/avg: 0/0/0 buffer size: 0 vbv_delay: N/A
frame= 360 fps= 46 q=-1.0 Lsize= 2910kB time=00:00:05.95 bitrate=4006.4kbits/s speed=0.756x
video:2905kB audio:0kB subtitle:0kB other streams:0kB global headers:0kB muxing overhead: 0.172691%
[libx264 @ 0x558574067000] frame I:2 Avg QP:19.38 size: 27712
[libx264 @ 0x558574067000] frame P:102 Avg QP:26.12 size: 13303
[libx264 @ 0x558574067000] frame B:256 Avg QP:30.17 size: 6100
[libx264 @ 0x558574067000] consecutive B-frames: 1.4% 8.3% 9.2% 81.1%
[libx264 @ 0x558574067000] mb I I16..4: 32.4% 61.0% 6.6%
[libx264 @ 0x558574067000] mb P I16..4: 1.6% 1.8% 1.0% P16..4: 4.0% 2.9% 1.5% 0.0% 0.0% skip:87.1%
[libx264 @ 0x558574067000] mb B I16..4: 0.7% 0.2% 0.1% B16..8: 8.0% 2.7% 0.6% direct: 0.2% skip:87.6% L0:47.2% L1:46.3% BI: 6.5%
[libx264 @ 0x558574067000] 8x8 transform intra:40.7% inter:35.5%
[libx264 @ 0x558574067000] coded y,uvDC,uvAC intra: 12.3% 4.8% 4.0% inter: 1.2% 0.6% 0.3%
[libx264 @ 0x558574067000] i16 v,h,dc,p: 90% 8% 2% 0%
[libx264 @ 0x558574067000] i8 v,h,dc,ddl,ddr,vr,hd,vl,hu: 28% 5% 65% 0% 0% 0% 0% 0% 0%
[libx264 @ 0x558574067000] i4 v,h,dc,ddl,ddr,vr,hd,vl,hu: 43% 18% 28% 2% 2% 2% 2% 2% 2%
[libx264 @ 0x558574067000] i8c dc,h,v,p: 93% 3% 4% 0%
[libx264 @ 0x558574067000] Weighted P-Frames: Y:0.0% UV:0.0%
[libx264 @ 0x558574067000] ref P L0: 68.1% 4.3% 19.0% 8.6%
[libx264 @ 0x558574067000] ref B L0: 84.8% 12.5% 2.7%
[libx264 @ 0x558574067000] ref B L1: 95.1% 4.9%
[libx264 @ 0x558574067000] kb/s:3965.31
|
02_MetaLearning/02_B_Eval_Meta_Model_per_Drift_Class.ipynb | ###Markdown
Evaluate Meta-Model per Drift Class, before and after fine-tuning!
###Code
import arrow
import learn2learn as l2l
import numpy as np
import os
import pickle
import torch
from torch.nn import Module, Linear, Sequential, ReLU
from torch.nn.functional import mse_loss
from torch.optim import Adam, SGD
from torch.utils.data import TensorDataset
from sklearn.model_selection import train_test_split
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from utils.evalUtils import print_confusion_matrix
from sklearn.preprocessing import MinMaxScaler
%run -i ./scripts/setConfigs.py
###Output
Set configs..
###Markdown
Read Meta-Model
###Code
%run -i ./scripts/ReadSimpleAE_MetaModel.py
###Output
Load Meta Model AE..
/home/torge/dev/masterthesis_code/02_Experimente/MetaLearning/models/model_bib/20200319_firstMetaModel.pt
SimpleAutoEncoder(
(encoder): Sequential(
(0): Linear(in_features=17, out_features=12, bias=True)
(1): ReLU(inplace=True)
(2): Linear(in_features=12, out_features=8, bias=True)
(3): Tanh()
)
(decoder): Sequential(
(0): Linear(in_features=8, out_features=12, bias=True)
(1): ReLU(inplace=True)
(2): Linear(in_features=12, out_features=17, bias=True)
(3): Tanh()
)
)
###Markdown
Read LogReg
###Code
%run -i ./scripts/ReadLogReg_Meta.py
###Output
Load trained LogReg..
LogisticRegression(C=1.0, class_weight={1: 2.0}, dual=False, fit_intercept=True,
intercept_scaling=1, l1_ratio=None, max_iter=100,
multi_class='auto', n_jobs=None, penalty='l2',
random_state=42, solver='liblinear', tol=0.0001, verbose=0,
warm_start=False)
###Markdown
Prepare the data
###Code
data_fn = os.path.join(data_path, 'simulation_data_y_2020_2021_reduced.h5')
df_data_train = pd.read_hdf(data_fn, key='df')
print('Shape of X_train data: {}'.format(df_data_train.shape))
data_fn = os.path.join(data_path, 'anomalous_drifted_data_y_2023_reduced_more_cos_phi.h5')
df_data_drifted_ano = pd.read_hdf(data_fn, key='df')
print('Shape of X_drifted,ano data: {}'.format(df_data_drifted_ano.shape))
s_drift_labels = df_data_drifted_ano['drift_labels']
s_drift_labels.reset_index(inplace=True, drop=True)
s_ano_labels = df_data_drifted_ano['anomaly_labels']
s_ano_labels.reset_index(inplace=True, drop=True)
df_data_drifted_ano.drop('drift_labels', axis=1, inplace=True)
df_data_drifted_ano.drop('anomaly_labels', axis=1, inplace=True)
print('Shape of X_drifted,ano data: {}'.format(df_data_drifted_ano.shape))
print('Scale data..')
scaler_train = MinMaxScaler((-1,1))
scaler_train = scaler_train.fit(df_data_train)
scaled_drifted_ano = scaler_train.transform(df_data_drifted_ano.to_numpy())
del(df_data_train)
# build tensor from numpy
anormal_drifted_torch_tensor = torch.from_numpy(scaled_drifted_ano).type(torch.FloatTensor)
###Output
_____no_output_____
###Markdown
Make Predictions for Evaluation
###Code
re_drifted_ano = []
for val in anormal_drifted_torch_tensor:
    loss = meta_model.calc_reconstruction_error(val)
    re_drifted_ano.append(loss.item())
s_re_drifted_ano = pd.Series(re_drifted_ano)
s_re_drifted_ano = s_re_drifted_ano.values.reshape(-1,1)
predictions_drifted_ano = []
for val in s_re_drifted_ano:
    val = val.reshape(1, -1)
    pred = clf_meta.predict(val)
    predictions_drifted_ano.append(pred[0])
###Output
_____no_output_____
###Markdown
Build dataset for analysis
###Code
df_analyze = pd.DataFrame()
df_analyze['anomaly_labels'] = s_ano_labels
df_analyze['drift_labels'] = s_drift_labels
df_analyze['reconstruction_error'] = s_re_drifted_ano
df_analyze['ano_prediction'] = predictions_drifted_ano
df_analyze.head()
###Output
_____no_output_____
###Markdown
Split Dataset per Drift Class
###Code
df_drift_class_0 = df_analyze[df_analyze['drift_labels'] == 0]
df_drift_class_1 = df_analyze[df_analyze['drift_labels'] == 1]
df_drift_class_2 = df_analyze[df_analyze['drift_labels'] == 2]
df_drift_class_3 = df_analyze[df_analyze['drift_labels'] == 3]
df_drift_class_0['reduced_ano_labels'] = [1 if x > 0 else 0 for x in df_drift_class_0['anomaly_labels']]
df_drift_class_1['reduced_ano_labels'] = [1 if x > 0 else 0 for x in df_drift_class_1['anomaly_labels']]
df_drift_class_2['reduced_ano_labels'] = [1 if x > 0 else 0 for x in df_drift_class_2['anomaly_labels']]
df_drift_class_3['reduced_ano_labels'] = [1 if x > 0 else 0 for x in df_drift_class_3['anomaly_labels']]
df_drift_class_0.describe()
df_drift_class_1.describe()
df_drift_class_2.describe()
df_drift_class_3.describe()
###Output
_____no_output_____
###Markdown
KPIs per Drift Class
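For reference, with $TP$, $TN$, $FP$, $FN$ taken from each confusion matrix below, the quantities reported by `calc_cm_metrics` are assumed to follow the standard definitions: $$\mathrm{accuracy} = \frac{TP+TN}{TP+TN+FP+FN},\quad \mathrm{precision} = \frac{TP}{TP+FP},\quad \mathrm{specificity} = \frac{TN}{TN+FP},\quad \mathrm{sensitivity} = \frac{TP}{TP+FN},\quad F_1 = \frac{2\,\mathrm{precision}\cdot\mathrm{sensitivity}}{\mathrm{precision}+\mathrm{sensitivity}}$$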
###Code
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from mlxtend.plotting import plot_confusion_matrix
from utils.evalUtils import calc_cm_metrics
cm_dc_0 = confusion_matrix(df_drift_class_0['reduced_ano_labels'], df_drift_class_0['ano_prediction'])
tn, fp, fn, tp = cm_dc_0.ravel()
accuracy, precision, specificity, sensitivity, f1_score = calc_cm_metrics(tp, tn, fp, fn)
print('Accuracy: {}'.format(accuracy))
print('Precision: {}'.format(precision))
print('Specificity: {}'.format(specificity))
print('Sensitivity: {}'.format(sensitivity))
print('F1-Score: {}'.format(f1_score))
fig = print_confusion_matrix(cm_dc_0, class_names=['k. Anomalie', 'Anomalie'])
fig.suptitle('Confusion Matrix Drift Class 0 (Keine Manipulation)', fontsize=20)
file_n = os.path.join(fig_path, 'cm_drift_class_0.pdf')
fig.savefig(file_n, bbox_inches = 'tight', pad_inches = 0 )
cm_dc_1 = confusion_matrix(df_drift_class_1['reduced_ano_labels'], df_drift_class_1['ano_prediction'])
tn, fp, fn, tp = cm_dc_1.ravel()
accuracy, precision, specificity, sensitivity, f1_score = calc_cm_metrics(tp, tn, fp, fn)
print('Accuracy: {}'.format(accuracy))
print('Precision: {}'.format(precision))
print('Specificity: {}'.format(specificity))
print('Sensitivity: {}'.format(sensitivity))
print('F1-Score: {}'.format(f1_score))
fig = print_confusion_matrix(cm_dc_1, class_names=['k. Anomalie', 'Anomalie'])
fig.suptitle('Confusion Matrix Drift Class 1 (Switch)', fontsize=20)
file_n = os.path.join(fig_path, 'meta_modell_cm_drift_class_1.pdf')
fig.savefig(file_n, bbox_inches = 'tight', pad_inches = 0 )
cm_dc_2 = confusion_matrix(df_drift_class_2['reduced_ano_labels'], df_drift_class_2['ano_prediction'])
tn, fp, fn, tp = cm_dc_2.ravel()
accuracy, precision, specificity, sensitivity, f1_score = calc_cm_metrics(tp, tn, fp, fn)
print('Accuracy: {}'.format(accuracy))
print('Precision: {}'.format(precision))
print('Specificity: {}'.format(specificity))
print('Sensitivity: {}'.format(sensitivity))
print('F1-Score: {}'.format(f1_score))
fig = print_confusion_matrix(cm_dc_2, class_names=['k. Anomalie', 'Anomalie'])
fig.suptitle('Confusion Matrix Drift Class 2 (Load Mapping)', fontsize=20)
file_n = os.path.join(fig_path, 'meta_modell_cm_drift_class_2.pdf')
fig.savefig(file_n, bbox_inches = 'tight', pad_inches = 0 )
cm_dc_3 = confusion_matrix(df_drift_class_3['reduced_ano_labels'], df_drift_class_3['ano_prediction'])
tn, fp, fn, tp = cm_dc_3.ravel()
accuracy, precision, specificity, sensitivity, f1_score = calc_cm_metrics(tp, tn, fp, fn)
print('Accuracy: {}'.format(accuracy))
print('Precision: {}'.format(precision))
print('Specificity: {}'.format(specificity))
print('Sensitivity: {}'.format(sensitivity))
print('F1-Score: {}'.format(f1_score))
fig = print_confusion_matrix(cm_dc_3, class_names=['k. Anomalie', 'Anomalie'])
fig.suptitle('Confusion Matrix Drift Class 3 (Cos Phi)', fontsize=20)
file_n = os.path.join(fig_path, 'meta_modell_cm_drift_class_3.pdf')
fig.savefig(file_n, bbox_inches = 'tight', pad_inches = 0 )
###Output
_____no_output_____ |
scratch work/Yuqing-Data-Merge/df4-Scenario2.ipynb | ###Markdown
Gradient descent algorithm for Scenario 2. In this part, we implement a gradient descent algorithm to minimize the objective loss function of Scenario 2: $$\min F := \min \frac{1}{2(n-1000)} \sum_{i=1000}^{n} \big(\mathrm{fbpredict}(i) + a\,\mathrm{tby}(i) + b\,\mathrm{ffr}(i) + c\,\mathrm{fta}(i) - \mathrm{asp}(i)\big)^2$$ Gradient descent update: $$ \beta_k = \beta_{k-1} - \delta\, \nabla F(\beta_{k-1}), $$ where the step size $\delta$ controls how far each iteration moves. Detailed plan: First, split the data into train and test sets with 80% and 20% respectively. For the training part we need the prophet() predicted price, and there are a couple of issues: prophet() cannot predict too far into the future, and we cannot call prophet() too many times because each call takes a lot of time. So we use a sliding-window strategy: 1) Split the training data into train_1 and train_2, where train_1 is used as a sliding window to fit prophet(), which then gives predictions on train_2; train_2 is used to train the model proposed above. 2) Once we have full-size (size of train_2) predictions from prophet(), we use gradient descent to fit the above model, extracting the feature coefficients to make predictions on the test data.
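For completeness, the gradient used in the update above follows directly from the quadratic loss. Writing $r(i) = \mathrm{fbpredict}(i) + a\,\mathrm{tby}(i) + b\,\mathrm{ffr}(i) + c\,\mathrm{fta}(i) - \mathrm{asp}(i)$ for the residual, the components of $\nabla F$ with respect to $\beta = (a, b, c)$ are: $$\frac{\partial F}{\partial a} = \frac{1}{n-1000} \sum_{i=1000}^{n} r(i)\,\mathrm{tby}(i), \quad \frac{\partial F}{\partial b} = \frac{1}{n-1000} \sum_{i=1000}^{n} r(i)\,\mathrm{ffr}(i), \quad \frac{\partial F}{\partial c} = \frac{1}{n-1000} \sum_{i=1000}^{n} r(i)\,\mathrm{fta}(i).$$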
###Code
import pandas as pd
import numpy as np
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import FunctionTransformer
from numpy import meshgrid
## For plotting
import matplotlib.pyplot as plt
from matplotlib import style
import datetime as dt
import seaborn as sns
sns.set_style("whitegrid")
df= pd.read_csv('df4.csv', parse_dates=['Date'])
df = df.rename(columns = {"Date":"ds","Close":"y"})
df
# len(df)
from datetime import datetime
p = 0.95
# Train around 90% of dataset
cutoff = int((p*len(df)//100)*100)
df_train = df[:cutoff].copy()
df_test = df.drop(df_train.index).copy()
print(df_train, df_test)
###Output
ds y tby ffr fta eps div
0 2003-01-02 909.03 4.07 1.24 732202.0 40.40 1.79
1 2003-01-03 908.59 4.05 1.24 732202.0 40.40 1.79
2 2003-01-06 929.01 4.09 1.24 732202.0 40.40 1.79
3 2003-01-07 922.93 4.04 1.24 732202.0 40.40 1.79
4 2003-01-08 909.93 4.00 1.24 724902.0 40.40 1.79
... ... ... ... ... ... ... ...
3995 2018-12-24 2351.10 2.74 2.27 4084274.0 138.43 1.96
3996 2018-12-26 2467.70 2.81 2.27 4075636.0 138.43 1.96
3997 2018-12-27 2488.83 2.77 2.27 4075636.0 138.43 1.96
3998 2018-12-28 2485.74 2.72 2.27 4075636.0 138.43 1.96
3999 2018-12-31 2506.85 2.69 2.27 4075636.0 139.58 2.09
[4000 rows x 7 columns] ds y tby ffr fta eps div
4000 2019-01-02 2510.03 2.66 2.40 4058378.0 139.58 2.09
4001 2019-01-03 2447.89 2.56 2.40 4058378.0 139.58 2.09
4002 2019-01-04 2531.94 2.67 2.40 4058378.0 139.58 2.09
4003 2019-01-07 2549.69 2.70 2.40 4058378.0 139.58 2.09
4004 2019-01-08 2574.41 2.73 2.40 4058378.0 139.58 2.09
... ... ... ... ... ... ... ...
4515 2021-01-25 3855.36 1.05 0.09 7414942.0 95.72 1.58
4516 2021-01-26 3849.62 1.05 0.09 7414942.0 95.72 1.58
4517 2021-01-27 3750.77 1.04 0.09 7404926.0 95.72 1.58
4518 2021-01-28 3787.38 1.07 0.09 7404926.0 95.72 1.58
4519 2021-01-29 3714.24 1.11 0.09 7404926.0 95.72 1.58
[520 rows x 7 columns]
###Markdown
We use prophet() to make predictions. We split the training data into train_1 and train_2 with a 40%/60% ratio: train_1 is used to fit prophet(), which then predicts on train_2. With those predictions, we feed the data into the Scenario 2 model and train again to get the parameters a, b, c, ....
###Code
#prophet part
from fbprophet import Prophet
start = 1000 # the number of initial data for training
pred_size =100 # predicted periods
num_winds = int((df_train.shape[0]-start)/pred_size) # (4000-1000)/100 = 30
pro_pred = []
# use accumulated data to predict the next pred_size data
for i in range(num_winds):
    tmp_train = df_train.iloc[: start + i*pred_size].copy()
    fbp = Prophet(daily_seasonality=True)
    # fit close price using fbprophet model
    fbp.fit(tmp_train[['ds','y']])
    # predict pred_size futures and get the forecast price
    fut = fbp.make_future_dataframe(periods = pred_size,)
    tmp_forecast = fbp.predict(fut)
    # only keep the forecast on the held-out part of the temporary training data
    pred = tmp_forecast[start + i*pred_size:].yhat
    pro_pred.append(pred)
pro_pred
flat_pro_pred = [item for l1 in pro_pred for item in l1]
df.columns
def powerset_no_empty(s):
    power_set = []
    x = len(s)
    for i in range(1 << x):
        power_set.append([s[j] for j in range(x) if (i & (1 << j))])
    return power_set[1:]
possible_features = powerset_no_empty(['tby', 'ffr', 'fta', 'eps', 'div'])
print(len(possible_features))
possible_features
from sklearn.linear_model import LinearRegression
reg = LinearRegression(fit_intercept=False, normalize=True, copy_X = True)
reg.fit(df_train[start:cutoff][possible_features[30]], df_train[start:cutoff]['y'] - flat_pro_pred)
coef = []
t=30
for i in range(len(possible_features[t])):
    coef.append(np.round(reg.coef_[i], 5))
print(coef)
# Forecast the Test Data
from fbprophet import Prophet
test_time = int((1-p)* len(df))
fbp = Prophet(daily_seasonality=True)
fbp.fit(df_train[['ds','y']])
fut = fbp.make_future_dataframe(periods = test_time,)
forecast = fbp.predict(fut)
pred_test = forecast[cutoff:cutoff+test_time].yhat
pred_test = pred_test.ravel()
len(pred_test)
pp_test = pred_test.copy() # predicted price on testing data
pp_train = flat_pro_pred.copy() # predicted price on training data
for i in range(len(possible_features[t])):
    pp_test += coef[i] * df_test[df_test.columns[i+2]][:test_time].ravel()
    pp_train += coef[i] * df_train[df_train.columns[i+2]][start:].ravel()
from sklearn.metrics import mean_squared_error as MSE
# MSE for test data
# Actual close price: df_test[:test_time].y
# Predicted price by prophet: pred_test
# Predicted price by tuning
mse1 = MSE(df_test[:test_time].y,pred_test) #
mse2 = MSE(df_test[:test_time].y, pp_test)
print(mse1,mse2)
# MSE for train data
mse3 = MSE(df_train[start:].y, flat_pro_pred)
mse4 = MSE(df_train[start:].y, pp_train)
print(mse3,mse4)
flat_pro_pred
# df_train['pp']=pd.Series(np.append([np.nan for i in range(1000)], pp_train))
# plt.figure(figsize=(11,6))
# # plt.plot(range(1000,4000),df[1000:4000].fbsp,label='fb predicted price on test_data')
# plt.plot(range(1000,4000),df_train[1000:].pp,label="fitted values by our model")
# plt.plot(range(1000,4000), df_train[1000:].y ,label='ture price value')
# plt.legend(fontsize=13)
# plt.title("Fitting on the training data",fontsize=18)
# plt.figure(figsize=(11,6))
# plt.plot(range(0,test_time),pd.Series(pred_test),label='fb predicted price on test_data')
# plt.plot(range(0,test_time),pd.Series(pp_test),label='fitted value on test_data')
# plt.plot(range(0,test_time), df_test[:test_time].y,label='true price value on test')
# plt.legend(fontsize=13)
# plt.title("Prediction on the testing data",fontsize=18)
from sklearn.linear_model import LinearRegression
reg = LinearRegression(normalize=True, copy_X = True)
def get_X_y(df, features, target):
    # Returns X then y
    return np.array(df[features]), np.array(df[target])
from sklearn.metrics import mean_squared_error as MSE
def get_mse(model, X, y):
    # get the prediction
    pred = model.predict(X)
    # Returns the mse
    return MSE(pred, y)
from sklearn.base import clone
# NOTE: this scratch cell assumes a 'diff' target column has been added to df_train
X, y = get_X_y(df_train, possible_features, 'diff')
clone_reg = clone(reg)
clone_reg.fit(X, y)
pred_ = clone_reg.predict(X)
mse = get_mse(clone_reg, X, y)
###Output
_____no_output_____ |
Notebooks/Time_Series.ipynb | ###Markdown
Time Series: timestamps, periods, intervals
###Code
import numpy as np
import pandas as pd
np.random.seed(12345)
import matplotlib.pyplot as plt
plt.rc('figure', figsize=(10, 6))
PREVIOUS_MAX_ROWS = pd.options.display.max_rows
pd.options.display.max_rows = 20
np.set_printoptions(precision=4, suppress=True)
###Output
_____no_output_____
###Markdown
Operations with dates
###Code
from datetime import datetime
agora = datetime.now()
utc = datetime.utcnow()
agora.year
agora.month
agora.day
agora
# change the time
agora.replace(minute=10, hour=10, second=10)
# define a date
evento = datetime(2021, 8, 1)
evento
# subtracting two dates returns a timedelta object
delta = agora - evento
print(delta)
print(f"Desde o evento já se passaram {delta.days} dias e {delta.seconds // 3600} horas.")
# addition returns a new date and subtraction returns a delta
evento + delta
# day of the week
# use a tuple because this list is immutable
dias = ("Segunda", "Terça", "Quarta", "Quinta", "Sexta", "Sabado", "Domingo")
print(dias[agora.weekday()])
print(f"Dia que nasci: {dias[datetime(day=14, month=10, year=1985).weekday()]}")
###Output
Quarta
Dia que nasci: Segunda
###Markdown
Converting between string and datetime: strftime and strptime
###Code
# datetime -> string
print(f"Hoje (sem formatação): {agora}")
hoje_formatado = agora.strftime("%d/%m/%Y")
print(f"Hoje (formatado dd/mm/aaaa): {hoje_formatado}")
hoje_formatado = agora.strftime("%m/%d/%y")
print(f"Hoje (formatado mm/dd/aa): {hoje_formatado}")
hoje_formatado = agora.strftime("%d/%B/%y")
print(f"Hoje (formatado): {hoje_formatado}")
# string -> datetime. Unlike strftime(), which is an instance method, strptime() is a classmethod of datetime
nascimento = datetime.strptime("10/31/2021", "%m/%d/%Y")
print(nascimento)
print(type(nascimento))
# convert a list of dates given as strings
datestrs = ['7/6/2011', '8/6/2011', '18/10/2021']
datestrs = [datetime.strptime(dt, "%d/%m/%Y") for dt in datestrs]
datestrs
###Output
_____no_output_____
###Markdown
parse
###Code
# Parse: an alternative that simplifies mapping strings to dates without passing the format
from dateutil.parser import parse
# performs the conversion even when the strings use different formats
datesparse = ['7/06/2011', '8/16/2011', '18/10/2021', 'Jan 31, 1997 10:45 PM', None]
# dayfirst tells the parser which convention to assume
# even if the day appears in the month position, values greater than 12 are recognized and treated as the day
# None values must be handled, otherwise an error is raised
datesparse = [parse(dt, dayfirst=True) for dt in datesparse if dt != None]
datesparse
###Output
_____no_output_____
###Markdown
to_datetime
###Code
# parsing with to_datetime works the same way, but handles errors by inserting NaT (Not a Time)
# it returns a DatetimeIndex object with some additional properties
dates_todatetime = ['2/01/2011', '7/06/2011', '8/16/2011', '8/16/2011', '18/10/2021', 'Jan 31, 1997 10:45 PM', None]
dates_todatetime = pd.to_datetime(dates_todatetime)
dates_todatetime.name = "Indice_DateTime"
# additional properties of an index
print(dates_todatetime)
print("---------------------")
print(type(dates_todatetime))
print(dates_todatetime.is_unique)
print(dates_todatetime.name)
print("---------------------")
print(pd.isnull(dates_todatetime))
print("---------------------")
# handle the last value, which is NaT, with fillna
dates_todatetime = dates_todatetime.fillna(datetime.now())
dates_todatetime
###Output
DatetimeIndex(['2011-02-01 00:00:00', '2011-07-06 00:00:00',
'2011-08-16 00:00:00', '2011-08-16 00:00:00',
'2021-10-18 00:00:00', '1997-01-31 22:45:00',
'NaT'],
dtype='datetime64[ns]', name='Indice_DateTime', freq=None)
---------------------
<class 'pandas.core.indexes.datetimes.DatetimeIndex'>
False
Indice_DateTime
---------------------
[False False False False False False True]
---------------------
###Markdown
Time Series: these are Series indexed by dates
###Code
dates_list = [datetime(2011, 2, 1), datetime(2011, 1, 5), datetime(2011, 1, 7), datetime(2011, 1, 8), datetime(2011, 1, 10), datetime(2011, 1, 12)]
# a series indexed by dates
time_series = pd.Series(np.random.randn(6), index=dates_list, name="time_series")
time_series.index
# create a new time series using the list of dates built earlier
time_series2 = pd.Series(np.random.randn(7), index=dates_todatetime, name="time_series2")
time_series2.index
###Output
_____no_output_____
###Markdown
Operations between date-indexed series
###Code
# operations between date-indexed series
# the series are aligned by index
# keys are paired and the sum is computed
ts_soma = time_series + time_series2
# only on 2011-02-01 is there a match between the keys
ts_soma.dropna()
#ts_soma[ts_soma.notnull()]
###Output
_____no_output_____
###Markdown
Filtering time series (slicing)
###Code
time_series
# We can use an interpretable date string
# keep only dates from 2011-01-07 onwards
time_series['2011-01-07':]
# select two specific days
time_series[['2011-01-07', '2011-01-12']]
# slice using the implicit positional index
time_series[2:4]
# in longer time series, a year or a month can be used as a period filter
# create a long series
idx_datarange = pd.date_range(start="2020-01-01", end="2021-12-31")
time_series_grande = pd.Series(np.arange(len(idx_datarange)), index=idx_datarange)
time_series_grande
# select every day of 2021
time_series_grande["2021"]
# select one month of the year
time_series_grande["2021-12"]
# select a period
time_series_grande["2021-12-01": "2021-12-05"]
# remove every row "after" 2020-12-31
time_series_grande.truncate(after="2020-12-31")
###Output
_____no_output_____
###Markdown
Duplicate date indices
###Code
dates = pd.DatetimeIndex(['1/1/2000', '1/2/2000', '1/2/2000',
'1/2/2000', '1/3/2000'])
dup_ts = pd.Series(np.arange(5), index=dates)
dup_ts
# the index has duplicates
dup_ts.index.is_unique
# aggregate to check the degree of duplication before handling or aggregating
dup_ts.groupby(level=0).count()
# update the series with only the aggregated values, averaging the duplicates
dup_ts = dup_ts.groupby(level=0).mean()
print(f"O índice é unico? {dup_ts.is_unique}\n")
dup_ts
###Output
O índice é unico? True
###Markdown
Date Ranges
###Code
# create a range from a start date; the dates move forward
pd.date_range(start='2012-04-01', periods=20)
# create a date range from an end date; the dates move backward
pd.date_range(end='2012-06-01', periods=20)
# Create a range over a period; freq="D" gives one date per day in the period
date_index = pd.date_range(start="2020-01-01", end="2021-12-31", freq="D")
date_index
# change to freq="BM": the last business day of each month in the period
date_index = pd.date_range(start="2020-01-01", end="2021-12-31", freq="BM")
date_index
# normalize=True strips the time-of-day component from dates that have one
date_index = pd.date_range(start=datetime.now(), periods=20, normalize=True)
date_index
# every 1h30min
date_index = pd.date_range('2000-01-01', periods=10, freq='1h30min')
date_index
###Output
_____no_output_____
###Markdown
Time Zone
###Code
import pytz
# pytz.common_timezones
# the last 5 time zones in the list
print("US/Eastern" in pytz.common_timezones[-5:])
print(pytz.common_timezones[-5:])
# Brazil
[tz for tz in pytz.common_timezones if tz.startswith("America/Sao_Paulo")]
# the timezone is None by default
print(date_index.tz)
# UTC is the central +0 reference; any change adds or subtracts from it
date_index = pd.date_range('2000-01-01', periods=10, freq='1h30min', tz="UTC")
date_index
# Brazil (America/Sao_Paulo) timezone, offset from UTC
date_index = pd.date_range('2000-01-01', periods=10, freq='1h30min', tz="America/Sao_Paulo")
date_index
# convert from one timezone to another region (Brazil -> New York)
# a -5 hours timezone
date_index.tz_convert("America/New_York")
###Output
_____no_output_____
###Markdown
Timestamp
###Code
# create a timestamp, localize it, and convert it to another timezone
stamp = pd.Timestamp('2011-03-12 04:00')
stamp_utc = stamp.tz_localize('utc')
stamp_utc.tz_convert('America/New_York')
###Output
_____no_output_____
###Markdown
Resampling and frequency conversion: transforming a time series from one frequency to another. Downsampling converts higher frequencies to lower ones; upsampling converts lower frequencies to higher ones. Downsampling: resample()
###Code
# daily frequency
data_range_resample = pd.date_range('2000-01-01', periods=100, freq='D')
ts = pd.Series(np.random.randn(len(data_range_resample)), index=data_range_resample)
# similar to groupby: group at the month level and apply a mean aggregation
ts.resample('M').mean()
# ts.resample('M', kind='period').mean()
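# Upsampling sketch (added for illustration, not in the original notebook):
# going from monthly back to daily frequency creates new rows, which must be
# filled, e.g. by forward-filling the last known value.
ts_monthly = ts.resample('M').mean()
ts_daily_again = ts_monthly.resample('D').ffill()
ts_daily_again.head()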
###Output
_____no_output_____ |
app/notebooks/metal/shot_detection_weak_labels_downsampled.ipynb | ###Markdown
Train Metal LabelModel
###Code
# Imports required by the cells below (assumed, since they are not shown in this
# notebook dump); LabelModel here is Snorkel MeTaL's label model.
import numpy as np
import scipy as sp
import scipy.sparse
import torch
import matplotlib.pyplot as plt
from metal.label_model import LabelModel

L_train_path = '/lfs/1/danfu/esper/app/data/shot_detection_weak_labels/L_train_100_windows_downsampled.npz'
L_dev_path = '/lfs/1/danfu/esper/app/data/shot_detection_weak_labels/L_val_windows_downsampled.npz'
Y_dev_path = '/lfs/1/danfu/esper/app/data/shot_detection_weak_labels/Y_val_windows_downsampled.npy'
L_test_path = '/lfs/1/danfu/esper/app/data/shot_detection_weak_labels/L_test_windows_downsampled.npz'
Y_test_path = '/lfs/1/danfu/esper/app/data/shot_detection_weak_labels/Y_test_windows_downsampled.npy'
stride = 1
L_train = sp.sparse.load_npz(L_train_path).todense()[::stride]
L_dev = sp.sparse.load_npz(L_dev_path).todense()
Y_dev = np.load(Y_dev_path)
L_test = sp.sparse.load_npz(L_test_path).todense()
Y_test = np.load(Y_test_path)
label_model = LabelModel(k=2, seed=123)
label_model.train_model(L_train, class_balance=(0.15, 0.85), n_epochs=500, log_train_every=50)
label_model.score((L_dev, Y_dev), metric=['accuracy','precision', 'recall', 'f1'])
###Output
Computing O...
Estimating \mu...
[50 epo]: TRAIN:[loss=0.059]
[100 epo]: TRAIN:[loss=0.043]
[150 epo]: TRAIN:[loss=0.040]
[200 epo]: TRAIN:[loss=0.039]
[250 epo]: TRAIN:[loss=0.039]
[300 epo]: TRAIN:[loss=0.038]
[350 epo]: TRAIN:[loss=0.038]
[400 epo]: TRAIN:[loss=0.038]
[450 epo]: TRAIN:[loss=0.038]
[500 epo]: TRAIN:[loss=0.038]
Finished Training
Accuracy: 0.954
Precision: 0.943
Recall: 0.726
F1: 0.821
y=1 y=2
l=1 199 12
l=2 75 1600
###Markdown
Tune Metal LabelModel
###Code
from metal.tuners.random_tuner import RandomSearchTuner
#label_model_everything_windows_tuned = LabelModel(k=2, seed=123)
random_tuner = RandomSearchTuner(LabelModel, seed=123, validation_metric='f1')
search_space = {
'seed' : [123],
'n_epochs': list(range(500, 2000, 100)),
'lr': {'range': [1e-5, .1], 'scale': 'log'},
'l2': {'range': [1e-5, .1], 'scale': 'log'},
'log_train_every': [100],
'class_balance': [
(i * .1, 1 - i * .1)
for i in range(1, 10)
]
# 'Y_dev': [Y_test_windows]
}
best_random_model = random_tuner.search(search_space,
(L_dev, Y_dev),
train_args= [L_train],
train_kwargs = {
# 'Y_dev': Y_test_windows
# 'class_balance': (0.2, 0.8)
},
init_kwargs={
'k': 2
}, verbose=True)
best_random_model.score((L_dev, Y_dev), metric=['accuracy','precision', 'recall', 'f1'])
best_random_model.score((L_test, Y_test), metric=['accuracy','precision', 'recall', 'f1'])
###Output
Accuracy: 0.908
Precision: 0.941
Recall: 0.597
F1: 0.730
y=1 y=2
l=1 222 14
l=2 150 1392
###Markdown
Save/Load Best Model
###Code
torch.save(best_random_model, 'models/metal_labelmodel_downsampled.pth')
model = torch.load('models/metal_labelmodel_downsampled.pth')
###Output
_____no_output_____
###Markdown
Make Predictions for Everything and Save to Disk
###Code
import numpy as np
from scipy.sparse import csr_matrix
import scipy.sparse as sparse
import pickle
import rekall
from rekall.video_interval_collection import VideoIntervalCollection
from rekall.interval_list import IntervalList
from rekall.temporal_predicates import *
from metal.label_model.baselines import MajorityLabelVoter
###Output
_____no_output_____
###Markdown
Load Manually Annotated Data
###Code
with open('../../data/manually_annotated_shots.pkl', 'rb') as f:
    shots = VideoIntervalCollection(pickle.load(f))
with open('../../data/shot_detection_folds.pkl', 'rb') as f:
    shot_detection_folds = pickle.load(f)
clips = shots.dilate(1).coalesce().dilate(-1)
shot_boundaries = shots.map(
lambda intrvl: (intrvl.start, intrvl.start, intrvl.payload)
).set_union(
shots.map(lambda intrvl: (intrvl.end + 1, intrvl.end + 1, intrvl.payload))
).coalesce()
boundary_frames = {
video_id: [
intrvl.start
for intrvl in shot_boundaries.get_intervallist(video_id).get_intervals()
]
for video_id in shot_boundaries.get_allintervals()
}
video_ids = sorted(list(clips.get_allintervals().keys()))
frames_per_video = {
video_id: sorted([
f
for interval in clips.get_intervallist(video_id).get_intervals()
for f in range(interval.start, interval.end + 2)
])
for video_id in video_ids
}
ground_truth = {
video_id: [
1 if f in boundary_frames[video_id] else 2
for f in frames_per_video[video_id]
]
for video_id in video_ids
}
###Output
100%|██████████| 28/28 [00:00<00:00, 10820.02it/s]
100%|██████████| 28/28 [00:00<00:00, 33776.39it/s]
###Markdown
Load Label Matrix with All Frames in it
###Code
with open('../../data/shot_detection_weak_labels/all_labels.pkl', 'rb') as f:
    weak_labels_all_movies = pickle.load(f)
###Output
_____no_output_____
###Markdown
Load Videos and Number of Frames Per Video
###Code
with open('../../data/frame_counts.pkl', 'rb') as f:
    frame_counts = pickle.load(f)
video_ids_all = sorted(list(frame_counts.keys()))
video_ids_train = sorted(list(set(video_ids_all).difference(set(video_ids))))
###Output
_____no_output_____
###Markdown
Construct windows for each video
###Code
# First, construct windows of 16 frames for each video
windows = VideoIntervalCollection({
video_id: [
(f, f + 16, video_id)
for f in range(0, frame_counts[video_id] - 16, 16)
]
for video_id in video_ids_all
})
###Output
_____no_output_____
###Markdown
Get weak labels for all windows
###Code
# Label windows with the weak labels in our labeling functions
def label_window(per_frame_weak_labels):
if 1 in per_frame_weak_labels:
return 1
if len([l for l in per_frame_weak_labels if l == 2]) >= len(per_frame_weak_labels) / 2:
return 2
return 0
windows_with_weak_labels = windows.map(
lambda window: (
window.start,
window.end,
[
label_window([
lf[window.payload][f-1]
for f in range(window.start, window.end)
])
for lf in weak_labels_all_movies
]
)
)
###Output
_____no_output_____
###Markdown
L matrix
###Code
L_everything_windows = csr_matrix([
intrvl.payload
for video_id in sorted(list(video_ids_all))
for intrvl in windows_with_weak_labels.get_intervallist(video_id).get_intervals()
]).todense()
with open('../../data/shot_detection_weak_labels/L_everything_windows_downsampled.npy', 'wb') as f:
    np.save(f, L_everything_windows)
with open('../../data/shot_detection_weak_labels/L_everything_windows_downsampled.npy', 'rb') as f:
    L_everything_windows = np.load(f)
###Output
_____no_output_____
###Markdown
Predict Everything
###Code
L_everything_windows.shape
window_predictions_everything = model.predict_proba(L_everything_windows)
window_predictions_everything.shape
fig, ax = plt.subplots()
ax.hist([
pred[0] for pred in window_predictions_everything
], bins=20)
ax.set_xlim(0, 1)
plt.show()
window_nums = [
(video_id, intrvl.start, intrvl.end)
for video_id in sorted(list(video_ids_all))
for intrvl in windows_with_weak_labels.get_intervallist(video_id).get_intervals()
]
predictions_to_save_windows = [
(window_info, prediction)
for window_info, prediction in zip(window_nums, window_predictions_everything)
]
preds_np_windows = np.array(predictions_to_save_windows)
preds_np_windows.shape
# save predictions to disk
with open('../../data/shot_detection_weak_labels/noisy_labels_all_windows_downsampled.npy', 'wb') as f:
    np.save(f, preds_np_windows)
###Output
_____no_output_____ |
Copia di l08c03_moving_average.ipynb | ###Markdown
Copyright 2018 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Moving average Setup
###Code
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
keras = tf.keras
def plot_series(time, series, format="-", start=0, end=None, label=None):
    plt.plot(time[start:end], series[start:end], format, label=label)
    plt.xlabel("Time")
    plt.ylabel("Value")
    if label:
        plt.legend(fontsize=14)
    plt.grid(True)

def trend(time, slope=0):
    return slope * time

def seasonal_pattern(season_time):
    """Just an arbitrary pattern, you can change it if you wish"""
    return np.where(season_time < 0.4,
                    np.cos(season_time * 2 * np.pi),
                    1 / np.exp(3 * season_time))

def seasonality(time, period, amplitude=1, phase=0):
    """Repeats the same pattern at each period"""
    season_time = ((time + phase) % period) / period
    return amplitude * seasonal_pattern(season_time)

def white_noise(time, noise_level=1, seed=None):
    rnd = np.random.RandomState(seed)
    return rnd.randn(len(time)) * noise_level
###Output
_____no_output_____
###Markdown
Trend and Seasonality
###Code
time = np.arange(4 * 365 + 1)
slope = 0.05
baseline = 10
amplitude = 40
series = baseline + trend(time, slope) + seasonality(time, period=365, amplitude=amplitude)
noise_level = 5
noise = white_noise(time, noise_level, seed=42)
series += noise
plt.figure(figsize=(10, 6))
plot_series(time, series)
plt.show()
###Output
_____no_output_____
###Markdown
Naive Forecast
###Code
split_time = 1000
time_train = time[:split_time]
x_train = series[:split_time]
time_valid = time[split_time:]
x_valid = series[split_time:]
naive_forecast = series[split_time - 1:-1]
plt.figure(figsize=(10, 6))
plot_series(time_valid, x_valid, start=0, end=150, label="Series")
plot_series(time_valid, naive_forecast, start=1, end=151, label="Forecast")
###Output
_____no_output_____
###Markdown
Now let's compute the mean absolute error between the forecasts and the predictions in the validation period:
###Code
keras.metrics.mean_absolute_error(x_valid, naive_forecast).numpy()
###Output
_____no_output_____
###Markdown
That's our baseline, now let's try a moving average. Moving Average
###Code
def moving_average_forecast(series, window_size):
    """Forecasts the mean of the last few values.
    If window_size=1, then this is equivalent to naive forecast"""
    forecast = []
    for time in range(len(series) - window_size):
        forecast.append(series[time:time + window_size].mean())
    return np.array(forecast)

def moving_average_forecast(series, window_size):
    """Forecasts the mean of the last few values.
    If window_size=1, then this is equivalent to naive forecast
    This implementation is *much* faster than the previous one"""
    mov = np.cumsum(series)
    mov[window_size:] = mov[window_size:] - mov[:-window_size]
    return mov[window_size - 1:-1] / window_size
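# Optional check (added sketch, not part of the original lesson): the cumsum
# trick above makes mov[i] the sum of the last `window_size` values, so the
# fast version should agree with a pandas rolling mean (pandas assumed to be
# available in this environment).
import pandas as pd
assert np.allclose(moving_average_forecast(series, 30),
                   pd.Series(series).rolling(30).mean().dropna().values[:-1])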
moving_avg = moving_average_forecast(series, 30)[split_time - 30:]
plt.figure(figsize=(10, 6))
plot_series(time_valid, x_valid, label="Series")
plot_series(time_valid, moving_avg, label="Moving average (30 days)")
keras.metrics.mean_absolute_error(x_valid, moving_avg).numpy()
###Output
_____no_output_____
###Markdown
That's worse than naive forecast! The moving average does not anticipate trend or seasonality, so let's try to remove them by using differencing. Since the seasonality period is 365 days, we will subtract the value at time *t* – 365 from the value at time *t*.
###Code
diff_series = (series[365:] - series[:-365])
diff_time = time[365:]
plt.figure(figsize=(10, 6))
plot_series(diff_time, diff_series, label="Series(t) – Series(t–365)")
plt.show()
###Output
_____no_output_____
###Markdown
Focusing on the validation period:
###Code
plt.figure(figsize=(10, 6))
plot_series(time_valid, diff_series[split_time - 365:], label="Series(t) – Series(t–365)")
plt.show()
###Output
_____no_output_____
###Markdown
Great, the trend and seasonality seem to be gone, so now we can use the moving average:
###Code
diff_moving_avg = moving_average_forecast(diff_series, 50)[split_time - 365 - 50:]
plt.figure(figsize=(10, 6))
plot_series(time_valid, diff_series[split_time - 365:], label="Series(t) – Series(t–365)")
plot_series(time_valid, diff_moving_avg, label="Moving Average of Diff")
plt.show()
###Output
_____no_output_____
###Markdown
Now let's bring back the trend and seasonality by adding the past values from t – 365:
###Code
diff_moving_avg_plus_past = series[split_time - 365:-365] + diff_moving_avg
plt.figure(figsize=(10, 6))
plot_series(time_valid, x_valid, label="Series")
plot_series(time_valid, diff_moving_avg_plus_past, label="Forecasts")
plt.show()
keras.metrics.mean_absolute_error(x_valid, diff_moving_avg_plus_past).numpy()
###Output
_____no_output_____
###Markdown
Better than the naive forecast, good. However, the forecasts look a bit too random, because we're just adding past values, which were noisy. Let's use a moving average on past values to remove some of the noise:
###Code
diff_moving_avg_plus_smooth_past = moving_average_forecast(series[split_time - 370:-359], 11) + diff_moving_avg
plt.figure(figsize=(10, 6))
plot_series(time_valid, x_valid, label="Series")
plot_series(time_valid, diff_moving_avg_plus_smooth_past, label="Forecasts")
plt.show()
keras.metrics.mean_absolute_error(x_valid, diff_moving_avg_plus_smooth_past).numpy()
###Output
_____no_output_____ |
notebook/PixieDust 4 - Add External Spark Packages.ipynb | ###Markdown
Add Spark packages and run inside your notebook. PixieDust PackageManager lets you install spark packages inside your notebook. This is especially useful when you're working in a hosted cloud environment without access to configuration files. Use PixieDust Package Manager to install: a spark package from spark-packages.org, a package from the maven search repository, or a jar file directly from a URL. > **Note:** After you install a package, you must restart the kernel. View list of packages: To see the packages installed on your system, run the following command:
###Code
import pixiedust
pixiedust.printAllPackages()
###Output
_____no_output_____
###Markdown
Add a package from spark-packages.org. Run the following cell to install GraphFrames.
###Code
pixiedust.installPackage("graphframes:graphframes:0")
###Output
_____no_output_____
###Markdown
Restart your kernel. From the menu at the top of this notebook, choose **Kernel > Restart**, then run the next cell. View updated list of packages: Run printAllPackages again to see that GraphFrames is now in your list:
###Code
pixiedust.printAllPackages()
###Output
_____no_output_____
###Markdown
Display a GraphFrames data sample. GraphFrames comes with sample data sets. Even if GraphFrames is already installed, running the install command loads the Python that comes along with the package and enables features like the one you're about to see. Run the following cell and PixieDust displays a sample graph data set called **friends**. On the upper left of the display, click the table dropdown and switch between views of nodes and edges.
###Code
#import the Graphs example
from graphframes.examples import Graphs
#create the friends example graph
g=Graphs(sqlContext).friends()
#use the pixiedust display
display(g)
###Output
_____no_output_____
###Markdown
Install from maven. To install a package from [Maven](https://maven.apache.org/), visit the project and find its `groupId` and `artifactId`, then enter it in the following install command. [Read more](https://pixiedust.github.io/pixiedust/packagemanager.html#install-from-maven-search-repository). For example, the following cell installs Apache Commons:
###Code
pixiedust.installPackage("org.apache.commons:commons-csv:0")
###Output
_____no_output_____
###Markdown
Install a jar file directly from a URL To install a jar file that is not packaged in a maven repository, provide its URL.
###Code
pixiedust.installPackage("https://github.com/ibm-watson-data-lab/spark.samples/raw/master/dist/streaming-twitter-assembly-1.6.jar")
###Output
_____no_output_____
###Markdown
Follow the tutorial. To understand what you can do with this jar file, read David Taieb's latest [Realtime Sentiment Analysis of Twitter Hashtags with Spark](https://medium.com/ibm-watson-data-lab/real-time-sentiment-analysis-of-twitter-hashtags-with-spark-7ee6ca5c1585.2iblfu58c) tutorial. Uninstall a package. It's just as easy to get rid of a package you installed. Just run the command `pixiedust.uninstallPackage("<package>")`. For example, you can uninstall Apache Commons:
###Code
pixiedust.uninstallPackage("org.apache.commons:commons-csv:0")
###Output
_____no_output_____ |
fairness_indicators/examples/Fairness_Indicators_Example_Colab.ipynb | ###Markdown
Fairness Indicators Example Colab Overview: In this activity, you'll use Fairness Indicators to explore the Civil Comments dataset. Fairness Indicators is a suite of tools built on top of [TensorFlow Model Analysis](https://www.tensorflow.org/tfx/model_analysis/get_started) that enables regular evaluation of fairness metrics in product pipelines. This [Introductory Video](https://www.youtube.com/watch?v=pHT-ImFXPQo) provides more details and context on the real-world scenario we are presenting here, one of the primary motivations for creating Fairness Indicators. About the Dataset: In this exercise, you'll work with the [Civil Comments dataset](https://www.kaggle.com/c/jigsaw-unintended-bias-in-toxicity-classification), approximately 2 million public comments made public by the [Civil Comments platform](https://medium.com/@aja_15265/saying-goodbye-to-civil-comments-41859d3a2b1d) in 2017 for ongoing research. This effort was sponsored by Jigsaw, who have hosted competitions on Kaggle to help classify toxic comments as well as minimize unintended model bias. Each individual text comment in the dataset has a toxicity label. Within the data, a subset of comments are labeled with a variety of identity attributes, including categories for gender, sexual orientation, religion, and race or ethnicity. About the Tools: [TensorFlow Model Analysis](https://www.tensorflow.org/tfx/model_analysis/get_started) is a library for evaluating both TensorFlow and non-TensorFlow machine learning models. It allows users to evaluate their models on large amounts of data in a distributed manner, computing in-graph and other metrics over different slices of data and visualizing them in notebooks. Fairness Indicators is built on top of TFMA. With Fairness Indicators, users will be able to: * Evaluate model performance, sliced across defined groups of users * Feel confident about results with confidence intervals and evaluations at multiple thresholds. Fairness Indicators is packaged with [TensorFlow Data Validation](https://www.tensorflow.org/tfx/data_validation/get_started) and the [What-If Tool](https://pair-code.github.io/what-if-tool/) to allow users to: * Evaluate the distribution of datasets * Dive deep into individual slices to explore root causes and opportunities for improvement with the What-If Tool. Importing: Run the following code to install the fairness_indicators library. This package contains the tools we'll be using in this exercise. A Restart Runtime may be requested but is not necessary.
###Code
!pip install fairness-indicators
%tensorflow_version 2.x
import os
import tempfile
import apache_beam as beam
import numpy as np
import pandas as pd
from datetime import datetime
import tensorflow_hub as hub
import tensorflow as tf
import tensorflow_model_analysis as tfma
import tensorflow_data_validation as tfdv
from tensorflow_model_analysis.addons.fairness.post_export_metrics import fairness_indicators
from tensorflow_model_analysis.addons.fairness.view import widget_view
from fairness_indicators.examples import util
from witwidget.notebook.visualization import WitConfigBuilder
from witwidget.notebook.visualization import WitWidget
###Output
_____no_output_____
###Markdown
Download and Understand the Data In this exercise, you'll work with the Civil Comments dataset, approximately 2 million public comments made public by the Civil Comments platform in 2017. Additionally, a subset of comments have been labelled with a variety of identity attributes, representing the identities that are mentioned in the comment.We've hosted the dataset on Google Cloud Platform for convenience. Run the following code to download the data from GCP, the data will take about a minute to download and analyze.TensorFlow Data Validation is one tool you can use to analyze your data. You can use it to find potential problems in your data, such as missing values and data imbalances, that can lead to Fairness disparities.
###Code
#@title Options for Downloading data
#@markdown You can choose to download the original and process the data in
#@markdown the colab, which may take minutes. By default, we will download the
#@markdown data that we have already prepocessed for you. In the original
#@markdown dataset, for each indentity annotation columns, the value represents
#@markdown the percent of raters who thought the comment references the identity.
#@markdown When processing the raw data, the threshold 0.5 is chosen and the
#@markdown identities are grouped together by their categories. For example
#@markdown if one comment has { male: 0.3, female: 1.0, transgender: 0.0,
#@markdown heterosexual: 0.8, homosexual_gay_or_lesbian: 1.0 }, after the
#@markdown processing, the data will be { gender: [female],
#@markdown sexual_orientation: [heterosexual, homosexual_gay_or_lesbian] }.
download_original_data = True #@param {type:"boolean"}
if download_original_data:
  train_tf_file = tf.keras.utils.get_file('train_tf.tfrecord',
      'https://storage.googleapis.com/civil_comments_dataset/train_tf.tfrecord')
  validate_tf_file = tf.keras.utils.get_file('validate_tf.tfrecord',
      'https://storage.googleapis.com/civil_comments_dataset/validate_tf.tfrecord')
  # The identity terms list will be grouped together by their categories
  # (see 'IDENTITY_COLUMNS') on threshold 0.5. Only the identity term column,
  # text column and label column will be kept after processing.
  train_tf_file = util.convert_comments_data(train_tf_file)
  validate_tf_file = util.convert_comments_data(validate_tf_file)
else:
  train_tf_file = tf.keras.utils.get_file('train_tf_processed.tfrecord',
      'https://storage.googleapis.com/civil_comments_dataset/train_tf_processed.tfrecord')
  validate_tf_file = tf.keras.utils.get_file('validate_tf_processed.tfrecord',
      'https://storage.googleapis.com/civil_comments_dataset/validate_tf_processed.tfrecord')
stats = tfdv.generate_statistics_from_tfrecord(data_location=train_tf_file)
tfdv.visualize_statistics(stats)
###Output
_____no_output_____
###Markdown
There are several interesting things that we may want to note in this data. The first is that the toxicity label, which is what we are predicting, is unbalanced. Only 8% of examples in the training set are toxic, which means that a classifier could get 92% accuracy by predicting that all comments are non-toxic.For the fields relating to identity terms note that out of 1.08 million training examples, only around 6.6k examples deal with homosexuality, and those related to bisexuality are even more rare. This might indicate that performance on these slices may suffer due to lack of training data. Defining Constants Here, we define the feature map that will be used to parse the data. Each example will have a label, comment text, and identity features `sexual orientation`, `gender`, `religion`, `race`, and `disability` that are associated with the text.
###Code
BASE_DIR = tempfile.gettempdir()
TEXT_FEATURE = 'comment_text'
LABEL = 'toxicity'
FEATURE_MAP = {
# Label:
LABEL: tf.io.FixedLenFeature([], tf.float32),
# Text:
TEXT_FEATURE: tf.io.FixedLenFeature([], tf.string),
# Identities:
'sexual_orientation':tf.io.VarLenFeature(tf.string),
'gender':tf.io.VarLenFeature(tf.string),
'religion':tf.io.VarLenFeature(tf.string),
'race':tf.io.VarLenFeature(tf.string),
'disability':tf.io.VarLenFeature(tf.string),
}
###Output
_____no_output_____
###Markdown
Train the Model First, set up the input function to feed data into the model. Note that since we identified a class imbalance by our earlier TensorFlow Data Validation run, we will add a weight column to each example and upweight the toxic examples to account for this. We only use identity features during the evaluation phase, as only the comments are fed into the model at training time.
###Code
def train_input_fn():
def parse_function(serialized):
parsed_example = tf.io.parse_single_example(
serialized=serialized, features=FEATURE_MAP)
# Adds a weight column to deal with unbalanced classes.
parsed_example['weight'] = tf.add(parsed_example[LABEL], 0.1)
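    # With a 0/1 toxicity label this yields a weight of 1.1 for toxic examples vs. 0.1 for non-toxic ones.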
return (parsed_example,
parsed_example[LABEL])
train_dataset = tf.data.TFRecordDataset(
filenames=[train_tf_file]).map(parse_function).batch(512)
return train_dataset
###Output
_____no_output_____
###Markdown
Next, create a deep neural network model, and train it on the data:
###Code
model_dir = os.path.join(BASE_DIR, 'train', datetime.now().strftime(
"%Y%m%d-%H%M%S"))
embedded_text_feature_column = hub.text_embedding_column(
key=TEXT_FEATURE,
module_spec='https://tfhub.dev/google/nnlm-en-dim128/1')
classifier = tf.estimator.DNNClassifier(
hidden_units=[500, 100],
weight_column='weight',
feature_columns=[embedded_text_feature_column],
optimizer=tf.keras.optimizers.Adagrad(learning_rate=0.003),
loss_reduction=tf.losses.Reduction.SUM,
n_classes=2,
model_dir=model_dir)
classifier.train(input_fn=train_input_fn, steps=1000)
###Output
_____no_output_____
###Markdown
Run TensorFlow Model Analysis with Fairness Indicators Export Saved Model
###Code
def eval_input_receiver_fn():
serialized_tf_example = tf.compat.v1.placeholder(
dtype=tf.string, shape=[None], name='input_example_placeholder')
# This *must* be a dictionary containing a single key 'examples', which
# points to the input placeholder.
receiver_tensors = {'examples': serialized_tf_example}
features = tf.io.parse_example(serialized_tf_example, FEATURE_MAP)
features['weight'] = tf.ones_like(features[LABEL])
return tfma.export.EvalInputReceiver(
features=features,
receiver_tensors=receiver_tensors,
labels=features[LABEL])
tfma_export_dir = tfma.export.export_eval_savedmodel(
estimator=classifier,
export_dir_base=os.path.join(BASE_DIR, 'tfma_eval_model'),
eval_input_receiver_fn=eval_input_receiver_fn)
###Output
_____no_output_____
###Markdown
Compute Fairness Metrics Select the identity to compute metrics for and whether to run with confidence intervals in the panel on the right-hand side. Depending on your configurations, this step will take 2-10 minutes to run.
###Code
#@title Fairness Indicators Computation Options
tfma_eval_result_path = os.path.join(BASE_DIR, 'tfma_eval_result')
#@markdown Modify the slice_selection for experiments on other identities.
slice_selection = 'sexual_orientation' #@param ["sexual_orientation", "gender", "religion", "race", "disability"]
#@markdown Confidence intervals can help you make better decisions regarding your data, but since they require computing multiple resamples, they are slower, particularly in the Colab environment, which cannot take advantage of parallelization.
compute_confidence_intervals = False #@param {type:"boolean"}
# Define slices that you want the evaluation to run on.
slice_spec = [
tfma.slicer.SingleSliceSpec(), # Overall slice
tfma.slicer.SingleSliceSpec(columns=[slice_selection]),
]
# Add the fairness metrics.
add_metrics_callbacks = [
tfma.post_export_metrics.fairness_indicators(
thresholds=[0.1, 0.3, 0.5, 0.7, 0.9],
labels_key=LABEL
)
]
eval_shared_model = tfma.default_eval_shared_model(
eval_saved_model_path=tfma_export_dir,
add_metrics_callbacks=add_metrics_callbacks)
# Run the fairness evaluation.
with beam.Pipeline() as pipeline:
_ = (
pipeline
| 'ReadData' >> beam.io.ReadFromTFRecord(validate_tf_file)
| 'ExtractEvaluateAndWriteResults' >>
tfma.ExtractEvaluateAndWriteResults(
eval_shared_model=eval_shared_model,
slice_spec=slice_spec,
compute_confidence_intervals=compute_confidence_intervals,
output_path=tfma_eval_result_path)
)
eval_result = tfma.load_eval_result(output_path=tfma_eval_result_path)
###Output
_____no_output_____
###Markdown
Render What-if Tool In this section, you'll use the [What-If Tool's ](https://pair-code.github.io/what-if-tool/)interactive visual interface to explore and manipulate data at a micro-level.On the right-hand panel in the visualization, you will see a scatter plot where each point represents one of the examples in the subset loaded into the tool. Click on one of the points. In the left-hand panel, you should now see details about this particular example. The comment text, ground truth toxicity, and applicable identities are shown. At the bottom of this left-hand panel, you see the inference results from the model you just trained.Modify the text of the example. You can then click the "Run inference" button to view how your changes caused the perceived toxicity prediction to change.
###Code
DEFAULT_MAX_EXAMPLES = 1000
# Load 100000 examples in memory. When first rendered,
# What-If Tool should only display 1000 of these due to browser constraints.
def wit_dataset(file, num_examples=100000):
dataset = tf.data.TFRecordDataset(
filenames=[file]).take(num_examples)
return [tf.train.Example.FromString(d.numpy()) for d in dataset]
wit_data = wit_dataset(train_tf_file)
config_builder = WitConfigBuilder(wit_data[:DEFAULT_MAX_EXAMPLES]).set_estimator_and_feature_spec(
classifier, FEATURE_MAP).set_label_vocab(['non-toxicity', LABEL]).set_target_feature(LABEL)
wit = WitWidget(config_builder)
###Output
_____no_output_____
###Markdown
Render Fairness IndicatorsRender the Fairness Indicators widget with the exported evaluation results.Below you will see bar charts displaying performance of each slice of the data on selected metrics. You can adjust the baseline comparison slice as well as the displayed threshold(s) using the drop down menus at the top of the visualization. The Fairness Indicator widget is integrated with the What-If Tool rendered above. If you select one slice of the data in the bar chart, the What-If Tool will update to show you examples from the selected slice. When the data reloads in the What-If Tool above, try modifying **Color By** to **toxicity**. This can give you a visual understanding of the toxicity balance of examples by slice.
###Code
event_handlers={'slice-selected':
wit.create_selection_callback(wit_data, DEFAULT_MAX_EXAMPLES)}
widget_view.render_fairness_indicator(eval_result,
slicing_column=slice_selection,
event_handlers=event_handlers
)
###Output
_____no_output_____
###Markdown
Fairness Indicators Example Colab OverviewIn this activity, you'll use Fairness Indicators to explore the Civil Comments dataset. Fairness Indicators is a suite of tools built on top of [TensorFlow Model Analysis](https://www.tensorflow.org/tfx/model_analysis/get_started) that enables regular evaluation of fairness metrics in product pipelines. This [Introductory Video](https://www.youtube.com/watch?v=pHT-ImFXPQo) provides more details and context on the real-world scenario we are presenting here, one of the primary motivations for creating Fairness Indicators.About the DatasetIn this exercise, you'll work with the [Civil Comments dataset](https://www.kaggle.com/c/jigsaw-unintended-bias-in-toxicity-classification), approximately 2 million public comments made public by the [Civil Comments platform](https://medium.com/@aja_15265/saying-goodbye-to-civil-comments-41859d3a2b1d) in 2017 for ongoing research. This effort was sponsored by Jigsaw, who have hosted competitions on Kaggle to help classify toxic comments as well as minimize unintended model bias. Each individual text comment in the dataset has a toxicity label. Within the data, a subset of comments are labeled with a variety of identity attributes, including categories for gender, sexual orientation, religion, and race or ethnicity.About the Tools[TensorFlow Model Analysis](https://www.tensorflow.org/tfx/model_analysis/get_started) is a library for evaluating both TensorFlow and non-TensorFlow machine learning models. It allows users to evaluate their models on large amounts of data in a distributed manner, computing in-graph and other metrics over different slices of data and visualizing them in notebooks. Fairness Indicators is built on top of TFMA. With Fairness Indicators, users will be able to: * Evaluate model performance, sliced across defined groups of users* Feel confident about results with confidence intervals and evaluations at multiple thresholdsFairness Indicators is packaged with [TensorFlow Data Validation](https://www.tensorflow.org/tfx/data_validation/get_started) and [What-If Tool](https://pair-code.github.io/what-if-tool/) to allow users to:* Evaluate the distribution of datasets* Dive deep into individual slices to explore root causes and opportunities for improvement with the What-If Tool ImportingRun the following code to install the fairness_indicators library. This package contains the tools we'll be using in this exercise. A runtime restart may be requested, but it is not necessary.
###Code
!pip install fairness-indicators
%tensorflow_version 2.x
import os
import tempfile
import apache_beam as beam
import numpy as np
import pandas as pd
from datetime import datetime
import tensorflow_hub as hub
import tensorflow as tf
import tensorflow_model_analysis as tfma
import tensorflow_data_validation as tfdv
from tensorflow_model_analysis.addons.fairness.post_export_metrics import fairness_indicators
from tensorflow_model_analysis.addons.fairness.view import widget_view
from fairness_indicators.examples import util
from witwidget.notebook.visualization import WitConfigBuilder
from witwidget.notebook.visualization import WitWidget
###Output
_____no_output_____
###Markdown
Download and Understand the Data In this exercise, you'll work with the Civil Comments dataset, approximately 2 million public comments made public by the Civil Comments platform in 2017. Additionally, a subset of comments has been labelled with a variety of identity attributes, representing the identities that are mentioned in the comment.We've hosted the dataset on Google Cloud Platform for convenience. Run the following code to download the data from GCP; it will take about a minute to download and analyze.TensorFlow Data Validation is one tool you can use to analyze your data. You can use it to find potential problems in your data, such as missing values and data imbalances, that can lead to fairness disparities.
###Code
#@title Options for Downloading data
#@markdown You can choose to download the original and process the data in
#@markdown the colab, which may take minutes. By default, we will download the
#@markdown data that we have already preprocessed for you. In the original
#@markdown dataset, for each identity annotation column, the value represents
#@markdown the percent of raters who thought the comment references the identity.
#@markdown When processing the raw data, the threshold 0.5 is chosen and the
#@markdown identities are grouped together by their categories. For example
#@markdown if one comment has { male: 0.3, female: 1.0, transgender: 0.0,
#@markdown heterosexual: 0.8, homosexual_gay_or_lesbian: 1.0 }, after the
#@markdown processing, the data will be { gender: [female],
#@markdown sexual_orientation: [heterosexual, homosexual_gay_or_lesbian] }.
download_original_data = True #@param {type:"boolean"}
if download_original_data:
train_tf_file = tf.keras.utils.get_file('train_tf.tfrecord',
'https://storage.googleapis.com/civil_comments_dataset/train_tf.tfrecord')
validate_tf_file = tf.keras.utils.get_file('validate_tf.tfrecord',
'https://storage.googleapis.com/civil_comments_dataset/validate_tf.tfrecord')
# The identity terms list will be grouped together by their categories
  # (see 'IDENTITY_COLUMNS') at threshold 0.5. Only the identity term column,
# text column and label column will be kept after processing.
train_tf_file = util.convert_comments_data(train_tf_file)
validate_tf_file = util.convert_comments_data(validate_tf_file)
else:
train_tf_file = tf.keras.utils.get_file('train_tf_processed.tfrecord',
'https://storage.googleapis.com/civil_comments_dataset/train_tf_processed.tfrecord')
validate_tf_file = tf.keras.utils.get_file('validate_tf_processed.tfrecord',
'https://storage.googleapis.com/civil_comments_dataset/validate_tf_processed.tfrecord')
stats = tfdv.generate_statistics_from_tfrecord(data_location=train_tf_file)
tfdv.visualize_statistics(stats)
###Output
_____no_output_____
###Markdown
There are several interesting things that we may want to note in this data. The first is that the toxicity label, which is what we are predicting, is unbalanced. Only 8% of examples in the training set are toxic, which means that a classifier could get 92% accuracy by predicting that all comments are non-toxic.For the fields relating to identity terms note that out of 1.08 million training examples, only around 6.6k examples deal with homosexuality, and those related to bisexuality are even more rare. This might indicate that performance on these slices may suffer due to lack of training data. Defining Constants Here, we define the feature map that will be used to parse the data. Each example will have a label, comment text, and identity features `sexual orientation`, `gender`, `religion`, `race`, and `disability` that are associated with the text.
###Code
BASE_DIR = tempfile.gettempdir()
TEXT_FEATURE = 'comment_text'
LABEL = 'toxicity'
FEATURE_MAP = {
# Label:
LABEL: tf.io.FixedLenFeature([], tf.float32),
# Text:
TEXT_FEATURE: tf.io.FixedLenFeature([], tf.string),
# Identities:
'sexual_orientation':tf.io.VarLenFeature(tf.string),
'gender':tf.io.VarLenFeature(tf.string),
'religion':tf.io.VarLenFeature(tf.string),
'race':tf.io.VarLenFeature(tf.string),
'disability':tf.io.VarLenFeature(tf.string),
}
###Output
_____no_output_____
###Markdown
Train the Model First, set up the input function to feed data into the model. Note that since we identified a class imbalance by our earlier TensorFlow Data Validation run, we will add a weight column to each example and upweight the toxic examples to account for this. We only use identity features during the evaluation phase, as only the comments are fed into the model at training time.
###Code
def train_input_fn():
def parse_function(serialized):
parsed_example = tf.io.parse_single_example(
serialized=serialized, features=FEATURE_MAP)
# Adds a weight column to deal with unbalanced classes.
parsed_example['weight'] = tf.add(parsed_example[LABEL], 0.1)
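    # With a 0/1 toxicity label this yields a weight of 1.1 for toxic examples vs. 0.1 for non-toxic ones.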
return (parsed_example,
parsed_example[LABEL])
train_dataset = tf.data.TFRecordDataset(
filenames=[train_tf_file]).map(parse_function).batch(512)
return train_dataset
###Output
_____no_output_____
###Markdown
Next, create a deep neural network model, and train it on the data:
###Code
model_dir = os.path.join(BASE_DIR, 'train', datetime.now().strftime(
"%Y%m%d-%H%M%S"))
embedded_text_feature_column = hub.text_embedding_column(
key=TEXT_FEATURE,
module_spec='https://tfhub.dev/google/nnlm-en-dim128/1')
classifier = tf.estimator.DNNClassifier(
hidden_units=[500, 100],
weight_column='weight',
feature_columns=[embedded_text_feature_column],
optimizer=tf.optimizers.Adagrad(learning_rate=0.003),
loss_reduction=tf.losses.Reduction.SUM,
n_classes=2,
model_dir=model_dir)
classifier.train(input_fn=train_input_fn, steps=1000)
###Output
_____no_output_____
###Markdown
Run TensorFlow Model Analysis with Fairness Indicators Export Saved Model
###Code
def eval_input_receiver_fn():
serialized_tf_example = tf.compat.v1.placeholder(
dtype=tf.string, shape=[None], name='input_example_placeholder')
# This *must* be a dictionary containing a single key 'examples', which
# points to the input placeholder.
receiver_tensors = {'examples': serialized_tf_example}
features = tf.io.parse_example(serialized_tf_example, FEATURE_MAP)
features['weight'] = tf.ones_like(features[LABEL])
return tfma.export.EvalInputReceiver(
features=features,
receiver_tensors=receiver_tensors,
labels=features[LABEL])
tfma_export_dir = tfma.export.export_eval_savedmodel(
estimator=classifier,
export_dir_base=os.path.join(BASE_DIR, 'tfma_eval_model'),
eval_input_receiver_fn=eval_input_receiver_fn)
###Output
_____no_output_____
###Markdown
Compute Fairness Metrics Select the identity to compute metrics for and whether to run with confidence intervals in the panel on the right-hand side. Depending on your configurations, this step will take 2-10 minutes to run.
###Code
#@title Fairness Indicators Computation Options
tfma_eval_result_path = os.path.join(BASE_DIR, 'tfma_eval_result')
#@markdown Modify the slice_selection for experiments on other identities.
slice_selection = 'sexual_orientation' #@param ["sexual_orientation", "gender", "religion", "race", "disability"]
#@markdown Confidence intervals can help you make better decisions regarding your data, but since they require computing multiple resamples, they are slower, particularly in the Colab environment, which cannot take advantage of parallelization.
compute_confidence_intervals = False #@param {type:"boolean"}
# Define slices that you want the evaluation to run on.
slice_spec = [
tfma.slicer.SingleSliceSpec(), # Overall slice
tfma.slicer.SingleSliceSpec(columns=[slice_selection]),
]
# Add the fairness metrics.
add_metrics_callbacks = [
tfma.post_export_metrics.fairness_indicators(
thresholds=[0.1, 0.3, 0.5, 0.7, 0.9],
labels_key=LABEL
)
]
eval_shared_model = tfma.default_eval_shared_model(
eval_saved_model_path=tfma_export_dir,
add_metrics_callbacks=add_metrics_callbacks)
# Run the fairness evaluation.
with beam.Pipeline() as pipeline:
_ = (
pipeline
| 'ReadData' >> beam.io.ReadFromTFRecord(validate_tf_file)
| 'ExtractEvaluateAndWriteResults' >>
tfma.ExtractEvaluateAndWriteResults(
eval_shared_model=eval_shared_model,
slice_spec=slice_spec,
compute_confidence_intervals=compute_confidence_intervals,
output_path=tfma_eval_result_path)
)
eval_result = tfma.load_eval_result(output_path=tfma_eval_result_path)
###Output
_____no_output_____
###Markdown
Render What-if Tool In this section, you'll use the [What-If Tool's ](https://pair-code.github.io/what-if-tool/)interactive visual interface to explore and manipulate data at a micro-level.On the right-hand panel in the visualization, you will see a scatter plot where each point represents one of the examples in the subset loaded into the tool. Click on one of the points. In the left-hand panel, you should now see details about this particular example. The comment text, ground truth toxicity, and applicable identities are shown. At the bottom of this left-hand panel, you see the inference results from the model you just trained.Modify the text of the example. You can then click the "Run inference" button to view how your changes caused the perceived toxicity prediction to change.
###Code
DEFAULT_MAX_EXAMPLES = 1000
# Load 100000 examples in memory. When first rendered,
# What-If Tool should only display 1000 of these due to browser constraints.
def wit_dataset(file, num_examples=100000):
  dataset = tf.data.TFRecordDataset(
      filenames=[file]).take(num_examples)
return [tf.train.Example.FromString(d.numpy()) for d in dataset]
wit_data = wit_dataset(train_tf_file)
config_builder = WitConfigBuilder(wit_data[:DEFAULT_MAX_EXAMPLES]).set_estimator_and_feature_spec(
classifier, FEATURE_MAP).set_label_vocab(['non-toxicity', LABEL]).set_target_feature(LABEL)
wit = WitWidget(config_builder)
###Output
_____no_output_____
###Markdown
Render Fairness IndicatorsRender the Fairness Indicators widget with the exported evaluation results.Below you will see bar charts displaying performance of each slice of the data on selected metrics. You can adjust the baseline comparison slice as well as the displayed threshold(s) using the drop down menus at the top of the visualization. The Fairness Indicator widget is integrated with the What-If Tool rendered above. If you select one slice of the data in the bar chart, the What-If Tool will update to show you examples from the selected slice. When the data reloads in the What-If Tool above, try modifying **Color By** to **toxicity**. This can give you a visual understanding of the toxicity balance of examples by slice.
###Code
event_handlers={'slice-selected':
wit.create_selection_callback(wit_data, DEFAULT_MAX_EXAMPLES)}
widget_view.render_fairness_indicator(eval_result,
slicing_column=slice_selection,
event_handlers=event_handlers)
###Output
_____no_output_____ |
examples/guides/detailed-example.ipynb | ###Markdown
Detailed exampleThis overview of the most important functions repeats the previous 30-seconds-example, but in more detail and shows additional functionality and alternative steps. Authenticate & access project
###Code
import up42
up42.authenticate(project_id="12345", project_api_key="12345")
#up42.authenticate(cfg_file="config.json")
project = up42.initialize_project()
project
###Output
_____no_output_____
###Markdown
Get information about the available blocks to later construct your workflow.
###Code
up42.get_blocks(basic=True)
###Output
_____no_output_____
###Markdown
Create or access the workflowYou can either create a new workflow, use project.get_workflows() to get all existing workflows within the project, or access an existing workflow directly via its workflow_id. A new workflow is created and filled with tasks ([Sentinel-2 data](https://marketplace.up42.com/block/018dfb34-fc19-4334-8125-14fd7535f979), [Land-Surface-Temperature](https://marketplace.up42.com/block/34767300-5caf-472b-a684-a351212b5c14)). The area of interest and workflow parameters are defined. After running the job, the results are downloaded and visualized.
###Code
# Create a new, empty workflow.
workflow = project.create_workflow(name="30-seconds-workflow", use_existing=False)
workflow
# Add workflow tasks
input_tasks = ["Sentinel-2 L2A (GeoTIFF)", "Sharpening Filter"]
workflow.add_workflow_tasks(input_tasks=input_tasks)
# Check the added tasks.
workflow.get_workflow_tasks(basic=True)
# Alternative: Get all existing workflows within the project.
all_workflows = project.get_workflows()
workflow = all_workflows[0]
workflow
# Alternative: Directly access the existing workflow the id (has to exist within the accessed project).
UP42_WORKFLOW_ID="7fb2ec8a-45be-41ad-a50f-98ba6b528b98"
workflow = up42.initialize_workflow(workflow_id=UP42_WORKFLOW_ID)
workflow
###Output
_____no_output_____
###Markdown
Select the aoiThere are multiple ways to select an aoi, you can:- Provide the aoi geometry directly in code as a FeatureCollection, Feature, GeoDataFrame, shapely Polygon or list of bounds coordinates.- Use up42.draw_aoi() to draw the aoi and export it as a geojson.- Use up42.read_vector_file() to read a geojson, json, shapefile, kml or wkt file.- Use up42.get_example_aoi() to read multiple provided sample aois.
###Code
aoi = [13.375966, 52.515068, 13.378314, 52.516639]
aoi = up42.read_vector_file("data/aoi_berlin.geojson", as_dataframe=True)
aoi.head(1)
#aoi = up42.get_example_aoi(location="Berlin")
#aoi
#up42.draw_aoi()
###Output
_____no_output_____
###Markdown
Select the workflow parametersThere are also multiple ways to construct the workflow input parameters, you can:- Provide the parameters directly in code as a json string.- Use .get_parameters_info() to get an overview of all potential parameters for the selected workflow and information about the parameter defaults and ranges.- Use .get_input_parameters(aoi_type="bbox", aoi_geometry=aoi) to construct the parameters with the provided aoi and all default parameters. Selecting the aoi_type is independent of the provided aoi; you can e.g. provide an irregular Polygon and still select aoi_type="bbox", then the bounding box of the polygon will be selected.
###Code
workflow.get_parameters_info()
input_parameters = workflow.construct_parameters(geometry=aoi, geometry_operation="bbox", limit=1)
# Further update the input_parameters manually
input_parameters["esa-s2-l2a-gtiff:1"].update({"max_cloud_cover":10})
input_parameters
###Output
_____no_output_____
###Markdown
Price estimation & Test Job
###Code
workflow.estimate_job(input_parameters)
# Run a test job to query data availability and check the configuration.
test_job = workflow.test_job(input_parameters=input_parameters, track_status=True)
test_results = test_job.get_results_json()
print(test_results)
###Output
_____no_output_____
###Markdown
Run the workflow & download results
###Code
# Run the actual job.
job = workflow.run_job(input_parameters=input_parameters, track_status=True)
###Output
_____no_output_____
###Markdown
Download & Display results
###Code
# Download job result (default downloads to Desktop). Only works after download is finished.
results_fp = job.download_results()
job.plot_results(figsize=(6,6))
#job.map_results(bands=[1])
###Output
_____no_output_____
###Markdown
Detailed exampleThis overview of the most important functions repeats the previous 30-seconds-example, but in more detail and shows additional functionality and alternative steps. Authenticate & access project
###Code
import up42
up42.authenticate(project_id="12345", project_api_key="12345")
#up42.authenticate(cfg_file="config.json")
project = up42.initialize_project()
project
###Output
_____no_output_____
###Markdown
Get information about the available blocks to later construct your workflow.
###Code
up42.get_blocks(basic=True)
###Output
_____no_output_____
###Markdown
Create or access the workflowYou can either create a new workflow, use project.get_workflows() to get all existing workflows within the project, or access an existing workflow directly via its workflow_id. A new workflow is created and filled with tasks ([Sentinel-2 data](https://marketplace.up42.com/block/018dfb34-fc19-4334-8125-14fd7535f979), [Land-Surface-Temperature](https://marketplace.up42.com/block/34767300-5caf-472b-a684-a351212b5c14)). The area of interest and workflow parameters are defined. After running the job, the results are downloaded and visualized.
###Code
# Create a new, empty workflow.
workflow = project.create_workflow(name="30-seconds-workflow", use_existing=False)
workflow
# Add workflow tasks
input_tasks = ["Sentinel-2 L2A Visual (GeoTIFF)", "Sharpening Filter"]
workflow.add_workflow_tasks(input_tasks=input_tasks)
# Check the added tasks.
workflow.get_workflow_tasks(basic=True)
# Alternative: Get all existing workflows within the project.
all_workflows = project.get_workflows()
workflow = all_workflows[0]
workflow
# Alternative: Directly access the existing workflow the id (has to exist within the accessed project).
UP42_WORKFLOW_ID="7fb2ec8a-45be-41ad-a50f-98ba6b528b98"
workflow = up42.initialize_workflow(workflow_id=UP42_WORKFLOW_ID)
workflow
###Output
_____no_output_____
###Markdown
Select the aoiThere are multiple ways to select an aoi, you can:- Provide the aoi geometry directly in code as a FeatureCollection, Feature, GeoDataFrame, shapely Polygon or list of bounds coordinates.- Use up42.draw_aoi() to draw the aoi and export it as a geojson.- Use up42.read_vector_file() to read a geojson, json, shapefile, kml or wkt file.- Use up42.get_example_aoi() to read multiple provided sample aois.
###Code
aoi = [13.375966, 52.515068, 13.378314, 52.516639]
aoi = up42.read_vector_file("data/aoi_berlin.geojson", as_dataframe=True)
aoi.head(1)
#aoi = up42.get_example_aoi(location="Berlin")
#aoi
#up42.draw_aoi()
###Output
_____no_output_____
###Markdown
Select the workflow parametersThere are also multiple ways to construct the workflow input parameters, you can:- Provide the parameters directly in code as a json string.- Use .get_parameters_info() to get an overview of all potential parameters for the selected workflow and information about the parameter defaults and ranges.- Use .get_input_parameters(aoi_type="bbox", aoi_geometry=aoi) to construct the parameters with the provided aoi and all default parameters. Selecting the aoi_type is independent of the provided aoi; you can e.g. provide an irregular Polygon and still select aoi_type="bbox", then the bounding box of the polygon will be selected.
###Code
workflow.get_parameters_info()
input_parameters = workflow.construct_parameters(geometry=aoi, geometry_operation="bbox", limit=1)
# Further update the input_parameters manually
input_parameters["esa-s2-l2a-gtiff-visual:1"].update({"max_cloud_cover":10})
input_parameters
workflow
###Output
_____no_output_____
###Markdown
Price estimation & Test Job
###Code
workflow.estimate_job(input_parameters)
# Run a test job to query data availability and check the configuration.
test_job = workflow.test_job(input_parameters=input_parameters, track_status=True)
test_results = test_job.get_results_json()
print(test_results)
###Output
_____no_output_____
###Markdown
Run the workflow & download results
###Code
# Run the actual job.
job = workflow.run_job(input_parameters=input_parameters, track_status=True)
###Output
_____no_output_____
###Markdown
Download & Display results
###Code
# Download job result (default downloads to Desktop). Only works after download is finished.
results_fp = job.download_results()
job.plot_results(figsize=(6,6))
#job.map_results(bands=[1])
###Output
_____no_output_____ |
mdm163-week-6-lab-Copy1.ipynb | ###Markdown
Lab 6 (Project Part 1):
###Code
name = str(input(prompt="Please enter your name: "))
occupation = str(input(prompt="Please enter your occupation: "))
print("Your name is " + name + " and your occupation is " + occupation)
###Output
Please enter your name: Mikey
Please enter your occupation: CS
|
CW3.ipynb | ###Markdown
Classwork 3 Sakthi and Will September 20, 2016 Problem 1 Exercise 5.3This problem asked us to fill two arrays: the first, x, using linspace to just create an array of numbers, and the second, y, as a function of x. It asked us to do this using a vectorized function, as opposed to a for loop. In this case, we created an x array from -4 to 4, sampled at 41 points. The result is an array y given by the function: $$y = \frac{1}{\sqrt{2\pi}}e^{-\frac{1}{2}x^2}$$
###Code
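# fav is the authors' helper module and is not shown in this notebook; below is
# a small illustrative sketch of the vectorized computation it presumably
# performs (the array names here are assumptions):
import numpy as np
x_demo = np.linspace(-4, 4, 41)
y_demo = 1/np.sqrt(2*np.pi) * np.exp(-0.5*x_demo**2)
print(x_demo)
print(y_demo)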
fav.main()
###Output
_____no_output_____
###Markdown
As you can see, the array has 41 entries, from y(-4) to y(4), just as desired. Problem 2 Exercise 5.9This problem asked us to plot the function $$y(t) = v_0t - \frac{1}{2}gt^2$$This function gives the height of an object given an initial velocity directly up into the air.It asked for a plot of this function first given $v_0 = 10$, and second given a set of $v_0$'s specified by user input. To do this, we created a function that asked the user to input values of $v_0$, created sets of data for $t$ and the corresponding $y(t)$ values, and plotted them all on one graph, as asked in the second part of the question. To plot just $v_0 = 10$ we didn't write a separate function into the file; we simply implemented the more general function and input only $10$ as a $v_0$ value. So, here is the plot for $v_0 = 10$:
###Code
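# plotter is the authors' helper module and is not shown in this notebook.
# Below is a small self-contained sketch of the same idea (the function name
# and g = 9.81 m/s^2 are assumptions, not the module's actual implementation):
import numpy as np
import matplotlib.pyplot as plt

def demo_plot_heights(v0_list, g=9.81):
    for v0 in v0_list:
        t = np.linspace(0, 2*v0/g, 200)   # time span until the object lands
        plt.plot(t, v0*t - 0.5*g*t**2, label='v0 = %g m/s' % v0)
    plt.xlabel('t (s)')
    plt.ylabel('y(t) (m)')
    plt.legend()
    plt.show()

demo_plot_heights([10])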
v = plotter.getV()
l = plotter.getTY(v)
plotter.plot1(v, l[0],l[1])
###Output
Enter your list of v_0's, separated by commas: 10
###Markdown
The plot above shows the height vs time of an object thrown directly upward at a velocity of 10m/s. Here is the plot for a more interesting set of $v_0$'s - the speed of a home run leaving the bat, the speed of sound waves through air, and the speed of a .22 leaving the barrel:
###Code
v = plotter.getV()
l = plotter.getTY(v)
plotter.plot1(v, l[0],l[1])
###Output
Enter your list of v_0's, separated by commas: 49,343,460
###Markdown
Problem 3 Exercise 5.13This exercise asked us to plot the trajectory of an object using this function: $$f(x) = x \tan(\theta) - \frac{1}{2v_0^2} \frac{gx^2}{{\cos^2(\theta)}} + y_0$$The exercise asked us to read the input data for initial values directly from the command line, so we created a main() function which executes everything at once; the input, the function, and the plot. Here is a sample plot:
###Code
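# pt is the authors' module and is not shown here. A small illustrative sketch
# of the trajectory formula it presumably evaluates (the sample values
# y0 = 10 m, theta = 45 degrees, v0 = 300 m/s mirror the inputs shown in the
# output below):
import numpy as np
import matplotlib.pyplot as plt
y0, theta, v0, g = 10.0, np.radians(45), 300.0, 9.81
x_demo = np.linspace(0, 9000, 500)
f_demo = x_demo*np.tan(theta) - g*x_demo**2/(2*v0**2*np.cos(theta)**2) + y0
plt.plot(x_demo, f_demo)
plt.xlabel('x (m)')
plt.ylabel('f(x) (m)')
plt.show()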
pt.main()
###Output
Please enter the starting height of the ball: 10
Please enter the starting angle you want your ball to be thrown at: 45
Please enter your initial velocity: 300
###Markdown
Problem 4 Exercise 5.14This exercise asked us to read data points from a file. The file was formatted as two columns, one for $x$, one for $y$. To do so, we used urllib2 as suggested by the Python documentation, and read from the url at which the file was located. Then we split each line with spaces as delimiters, and appended the lists for $x$ and $y$ coordinates accordingly. Then we created a function to plot a simple graph of $x$ vs $y$, and a function to return $y_{mean}$, $y_{max}$, and $y_{min}$. Here is the graph of $x$ vs $y$:
###Code
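# rc is the authors' module and is not shown here. A small illustrative sketch
# of reading a two-column file and computing the y statistics (the function and
# file names are assumptions):
import numpy as np

def demo_read_xy(filename):
    xs, ys = [], []
    with open(filename) as fh:
        for line in fh:
            a, b = line.split()
            xs.append(float(a))
            ys.append(float(b))
    return np.array(xs), np.array(ys)

# Example usage with a local copy of the data file:
# x_arr, y_arr = demo_read_xy('xy.dat')
# print(y_arr.mean(), y_arr.max(), y_arr.min())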
l = rc.getData()
mmm = rc.yData(l[1])
rc.plotData(l[0],l[1])
###Output
_____no_output_____
###Markdown
And here is the mean, max, and min of y:
###Code
print ('Mean of y is: ' + str(mmm[0]))
print ('Max of y is: ' + str(mmm[1]))
print ('Min of y is: ' + str(mmm[2]))
###Output
Mean of y is: 2.58191401076e-18
Max of y is: 1.0
Min of y is: -1.0
|
homework01.ipynb | ###Markdown
Task 1. Compute: $$7 \cdot \begin{pmatrix}5 & 10 \\7 & 12 \\11.3 & 5 \\25 & 30 \\\end{pmatrix} + 2 \cdot\begin{pmatrix}5 & 10 \\7 & 12 \\11.3 & 5 \\25 & 30 \\\end{pmatrix}$$ Solution: $$\begin{pmatrix}35 & 70 \\49 & 84 \\79.1 & 35 \\175 & 210 \\\end{pmatrix} + \begin{pmatrix}10 & 20 \\14 & 24 \\22.6 & 10 \\50 & 60 \\\end{pmatrix} = \begin{pmatrix}45 & 90 \\63 & 108 \\101.7 & 45 \\225 & 270 \\\end{pmatrix}$$ Task 2.1. Solve the system of equations: $$3x - 2y + 5z = 7 \\7x + 4y - 8z = 3 \\5x - 3y - 4z = -12$$ The system is linear, as are all of its equations. Solution by the Gauss-Jordan method:
###Code
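# Quick numpy check of Task 1: 7*M + 2*M should equal 9*M
# (this import is also used by the cells below).
import numpy as np
M = np.matrix([[5, 10], [7, 12], [11.3, 5], [25, 30]])
print(7*M + 2*M)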
m = np.matrix([[3,-2,5,7],[7,4,-8,3],[5,-3,-4,-12]], dtype=float)
m
###Output
_____no_output_____
###Markdown
Multiply the second and third rows by 3:
###Code
m[1] = m[1]*3
m[2] = m[2]*3
m
###Output
_____no_output_____
###Markdown
From the second row subtract the first row multiplied by 7, and from the third row subtract the first row multiplied by 5
###Code
m[1] = m[1] - m[0]*7
m[2] = m[2] - m[0]*5
m
###Output
_____no_output_____
###Markdown
Divide the first row by 3 and the second row by 26
###Code
m[0] = m[0]/3
m[1] = m[1]/26
m
###Output
_____no_output_____
###Markdown
From the third row subtract the second row:
###Code
m[2] = m[2]-m[1]
m
t = 1/m[2,2]
t
###Output
_____no_output_____
###Markdown
Multiply the third row by t ≈ -0.028..
###Code
m[2] = m[2]*t
m
m[0,2], m[1,2]
###Output
_____no_output_____
###Markdown
From the first row subtract the third row multiplied by 1.66.., and from the second row subtract the third row multiplied by -2.269...
###Code
m[0] = m[0] - m[2]*m[0,2]
m[1] = m[1] - m[2]*m[1,2]
m
m[0,1]
###Output
_____no_output_____
###Markdown
From the first row subtract the second row multiplied by -0.(6) (i.e. -0.666...)
###Code
m[0] = m[0] - m[1]*m[0,1]
m
# answer
x = 1
y = 3
z = 2
# check
print(3*x - 2*y + 5*z)
print(7*x + 4*y - 8*z)
print(5*x - 3*y - 4*z)
###Output
7
3
-12
###Markdown
The solution is correct. Task 2.2. Solve the system of equations: $$x^2 + yx - 9 = 0 \\x - y/5 = 0$$ Solution: express y from the second equation and substitute it into the first one $$x = y/5 \\y = 5x$$ $$x^2 + 5x^2 - 9 = 0 \\6x^2 = 9 \\x^2 = 3/2$$ $$x = \pm \sqrt{3/2}$$
###Code
x = np.sqrt(3/2)
x
x1 = x
x2 = -x
print(x1, x2)
y1 = x1*5
y2 = x2*5
print(y1, y2)
###Output
6.123724356957945 -6.123724356957945
###Markdown
Check:
###Code
f1 = x1**2 + y1*x1 - 9
f2 = x2**2 + y2*x2 - 9
print(f1, f2)
# almost 0, given the rounding errors of the computation
f1 = x1 - y1/5
f2 = x2 - y2/5
print(f1, f2)
###Output
0.0 0.0
###Markdown
Task 3. Solve the problem: The floor area of a rectangular room is 48 m², and its perimeter is 28 m. Find the length and width of the room. Solution: $$x*y = 48 \\2(x+y) = 28, x+y = 14$$ Express y from one equation and substitute it into the other: $$y = 14 - x \\x * (14 - x) = 48$$ $$x^2 - 14x + 48 = 0$$ $$D = b^2 - 4ac \\x = \frac{-b \pm \sqrt{D}}{2a}$$
###Code
D = (-14)**2 - 4*48
D
x1 = (14 + np.sqrt(4))/2
x1
x2 = (14 - np.sqrt(4))/2
x2
y1 = 14 - x1
y1
y2 = 14 - x2
y2
###Output
_____no_output_____
###Markdown
The pairs of roots of the system are the same; let's check:
###Code
f1 = x1*y1
f2 = x1+y1
print(f1, f2)
###Output
48.0 14.0
###Markdown
Task 4. Plot two curves y(x) on one figure for the function of two variables y(k,x)=cos(k∙x), taking k=1 for one curve and any other value k ≠ 1 for the other.
###Code
from matplotlib import pyplot as plt
%matplotlib inline
x = np.linspace(-10, 10, 100)
plt.plot(x, np.cos(x))
plt.plot(x, np.cos(2*x))
plt.show()
###Output
_____no_output_____
###Markdown
$$C_n^k = \frac{n!}{k!(n-k)!}$$ $$A_n^k = \frac{n!}{(n-k)!}$$ $$P_n = n!$$
###Code
import numpy as np
from math import factorial
def C(n, k):
return factorial(n)/(factorial(k)*factorial(n-k))
def A(n, k):
return factorial(n)/(factorial(n-k))
def P(n):
return factorial(n)
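# Quick sanity checks: C(52, 4) = 270725 four-card hands, A(10, 3) = 720 ordered triples, P(4) = 24 orderings.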
###Output
_____no_output_____
###Markdown
Task 1. Four cards are drawn at random from a deck of 52 cards. 1.1 Find the probability that all of the cards are clubs. Variant 1
###Code
prob = C(13, 4)/C(52,4)
prob
###Output
_____no_output_____
###Markdown
Variant 2: draw the 4 club cards one after another:
###Code
prob = (13/52)*(12/51)*(11/50)*(10/49)
prob
###Output
_____no_output_____
###Markdown
1.2 Find the probability that at least one of the 4 cards is an ace. First, find the probability of drawing 4 cards without any aces
###Code
p1 = C(48, 4)/C(52, 4)
p1
prob = 1 - p1
prob
# variant 2
prob = 1 - (48/52)*(47/51)*(46/50)*(45/49)
prob
###Output
_____no_output_____
###Markdown
Task 2. The entrance door of an apartment building has a combination lock with ten buttons labeled with the digits 0 to 9. The code consists of three digits that have to be pressed simultaneously. What is the probability that a person who does not know the code opens the door on the first try?
###Code
prob = 1/C(10,3)
prob
###Output
_____no_output_____
###Markdown
Task 3. A box contains 15 parts, 9 of which are painted. A worker randomly takes out 3 parts. What is the probability that all of the extracted parts are painted?
###Code
prob = (9/15)*(8/14)*(7/13)
prob
prob = C(9,3)/C(15,3)
prob
###Output
_____no_output_____
###Markdown
Task 4. There are 100 tickets in a lottery, 2 of which are winning tickets. What is the probability that 2 purchased tickets both turn out to be winning?
###Code
prob = (2/100)*(1/99)
prob
prob = C(2,2)/C(100,2)
prob
###Output
_____no_output_____ |
notebooks/1_data_wrangling.ipynb | ###Markdown
1. Data Wrangling In this part of the notebook I will cover how to wrangle the data so we can extract features easily. We will try to label each event according to the user subscription phase it belongs to.
example:
| userId | upgrade_time | downgrade_time | ...event |
|--------|--------------|----------------|----------|
| 1111 | 2020-12-05 | 2020-12-29 | ...event |
| 2222 | 2020-11-12 | null | ...event |
| 3333 | 2020-10-15 | 2020-10-29 | ...event |
a null value in downgrade_time means that the user isn't churning Import Needed Libraries and Initialize PySpark
###Code
from pyspark.sql import SparkSession
import pyspark.sql.functions as F
import pyspark.sql.types as T
from pyspark import SparkContext, SparkConf
spark = SparkSession.builder.appName('sparkify') \
.config('spark.driver.maxResultSize', '3g') \
.getOrCreate()
###Output
_____no_output_____
###Markdown
Load the Dataset from Google Cloud Storage
###Code
# Load dataset from GCS and change ts from bigint to datetime format
df = spark.read.parquet('gs://udacity-dsnd/sparkify_event_data.parquet/')
df = df.withColumnRenamed("ts","ts_temp").withColumn("ts", (F.col("ts_temp") / 1000).cast(T.TimestampType())).drop("ts_temp")
df.cache()
df.printSchema()
###Output
root
|-- artist: string (nullable = true)
|-- auth: string (nullable = true)
|-- firstName: string (nullable = true)
|-- gender: string (nullable = true)
|-- itemInSession: long (nullable = true)
|-- lastName: string (nullable = true)
|-- length: double (nullable = true)
|-- level: string (nullable = true)
|-- location: string (nullable = true)
|-- method: string (nullable = true)
|-- page: string (nullable = true)
|-- registration: long (nullable = true)
|-- sessionId: long (nullable = true)
|-- song: string (nullable = true)
|-- status: long (nullable = true)
|-- userAgent: string (nullable = true)
|-- userId: string (nullable = true)
|-- ts: timestamp (nullable = true)
###Markdown
Wrangle Dataframe to Get Event Labeled Dataframe First, we want to find when each user upgraded and downgraded their subscription
###Code
# up_df holds the events where a user upgraded, e.g.
# | userId | ts | page |
# | 234124 | 2018-1-1 12:00:00 | Submit Upgrade |
up_df = df.select(["userId", "ts"]) \
.filter(df.page == "Submit Upgrade") \
.withColumnRenamed('ts', 'up_ts')
# down_df holds the events where a user downgraded, e.g.
# | userId | ts | page |
# | 234124 | 2018-1-5 12:00:00 | Submit Downgrade |
down_df = df.select(["userId", "ts"]) \
.filter(df.page == "Submit Downgrade") \
.withColumnRenamed('ts', 'down_ts') \
.withColumnRenamed("userId", "userIdTemp")
###Output
_____no_output_____
###Markdown
Second, we query to get every upgrade event and the time of the downgrade event that follows it
###Code
# key_df joins up_df and down_df to put each upgrade and its following downgrade in the same row, like below
# | userId | up_ts | down_ts | isChurn |
# | 234124 | 2018-1-2 12:00:00 | 2018-1-5 12:00:00 | True |
# | 234124 | 2018-1-6 12:00:00 | 2018-1-9 12:00:00 | True |
key_df = up_df.join(down_df,
(down_df.userIdTemp == up_df.userId) &
(down_df.down_ts > up_df.up_ts), how="left") \
.drop(F.col("userIdTemp")) \
.groupBy(F.col("userId"), up_df.up_ts) \
.agg(F.min(down_df.down_ts)) \
.withColumnRenamed("max(userId)", "userId") \
.withColumn("down_ts",
F.when(F.col("min(down_ts)").isNull(), '2099-12-31 00:00:00') \
.otherwise(F.col("min(down_ts)"))) \
.withColumn("isChurn",
F.when(F.col("min(down_ts)").isNull(), False).otherwise(True)) \
.orderBy(up_df.up_ts)
key_df.cache()
key_df.show()
# save the result then read it again to reduce query complexity
key_df.drop("min(down_ts)").write.parquet("gs://udacity-dsnd/key_df.parquet")
key_df = spark.read.parquet("hdfs:///user/key_df.parquet")
key_df.printSchema()
###Output
root
|-- userId: string (nullable = true)
|-- up_ts: timestamp (nullable = true)
|-- down_ts: string (nullable = true)
|-- isChurn: boolean (nullable = true)
###Markdown
Finally, we label every event with the values from key_df and save the result to GCS
###Code
# Label every event that falls between key_df's up_ts and down_ts for the same userId and save it to GCS
# the resulting table will look like below
# | userId | up_ts | down_ts | isChurn | event |
# | 234124 | 2018-1-5 12:00:00 | 2018-1-5 12:00:00 | True | event1 |
# | 234124 | 2018-1-5 12:00:00 | 2018-1-5 12:00:00 | True | event2 |
df = df.withColumnRenamed("userId", "userIdTemp")
key_df.join(df, (key_df.up_ts <= df.ts) & (df.ts <= key_df.down_ts) & (key_df.userId == df.userIdTemp),how='left') \
.write.parquet('gs://udacity-dsnd/event_labeled.parquet')
###Output
_____no_output_____ |
test-features.ipynb | ###Markdown
Welcome to pyRT - The Python Raytracer https://github.com/martinchristen/pyRTThe goal of pyRT is teaching computer graphics.One part of pyrt is the virtual framebuffer where you can draw pixels using standard algorithms such as Bresenham's line drawing algorithm.From Version 0.5.0 an additional goal is better Jupyter integration; this is now done in RGBImage.Dependencies:This notebook requires pyrt, pillow, numpy, and numba 1. Virtual Framebuffer for Pixel Operations
###Code
from pyrt.renderer import RGBImage
from pyrt.math import Vec2, Vec3
import random
###Output
_____no_output_____
###Markdown
1.2 Animated Virtual Framebuffer in Jupyter
###Code
w = 320
h = 240
image = RGBImage(w, h)
image.clear(Vec3(0.0,0.0,0.4))
for i in range(5000):
position = Vec2(random.randint(0, w - 1), random.randint(0, h - 1))
color = Vec3(random.uniform(0, 1), random.uniform(0, 1), random.uniform(0, 1))
image.drawPoint(position, color, 1)
image.framebuffer()
for i in range(100):
pos1 = Vec2(random.randint(0, w - 1), random.randint(0, h - 1))
pos2 = Vec2(random.randint(0, w - 1), random.randint(0, h - 1))
color = Vec3(random.uniform(0, 1), random.uniform(0, 1), random.uniform(0, 1))
image.drawLine(pos1, pos2, color, 2)
image.update(fps=30)
###Output
_____no_output_____
###Markdown
1.3 Loading Images
###Code
from pyrt.renderer import loadimage
image2 = loadimage("data/worldmap/world600.jpg")
image2.framebuffer("world")
image2.drawCircleFilled(Vec2(300,150), radius=10, color=Vec3(1,0,0), fillcolor=Vec3(1,1,0), size=1)
image2.update("world")
for i in range(100):
pos = Vec2(random.randint(0, image2.width - 1), random.randint(0, image2.height - 1))
radius = random.randint(2,20)
color = Vec3(random.uniform(0, 1), random.uniform(0, 1), random.uniform(0, 1))
fillcolor = Vec3(random.uniform(0, 1), random.uniform(0, 1), random.uniform(0, 1))
image2.drawCircleFilled(pos, radius, color, fillcolor, 1)
image2.update("world", fps=30)
###Output
_____no_output_____
###Markdown
1.4 Example: Display Recent Earthquakes on Mapdata from USGS: https://earthquake.usgs.gov/earthquakes/feed/v1.0/geojson.php
###Code
import requests
import json
from pyrt.renderer import RGBImage, loadimage
from pyrt.math import Vec2, Vec3
data = requests.get("https://earthquake.usgs.gov/earthquakes/feed/v1.0/summary/2.5_week.geojson")
with open("earthquakes.geojson","wb") as file:
file.write(data.content)
with open("earthquakes.geojson") as json_file:
data = json.load(json_file)
image3 = loadimage("data/worldmap/world600.jpg")
for element in data["features"]:
mag = element["properties"]["mag"]
coord = element["geometry"]["coordinates"]
x = int(image3.width*((coord[0] + 180) / 360))
y = int(image3.height*((coord[1] + 90) / 180))
if mag>4.5:
image3.drawCircleFilled(Vec2(x,y), int(mag), Vec3(0,0,0), Vec3(1,0,0), 1)
image3
###Output
_____no_output_____
###Markdown
1.5 Procedural Images
###Code
from numba import jit
import numpy as np
import math
from pyrt.renderer import RGBImage, loadimage
from pyrt.math import Vec2, Vec3
from pyrt.math import clamp3, cross3, step
from pyrt.math import SimplexNoise, TileableNoise
from pyrt.math import mod
w = 256
h = 256
image = RGBImage(w, h)
rgb = Vec3()
noise = SimplexNoise()
###Output
_____no_output_____
###Markdown
1.5.1 Stripes and Checkerboard
###Code
for x in range(w):
for y in range(h):
xx = x/w # range [0,1[
yy = y/h # range [0,1[
if mod(6.0*xx, 1.0) < 0.5: # if 6.0*xx % 1.0 < 0.5:
rgb[0] = rgb[1] = rgb[2] = 0
else:
rgb[0] = rgb[1] = rgb[2] = 255
image.data[y][x][0] = rgb[0]
image.data[y][x][1] = rgb[1]
image.data[y][x][2] = rgb[2]
image
for x in range(w):
for y in range(h):
xx = x/w # range [0,1[
yy = y/h # range [0,1[
if mod(8.0*yy, 1.0) < 0.5: # if 8.0*yy % 1.0 < 0.5:
rgb[0] = rgb[1] = rgb[2] = 0
else:
rgb[0] = rgb[1] = rgb[2] = 255
image.data[y][x][0] = rgb[0]
image.data[y][x][1] = rgb[1]
image.data[y][x][2] = rgb[2]
image
for x in range(w):
for y in range(h):
xx = x/w # range [0,1[
yy = y/h # range [0,1[
if (mod(4.0*xx, 1.0) < 0.5) ^ (mod(4.0*yy, 1.0) < 0.5):
rgb[0] = rgb[1] = rgb[2] = 0
else:
rgb[0] = rgb[1] = rgb[2] = 255
image.data[y][x][0] = rgb[0]
image.data[y][x][1] = rgb[1]
image.data[y][x][2] = rgb[2]
image
###Output
_____no_output_____
###Markdown
1.5.2 Mandelbrot
###Code
def CreateMandelbrotImage(w=256, h=256, maxiteration=200):
image = RGBImage(w, h)
rgb = Vec3()
for x in range(w):
for y in range(h):
xx = 2*(x/w-0.5)-0.5
yy = 2*(y/h-0.5)
xpos = 0.0
ypos = 0.0
iteration = 0.0
while (xpos*xpos + ypos*ypos < 4) and (iteration < maxiteration):
xpos, ypos = xpos*xpos - ypos*ypos + xx, 2.0*xpos*ypos + yy
iteration += 1.0
rgb[0] = (iteration % 20.0) / 20.0
rgb[1] = (iteration % 10.0) / 10.0
rgb[2] = (iteration % 20.0) / 20.0
image.data[y][x][0] = int(255*rgb[0])
image.data[y][x][1] = int(255*rgb[1])
image.data[y][x][2] = int(255*rgb[2])
return image
%%time
CreateMandelbrotImage(256,256,200)
###Output
_____no_output_____
###Markdown
Using Numba to speed up:Please note that pyrt functions can't be used with numba at this time, so we create an external numpy array for the framebuffer.
###Code
@jit
def CreateMandelbrotImageNumba(w=256, h=256, maxiteration=200):
imagedata = np.zeros((w, h, 3), dtype=np.uint8)
rgb = np.zeros(3, dtype=np.float_)
for x in range(w):
for y in range(h):
xx = 2*(x/w-0.5)-0.5
yy = 2*(y/h-0.5)
xpos = 0.0
ypos = 0.0
iteration = 0.0
while (xpos*xpos + ypos*ypos < 4) and (iteration < maxiteration):
xpos, ypos = xpos*xpos - ypos*ypos + xx, 2.0*xpos*ypos + yy
iteration += 1.0
rgb[0] = (iteration % 20.0) / 20.0
rgb[1] = (iteration % 10.0) / 10.0
rgb[2] = (iteration % 20.0) / 20.0
imagedata[y][x][0] = int(255*rgb[0])
imagedata[y][x][1] = int(255*rgb[1])
imagedata[y][x][2] = int(255*rgb[2])
return imagedata
%%time
w = 256
h = 256
data = CreateMandelbrotImageNumba(w,h,200)
image = RGBImage(w, h, init_memory=False)
image.data = data
image
###Output
_____no_output_____
###Markdown
1.5.3 Perlin Noise
###Code
for x in range(w):
for y in range(h):
xx = x/w
yy = y/h
n = noise.noise2(10*xx,20*yy)
rgb[0] = 0
rgb[1] = n*n*256
rgb[2] = abs(n*256)
rgb = clamp3(rgb, Vec3(0,0,0), Vec3(255,255,255))
image.data[y][x][0] = int(rgb[0])
image.data[y][x][1] = int(rgb[1])
image.data[y][x][2] = int(rgb[2])
image
###Output
_____no_output_____
###Markdown
Perlin Noise 3D
###Code
z=100
for x in range(w):
for y in range(h):
xx = x/w
yy = y/h
n = abs(noise.noise3(5*xx,5*yy,z))*256
if n<=50:
rgb[0] = 60
rgb[1] = 0
rgb[2] = 0
else:
rgb[0] = 0
rgb[1] = n
rgb[2] = n
rgb = clamp3(rgb, Vec3(0,0,0), Vec3(255,255,255))
image.data[y][x][0] = int(rgb[0])
image.data[y][x][1] = int(rgb[1])
image.data[y][x][2] = int(rgb[2])
image
###Output
_____no_output_____
###Markdown
Cumulative Noise
###Code
for x in range(w):
for y in range(h):
xx = x/w
yy = y/h
n = abs(noise.noise2(3*xx*xx,2*yy*yy))*256
q = abs(noise.noise2(12*xx,12*yy))*256
rgb[0] = n
rgb[1] = q
rgb[2] = n
rgb = clamp3(rgb, Vec3(0,0,0), Vec3(255,255,255))
image.data[y][x][0] = int(rgb[0])
image.data[y][x][1] = int(rgb[1])
image.data[y][x][2] = int(rgb[2])
image
###Output
_____no_output_____
###Markdown
Welcome to pyRT - The Python Raytracer https://github.com/martinchristen/pyRTThe goal of pyRT is teaching computer graphics.One part of pyrt is the virtual framebuffer where you can draw pixels using standard algorithms such as Bresenham's line drawing algorithm.From Version 0.5.0 an additional goal is better Jupyter integration; this is now done in RGBImage.Dependencies:This notebook requires pyrt, pillow, numpy, and numba 1. Virtual Framebuffer for Pixel Operations
###Code
from pyrt.renderer import RGBImage
from pyrt.math import Vec2, Vec3
import random
###Output
_____no_output_____
###Markdown
1.2 Animated Virtual Framebuffer in Jupyter
###Code
w = 320
h = 240
image = RGBImage(w, h)
image.clear(Vec3(0.0,0.0,0.4))
for i in range(5000):
position = Vec2(random.randint(0, w - 1), random.randint(0, h - 1))
color = Vec3(random.uniform(0, 1), random.uniform(0, 1), random.uniform(0, 1))
image.drawPoint(position, color, 1)
image.framebuffer()
for i in range(100):
pos1 = Vec2(random.randint(0, w - 1), random.randint(0, h - 1))
pos2 = Vec2(random.randint(0, w - 1), random.randint(0, h - 1))
color = Vec3(random.uniform(0, 1), random.uniform(0, 1), random.uniform(0, 1))
image.drawLine(pos1, pos2, color, 2)
image.update(fps=30)
###Output
_____no_output_____
###Markdown
1.3 Loading Images
###Code
from pyrt.renderer import loadimage
image2 = loadimage("data/worldmap/world600.jpg")
image2.framebuffer("world")
image2.drawCircleFilled(Vec2(300,150), radius=10, color=Vec3(1,0,0), fillcolor=Vec3(1,1,0), size=1)
image2.update("world")
for i in range(100):
pos = Vec2(random.randint(0, image2.width - 1), random.randint(0, image2.height - 1))
radius = random.randint(2,20)
color = Vec3(random.uniform(0, 1), random.uniform(0, 1), random.uniform(0, 1))
fillcolor = Vec3(random.uniform(0, 1), random.uniform(0, 1), random.uniform(0, 1))
image2.drawCircleFilled(pos, radius, color, fillcolor, 1)
image2.update("world", fps=30)
###Output
_____no_output_____
###Markdown
1.4 Example: Display Recent Earthquakes on Mapdata from USGS: https://earthquake.usgs.gov/earthquakes/feed/v1.0/geojson.php
###Code
import requests
import json
from pyrt.renderer import RGBImage, loadimage
from pyrt.math import Vec2, Vec3
data = requests.get("https://earthquake.usgs.gov/earthquakes/feed/v1.0/summary/2.5_week.geojson")
with open("earthquakes.geojson","wb") as file:
file.write(data.content)
with open("earthquakes.geojson") as json_file:
data = json.load(json_file)
image3 = loadimage("data/worldmap/world600.jpg")
for element in data["features"]:
mag = element["properties"]["mag"]
coord = element["geometry"]["coordinates"]
x = int(image3.width*((coord[0] + 180) / 360))
y = int(image3.height*((coord[1] + 90) / 180))
if mag>4.5:
image3.drawCircleFilled(Vec2(x,y), int(mag), Vec3(0,0,0), Vec3(1,0,0), 1)
image3
###Output
_____no_output_____
###Markdown
1.5 Procedural Images
###Code
from numba import jit
import numpy as np
import math
from pyrt.renderer import RGBImage, loadimage
from pyrt.math import Vec2, Vec3
from pyrt.math import clamp3, cross3, step
from pyrt.math import SimplexNoise, TileableNoise
from pyrt.math import mod
w = 256
h = 256
image = RGBImage(w, h)
rgb = Vec3()
noise = SimplexNoise()
###Output
_____no_output_____
###Markdown
1.5.1 Stripes and Checkerboard
###Code
for x in range(w):
for y in range(h):
xx = x/w # range [0,1[
yy = y/h # range [0,1[
if mod(6.0*xx, 1.0) < 0.5: # if 6.0*xx % 1.0 < 0.5:
rgb[0] = rgb[1] = rgb[2] = 0
else:
rgb[0] = rgb[1] = rgb[2] = 255
image.data[y][x][0] = rgb[0]
image.data[y][x][1] = rgb[1]
image.data[y][x][2] = rgb[2]
image
for x in range(w):
for y in range(h):
xx = x/w # range [0,1[
yy = y/h # range [0,1[
if mod(8.0*yy, 1.0) < 0.5: # if 8.0*yy % 1.0 < 0.5:
rgb[0] = rgb[1] = rgb[2] = 0
else:
rgb[0] = rgb[1] = rgb[2] = 255
image.data[y][x][0] = rgb[0]
image.data[y][x][1] = rgb[1]
image.data[y][x][2] = rgb[2]
image
for x in range(w):
for y in range(h):
xx = x/w # range [0,1[
yy = y/h # range [0,1[
if (mod(4.0*xx, 1.0) < 0.5) ^ (mod(4.0*yy, 1.0) < 0.5):
rgb[0] = rgb[1] = rgb[2] = 0
else:
rgb[0] = rgb[1] = rgb[2] = 255
image.data[y][x][0] = rgb[0]
image.data[y][x][1] = rgb[1]
image.data[y][x][2] = rgb[2]
image
###Output
_____no_output_____
###Markdown
1.5.2 Mandelbrot
###Code
def CreateMandelbrotImage(w=256, h=256, maxiteration=200):
image = RGBImage(w, h)
rgb = Vec3()
for x in range(w):
for y in range(h):
xx = 2*(x/w-0.5)-0.5
yy = 2*(y/h-0.5)
xpos = 0.0
ypos = 0.0
iteration = 0.0
while (xpos*xpos + ypos*ypos < 4) and (iteration < maxiteration):
xpos, ypos = xpos*xpos - ypos*ypos + xx, 2.0*xpos*ypos + yy
iteration += 1.0
rgb[0] = (iteration % 20.0) / 20.0
rgb[1] = (iteration % 10.0) / 10.0
rgb[2] = (iteration % 20.0) / 20.0
image.data[y][x][0] = int(255*rgb[0])
image.data[y][x][1] = int(255*rgb[1])
image.data[y][x][2] = int(255*rgb[2])
return image
%%time
CreateMandelbrotImage(256,256,200)
###Output
_____no_output_____
###Markdown
Using Numba to speed up:Please note that pyrt functions can't be used with numba at this time, so we create an external numpy array for the framebuffer.
###Code
@jit
def CreateMandelbrotImageNumba(w=256, h=256, maxiteration=200):
imagedata = np.zeros((w, h, 3), dtype=np.uint8)
rgb = np.zeros(3, dtype=np.float_)
for x in range(w):
for y in range(h):
xx = 2*(x/w-0.5)-0.5
yy = 2*(y/h-0.5)
xpos = 0.0
ypos = 0.0
iteration = 0.0
while (xpos*xpos + ypos*ypos < 4) and (iteration < maxiteration):
xpos, ypos = xpos*xpos - ypos*ypos + xx, 2.0*xpos*ypos + yy
iteration += 1.0
rgb[0] = (iteration % 20.0) / 20.0
rgb[1] = (iteration % 10.0) / 10.0
rgb[2] = (iteration % 20.0) / 20.0
imagedata[y][x][0] = int(255*rgb[0])
imagedata[y][x][1] = int(255*rgb[1])
imagedata[y][x][2] = int(255*rgb[2])
return imagedata
%%time
w = 256
h = 256
data = CreateMandelbrotImageNumba(w,h,200)
image = RGBImage(w, h, init_memory=False)
image.data = data
image
###Output
_____no_output_____
###Markdown
1.5.3 Perlin Noise
###Code
for x in range(w):
for y in range(h):
xx = x/w
yy = y/h
n = noise.noise2(10*xx,20*yy)
rgb[0] = 0
rgb[1] = n*n*256
rgb[2] = abs(n*256)
rgb = clamp3(rgb, Vec3(0,0,0), Vec3(255,255,255))
image.data[y][x][0] = int(rgb[0])
image.data[y][x][1] = int(rgb[1])
image.data[y][x][2] = int(rgb[2])
image
###Output
_____no_output_____
###Markdown
Perlin Noise 3D
###Code
z=100
for x in range(w):
for y in range(h):
xx = x/w
yy = y/h
n = abs(noise.noise3(5*xx,5*yy,z))*256
if n<=50:
rgb[0] = 60
rgb[1] = 0
rgb[2] = 0
else:
rgb[0] = 0
rgb[1] = n
rgb[2] = n
rgb = clamp3(rgb, Vec3(0,0,0), Vec3(255,255,255))
image.data[y][x][0] = int(rgb[0])
image.data[y][x][1] = int(rgb[1])
image.data[y][x][2] = int(rgb[2])
image
###Output
_____no_output_____
###Markdown
Cumulative Noise
###Code
for x in range(w):
for y in range(h):
xx = x/w
yy = y/h
n = abs(noise.noise2(3*xx*xx,2*yy*yy))*256
q = abs(noise.noise2(12*xx,12*yy))*256
rgb[0] = n
rgb[1] = q
rgb[2] = n
rgb = clamp3(rgb, Vec3(0,0,0), Vec3(255,255,255))
image.data[y][x][0] = int(rgb[0])
image.data[y][x][1] = int(rgb[1])
image.data[y][x][2] = int(rgb[2])
image
###Output
_____no_output_____ |
Getting_information_from_stock.ipynb | ###Markdown
###Code
#Getting information from stock
import pandas as pd
import numpy as np
import yfinance as yf
import datetime as dt
from pandas_datareader import data as pdr
yf.pdr_override()
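# pdr_override() patches pandas_datareader so that pdr.get_data_yahoo() below is served by yfinance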
stock=input('Enter a stock ticker symbol: ')
print(stock)
startyear=2021
startmonth=1
startday=1
start=dt.datetime(startyear, startmonth, startday)
now=dt.datetime.now()
df=pdr.get_data_yahoo(stock, start, now)
print(df)
pip install yfinance
###Output
_____no_output_____ |
VGG16_in_Keras.ipynb | ###Markdown
###Code
!wget --no-check-certificate \
https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip \
-O cats_and_dogs_filtered.zip
! unzip cats_and_dogs_filtered.zip
import keras,os
from keras.models import Sequential
from keras.layers import Dense, Conv2D, MaxPool2D , Flatten
from keras.preprocessing.image import ImageDataGenerator
import numpy as np
trdata = ImageDataGenerator()
traindata = trdata.flow_from_directory(directory="cats_and_dogs_filtered/train",target_size=(224,224))
tsdata = ImageDataGenerator()
testdata = tsdata.flow_from_directory(directory="cats_and_dogs_filtered/validation", target_size=(224,224))
model = Sequential()
model.add(Conv2D(input_shape=(224,224,3),filters=64,kernel_size=(3,3),padding="same", activation="relu"))
model.add(Conv2D(filters=64,kernel_size=(3,3),padding="same", activation="relu"))
model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
model.add(Conv2D(filters=128, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Conv2D(filters=128, kernel_size=(3,3), padding="same", activation="relu"))
model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
model.add(Conv2D(filters=256, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Conv2D(filters=256, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Conv2D(filters=256, kernel_size=(3,3), padding="same", activation="relu"))
model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
model.add(Flatten())
model.add(Dense(units=4096,activation="relu"))
model.add(Dense(units=4096,activation="relu"))
model.add(Dense(units=2, activation="softmax"))
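# The stack above follows the VGG16 topology: 13 conv layers (3x3) in five blocks of 64, 128, 256, 512, 512 filters,
# each block ending in 2x2 max-pooling, followed by two 4096-unit dense layers and a 2-way softmax (cat vs dog)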
from tensorflow.keras.optimizers import Adam # - Works
from keras.optimizers import adam_v2
from keras.models import Sequential
from keras.layers import Dense
from keras.models import model_from_json
import numpy
import os
opt = adam_v2.Adam(learning_rate=0.001 )
model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['acc', 'mse'])  # use the Adam optimizer defined above
model.summary()
from keras.callbacks import ModelCheckpoint, EarlyStopping
checkpoint = ModelCheckpoint("vgg16_1.h5", monitor='val_acc', verbose=1, save_best_only=True, save_weights_only=False, mode='auto', period=1)
early = EarlyStopping(monitor='val_acc', min_delta=0, patience=20, verbose=1, mode='auto')
hist = model.fit_generator(steps_per_epoch=10,generator=traindata, validation_data= testdata, validation_steps=10,epochs=10,callbacks=[checkpoint,early])
import matplotlib.pyplot as plt
plt.plot(hist.history["acc"])
plt.plot(hist.history['val_acc'])
plt.plot(hist.history['loss'])
plt.plot(hist.history['val_loss'])
plt.title("model accuracy")
plt.ylabel("Accuracy")
plt.xlabel("Epoch")
plt.legend(["Accuracy","Validation Accuracy","loss","Validation Loss"])
plt.show()
from keras.preprocessing import image
img = image.load_img("/content/cats_and_dogs_filtered/validation/dogs/dog.2000.jpg",target_size=(224,224))
img = np.asarray(img)
plt.imshow(img)
img = np.expand_dims(img, axis=0)
from keras.models import load_model
saved_model = load_model("vgg16_1.h5")
output = saved_model.predict(img)
if output[0][0] > output[0][1]:
print("cat")
else:
print('dog')
###Output
_____no_output_____ |
cnn_tpu.ipynb | ###Markdown
Pytorch hands-on (CNN on TPU) Adapted from [here](https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html)
###Code
!rm -r ./log
%tensorflow_version 2.x
%load_ext tensorboard
%tensorboard --logdir ./log
import os
assert os.environ['COLAB_TPU_ADDR'], 'Make sure to select TPU from Edit > Notebook settings > Hardware accelerator'
os.environ["XLA_USE_BF16"] = "1"
###Output
_____no_output_____
###Markdown
Installing Pytorch/XLA
###Code
VERSION = "20200325" #@param ["1.5" , "20200325", "nightly"]
!curl https://raw.githubusercontent.com/pytorch/xla/master/contrib/scripts/env-setup.py -o pytorch-xla-env-setup.py
!python pytorch-xla-env-setup.py --version $VERSION
from time import time
import torch
from torch import nn
from torch import optim
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
import torchvision
import torchvision.transforms as transforms
import torch_xla
import torch_xla.core.xla_model as xm
###Output
_____no_output_____
###Markdown
Load image data
###Code
def get_data(batch_size: int=64):
transform = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size,
shuffle=True, num_workers=2)
testset = torchvision.datasets.CIFAR10(root='./data', train=False,
download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=4,
shuffle=False, num_workers=2)
classes = ('plane', 'car', 'bird', 'cat',
'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
return trainloader, testloader, classes
###Output
_____no_output_____
###Markdown
CNN model
###Code
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 5).bfloat16()
self.pool = nn.MaxPool2d(2, 2).bfloat16()
self.conv2 = nn.Conv2d(6, 16, 5).bfloat16()
self.fc1 = nn.Linear(16 * 5 * 5, 120).bfloat16()
self.fc2 = nn.Linear(120, 84).bfloat16()
self.fc3 = nn.Linear(84, 10).bfloat16()
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = x.view(-1, 16 * 5 * 5)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
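# Shape flow for a 3x32x32 CIFAR-10 image: conv1 (5x5) -> 6x28x28, pool -> 6x14x14,
# conv2 (5x5) -> 16x10x10, pool -> 16x5x5, flatten to 400, then fc 120 -> 84 -> 10 class scores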
###Output
_____no_output_____
###Markdown
Define functions Training
###Code
def train_tpu(model: nn.Module, trainloader, log_dir: str, device):
model.to(device)
loss = nn.CrossEntropyLoss()
opt = optim.Adam(model.parameters(), lr=0.001)
writer = SummaryWriter(log_dir)
running_loss = 0.0
prev_time = time()
n_minibatches = 0
for epoch in range(4):
for i, data in enumerate(trainloader, 0):
# get the inputs; data is a list of [inputs, labels]
inputs = data[0].to(device)
labels = data[1].to(device)
# zero the parameter gradients
opt.zero_grad()
# forward + backward + optimize
outputs = model(inputs)
loss_value = loss(outputs, labels)
loss_value.backward()
# opt.step() # For CPU/GPU
xm.optimizer_step(opt, barrier=True) # Note: Cloud TPU-specific code!
writer.add_scalar("loss_value", loss_value, n_minibatches)
n_minibatches += 1
# print statistics
running_loss += loss_value.item()
if i % 100 == 99: # print every 100 mini-batches
print('[{}, {:5d}] loss: {:.3f}, elapsed time: {:.1f} [sec]'.format(
epoch + 1, i + 1, running_loss / 2000, time() - prev_time))
running_loss = 0.0
prev_time = time()
###Output
_____no_output_____
###Markdown
Prediction
###Code
def evaluate(model: nn.Module, testloader, device):
correct = 0
total = 0
with torch.no_grad():
for data in testloader:
inputs = data[0].to(device)
labels = data[1].to(device)
outputs = model(inputs)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Accuracy of the network on the 10000 test images: %d %%' % (
100 * correct / total))
###Output
_____no_output_____
###Markdown
Training and evaluation of the model on TPU
###Code
trainloader, testloader, classes = get_data()
model = Net()
dev = xm.xla_device()
train_tpu(model, trainloader, "./log/2", dev)
evaluate(model, testloader, dev)
model.to("cpu")
torch.save({
"model": model.state_dict(),
}, "./model_tpu.pt")
###Output
_____no_output_____
###Markdown
Load trained model
###Code
trainloader, testloader, classes = get_data()
model = Net()
dev = xm.xla_device()
checkpoint = torch.load("./model_tpu.pt")
model.load_state_dict(checkpoint["model"])
model.to(dev)
evaluate(model, testloader, dev)
###Output
_____no_output_____ |
notebooks/Logistic_Regression.ipynb | ###Markdown
Logistic Regression
###Code
import pandas as pd
import numpy as np
import seaborn as sns
from matplotlib import pyplot as plt
import warnings
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn import metrics
import seaborn as sn
import matplotlib.pyplot as plt
import statsmodels.api as sm
from sklearn.feature_selection import RFE
warnings.filterwarnings('ignore')
results = pd.DataFrame(columns=["data", "regularization", "package", "accuracy", "precision", "recall", "f1", "rSquared", "AUC"]) # store results
hcc_median = pd.read_csv('../data/raw/median.csv')
hcc_mean = pd.read_csv('../data/raw/mean.csv')
hcc_mode = pd.read_csv('../data/raw/mode.csv')
hcc_iterative = pd.read_csv('../data/raw/iterative.csv')
def get_data(data_name):
if data_name == 'median':
data = hcc_median
elif data_name == 'mean':
data = hcc_mean
elif data_name == 'mode':
data = hcc_mode
else:
data = hcc_iterative
X = data.drop(['Class'], axis=1) # get independent variable
y = data['Class'] # get dependent variable
# split data 70% to 30%
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=0)
return X_train, X_test, y_train, y_test
###Output
_____no_output_____
###Markdown
Feature Selection using Recursive Feature Elimination (RFE)
###Code
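# RFE repeatedly fits the estimator and prunes the weakest feature (step=1) until 35 features remain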
def feature_selection(X_train, X_test, y_train):
model = LogisticRegression()
#rfe = RFECV(estimator=model, step=1, cv=7)
rfe = RFE(estimator=model, n_features_to_select = 35, step=1)
rfe = rfe.fit(X_train, y_train)
columns = X_train.columns[rfe.support_]
X_train = rfe.transform(X_train)
X_test = rfe.transform(X_test)
X_train = pd.DataFrame(X_train, columns = columns)
X_test = pd.DataFrame(X_test, columns = columns)
return X_train, X_test, y_train
### R^2 for SkLearn
def full_log_likelihood(w, X, y):
score = np.dot(X, w).reshape(1, X.shape[0])
return np.sum(-np.log(1 + np.exp(score))) + np.sum(y * score)
def null_log_likelihood(w, X, y):
z = np.array([w if i == 0 else 0.0 for i, w in enumerate(w.reshape(1, X.shape[1])[0])]).reshape(X.shape[1], 1)
score = np.dot(X, z).reshape(1, X.shape[0])
return np.sum(-np.log(1 + np.exp(score))) + np.sum(y * score)
def mcfadden_rsquare(w, X, y):
return 1.0 - (full_log_likelihood(w, X, y) / null_log_likelihood(w, X, y))
def mcfadden_adjusted_rsquare(w, X, y):
k = float(X.shape[1])
return 1.0 - ((full_log_likelihood(w, X, y) - k) / null_log_likelihood(w, X, y))
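# McFadden's pseudo R-squared computed above compares the fitted model's log-likelihood
# with that of an intercept-only (null) model:
#     R^2_McFadden = 1 - ln L(full) / ln L(null)
# and the adjusted variant penalises the k estimated coefficients:
#     R^2_adj = 1 - (ln L(full) - k) / ln L(null)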
###Output
_____no_output_____
###Markdown
Using StatsModels
###Code
data_list = ['mean', 'mode', 'median', 'iterative']
for data in data_list:
X_train, X_test, y_train, y_test = get_data(data)
X_train, X_test, y_train = feature_selection(X_train, X_test, y_train)
print('\n')
print(data.upper(), ' IMPUTED DATASET')
## run logistic regression using stat models
logistic_sm = sm.Logit(y_train.values.reshape(-1,1), X_train).fit()
print(logistic_sm.summary())
y_pred = logistic_sm.predict(X_test)
y_pred = (y_pred >= 0.5).astype(int).to_numpy()
print("Accuracy ({}): {:.2f}".format(data, metrics.accuracy_score(y_test, y_pred)))
print("Precision ({}): {:.2f}".format(data, metrics.precision_score(y_test, y_pred)))
print("Recall ({}): {:.2f}".format(data, metrics.recall_score(y_test, y_pred)))
confusion_matrix = pd.crosstab(y_test, y_pred, rownames=['Actual'], colnames=['Predicted'])
sn.heatmap(confusion_matrix, annot=True)
plt.show()
## save data for comparison
results = results.append(pd.DataFrame([{"data" : data,
"regularization": "default",
"package": "StatsModels",
"accuracy": np.round(metrics.accuracy_score(y_test, y_pred), 2),
"precision": np.round(metrics.precision_score(y_test, y_pred), 2),
"recall": np.round(metrics.recall_score(y_test, y_pred), 2),
"f1": np.round(metrics.f1_score(y_test, y_pred), 2),
"rSquared": np.round(logistic_sm.prsquared, 2),
"AUC": np.round(metrics.roc_auc_score(y_test, y_pred), 2)}]), ignore_index=True)
###Output
MEAN IMPUTED DATASET
Optimization terminated successfully.
Current function value: 0.228057
Iterations 10
Logit Regression Results
==============================================================================
Dep. Variable: y No. Observations: 115
Model: Logit Df Residuals: 80
Method: MLE Df Model: 34
Date: Mon, 26 Oct 2020 Pseudo R-squ.: 0.6525
Time: 13:39:36 Log-Likelihood: -26.227
converged: True LL-Null: -75.482
Covariance Type: nonrobust LLR p-value: 3.406e-08
==================================================================================================
coef std err z P>|z| [0.025 0.975]
--------------------------------------------------------------------------------------------------
Gender 3.6851 2.226 1.655 0.098 -0.678 8.048
Symptoms -2.9455 1.445 -2.039 0.041 -5.777 -0.114
Alcohol 2.6879 1.788 1.504 0.133 -0.816 6.192
Hepatitis B Surface Antigen -1.2358 2.347 -0.527 0.598 -5.836 3.364
Hepatitis B Core Antibody 1.9883 1.847 1.077 0.282 -1.632 5.608
Hepatitis C Virus Antibody -1.6356 1.607 -1.018 0.309 -4.786 1.514
Cirrhosis -4.3147 2.956 -1.460 0.144 -10.107 1.478
Endemic Countries 9.2102 7.797 1.181 0.238 -6.072 24.492
Smoking -0.8083 1.822 -0.444 0.657 -4.380 2.763
Diabetes -4.4885 1.780 -2.521 0.012 -7.978 -0.999
Obesity 3.5196 2.656 1.325 0.185 -1.686 8.726
Arterial Hypertension 5.1655 2.149 2.404 0.016 0.954 9.377
Chronic Renal Insufficiency -1.3153 1.600 -0.822 0.411 -4.451 1.821
Nonalcoholic Steatohepatitis 2.2509 3.333 0.675 0.499 -4.281 8.783
Esophageal Varices 3.0143 2.482 1.215 0.224 -1.849 7.878
Splenomegaly -0.3097 1.416 -0.219 0.827 -3.085 2.466
Portal Vein Thrombosis -0.2625 1.267 -0.207 0.836 -2.745 2.220
Liver Metastasis -0.8768 1.342 -0.653 0.514 -3.507 1.754
Age at diagnosis -0.1460 0.073 -1.995 0.046 -0.289 -0.003
Performance Status -0.0389 0.567 -0.069 0.945 -1.150 1.073
Encefalopathy degree 0.4672 1.609 0.290 0.772 -2.687 3.621
Ascites degree -2.7644 1.396 -1.980 0.048 -5.501 -0.027
International Normalised Ratio -5.3986 2.570 -2.100 0.036 -10.437 -0.361
Haemoglobin 0.0915 0.227 0.404 0.686 -0.353 0.536
Mean Corpuscular Volume 0.1796 0.087 2.059 0.040 0.009 0.351
Albumin 1.0345 1.028 1.006 0.314 -0.981 3.050
Total Bilirubin 0.3828 0.463 0.827 0.408 -0.524 1.290
Alanine transaminase 0.0488 0.022 2.205 0.027 0.005 0.092
Aspartate transaminase -0.0446 0.018 -2.513 0.012 -0.079 -0.010
Total Proteins -0.0206 0.084 -0.245 0.806 -0.185 0.144
Creatinine 0.8390 0.734 1.142 0.253 -0.601 2.279
Major dimension of nodule -0.1951 0.131 -1.493 0.136 -0.451 0.061
Direct Bilirubin -0.6758 0.684 -0.989 0.323 -2.016 0.664
Iron 0.0775 0.039 2.004 0.045 0.002 0.153
Oxygen Saturation -0.1447 0.073 -1.970 0.049 -0.289 -0.001
==================================================================================================
Possibly complete quasi-separation: A fraction 0.18 of observations can be
perfectly predicted. This might indicate that there is complete
quasi-separation. In this case some parameters will not be identified.
Accuracy (mean): 0.72
Precision (mean): 0.78
Recall (mean): 0.72
###Markdown
Using ScikitLearn
###Code
data_list = ['mean', 'mode', 'median', 'iterative']
for data in data_list:
X_train, X_test, y_train, y_test = get_data(data)
X_train, X_test, y_train = feature_selection(X_train, X_test, y_train)
print('\n')
print(data.upper(), ' IMPUTED DATASET')
## run logistic regression using sklearn
logistic = LogisticRegression(fit_intercept=False)
logistic = logistic.fit(X_train,y_train)
y_pred = logistic.predict_proba(X_test)[::, 1]
y_pred = (y_pred >= 0.5).astype(int)
w = np.array(logistic.coef_).transpose()
# printing
values = np.append(logistic.intercept_, logistic.coef_)
# get the names of the values
names = np.append('intercept', X_train.columns)
table_ = pd.DataFrame(values, index = names, columns=['coef'])
table_['exp_coef'] = np.exp(table_['coef'])
print(table_)
print('\n')
print("Accuracy ({}): {:.2f}".format(data, metrics.accuracy_score(y_test, y_pred)))
print("Precision ({}): {:.2f}".format(data, metrics.precision_score(y_test, y_pred)))
print("Recall ({}): {:.2f}".format(data, metrics.recall_score(y_test, y_pred)))
confusion_matrix = pd.crosstab(y_test, y_pred, rownames=['Actual'], colnames=['Predicted'])
sn.heatmap(confusion_matrix, annot=True)
plt.show()
y_pred_proba = logistic.predict_proba(X_test)[::,1]
fpr, tpr, _ = metrics.roc_curve(y_test, y_pred_proba)
auc = metrics.roc_auc_score(y_test, y_pred_proba)
plt.plot(fpr,tpr,label="data, auc="+str(auc))
plt.legend(loc=4)
plt.show()
## save data for comparison
results = results.append(pd.DataFrame([{"data" : data,
"regularization": "default",
"package": "SkLearn",
"accuracy": np.round(metrics.accuracy_score(y_test, y_pred), 2),
"precision": np.round(metrics.precision_score(y_test, y_pred), 2),
"recall": np.round(metrics.recall_score(y_test, y_pred), 2),
"f1": np.round(metrics.f1_score(y_test, y_pred), 2),
"rSquared": np.round(mcfadden_rsquare(w, X_test, y_pred), 2),
"AUC": np.round(metrics.roc_auc_score(y_test, y_pred), 2)}]), ignore_index=True)
###Output
MEAN IMPUTED DATASET
coef exp_coef
intercept 0.000000 1.000000
Gender 0.431590 1.539703
Symptoms -0.586055 0.556518
Alcohol 0.385811 1.470806
Hepatitis B Surface Antigen 0.516934 1.676879
Hepatitis B Core Antibody 0.204852 1.227344
Hepatitis C Virus Antibody -0.481773 0.617687
Cirrhosis -0.086655 0.916994
Endemic Countries 0.204813 1.227295
Smoking 0.385643 1.470559
Diabetes -0.550968 0.576392
Obesity 0.169974 1.185274
Arterial Hypertension 0.807599 2.242516
Chronic Renal Insufficiency -0.131133 0.877101
Nonalcoholic Steatohepatitis 0.063077 1.065108
Esophageal Varices -0.150364 0.860395
Splenomegaly -0.071636 0.930870
Portal Vein Thrombosis -0.307422 0.735340
Liver Metastasis -0.309711 0.733659
Age at diagnosis -0.027569 0.972807
Performance Status -0.507842 0.601793
Encefalopathy degree -0.263032 0.768717
Ascites degree -0.560014 0.571201
International Normalised Ratio -0.841142 0.431218
Haemoglobin 0.061740 1.063686
Mean Corpuscular Volume 0.049255 1.050488
Albumin 0.169465 1.184671
Total Bilirubin 0.136933 1.146751
Alanine transaminase 0.018462 1.018633
Aspartate transaminase -0.015600 0.984521
Total Proteins -0.029252 0.971172
Creatinine -0.164660 0.848182
Major dimension of nodule -0.162438 0.850069
Direct Bilirubin -0.376935 0.685960
Iron 0.045785 1.046849
Oxygen Saturation -0.067500 0.934728
Accuracy (mean): 0.74
Precision (mean): 0.75
Recall (mean): 0.83
###Markdown
Regularizations Using StatsModels
###Code
data_list = ['mean', 'mode', 'median', 'iterative']
for data in data_list:
X_train, X_test, y_train, y_test = get_data(data)
X_train, X_test, y_train = feature_selection(X_train, X_test, y_train)
for i in [0, 1]:
print('\n')
print(data.upper(), ' IMPUTED DATASET using ', 'Lasso' if i == 1 else 'Ridge')
## run logistic regression using stat models
logistic_sm = sm.Logit(y_train.values.reshape(-1,1), X_train).fit_regularized(L1_wt = i) # if L1_wt = 1, Lasso: 0, Ridge
print(logistic_sm.summary())
y_pred = logistic_sm.predict(X_test)
y_pred = (y_pred >= 0.5).astype(int).to_numpy()
print("Accuracy ({}): {:.2f}".format(data, metrics.accuracy_score(y_test, y_pred)))
print("Precision ({}): {:.2f}".format(data, metrics.precision_score(y_test, y_pred)))
print("Recall ({}): {:.2f}".format(data, metrics.recall_score(y_test, y_pred)))
confusion_matrix = pd.crosstab(y_test, y_pred, rownames=['Actual'], colnames=['Predicted'])
sn.heatmap(confusion_matrix, annot=True)
plt.show()
## save data for comparison
results = results.append(pd.DataFrame([{"data" : data,
"regularization": 'Lasso' if i == 1 else 'Ridge',
"package": "StatsModels",
"accuracy": np.round(metrics.accuracy_score(y_test, y_pred), 2),
"precision": np.round(metrics.precision_score(y_test, y_pred), 2),
"recall": np.round(metrics.recall_score(y_test, y_pred), 2),
"f1": np.round(metrics.f1_score(y_test, y_pred), 2),
"rSquared": np.round(logistic_sm.prsquared, 2),
"AUC": np.round(metrics.roc_auc_score(y_test, y_pred), 2)}]), ignore_index=True)
###Output
MEAN IMPUTED DATASET using Ridge
Optimization terminated successfully. (Exit mode 0)
Current function value: 0.2280574133501607
Iterations: 210
Function evaluations: 232
Gradient evaluations: 210
Logit Regression Results
==============================================================================
Dep. Variable: y No. Observations: 115
Model: Logit Df Residuals: 80
Method: MLE Df Model: 34
Date: Mon, 26 Oct 2020 Pseudo R-squ.: 0.6525
Time: 13:39:41 Log-Likelihood: -26.227
converged: True LL-Null: -75.482
Covariance Type: nonrobust LLR p-value: 3.406e-08
==================================================================================================
coef std err z P>|z| [0.025 0.975]
--------------------------------------------------------------------------------------------------
Gender 3.6844 2.226 1.655 0.098 -0.678 8.047
Symptoms -2.9454 1.445 -2.039 0.041 -5.777 -0.114
Alcohol 2.6880 1.788 1.504 0.133 -0.816 6.192
Hepatitis B Surface Antigen -1.2357 2.347 -0.527 0.599 -5.836 3.364
Hepatitis B Core Antibody 1.9882 1.847 1.076 0.282 -1.632 5.608
Hepatitis C Virus Antibody -1.6360 1.607 -1.018 0.309 -4.786 1.514
Cirrhosis -4.3147 2.955 -1.460 0.144 -10.107 1.478
Endemic Countries 9.2101 7.797 1.181 0.238 -6.072 24.492
Smoking -0.8079 1.822 -0.443 0.658 -4.380 2.764
Diabetes -4.4881 1.780 -2.521 0.012 -7.977 -0.999
Obesity 3.5197 2.656 1.325 0.185 -1.686 8.726
Arterial Hypertension 5.1649 2.149 2.404 0.016 0.953 9.376
Chronic Renal Insufficiency -1.3154 1.600 -0.822 0.411 -4.451 1.821
Nonalcoholic Steatohepatitis 2.2512 3.333 0.676 0.499 -4.280 8.783
Esophageal Varices 3.0142 2.482 1.215 0.224 -1.849 7.878
Splenomegaly -0.3094 1.416 -0.218 0.827 -3.085 2.466
Portal Vein Thrombosis -0.2630 1.266 -0.208 0.836 -2.745 2.219
Liver Metastasis -0.8767 1.342 -0.653 0.514 -3.507 1.754
Age at diagnosis -0.1460 0.073 -1.995 0.046 -0.289 -0.003
Performance Status -0.0389 0.567 -0.069 0.945 -1.150 1.073
Encefalopathy degree 0.4670 1.609 0.290 0.772 -2.687 3.621
Ascites degree -2.7641 1.396 -1.980 0.048 -5.501 -0.027
International Normalised Ratio -5.3983 2.570 -2.100 0.036 -10.436 -0.360
Haemoglobin 0.0915 0.227 0.404 0.686 -0.353 0.536
Mean Corpuscular Volume 0.1795 0.087 2.058 0.040 0.009 0.351
Albumin 1.0346 1.028 1.006 0.314 -0.981 3.050
Total Bilirubin 0.3827 0.463 0.827 0.408 -0.524 1.290
Alanine transaminase 0.0488 0.022 2.205 0.027 0.005 0.092
Aspartate transaminase -0.0446 0.018 -2.513 0.012 -0.079 -0.010
Total Proteins -0.0206 0.084 -0.245 0.806 -0.185 0.144
Creatinine 0.8390 0.734 1.142 0.253 -0.601 2.278
Major dimension of nodule -0.1951 0.131 -1.493 0.136 -0.451 0.061
Direct Bilirubin -0.6757 0.684 -0.989 0.323 -2.015 0.664
Iron 0.0775 0.039 2.004 0.045 0.002 0.153
Oxygen Saturation -0.1447 0.073 -1.970 0.049 -0.289 -0.001
==================================================================================================
Possibly complete quasi-separation: A fraction 0.18 of observations can be
perfectly predicted. This might indicate that there is complete
quasi-separation. In this case some parameters will not be identified.
Accuracy (mean): 0.72
Precision (mean): 0.78
Recall (mean): 0.72
###Markdown
Using SkLearn
###Code
data_list = ['mean', 'mode', 'median', 'iterative']
for data in data_list:
X_train, X_test, y_train, y_test = get_data(data)
X_train, X_test, y_train = feature_selection(X_train, X_test, y_train)
penalties = ['l1', 'l2', 'elasticnet']
for penalty in penalties:
if penalty == 'l1':
solver = 'liblinear'
name = 'Lasso'
l1_ratio = None
multi_class = 'auto'
elif penalty == 'l2':
solver = 'lbfgs'
name = 'Ridge'
l1_ratio = None
multi_class = 'auto'
elif penalty == 'elasticnet':
solver='saga'
name = 'ElasticNet'
l1_ratio = 0.5
multi_class = 'ovr'
print('\n')
print(data.upper(), ' IMPUTED DATASET using ', name)
## run logistic regression using sklearn
logistic = LogisticRegression(fit_intercept=False, penalty=penalty, solver=solver, multi_class=multi_class, l1_ratio = l1_ratio)
logistic = logistic.fit(X_train,y_train)
y_pred = logistic.predict_proba(X_test)[::, 1]
y_pred = (y_pred >= 0.5).astype(int)
w = np.array(logistic.coef_).transpose()
# printing
values = np.append(logistic.intercept_, logistic.coef_)
# get the names of the values
names = np.append('intercept', X_train.columns)
table_ = pd.DataFrame(values, index = names, columns=['coef'])
table_['exp_coef'] = np.exp(table_['coef'])
print(table_)
print('\n')
print("Accuracy ({}): {:.2f}".format(data, metrics.accuracy_score(y_test, y_pred)))
print("Precision ({}): {:.2f}".format(data, metrics.precision_score(y_test, y_pred)))
print("Recall ({}): {:.2f}".format(data, metrics.recall_score(y_test, y_pred)))
confusion_matrix = pd.crosstab(y_test, y_pred, rownames=['Actual'], colnames=['Predicted'])
sn.heatmap(confusion_matrix, annot=True)
plt.show()
y_pred_proba = logistic.predict_proba(X_test)[::,1]
fpr, tpr, _ = metrics.roc_curve(y_test, y_pred_proba)
auc = metrics.roc_auc_score(y_test, y_pred_proba)
plt.plot(fpr,tpr,label="data, auc="+str(auc))
plt.legend(loc=4)
plt.show()
## save data for comparison
results = results.append(pd.DataFrame([{"data" : data,
"regularization": name,
"package": "SkLearn",
"accuracy": np.round(metrics.accuracy_score(y_test, y_pred), 2),
"precision": np.round(metrics.precision_score(y_test, y_pred), 2),
"recall": np.round(metrics.recall_score(y_test, y_pred), 2),
"f1": np.round(metrics.f1_score(y_test, y_pred), 2),
"rSquared": np.round(mcfadden_rsquare(w, X_test, y_pred), 2),
"AUC": np.round(metrics.roc_auc_score(y_test, y_pred), 2)}]), ignore_index=True)
results_sklearn = results[results.package == 'SkLearn']
final_sklearn = results_sklearn.pivot(index=['data', 'regularization'], columns="package", values=['accuracy', 'precision', 'recall', 'f1', 'rSquared', 'AUC'])
final_sklearn.columns = final_sklearn.columns.swaplevel(0,1)
final_sklearn
results_statsmodels = results[results.package == 'StatsModels']
final_statsmodels = results_statsmodels.pivot(index=['data', 'regularization'], columns="package", values=['accuracy', 'precision', 'recall', 'f1', 'rSquared', 'AUC'])
final_statsmodels.columns = final_statsmodels.columns.swaplevel(0,1)
final_statsmodels
###Output
_____no_output_____
###Markdown
Training a baseline logistic regression model
###Code
import pandas as pd
import numpy as np
import os
import pickle
from sklearn.linear_model import SGDClassifier
from sklearn.preprocessing import StandardScaler
from google.colab import drive
drive.mount('/content/drive')
os.chdir('/content/drive/Shared drives/Кредитные риски')
from CreditRisks.metrics_library.Metrics import *
from CreditRisks.metrics_library.rosstat_utils import *
DIR_OUT='ReadyModels/SGD_final/'
###Output
_____no_output_____
###Markdown
Reading the data
###Code
df = pd.read_csv('Датасеты/revision_003/companies_ready_train.csv', dtype=RESULT_DTYPES)
y_train = df['target']
X_train = df.drop(columns=['inn', 'year_-1', 'year_0', 'target'])
df.head()
df1 = pd.read_csv('Датасеты/revision_003/companies_ready_test.csv', dtype=RESULT_DTYPES)
y_test = df1['target']
X_test = df1.drop(columns=['inn', 'year_-1', 'year_0', 'target'])
df1.head()
###Output
_____no_output_____
###Markdown
Data preprocessing Adding new features
###Code
methodCols = ['financialDebt', 'CreditLeverage', 'FinancialIndependence', 'DebtBurden', 'CoverageDebtWithAccumulatedProfit',
'ReturnAssetsNetProfit', 'ReturnAssetsOperatingProfit', 'OperatingMargin', 'NetProfitMargin',
'LiabilityCoverageOperatingProfit', 'OperatingProfitFinancialDebtRatio', 'FinancialDebtRevenueRatio',
'CurrentLiquidity', 'QuickLiquidity', 'InstantLiquidity', 'LevelOfOperatingAssets', 'turnoverDebtorDebt',
'turnoverReserves', 'turnoverCreditDebt', 'FinancialCycle', 'AssetTurnover']
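# Derived financial ratios: addFeatures() computes them and then replaces +/-inf (from zero
# denominators) with the column's finite max/min, and NaN with 0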
def addFeatures(X:pd.Series)->pd.Series:
X['financialDebt'] = X['year_0_15003'] + X['year_0_14003'] + X['year_0_12503']
financialDebt = X['financialDebt']
X['CreditLeverage'] = X['year_0_13003'] / X['year_0_15003']
X['FinancialIndependence'] = X['year_0_13003'] / X['year_0_16003']
X['DebtBurden'] = financialDebt / X['year_0_16003']
X['CoverageDebtWithAccumulatedProfit'] = X['year_0_13003'] / financialDebt
X['ReturnAssetsNetProfit'] = X['year_0_24003'] / X['year_0_16003']
X['ReturnAssetsOperatingProfit'] = X['year_0_22003'] / X['year_0_16003']
X['OperatingMargin'] = X['year_0_22003'] / pd.concat([X['year_0_21103'], financialDebt], axis=1).max(axis=1)
X['NetProfitMargin'] = X['year_0_24003'] / pd.concat([X['year_0_21103'], financialDebt], axis=1).max(axis=1) # impotant
X['LiabilityCoverageOperatingProfit'] = X['year_0_22003'] / (X['year_0_14003'] + X['year_0_15003'])
X['OperatingProfitFinancialDebtRatio'] = X['year_0_22003'] / financialDebt
X['FinancialDebtRevenueRatio'] = financialDebt / X['year_0_21103'] # impotant
X['CurrentLiquidity'] = X['year_0_12003'] / X['year_0_15003']
X['QuickLiquidity'] = (X['year_0_12003'] - X['year_0_12103']) / X['year_0_15003']
X['InstantLiquidity'] = X['year_0_12503'] / X['year_0_15003'] # impotant
X['LevelOfOperatingAssets'] = (X['year_0_12103'] + X['year_0_12303'] - X['year_0_15203']) / X['year_0_21103']
X['turnoverDebtorDebt'] = 365 * (X['year_0_12303'] + X['year_0_12304']) / (2 * X['year_0_21103'])
X['turnoverReserves'] = 365 * (X['year_0_12103'] + X['year_0_12104']) / (2 * X['year_0_21103'])
X['turnoverCreditDebt'] = 365 * (X['year_0_15203'] + X['year_0_15204']) / (2 * X['year_0_21103'])
X['FinancialCycle'] = X['turnoverDebtorDebt'] + X['turnoverReserves'] - X['turnoverCreditDebt']
X['AssetTurnover'] = X['year_0_21103'] / X['year_0_16003']
for col in methodCols:
m = X.loc[X[col] != np.inf, col].max()
X[col].replace(np.inf,m,inplace=True)
_m = X.loc[X[col] != -np.inf, col].min()
X[col].replace(-np.inf,_m,inplace=True)
X[col].replace(np.nan,0,inplace=True)
return X
X_train = addFeatures(X_train)
X_test = addFeatures(X_test)
###Output
_____no_output_____
###Markdown
Standardization, winsorization, and dropping of categorical features
###Code
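# Winsorization: clip every column to its [left, right] sample quantiles to limit the influence of outliers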
class Winsorizator():
def __init__(self, left, right):
assert 0 <= left <= 1 and 0 <= right <= 1
self.left = left
self.right = right
self.data = None
def fit(self, X:pd.DataFrame):
self.data = X.quantile([self.left, self.right], axis=0)
def transform(self, X:pd.DataFrame):
X.clip(self.data.iloc[0], self.data.iloc[1], axis='columns', inplace=True)
def fit_transform(self, X):
self.fit(X)
return self.transform(X)
categorical_all = ['region','year_-1_okopf', 'year_-1_okfs', 'year_-1_okved', 'year_-1_type',
'year_0_okopf', 'year_0_okfs', 'year_0_okved', 'year_0_type']
sc = StandardScaler()
winz = Winsorizator(0.3, 0.7)
X_train_1 = X_train.drop(columns=categorical_all).astype(np.float32)
winz.fit_transform(X_train_1)
X_train_1 = pd.DataFrame(sc.fit_transform(X_train_1), columns=X_train_1.columns, index=X_train_1.index)
X_test_1 = X_test.drop(columns=categorical_all).astype(np.float32)
winz.transform(X_test_1)
X_test_1 = pd.DataFrame(sc.transform(X_test_1), columns=X_test_1.columns, index=X_test_1.index)
with open(f'{DIR_OUT}StandardScaler.pkl','wb') as f:
pickle.dump(sc, f)
with open(f'{DIR_OUT}Winsorizator.pkl','wb') as f:
pickle.dump(winz, f)
###Output
_____no_output_____
###Markdown
Training
###Code
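# SGDClassifier with loss='log' fits a logistic regression by stochastic gradient descent, so predict_proba is available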
lr = SGDClassifier(random_state=42, loss='log', verbose=1, max_iter=1000).fit(X_train_1, y_train)
with open(f'{DIR_OUT}SGD_model.pkl','wb') as f:
pickle.dump(lr, f)
###Output
_____no_output_____
###Markdown
Results
###Code
predict = lr.predict_proba(X_test_1)[:,1]
import CreditRisks.metrics_library.profits as metric
plt_roc(y_test, predict)
plt_pr(y_test, predict)
metric.plt_profit_2_experimental(y_test, predict, percent_space=[0.10, 0.15, 0.20, 0.25, 0.35], title='Алгоритм SGD')
metric.plt_popularity(predict, title='Алгоритм SGD')
###Output
_____no_output_____
###Markdown
My version
###Code
# First, get the per-feature simple linear-regression coefficients (slope a and intercept b)
coeffs = []
n = X_train.shape[0]
for i in range(X_train.shape[1]):
p = n*(X_train[:, i]**2).sum() - X_train[:, i].sum()**2
a = ( n*(X_train[:, i]*y_train).sum() - X_train[:, i].sum()*y_train.sum() ) / p
b = ( y_train.sum()*(X_train[:, i]**2).sum() - X_train[:, i].sum()*(X_train[:, i]*y_train).sum() ) / p
coeffs.append([a, b])
def predict(X_test):
    # combine the per-feature slope/intercept pairs stored in coeffs and squash through a sigmoid
    ret = 1 / (1 + np.exp(sum(-coeffs[i][0]*X_test[:, i] - coeffs[i][1] for i in range(X_test.shape[1]))))
    ret[ret > .5] = 1
    ret[ret <= .5] = 0
    return ret
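# For comparison, a minimal full logistic-regression fit by batch gradient descent on the
# log-loss (a sketch under the same assumption as above, i.e. X and y are NumPy arrays;
# fit_logreg is introduced here for illustration only):
def fit_logreg(X, y, lr=0.1, iters=1000):
    w = np.zeros(X.shape[1])
    b = 0.0
    for _ in range(iters):
        p = 1 / (1 + np.exp(-(X @ w + b)))   # predicted probabilities
        grad_w = X.T @ (p - y) / len(y)      # gradient of the average log-loss w.r.t. w
        grad_b = (p - y).mean()              # gradient w.r.t. the intercept
        w -= lr * grad_w
        b -= lr * grad_b
    return w, b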
###Output
_____no_output_____ |
ModelTrain_Data.ipynb | ###Markdown
Importing relevant packages for data processing.
###Code
import numpy as np
from random import randint
from sklearn.utils import shuffle
from sklearn.preprocessing import MinMaxScaler
train_labels = []
train_samples = []
###Output
_____no_output_____
###Markdown
Data creation Creating our own example data set. *As motivation for this data, let's assume that an experimental drug was tested on individuals ranging from age 13 to 100 in a clinical trial. The trial had 2100 participants. Half of the participants were under 65 years old, and the other half was 65 years of age or older.* Goal Ultimately, we want to build a model to tell us whether or not a patient will experience side effects solely based on the patient's age.
###Code
for i in range(50):
# The ~5% of younger individuals who experienced side effects
random_younger = randint(13,64)
train_samples.append(random_younger)
train_labels.append(1)
# The ~5% of older individuals who did not experience side effects
random_older = randint(65,100)
train_samples.append(random_older)
train_labels.append(0)
for i in range(1000):
# The 95% of younger individuals who did not experience side effects.
random_younger = randint(13,64)
train_samples.append(random_younger)
train_labels.append(0)
# The 95% of older individuals who did experience side effects.
random_older = randint(65,100)
train_samples.append(random_older)
train_labels.append(1)
###Output
_____no_output_____
###Markdown
Data processing Now we convert both lists into numpy arrays.
###Code
train_labels = np.array(train_labels)
train_samples = np.array(train_samples)
train_labels, train_samples = shuffle(train_labels, train_samples)
###Output
_____no_output_____
###Markdown
**We'll use scikit-learn's MinMaxScaler class to scale all the data down from a scale ranging from 13 to 100 to be on a scale from 0 to 1**
###Code
scaler = MinMaxScaler(feature_range=(0,1))
scaled_train_samples = scaler.fit_transform(train_samples.reshape(-1,1))
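# MinMaxScaler rescales each value v to (v - min) / (max - min), with min/max taken from the training ages (roughly 13 and 100)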
x_val = scaled_train_samples[0:101]
y_val = train_labels[0:101]
valid_set = (x_val, y_val)
#for i in scaled_train_samples:
# print(i)
###Output
_____no_output_____ |
study_roadmaps/2_transfer_learning_roadmap/5_exploring_model_families/2_vgg/1.1) Intro to vgg network - mxnet backend.ipynb | ###Markdown
Goals Train a architectural heritage site classifier using vgg16 Understand what lies inside vgg network What is vgg Readings on vgg 1) Points from https://towardsdatascience.com/vgg-neural-networks-the-next-step-after-alexnet-3f91fa9ffe2c - VGG addresses another very important aspect of CNNs: depth - All of VGG’s hidden layers use ReLU - Unlike 11x11 kernels of alexnet, it uses smaller ones 1x1 and 3x3 kernels 2) Points from https://becominghuman.ai/what-is-the-vgg-neural-network-a590caa72643 - Intuitively, more layer is better. However, the authors found that VGG-16 is better than VGG-19 - Authors introduce multi-scale evaluationin the paper 3) Read more here - - https://arxiv.org/abs/1409.1556 - https://machinelearningmastery.com/use-pre-trained-vgg-model-classify-objects-photographs/ - https://www.cs.toronto.edu/~frossard/post/vgg16/ - https://d2l.ai/chapter_convolutional-modern/vgg.html Table of Contents [Install](0) [Load experiment with vgg base architecture](1) [Visualize vgg](2) [Train the classifier](3) [Run inference on trained classifier](4) Install Monk Using pip (Recommended) - colab (gpu) - All bakcends: `pip install -U monk-colab` - kaggle (gpu) - All backends: `pip install -U monk-kaggle` - cuda 10.2 - All backends: `pip install -U monk-cuda102` - Gluon bakcned: `pip install -U monk-gluon-cuda102` - Pytorch backend: `pip install -U monk-pytorch-cuda102` - Keras backend: `pip install -U monk-keras-cuda102` - cuda 10.1 - All backend: `pip install -U monk-cuda101` - Gluon bakcned: `pip install -U monk-gluon-cuda101` - Pytorch backend: `pip install -U monk-pytorch-cuda101` - Keras backend: `pip install -U monk-keras-cuda101` - cuda 10.0 - All backend: `pip install -U monk-cuda100` - Gluon bakcned: `pip install -U monk-gluon-cuda100` - Pytorch backend: `pip install -U monk-pytorch-cuda100` - Keras backend: `pip install -U monk-keras-cuda100` - cuda 9.2 - All backend: `pip install -U monk-cuda92` - Gluon bakcned: `pip install -U monk-gluon-cuda92` - Pytorch backend: `pip install -U monk-pytorch-cuda92` - Keras backend: `pip install -U monk-keras-cuda92` - cuda 9.0 - All backend: `pip install -U monk-cuda90` - Gluon bakcned: `pip install -U monk-gluon-cuda90` - Pytorch backend: `pip install -U monk-pytorch-cuda90` - Keras backend: `pip install -U monk-keras-cuda90` - cpu - All backend: `pip install -U monk-cpu` - Gluon bakcned: `pip install -U monk-gluon-cpu` - Pytorch backend: `pip install -U monk-pytorch-cpu` - Keras backend: `pip install -U monk-keras-cpu` Install Monk Manually (Not recommended) Step 1: Clone the library - git clone https://github.com/Tessellate-Imaging/monk_v1.git Step 2: Install requirements - Linux - Cuda 9.0 - `cd monk_v1/installation/Linux && pip install -r requirements_cu90.txt` - Cuda 9.2 - `cd monk_v1/installation/Linux && pip install -r requirements_cu92.txt` - Cuda 10.0 - `cd monk_v1/installation/Linux && pip install -r requirements_cu100.txt` - Cuda 10.1 - `cd monk_v1/installation/Linux && pip install -r requirements_cu101.txt` - Cuda 10.2 - `cd monk_v1/installation/Linux && pip install -r requirements_cu102.txt` - CPU (Non gpu system) - `cd monk_v1/installation/Linux && pip install -r requirements_cpu.txt` - Windows - Cuda 9.0 (Experimental support) - `cd monk_v1/installation/Windows && pip install -r requirements_cu90.txt` - Cuda 9.2 (Experimental support) - `cd monk_v1/installation/Windows && pip install -r requirements_cu92.txt` - Cuda 10.0 (Experimental support) - `cd monk_v1/installation/Windows && pip install -r requirements_cu100.txt` - Cuda 
10.1 (Experimental support) - `cd monk_v1/installation/Windows && pip install -r requirements_cu101.txt` - Cuda 10.2 (Experimental support) - `cd monk_v1/installation/Windows && pip install -r requirements_cu102.txt` - CPU (Non gpu system) - `cd monk_v1/installation/Windows && pip install -r requirements_cpu.txt` - Mac - CPU (Non gpu system) - `cd monk_v1/installation/Mac && pip install -r requirements_cpu.txt` - Misc - Colab (GPU) - `cd monk_v1/installation/Misc && pip install -r requirements_colab.txt` - Kaggle (GPU) - `cd monk_v1/installation/Misc && pip install -r requirements_kaggle.txt` Step 3: Add to system path (Required for every terminal or kernel run) - `import sys` - `sys.path.append("monk_v1/");` Dataset - Architectural Heritage site Classification - https://old.datahub.io/dataset/architectural-heritage-elements-image-dataset
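To make the "small 3x3 kernels stacked for depth" idea above concrete, here is a minimal, illustrative sketch of one VGG-style block written directly in Gluon (`vgg_block` is a name introduced here for illustration only; the Monk workflow below builds the real vgg16 for us):
###Code
# Illustrative sketch only (not used by the Monk workflow): a VGG-style block is a few
# 3x3 convolutions with ReLU followed by a 2x2 max-pool that halves the spatial size.
from mxnet.gluon import nn

def vgg_block(num_convs, channels):
    blk = nn.HybridSequential()
    for _ in range(num_convs):
        blk.add(nn.Conv2D(channels, kernel_size=3, padding=1, activation='relu'))
    blk.add(nn.MaxPool2D(pool_size=2, strides=2))
    return blk

# VGG16's feature extractor is five such blocks: (2, 64), (2, 128), (3, 256), (3, 512), (3, 512)
###Output
_____no_output_____
###Markdown
Download the dataset used in this notebook: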
###Code
! wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1MFu7cnxwDM7LWKgeLggMLvWIBW_-YCWC' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=1MFu7cnxwDM7LWKgeLggMLvWIBW_-YCWC" -O architectural_heritage.zip && rm -rf /tmp/cookies.txt
! unzip -qq architectural_heritage.zip
###Output
_____no_output_____
###Markdown
Imports
###Code
#Using mxnet-gluon backend
# When installed using pip
from monk.gluon_prototype import prototype
# When installed manually (Uncomment the following)
#import os
#import sys
#sys.path.append("monk_v1/");
#sys.path.append("monk_v1/monk/");
#from monk.gluon_prototype import prototype
###Output
_____no_output_____
###Markdown
Load experiment with vgg base architecture Creating and managing experiments - Provide project name - Provide experiment name - For a specific dataset create a single project - Inside each project multiple experiments can be created - Every experiment can have different hyper-parameters attached to it
###Code
gtf = prototype(verbose=1);
gtf.Prototype("Project", "vgg-intro");
###Output
Mxnet Version: 1.5.1
Experiment Details
Project: Project
Experiment: vgg-intro
Dir: /home/ubuntu/Desktop/monk_pip_test/monk_v1/study_roadmaps/2_transfer_learning_roadmap/5_exploring_model_families/2_vgg/workspace/Project/vgg-intro/
###Markdown
This creates files and directories as per the following structure workspace | |--------Project | | |-----vgg-intro | |-----experiment-state.json | |-----output | |------logs (All training logs and graphs saved here) | |------models (all trained models saved here) Set dataset and select the model Quick mode training - Using Default Function - dataset_path - model_name - freeze_base_network - num_epochs Sample Dataset folder structure architectural_heritage | |-----train |------dome | |------img1.jpg |------img2.jpg |------.... (and so on) |------altal | |------img1.jpg |------img2.jpg |------.... (and so on) |------.... (and so on) | | |-----val |------dome | |------img1.jpg |------img2.jpg |------.... (and so on) |------altal | |------img1.jpg |------img2.jpg |------.... (and so on) |------.... (and so on)
###Code
gtf.Default(dataset_path="architectural_heritage/train",
model_name="vgg16",
freeze_base_network=False,
num_epochs=5);
###Output
Dataset Details
Train path: architectural_heritage/train
Val path: None
CSV train path: None
CSV val path: None
Label Type: single
Dataset Params
Input Size: 224
Batch Size: 4
Data Shuffle: True
Processors: 8
Train-val split: 0.7
Pre-Composed Train Transforms
[{'RandomHorizontalFlip': {'p': 0.8}}, {'Normalize': {'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225]}}]
Pre-Composed Val Transforms
[{'RandomHorizontalFlip': {'p': 0.8}}, {'Normalize': {'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225]}}]
Dataset Numbers
Num train images: 7164
Num val images: 3071
Num classes: 10
Model Params
Model name: vgg16
Use Gpu: True
Use pretrained: True
Freeze base network: False
Model Details
Loading pretrained model
Downloading /home/ubuntu/.mxnet/models/vgg16-e660d456.zip from https://apache-mxnet.s3-accelerate.dualstack.amazonaws.com/gluon/models/vgg16-e660d456.zip...
###Markdown
From the summary above - Model Params Model name: vgg16 Num of potentially trainable layers: 16 Num of actual trainable layers: 16 Visualize vgg
###Code
gtf.Visualize_With_Netron(data_shape=(3, 224, 224), port=8082);
###Output
Using Netron To Visualize
Not compatible on kaggle
Compatible only for Jupyter Notebooks
Serving 'model-symbol.json' at http://localhost:8082
###Markdown
vgg block - 1 - Creating network and blocks using monk from scratch will be dealt in different roadmap series
###Code
from IPython.display import Image
Image(filename='imgs/vgg_block1_mxnet.png')
###Output
_____no_output_____
###Markdown
Properties - This block has 3 layers - conv -> relu vgg block - 2 - Creating network and blocks using monk from scratch will be dealt in different roadmap series
###Code
from IPython.display import Image
Image(filename='imgs/vgg_block2_mxnet.png')
###Output
_____no_output_____
###Markdown
Properties - This block has 3 layers - conv -> relu -> max_pool vgg fully connected chain
###Code
from IPython.display import Image
Image(filename='imgs/vgg_block_fc_mxnet.png')
###Output
_____no_output_____
###Markdown
vgg Network - Creating network and blocks using monk from scratch will be dealt in different roadmap series
###Code
from IPython.display import Image
Image(filename='imgs/vgg16_mxnet.png')
###Output
_____no_output_____
###Markdown
Properties - This network - has 9 type-1 blocks - has 5 type-2 blocks - post these blocks the type-3 (fc) block exists Train the classifier
###Code
#Start Training
gtf.Train();
#Read the training summary generated once you run the cell and training is completed
###Output
Training Start
Epoch 1/5
----------
###Markdown
Run inference on trained classifier
###Code
gtf = prototype(verbose=1);
gtf.Prototype("Project", "vgg-intro", eval_infer=True);
output = gtf.Infer(img_name = "architectural_heritage/test/test1.jpg");
from IPython.display import Image
Image(filename='architectural_heritage/test/test1.jpg')
output = gtf.Infer(img_name = "architectural_heritage/test/test2.jpg");
from IPython.display import Image
Image(filename='architectural_heritage/test/test2.jpg')
output = gtf.Infer(img_name = "architectural_heritage/test/test3.jpg");
from IPython.display import Image
Image(filename='architectural_heritage/test/test3.jpg')
###Output
Prediction
Image name: architectural_heritage/test/test3.jpg
Predicted class: dome(outer)
Predicted score: 0.999998152256012
###Markdown
Goals Train an architectural heritage site classifier using vgg16 Understand what lies inside the vgg network What is vgg Readings on vgg 1) Points from https://towardsdatascience.com/vgg-neural-networks-the-next-step-after-alexnet-3f91fa9ffe2c - VGG addresses another very important aspect of CNNs: depth - All of VGG’s hidden layers use ReLU - Unlike the 11x11 kernels of AlexNet, it uses smaller 1x1 and 3x3 kernels 2) Points from https://becominghuman.ai/what-is-the-vgg-neural-network-a590caa72643 - Intuitively, more layers should be better. However, the authors found that VGG-16 is better than VGG-19 - The authors introduce multi-scale evaluation in the paper 3) Read more here - - https://arxiv.org/abs/1409.1556 - https://machinelearningmastery.com/use-pre-trained-vgg-model-classify-objects-photographs/ - https://www.cs.toronto.edu/~frossard/post/vgg16/ - https://d2l.ai/chapter_convolutional-modern/vgg.html Table of Contents [0. Install](0) [1. Load experiment with vgg base architecture](1) [2. Visualize vgg](2) [3. Train the classifier](3) [4. Run inference on trained classifier](4) Install Monk - git clone https://github.com/Tessellate-Imaging/monk_v1.git - cd monk_v1/installation/Linux && pip install -r requirements_cu9.txt - (Select the requirements file as per OS and CUDA version)
###Code
!git clone https://github.com/Tessellate-Imaging/monk_v1.git
# If using Colab install using the commands below
!cd monk_v1/installation/Misc && pip install -r requirements_colab.txt
# If using Kaggle uncomment the following command
#!cd monk_v1/installation/Misc && pip install -r requirements_kaggle.txt
# Select the requirements file as per OS and CUDA version when using a local system or cloud
#!cd monk_v1/installation/Linux && pip install -r requirements_cu9.txt
###Output
_____no_output_____
###Markdown
Dataset - Architectural Heritage site Classification - https://old.datahub.io/dataset/architectural-heritage-elements-image-dataset
###Code
! wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1MFu7cnxwDM7LWKgeLggMLvWIBW_-YCWC' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=1MFu7cnxwDM7LWKgeLggMLvWIBW_-YCWC" -O architectural_heritage.zip && rm -rf /tmp/cookies.txt
! unzip -qq architectural_heritage.zip
###Output
_____no_output_____
###Markdown
Imports
###Code
# Monk
import os
import sys
sys.path.append("monk_v1/monk/");
#Using mxnet-gluon backend
from gluon_prototype import prototype
###Output
_____no_output_____
###Markdown
Load experiment with vgg base architecture Creating and managing experiments - Provide project name - Provide experiment name - For a specific dataset create a single project - Inside each project multiple experiments can be created - Every experiment can have different hyper-parameters attached to it
###Code
gtf = prototype(verbose=1);
gtf.Prototype("Project", "vgg-intro");
###Output
Mxnet Version: 1.5.0
Experiment Details
Project: Project
Experiment: vgg-intro
Dir: /home/abhi/Desktop/Work/tess_tool/gui/v0.3/finetune_models/Organization/development/v5.0_blocks/study_roadmap/change_post_num_layers/6_transfer_learning_model_params/1_exploring_model_families/2_vgg/workspace/Project/vgg-intro/
###Markdown
This creates files and directories as per the following structure workspace | |--------Project | | |-----vgg-intro | |-----experiment-state.json | |-----output | |------logs (All training logs and graphs saved here) | |------models (all trained models saved here) Set dataset and select the model Quick mode training - Using Default Function - dataset_path - model_name - freeze_base_network - num_epochs Sample Dataset folder structure architectural_heritage | |-----train |------dome | |------img1.jpg |------img2.jpg |------.... (and so on) |------altal | |------img1.jpg |------img2.jpg |------.... (and so on) |------.... (and so on) | | |-----val |------dome | |------img1.jpg |------img2.jpg |------.... (and so on) |------altal | |------img1.jpg |------img2.jpg |------.... (and so on) |------.... (and so on)
###Code
gtf.Default(dataset_path="architectural_heritage/train",
model_name="vgg16",
freeze_base_network=False,
num_epochs=5);
###Output
Dataset Details
Train path: architectural_heritage/train
Val path: None
CSV train path: None
CSV val path: None
Dataset Params
Input Size: 224
Batch Size: 4
Data Shuffle: True
Processors: 4
Train-val split: 0.7
Pre-Composed Train Transforms
[{'RandomHorizontalFlip': {'p': 0.8}}, {'Normalize': {'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225]}}]
Pre-Composed Val Transforms
[{'RandomHorizontalFlip': {'p': 0.8}}, {'Normalize': {'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225]}}]
Dataset Numbers
Num train images: 7164
Num val images: 3071
Num classes: 10
Model Params
Model name: vgg16
Use Gpu: True
Use pretrained: True
Freeze base network: False
Model Details
Loading pretrained model
Model Loaded on device
Model name: vgg16
Num of potentially trainable layers: 16
Num of actual trainable layers: 16
Optimizer
Name: sgd
Learning rate: 0.01
Params: {'lr': 0.01, 'momentum': 0, 'weight_decay': 0, 'momentum_dampening_rate': 0, 'clipnorm': 0.0, 'clipvalue': 0.0}
Learning rate scheduler
Name: steplr
Params: {'step_size': 1, 'gamma': 0.98, 'last_epoch': -1}
Loss
Name: softmaxcrossentropy
Params: {'weight': None, 'batch_axis': 0, 'axis_to_sum_over': -1, 'label_as_categories': True, 'label_smoothing': False}
Training params
Num Epochs: 5
Display params
Display progress: True
Display progress realtime: True
Save Training logs: True
Save Intermediate models: True
Intermediate model prefix: intermediate_model_
###Markdown
From the summary above - Model Params Model name: vgg16 Num of potentially trainable layers: 16 Num of actual trainable layers: 16 Visualize vgg
###Code
gtf.Visualize_With_Netron(data_shape=(3, 224, 224), port=8082);
###Output
Using Netron To Visualize
Not compatible on kaggle
Compatible only for Jupyter Notebooks
Stopping http://localhost:8082
Serving 'model-symbol.json' at http://localhost:8082
###Markdown
vgg block - 1 - Creating network and blocks using monk from scratch will be dealt in different roadmap series
###Code
from IPython.display import Image
Image(filename='imgs/vgg_block1_mxnet.png')
###Output
_____no_output_____
###Markdown
Properties - This block has 3 layers - conv -> relu vgg block - 2 - Creating network and blocks using monk from scratch will be dealt in different roadmap series
###Code
from IPython.display import Image
Image(filename='imgs/vgg_block2_mxnet.png')
###Output
_____no_output_____
###Markdown
Properties - This block has 3 layers - conv -> relu -> max_pool vgg fully connected chain
###Code
from IPython.display import Image
Image(filename='imgs/vgg_block_fc_mxnet.png')
###Output
_____no_output_____
###Markdown
vgg Network - Creating network and blocks using monk from scratch will be dealt in different roadmap series
###Code
from IPython.display import Image
Image(filename='imgs/vgg16_mxnet.png')
###Output
_____no_output_____
###Markdown
Properties - This network - has 9 type-1 blocks - has 5 type-2 blocks - post these blocks the type-3 (fc) block exists Train the classifier
###Code
#Start Training
gtf.Train();
#Read the training summary generated once you run the cell and training is completed
###Output
Training Start
Epoch 1/5
----------
###Markdown
Run inference on trained classifier
###Code
gtf = prototype(verbose=1);
gtf.Prototype("Project", "vgg-intro", eval_infer=True);
output = gtf.Infer(img_name = "architectural_heritage/test/test1.jpg");
from IPython.display import Image
Image(filename='architectural_heritage/test/test1.jpg')
output = gtf.Infer(img_name = "architectural_heritage/test/test2.jpg");
from IPython.display import Image
Image(filename='architectural_heritage/test/test2.jpg')
output = gtf.Infer(img_name = "architectural_heritage/test/test3.jpg");
from IPython.display import Image
Image(filename='architectural_heritage/test/test3.jpg')
###Output
Prediction
Image name: architectural_heritage/test/test3.jpg
Predicted class: dome(outer)
Predicted score: 15.606271743774414
###Markdown
Goals Train an architectural heritage site classifier using vgg16 Understand what lies inside the vgg network What is vgg Readings on vgg 1) Points from https://towardsdatascience.com/vgg-neural-networks-the-next-step-after-alexnet-3f91fa9ffe2c - VGG addresses another very important aspect of CNNs: depth - All of VGG’s hidden layers use ReLU - Unlike the 11x11 kernels of AlexNet, it uses smaller 1x1 and 3x3 kernels 2) Points from https://becominghuman.ai/what-is-the-vgg-neural-network-a590caa72643 - Intuitively, more layers should be better. However, the authors found that VGG-16 is better than VGG-19 - The authors introduce multi-scale evaluation in the paper 3) Read more here - - https://arxiv.org/abs/1409.1556 - https://machinelearningmastery.com/use-pre-trained-vgg-model-classify-objects-photographs/ - https://www.cs.toronto.edu/~frossard/post/vgg16/ - https://d2l.ai/chapter_convolutional-modern/vgg.html Table of Contents [0. Install](0) [1. Load experiment with vgg base architecture](1) [2. Visualize vgg](2) [3. Train the classifier](3) [4. Run inference on trained classifier](4) Install Monk - git clone https://github.com/Tessellate-Imaging/monk_v1.git - cd monk_v1/installation/Linux && pip install -r requirements_cu9.txt - (Select the requirements file as per OS and CUDA version)
###Code
!git clone https://github.com/Tessellate-Imaging/monk_v1.git
# Select the requirements file as per OS and CUDA version
!cd monk_v1/installation/Linux && pip install -r requirements_cu9.txt
###Output
_____no_output_____
###Markdown
Dataset - Architectural Heritage site Classification - https://old.datahub.io/dataset/architectural-heritage-elements-image-dataset
###Code
! wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1MFu7cnxwDM7LWKgeLggMLvWIBW_-YCWC' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=1MFu7cnxwDM7LWKgeLggMLvWIBW_-YCWC" -O architectural_heritage.zip && rm -rf /tmp/cookies.txt
! unzip -qq architectural_heritage.zip
###Output
_____no_output_____
###Markdown
Imports
###Code
# Monk
import os
import sys
sys.path.append("monk_v1/monk/");
#Using mxnet-gluon backend
from gluon_prototype import prototype
###Output
_____no_output_____
###Markdown
Load experiment with vgg base architecture Creating and managing experiments - Provide project name - Provide experiment name - For a specific dataset create a single project - Inside each project multiple experiments can be created - Every experiment can have different hyper-parameters attached to it
###Code
gtf = prototype(verbose=1);
gtf.Prototype("Project", "vgg-intro");
###Output
Mxnet Version: 1.5.0
Experiment Details
Project: Project
Experiment: vgg-intro
Dir: /home/abhi/Desktop/Work/tess_tool/gui/v0.3/finetune_models/Organization/development/v5.0_blocks/study_roadmap/change_post_num_layers/6_transfer_learning_model_params/1_exploring_model_families/2_vgg/workspace/Project/vgg-intro/
###Markdown
This creates files and directories as per the following structure workspace | |--------Project | | |-----vgg-intro | |-----experiment-state.json | |-----output | |------logs (All training logs and graphs saved here) | |------models (all trained models saved here) Set dataset and select the model Quick mode training - Using Default Function - dataset_path - model_name - freeze_base_network - num_epochs Sample Dataset folder structure architectural_heritage | |-----train |------dome | |------img1.jpg |------img2.jpg |------.... (and so on) |------altal | |------img1.jpg |------img2.jpg |------.... (and so on) |------.... (and so on) | | |-----val |------dome | |------img1.jpg |------img2.jpg |------.... (and so on) |------altal | |------img1.jpg |------img2.jpg |------.... (and so on) |------.... (and so on)
###Code
gtf.Default(dataset_path="architectural_heritage/train",
model_name="vgg16",
freeze_base_network=False,
num_epochs=5);
###Output
Dataset Details
Train path: architectural_heritage/train
Val path: None
CSV train path: None
CSV val path: None
Dataset Params
Input Size: 224
Batch Size: 4
Data Shuffle: True
Processors: 4
Train-val split: 0.7
Pre-Composed Train Transforms
[{'RandomHorizontalFlip': {'p': 0.8}}, {'Normalize': {'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225]}}]
Pre-Composed Val Transforms
[{'RandomHorizontalFlip': {'p': 0.8}}, {'Normalize': {'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225]}}]
Dataset Numbers
Num train images: 7164
Num val images: 3071
Num classes: 10
Model Params
Model name: vgg16
Use Gpu: True
Use pretrained: True
Freeze base network: False
Model Details
Loading pretrained model
Model Loaded on device
Model name: vgg16
Num of potentially trainable layers: 16
Num of actual trainable layers: 16
Optimizer
Name: sgd
Learning rate: 0.01
Params: {'lr': 0.01, 'momentum': 0, 'weight_decay': 0, 'momentum_dampening_rate': 0, 'clipnorm': 0.0, 'clipvalue': 0.0}
Learning rate scheduler
Name: steplr
Params: {'step_size': 1, 'gamma': 0.98, 'last_epoch': -1}
Loss
Name: softmaxcrossentropy
Params: {'weight': None, 'batch_axis': 0, 'axis_to_sum_over': -1, 'label_as_categories': True, 'label_smoothing': False}
Training params
Num Epochs: 5
Display params
Display progress: True
Display progress realtime: True
Save Training logs: True
Save Intermediate models: True
Intermediate model prefix: intermediate_model_
###Markdown
From the summary above - Model Params Model name: vgg16 Num of potentially trainable layers: 16 Num of actual trainable layers: 16 Visualize vgg
###Code
gtf.Visualize_With_Netron(data_shape=(3, 224, 224), port=8082);
###Output
Using Netron To Visualize
Not compatible on kaggle
Compatible only for Jupyter Notebooks
Stopping http://localhost:8082
Serving 'model-symbol.json' at http://localhost:8082
###Markdown
vgg block - 1 - Creating networks and blocks using Monk from scratch will be dealt with in a different roadmap series
###Code
from IPython.display import Image
Image(filename='imgs/vgg_block1_mxnet.png')
###Output
_____no_output_____
###Markdown
Properties - This block has 2 layers - conv -> relu vgg block - 2 - Creating networks and blocks using Monk from scratch will be dealt with in a different roadmap series
###Code
from IPython.display import Image
Image(filename='imgs/vgg_block2_mxnet.png')
###Output
_____no_output_____
###Markdown
Properties - This block has 3 layers - conv -> relu -> max_pool vgg fully connected chain
###Code
from IPython.display import Image
Image(filename='imgs/vgg_block_fc_mxnet.png')
###Output
_____no_output_____
###Markdown
vgg Network - Creating networks and blocks using Monk from scratch will be dealt with in a different roadmap series
###Code
from IPython.display import Image
Image(filename='imgs/vgg16_mxnet.png')
###Output
_____no_output_____
###Markdown
Properties - This network - has 9 type-1 blocks - has 5 type-2 blocks - after these blocks comes the type-3 (fully connected) block Train the classifier
###Code
#Start Training
gtf.Train();
#Read the training summary generated once you run the cell and training is completed
###Output
Training Start
Epoch 1/5
----------
###Markdown
Run inference on trained classifier
###Code
gtf = prototype(verbose=1);
gtf.Prototype("Project", "vgg-intro", eval_infer=True);
output = gtf.Infer(img_name = "architectural_heritage/test/test1.jpg");
from IPython.display import Image
Image(filename='architectural_heritage/test/test1.jpg')
output = gtf.Infer(img_name = "architectural_heritage/test/test2.jpg");
from IPython.display import Image
Image(filename='architectural_heritage/test/test2.jpg')
output = gtf.Infer(img_name = "architectural_heritage/test/test3.jpg");
from IPython.display import Image
Image(filename='architectural_heritage/test/test3.jpg')
###Output
Prediction
Image name: architectural_heritage/test/test3.jpg
Predicted class: dome(outer)
Predicted score: 15.606271743774414
|
day12_Convolutional_neural_networks/12_cnn_seminar_solved.ipynb | ###Markdown
12: Convolutional neural networks Inspiration for this notebook is taken from YSDA materials. __Colab is highly recommended to work with this notebook__ About CNNs: Convolutional layers extract features - quantitative representations of some attributes. After the extraction you can use these features for classification, for example. Convolution: Pooling: Deeper layers $\to$ more complex features. Task: Cats vs. Dogs Classification Let's try to build a small convolutional neural network capable of separating cat images from dog images. Datasets in pyTorch Generally, when you have to deal with image, text, audio or video data, you can use standard python packages that load data into a numpy array. Then you can convert this array into a torch.*Tensor. - For images, packages such as *Pillow* and *OpenCV* are useful - For audio, packages such as *scipy* and *librosa* - For text, either raw *Python* or *Cython* based loading, or *NLTK* and *SpaCy* are useful We are dealing with images, so let's have a look at image data loading in pyTorch for the [Dogs vs. Cats](https://www.kaggle.com/c/dogs-vs-cats) classification competition. The link for data downloading is in the cell below. The training set size is reduced for performance. If you have enough computational resources, use [this link](https://www.dropbox.com/s/h2vhfxb0j3eazu1/train.zip) for downloading instead of the reduced one.
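To make the shape bookkeeping concrete, here is a minimal sketch (not part of the original seminar) of a single convolution + pooling block; the 3x96x96 input matches the image size used later in this notebook.

import torch
import torch.nn as nn

block = nn.Sequential(
    nn.Conv2d(3, 32, kernel_size=3, padding=1),  # 32 feature maps, spatial size preserved by padding
    nn.ReLU(),
    nn.MaxPool2d(2),                             # halves height and width
)
x = torch.randn(1, 3, 96, 96)
print(block(x).shape)  # torch.Size([1, 32, 48, 48])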
###Code
# Uncomment only on google collab
# from google.colab import drive
# drive.mount('/content/drive')
# Training set with 11K images
! wget -nc https://www.dropbox.com/s/gqdo90vhli893e0/data.zip
! unzip -n -qq data.zip -d data
###Output
File ‘data.zip’ already there; not retrieving.
###Markdown
Now let's look at the way datasets are processed in PyTorch.
###Code
import copy  # used below to snapshot the best model during training
import os
import time
from matplotlib import pyplot as plt
import numpy as np
from tqdm import tqdm_notebook as tqdm
import torch
import torchvision
import torch.nn as nn
import torch.nn.functional as F
from torchvision import transforms
from torchsummary import summary
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
device
###Output
_____no_output_____
###Markdown
Images should be stored class-wise on disk: each image class has to be represented as a subfolder with the corresponding image data. `ImageFolder` takes the path to the 'root' directory of such a structure, e.g. DATA_PATH: - DATA_PATH/dog/xxx.png - DATA_PATH/dog/xxy.png - DATA_PATH/dog/xxz.png - DATA_PATH/cat/123.png - DATA_PATH/cat/nsdf3.png - DATA_PATH/cat/asd932_.png Dataset images are of different sizes. The batch generator expects a batch of tensors of the same dimensions, thus we need to rescale images in the dataset during data loading. Let's look at the image size distribution (a small sketch follows below).
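A possible way to peek at the raw image sizes (an illustrative sketch, not the original notebook code; it assumes the `data/train_11k` folder downloaded above and the Pillow package):

import os
from PIL import Image

sizes = []
train_dir = 'data/train_11k'  # same folder that is used as the training set below
for cls in os.listdir(train_dir):
    for fname in os.listdir(os.path.join(train_dir, cls))[:200]:  # sample a few files per class
        with Image.open(os.path.join(train_dir, cls, fname)) as img:
            sizes.append(img.size)  # (width, height)
print(sizes[:5], len(sizes))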
###Code
### Let's have a cell with global hyperparameters for the CNNs in this notebook
# Path to a directory with image dataset and subfolders for training, validation and final testing
DATA_PATH = 'data' # PATH TO THE DATASET
# Number of threads for data loader
NUM_WORKERS = 4
# Image size: even though image sizes are bigger than 96, we use this to speed up training
SIZE_H = SIZE_W = 96
N_CHANNELS = 3
# Number of classes in the dataset
NUM_CLASSES = 2
# Epochs: number of passes over the training data, we use it this small to reduce training babysitting time
EPOCH_NUM = 30
# Batch size: for batch gradient descent optimization, usually selected as 2**K elements
BATCH_SIZE = 128
# Images mean and std channelwise
image_mean = [0.485, 0.456, 0.406]
image_std = [0.229, 0.224, 0.225]
# Last layer (embeddings) size for CNN models
EMBEDDING_SIZE = 256
###Output
_____no_output_____
###Markdown
Let's define a transformer to be used as image preprocessing step prior to creating pyTorch image dataset
###Code
transformer = transforms.Compose([
transforms.Resize((SIZE_H, SIZE_W)), # scaling images to fixed size
transforms.ToTensor(), # converting to tensors
transforms.Normalize(image_mean, image_std) # normalize image data per-channel
])
###Output
_____no_output_____
###Markdown
Create an ImageFolder instance to be used during training, validation and testing phases.
###Code
# Define the network architecture
from torch import nn, optim
import torch.nn.functional as F
model = nn.Sequential(nn.Linear(784, 256),
nn.ReLU(),
nn.Dropout(0.2),
nn.Linear(256, 128),
nn.ReLU(),
nn.Dropout(0.2),
nn.Linear(128, 64),
nn.ReLU(),
nn.Dropout(0.2),
nn.Linear(64, 10),
nn.LogSoftmax(dim = 1)
)
train_dataset = torchvision.datasets.ImageFolder(os.path.join(DATA_PATH, 'train_11k'), transform=transformer)
val_dataset = torchvision.datasets.ImageFolder(os.path.join(DATA_PATH, 'val'), transform=transformer)
test_dataset = torchvision.datasets.ImageFolder(os.path.join(DATA_PATH, 'test_labeled'), transform=transformer)
###Output
_____no_output_____
###Markdown
Save sample num for further use
###Code
n_train, n_val, n_test = len(train_dataset), len(val_dataset), len(test_dataset)
###Output
_____no_output_____
###Markdown
Now let's create a DataLoader instance, which uses ImageFolder instance to generate batches of data.
###Code
train_loader = torch.utils.data.DataLoader(
train_dataset,
batch_size=BATCH_SIZE,
shuffle=True,
num_workers=NUM_WORKERS,
)
val_loader = torch.utils.data.DataLoader(
val_dataset,
batch_size=BATCH_SIZE,
num_workers=NUM_WORKERS,
)
test_loader = torch.utils.data.DataLoader(
test_dataset,
batch_size=BATCH_SIZE,
num_workers=NUM_WORKERS,
)
###Output
_____no_output_____
###Markdown
Let's create a helper function to vizualize images from our data loaders (and also make sure data was properly loaded).
###Code
def plot_from_loader(loader):
data_batch, label_batch = next(iter(loader))
grid_size = (3, 3)
f, axarr = plt.subplots(*grid_size)
f.set_size_inches(15,10)
class_names = loader.dataset.classes
for i in range(grid_size[0] * grid_size[1]):
        # read images from batch to numpy.ndarray and change axes order [C, H, W] -> [H, W, C]
batch_image_ndarray = np.transpose(data_batch[i].numpy(), [1, 2, 0])
# inverse normalization for image data values back to [0,1] and clipping the values for correct pyplot.imshow()
src = np.clip(image_std * batch_image_ndarray + image_mean, 0, 1)
# display batch samples with labels
sample_title = 'Label = %d (%s)' % (label_batch[i], class_names[label_batch[i]])
axarr[i // grid_size[0], i % grid_size[0]].imshow(src)
axarr[i // grid_size[0], i % grid_size[0]].set_title(sample_title)
plot_from_loader(train_loader)
###Output
_____no_output_____
###Markdown
Building the training pipeline The training function is the same as the one we used in the previous seminar
###Code
def train_model(model, train_loader, val_loader, loss_fn, opt, n_epochs: int):
train_loss = []
val_loss = []
val_accuracy = []
top_val_accuracy = -1
best_model = None
for epoch in range(n_epochs):
ep_train_loss = []
ep_val_loss = []
ep_val_accuracy = []
start_time = time.time()
model.train(True) # enable dropout / batch_norm training behavior
for X_batch, y_batch in train_loader:
# move data to target device
X_batch, y_batch = X_batch.to(device), y_batch.to(device)
# train on batch: compute loss, calc grads, perform optimizer step and zero the grads
predicts = model(X_batch)
loss = loss_fn(predicts, y_batch)
loss.backward()
opt.step()
opt.zero_grad()
ep_train_loss.append(loss.item())
model.train(False) # disable dropout / use averages for batch_norm
with torch.no_grad():
for X_batch, y_batch in val_loader:
# move data to target device
X_batch, y_batch = X_batch.to(device), y_batch.to(device)
# compute predictions
preds = model(X_batch)
ep_val_loss.append(loss_fn(preds, y_batch).item())
y_pred = preds.max(1)[1].data
ep_val_accuracy.append((y_pred == y_batch).to(torch.float32).mean().item())
# print the results for this epoch:
print(f'Epoch {epoch + 1} of {n_epochs} took {time.time() - start_time:.3f}s')
train_loss.append(np.mean(ep_train_loss))
val_loss.append(np.mean(ep_val_loss))
val_accuracy.append(np.mean(ep_val_accuracy))
print(f"\t training loss: {train_loss[-1]:.6f}")
print(f"\tvalidation loss: {val_loss[-1]:.6f}")
print(f"\tvalidation accuracy: {val_accuracy[-1]:.3f}")
        if val_accuracy[-1] > top_val_accuracy:
            top_val_accuracy = val_accuracy[-1]
            best_model = copy.deepcopy(model)  # keep a snapshot of the best model so far
return train_loss, val_loss, val_accuracy, best_model
@torch.no_grad()
def test_model(model, test_loader, subset='test'):
model.train(False) # disable dropout / use averages for batch_norm
test_batch_acc = []
for X_batch, y_batch in test_loader:
logits = model(X_batch.to(device))
y_pred = logits.max(1)[1].data
test_batch_acc.append(np.mean( (y_batch.cpu() == y_pred.cpu()).numpy() ))
test_accuracy = np.mean(test_batch_acc)
print("Results:")
print(f" {subset} accuracy: {test_accuracy * 100:.2f} %")
if test_accuracy > 0.9:
print(" Amazing!")
elif test_accuracy > 0.7:
print(" Good!")
else:
print(" We need more magic! Follow instructons below")
return test_accuracy
###Output
_____no_output_____
###Markdown
Task 0: Multi-layer fully-connected network Look at the NN structure proposed below. We will use this model as a baseline for the classification task. As you already know, fully-connected networks are not translation invariant and perform worse on image data, so the resulting accuracy will be lower than for convolutional neural networks.
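For some intuition about the cost of this approach (an illustrative aside, not part of the original seminar): flattening a 3x96x96 image and feeding it into a 256-unit dense layer already requires millions of weights, while a single 3x3 convolution with 32 filters needs fewer than a thousand. The numbers below match the layer summaries printed later in this notebook.

flat_features = 3 * 96 * 96                 # 27648 inputs after nn.Flatten
dense_params = flat_features * 256 + 256    # 7078144, the Linear-2 row in the summary below
conv_params = 3 * 3 * 3 * 32 + 32           # 896, a 3x3 conv with 32 filters (first layer of the later CNN)
print(dense_params, conv_params)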
###Code
device
model = nn.Sequential(
nn.Flatten(),
nn.Linear(N_CHANNELS * SIZE_H * SIZE_W, 256),
nn.BatchNorm1d(256),
nn.ReLU(),
nn.Dropout(0.1),
nn.Linear(256, 128),
nn.ReLU(),
nn.Dropout(0.05),
nn.Linear(128, NUM_CLASSES),
nn.Softmax(dim=1)
)
model = model.to(device)
###Output
_____no_output_____
###Markdown
Print model summary for sanity check:
###Code
summary(model, (N_CHANNELS, SIZE_H, SIZE_W), device=str(device))
###Output
----------------------------------------------------------------
Layer (type) Output Shape Param #
================================================================
Flatten-1 [-1, 27648] 0
Linear-2 [-1, 256] 7,078,144
BatchNorm1d-3 [-1, 256] 512
ReLU-4 [-1, 256] 0
Dropout-5 [-1, 256] 0
Linear-6 [-1, 128] 32,896
ReLU-7 [-1, 128] 0
Dropout-8 [-1, 128] 0
Linear-9 [-1, 2] 258
Softmax-10 [-1, 2] 0
================================================================
Total params: 7,111,810
Trainable params: 7,111,810
Non-trainable params: 0
----------------------------------------------------------------
Input size (MB): 0.11
Forward/backward pass size (MB): 0.22
Params size (MB): 27.13
Estimated Total Size (MB): 27.46
----------------------------------------------------------------
###Markdown
Training on minibatches* We got 11k images (22k for full train set), that's way too many for a full-batch SGD. Let's train on minibatches instead* For visualization purposes we propose to plot train/val loss graphs and validation score distribution for CNN predictions over images of cats (class_0) and dogs (class_1).
###Code
opt = torch.optim.Adam(model.parameters(), lr=3e-4)
loss_fn = nn.CrossEntropyLoss()
train_loss, val_loss, val_accuracy, best_model = train_model(model, train_loader, val_loader, loss_fn, opt, 5)
def plot_train_process(train_loss, val_loss, val_accuracy):
fig, axes = plt.subplots(1, 2, figsize=(15, 5))
axes[0].set_title('Loss')
axes[0].plot(train_loss, label='train')
axes[0].plot(val_loss, label='validation')
axes[0].legend()
axes[1].set_title('Validation accuracy')
axes[1].plot(val_accuracy)
plot_train_process(train_loss, val_loss, val_accuracy)
best_model = model
model
###Output
_____no_output_____
###Markdown
Evaluate the best model using test set
###Code
val_stats = test_model(best_model, val_loader, 'validation')
test_stats = test_model(best_model, test_loader, 'test')
###Output
Results:
validation accuracy: 63.64 %
We need more magic! Follow instructons below
Results:
test accuracy: 65.52 %
We need more magic! Follow instructons below
###Markdown
Task I: small convolution net First step**conv-pool-conv-pool-dense-dense-everybody!**Let's create a mini-convolutional network with roughly such architecture:* Input layer* 4 classic convolutional blocks `convolution->relu->pool`: * 3x3 convolution with 32 -> 32 -> 64 -> 128 filters and _ReLU_ activation * 2x2 pooling (or set previous convolution stride to 3) * Flatten* 30% Dropout * Dense layer with 128 neurons and _ReLU_ activation* 30% dropout* Output dense layer.__Convolutional layers__ in torch are just like all other layers, but with a specific set of parameters:__`...`____`model.add_module('conv1', nn.Conv2d(in_channels=3, out_channels=128, kernel_size=3)) convolution`____`model.add_module('pool1', nn.MaxPool2d(2)) max pooling 2x2`____`...`__Once you're done (and compute_loss no longer raises errors), train it with __Adam__ optimizer with learning_rate=3e-4 (Karpathy Constant)If everything is right, you should get at least __75%__ validation accuracy.__HACK_OF_THE_DAY__ :the number of channels must be in the order of the number of class_labels__HACK_OF_THE_DAY_2__ : you may set stride=2 for Conv2d layers to increase learning speed, but keep in mind tensor shapes__HACK_OF_THE_DAY_3__ : it might be useful to use 'VGG-like' structure as a baseline for this task: * every CNN layer with 2x2 maxpooling / stride=2 should be followed by increasing the number of output channels x2 * before the fully-connected layer the tensor H and W should be relatively small (less than 10) * in other words, the less H and W of tensor are, the more should you increase C in order to keep more information
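When experimenting with strides and paddings as suggested above, the standard output-size formula is handy. A small helper (an illustrative sketch, not part of the original task):

def conv_out_size(size, kernel=3, stride=1, padding=0):
    # floor((size + 2 * padding - kernel) / stride) + 1
    return (size + 2 * padding - kernel) // stride + 1

print(conv_out_size(96, kernel=3, stride=2, padding=1))  # 48: a stride-2 3x3 conv halves a 96-pixel side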
###Code
model_cnn = nn.Sequential(
nn.Conv2d(3, 32, 3, stride=2, padding=1),
nn.ReLU(),
nn.Conv2d(32, 64, 3, stride=1, padding=1),
nn.ReLU(),
nn.Conv2d(64, 64, 3, stride=2, padding=1),
nn.ReLU(),
nn.Conv2d(64, 128, 3, stride=1, padding=1),
nn.ReLU(),
nn.Conv2d(128, 128, 3, stride=2, padding=1),
nn.ReLU(),
nn.AdaptiveAvgPool2d(1),
nn.Dropout(0.3),
nn.Flatten(),
nn.Linear(128, EMBEDDING_SIZE),
nn.Dropout(0.3),
nn.Linear(EMBEDDING_SIZE, NUM_CLASSES, bias=False),
nn.Softmax(dim=1),
)
model_cnn.to(device)
###Output
_____no_output_____
###Markdown
__Hint:__ If you don't want to compute shapes by hand, just plug in any shape (e.g. 1 unit) and run compute_loss. You will see something like this:__`RuntimeError: size mismatch, m1: [5 x 1960], m2: [1 x 64] at /some/long/path/to/torch/operation`__See the __1960__ there? That's your actual input shape. Let's see the basic structure of our model and at the same time perform a sanity check for tensor dimensions.
###Code
summary(model_cnn, (3, SIZE_H, SIZE_W), device='cuda')
###Output
----------------------------------------------------------------
Layer (type) Output Shape Param #
================================================================
Conv2d-1 [-1, 32, 48, 48] 896
ReLU-2 [-1, 32, 48, 48] 0
Conv2d-3 [-1, 64, 48, 48] 18,496
ReLU-4 [-1, 64, 48, 48] 0
Conv2d-5 [-1, 64, 24, 24] 36,928
ReLU-6 [-1, 64, 24, 24] 0
Conv2d-7 [-1, 128, 24, 24] 73,856
ReLU-8 [-1, 128, 24, 24] 0
Conv2d-9 [-1, 128, 12, 12] 147,584
ReLU-10 [-1, 128, 12, 12] 0
AdaptiveAvgPool2d-11 [-1, 128, 1, 1] 0
Dropout-12 [-1, 128, 1, 1] 0
Flatten-13 [-1, 128] 0
Linear-14 [-1, 256] 33,024
Dropout-15 [-1, 256] 0
Linear-16 [-1, 2] 512
Softmax-17 [-1, 2] 0
================================================================
Total params: 311,296
Trainable params: 311,296
Non-trainable params: 0
----------------------------------------------------------------
Input size (MB): 0.11
Forward/backward pass size (MB): 5.35
Params size (MB): 1.19
Estimated Total Size (MB): 6.64
----------------------------------------------------------------
###Markdown
TrainingWe may use the same training pipeline, that we defined above, as it does not depend on model structure.
###Code
model_cnn = model_cnn.to(device)
opt = torch.optim.Adam(model_cnn.parameters(), lr=1e-3)
opt.zero_grad()
ckpt_name_cnn='model_cnn.ckpt'
train_loss, val_loss, val_accuracy, best_model = train_model(\
model_cnn,\
train_loader,\
val_loader, \
loss_fn, \
opt, \
10)
###Output
Epoch 1 of 10 took 8.736s
training loss: 0.663875
validation loss: 0.664546
validation accuracy: 0.581
Epoch 2 of 10 took 8.630s
training loss: 0.630285
validation loss: 0.647163
validation accuracy: 0.624
Epoch 3 of 10 took 8.533s
training loss: 0.620599
validation loss: 0.624910
validation accuracy: 0.659
Epoch 4 of 10 took 8.588s
training loss: 0.589383
validation loss: 0.611298
validation accuracy: 0.674
Epoch 5 of 10 took 8.491s
training loss: 0.583851
validation loss: 0.620155
validation accuracy: 0.668
Epoch 6 of 10 took 8.655s
training loss: 0.572094
validation loss: 0.656461
validation accuracy: 0.627
Epoch 7 of 10 took 8.454s
training loss: 0.559292
validation loss: 0.590685
validation accuracy: 0.710
Epoch 8 of 10 took 8.563s
training loss: 0.555741
validation loss: 0.606333
validation accuracy: 0.691
Epoch 9 of 10 took 8.550s
training loss: 0.540066
validation loss: 0.587310
validation accuracy: 0.713
Epoch 10 of 10 took 8.461s
training loss: 0.544028
validation loss: 0.596310
validation accuracy: 0.701
###Markdown
**A kind reminder again:** don't wait for too many epochs. You can interrupt training after 5-20 epochs once validation accuracy stops going up. Evaluation
###Code
val_stats = test_model(best_model, val_loader, 'validation')
test_stats = test_model(best_model, test_loader, 'test')
plot_train_process(train_loss, val_loss, val_accuracy)
###Output
_____no_output_____
###Markdown
Task 2: Fine-tuning In practice it is easier to use a pre-trained NN. We can see that our current model performs quite well even after a small number of training epochs. But for more complicated image classification or other computer vision tasks, it may be difficult to train a CNN model from scratch. State-of-the-art models consist of a huge number of layers (100-200 convolutional blocks) and require powerful hardware to converge. Thankfully, there are lots of pre-trained models available that can be reused for your own task by only slightly changing some of the final layers to fit your data. This is called fine-tuning. Let's try to load a pre-trained [ResNet-18](https://arxiv.org/abs/1512.03385) model from the torchvision model zoo and fine-tune its final layers. ResNet (Shortcut + Batch Normalization)
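The "Shortcut" mentioned above refers to residual connections: each ResNet block computes y = F(x) + x, so gradients can also flow through the identity path. A minimal sketch of the idea (illustrative only, not the torchvision implementation):

import torch
import torch.nn as nn

class TinyResidualBlock(nn.Module):
    def __init__(self, channels):
        super().__init__()
        self.body = nn.Sequential(
            nn.Conv2d(channels, channels, 3, padding=1),
            nn.BatchNorm2d(channels),
            nn.ReLU(),
            nn.Conv2d(channels, channels, 3, padding=1),
            nn.BatchNorm2d(channels),
        )

    def forward(self, x):
        return torch.relu(self.body(x) + x)  # identity shortcut

print(TinyResidualBlock(64)(torch.randn(1, 64, 24, 24)).shape)  # torch.Size([1, 64, 24, 24])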
###Code
# Load pre-trained model
model_resnet18 = torchvision.models.resnet18(pretrained=True)
# Disable gradient updates for all the layers except the final layer
for p in model_resnet18.parameters():
p.requires_grad = False
# Parameters of newly constructed modules have requires_grad=True by default
num_ftrs = model_resnet18.fc.in_features
model_resnet18.fc = nn.Linear(num_ftrs, NUM_CLASSES, bias=False)
# Use available device for calculations
model_resnet18 = model_resnet18.to(device)
summary(model_resnet18, (3, SIZE_H, SIZE_W))
###Output
----------------------------------------------------------------
Layer (type) Output Shape Param #
================================================================
Conv2d-1 [-1, 64, 48, 48] 9,408
BatchNorm2d-2 [-1, 64, 48, 48] 128
ReLU-3 [-1, 64, 48, 48] 0
MaxPool2d-4 [-1, 64, 24, 24] 0
Conv2d-5 [-1, 64, 24, 24] 36,864
BatchNorm2d-6 [-1, 64, 24, 24] 128
ReLU-7 [-1, 64, 24, 24] 0
Conv2d-8 [-1, 64, 24, 24] 36,864
BatchNorm2d-9 [-1, 64, 24, 24] 128
ReLU-10 [-1, 64, 24, 24] 0
BasicBlock-11 [-1, 64, 24, 24] 0
Conv2d-12 [-1, 64, 24, 24] 36,864
BatchNorm2d-13 [-1, 64, 24, 24] 128
ReLU-14 [-1, 64, 24, 24] 0
Conv2d-15 [-1, 64, 24, 24] 36,864
BatchNorm2d-16 [-1, 64, 24, 24] 128
ReLU-17 [-1, 64, 24, 24] 0
BasicBlock-18 [-1, 64, 24, 24] 0
Conv2d-19 [-1, 128, 12, 12] 73,728
BatchNorm2d-20 [-1, 128, 12, 12] 256
ReLU-21 [-1, 128, 12, 12] 0
Conv2d-22 [-1, 128, 12, 12] 147,456
BatchNorm2d-23 [-1, 128, 12, 12] 256
Conv2d-24 [-1, 128, 12, 12] 8,192
BatchNorm2d-25 [-1, 128, 12, 12] 256
ReLU-26 [-1, 128, 12, 12] 0
BasicBlock-27 [-1, 128, 12, 12] 0
Conv2d-28 [-1, 128, 12, 12] 147,456
BatchNorm2d-29 [-1, 128, 12, 12] 256
ReLU-30 [-1, 128, 12, 12] 0
Conv2d-31 [-1, 128, 12, 12] 147,456
BatchNorm2d-32 [-1, 128, 12, 12] 256
ReLU-33 [-1, 128, 12, 12] 0
BasicBlock-34 [-1, 128, 12, 12] 0
Conv2d-35 [-1, 256, 6, 6] 294,912
BatchNorm2d-36 [-1, 256, 6, 6] 512
ReLU-37 [-1, 256, 6, 6] 0
Conv2d-38 [-1, 256, 6, 6] 589,824
BatchNorm2d-39 [-1, 256, 6, 6] 512
Conv2d-40 [-1, 256, 6, 6] 32,768
BatchNorm2d-41 [-1, 256, 6, 6] 512
ReLU-42 [-1, 256, 6, 6] 0
BasicBlock-43 [-1, 256, 6, 6] 0
Conv2d-44 [-1, 256, 6, 6] 589,824
BatchNorm2d-45 [-1, 256, 6, 6] 512
ReLU-46 [-1, 256, 6, 6] 0
Conv2d-47 [-1, 256, 6, 6] 589,824
BatchNorm2d-48 [-1, 256, 6, 6] 512
ReLU-49 [-1, 256, 6, 6] 0
BasicBlock-50 [-1, 256, 6, 6] 0
Conv2d-51 [-1, 512, 3, 3] 1,179,648
BatchNorm2d-52 [-1, 512, 3, 3] 1,024
ReLU-53 [-1, 512, 3, 3] 0
Conv2d-54 [-1, 512, 3, 3] 2,359,296
BatchNorm2d-55 [-1, 512, 3, 3] 1,024
Conv2d-56 [-1, 512, 3, 3] 131,072
BatchNorm2d-57 [-1, 512, 3, 3] 1,024
ReLU-58 [-1, 512, 3, 3] 0
BasicBlock-59 [-1, 512, 3, 3] 0
Conv2d-60 [-1, 512, 3, 3] 2,359,296
BatchNorm2d-61 [-1, 512, 3, 3] 1,024
ReLU-62 [-1, 512, 3, 3] 0
Conv2d-63 [-1, 512, 3, 3] 2,359,296
BatchNorm2d-64 [-1, 512, 3, 3] 1,024
ReLU-65 [-1, 512, 3, 3] 0
BasicBlock-66 [-1, 512, 3, 3] 0
AdaptiveAvgPool2d-67 [-1, 512, 1, 1] 0
Linear-68 [-1, 2] 1,024
================================================================
Total params: 11,177,536
Trainable params: 1,024
Non-trainable params: 11,176,512
----------------------------------------------------------------
Input size (MB): 0.11
Forward/backward pass size (MB): 11.54
Params size (MB): 42.64
Estimated Total Size (MB): 54.28
----------------------------------------------------------------
###Markdown
Training (only for final layer)
###Code
# Observe that only parameters of final layer are being optimized as opposed to before
opt_resnet = torch.optim.Adam(model_resnet18.fc.parameters(), lr=1e-3)
ckpt_name_resnet18='model_resnet_18_finetune.ckpt'
train_loss, val_loss, val_accuracy, best_model_resnet18 = train_model(\
model_resnet18,\
train_loader,\
val_loader, \
loss_fn, \
opt_resnet, \
10)
###Output
Epoch 1 of 10 took 8.535s
training loss: 0.599295
validation loss: 0.550771
validation accuracy: 0.710
Epoch 2 of 10 took 8.579s
training loss: 0.512135
validation loss: 0.530608
validation accuracy: 0.727
Epoch 3 of 10 took 8.509s
training loss: 0.494768
validation loss: 0.516600
validation accuracy: 0.737
Epoch 4 of 10 took 8.552s
training loss: 0.475611
validation loss: 0.497050
validation accuracy: 0.759
Epoch 5 of 10 took 9.114s
training loss: 0.461734
validation loss: 0.482886
validation accuracy: 0.762
Epoch 6 of 10 took 8.844s
training loss: 0.445673
validation loss: 0.462270
validation accuracy: 0.779
Epoch 7 of 10 took 8.735s
training loss: 0.431096
validation loss: 0.447927
validation accuracy: 0.789
Epoch 8 of 10 took 8.610s
training loss: 0.414886
validation loss: 0.435849
validation accuracy: 0.797
Epoch 9 of 10 took 8.547s
training loss: 0.403419
validation loss: 0.424735
validation accuracy: 0.799
Epoch 10 of 10 took 8.612s
training loss: 0.394624
validation loss: 0.418891
validation accuracy: 0.805
###Markdown
Evaluation
###Code
val_stats = test_model(best_model_resnet18, val_loader, 'validation')
test_stats = test_model(best_model_resnet18, test_loader, 'test')
plot_train_process(train_loss, val_loss, val_accuracy)
###Output
_____no_output_____
###Markdown
Use your own image
###Code
from skimage.io import imread
from skimage.transform import resize
src_1_fp = r"img/example_1.png"
src_2_fp = r"img/example_2.png"
src_1 = imread(src_1_fp)
src_2 = imread(src_2_fp)
resized_1 = resize(src_1, (SIZE_H, SIZE_W), mode='reflect')
resized_2 = resize(src_2, (SIZE_H, SIZE_W), mode='reflect')
# convert to torch.Tensor
# skimage.transform.resize already returns float images scaled to [0, 1], so we only normalize here
tensor_1 = torch.Tensor(np.transpose((resized_1 - image_mean) / image_std, [2,0,1])[np.newaxis,:,:,:]).to(device)
tensor_2 = torch.Tensor(np.transpose((resized_2 - image_mean) / image_std, [2,0,1])[np.newaxis,:,:,:]).to(device)
# 'cat' scores
score_1 = F.softmax(best_model_resnet18(tensor_1), 1)[0][0].detach().cpu().numpy()
score_2 = F.softmax(best_model_resnet18(tensor_2), 1)[0][0].detach().cpu().numpy()
get_label = lambda x: ('cat' if x > 0.5 else 'dog') + ': {:.4f}'.format(x)
plt.figure(figsize=(10,5))
plt.subplot(121)
plt.imshow(src_1)
plt.title(get_label(score_1))
plt.subplot(122)
plt.imshow(src_2)
plt.title(get_label(score_2))
plt.show()
###Output
_____no_output_____
###Markdown
Task 3: adding normalization and different model initialization Let's get back to hard work* Improve the task 1 CNN architecture with the following: * Add batch norm (with default params) between convolution and ReLU * nn.BatchNorm*d (1d for dense, 2d for conv) * usually better to put them after linear/conv but before nonlinearity* Re-train the network with the same optimizer, it should get at least __80%__ validation accuracy at peak.* Use the following model class to simplify the inferenceTo know more about **batch_norm** and **data covariate shift**https://towardsdatascience.com/batch-normalization-in-neural-networks-1ac91516821chttps://www.youtube.com/watch?v=nUUqwaxLnWs
###Code
# Custom model class
# The filled-in blocks below are one possible solution (an assumption, not the only valid answer to the task)
def conv_block_3x3(in_channels, out_channels, stride=1):
    return nn.Sequential(
        # CONV 3x3 -> BN -> ReLU
        nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1),
        nn.BatchNorm2d(out_channels),
        nn.ReLU()
    )


class MyModel(torch.nn.Module):
    def __init__(self, in_feature):
        super(MyModel, self).__init__()
        self.model = nn.Sequential(
            # CONV_BLOCKS -> GLOBAL_POOLING (MAX/AVERAGE): the Task 1 stack with batch norm added
            conv_block_3x3(in_feature, 32, stride=2),
            conv_block_3x3(32, 64),
            conv_block_3x3(64, 64, stride=2),
            conv_block_3x3(64, 128),
            conv_block_3x3(128, 128, stride=2),
            nn.AdaptiveMaxPool2d(1),
            nn.Flatten()
        )
        self.dropout = nn.Dropout(p=0.3)
        self.fc = nn.Sequential(
            # FC -> BN -> ReLU
            nn.Linear(128, EMBEDDING_SIZE),
            nn.BatchNorm1d(EMBEDDING_SIZE),
            nn.ReLU()
        )
        self.pred = nn.Sequential(
            nn.Linear(EMBEDDING_SIZE, NUM_CLASSES, bias=False)
        )

    def forward(self, x):
        x = self.model(x)
        x = self.dropout(x)
        x = self.fc(x)
        x = self.dropout(x)
        x = self.pred(x)
        return x
# outputs are here for convenience
model_cnn_norm = MyModel(3)
model_cnn_norm.to(device)
summary(model_cnn_norm, (3, SIZE_H, SIZE_W), device='cuda')
###Output
_____no_output_____
###Markdown
Training
###Code
model_cnn_norm = model_cnn_norm.to(device)
opt = torch.optim.Adam(model_cnn_norm.parameters(), lr=1e-3)
# train with the same loaders, loss and helper function defined earlier in this notebook
train_loss, val_loss, val_accuracy, best_model_cnn_norm = train_model(
    model_cnn_norm, train_loader, val_loader, loss_fn, opt, EPOCH_NUM)
###Output
_____no_output_____
###Markdown
Evaluation
###Code
val_stats = test_model(best_model_cnn_norm, val_loader, 'validation')
test_stats = test_model(best_model_cnn_norm, test_loader, 'test')

# test_model returns plain accuracy values, so we compare them directly
if val_stats > 0.8 and test_stats > 0.8:
print('You have achieved the baseline for this task.')
else:
print('Train for some more time or change CNN architecture.')
###Output
_____no_output_____
###Markdown
Task 4: Data Augmentation (bonus area)There's a powerful torch tool for image preprocessing useful to do data preprocessing and augmentation.Here's how it works: we define a pipeline that* makes random crops of data (augmentation)* randomly changes image color (augmentation)* randomly flips image horizontally (augmentation)* then normalizes it (preprocessing)
###Code
transformer_augmented = transforms.Compose([
    # One possible augmentation pipeline (an assumption, other choices are equally valid)
    transforms.RandomResizedCrop((SIZE_H, SIZE_W), scale=(0.8, 1.0)),        # random crops
    transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2),    # random color changes
    transforms.RandomHorizontalFlip(),                                       # random horizontal flips
    transforms.ToTensor(),
    transforms.Normalize(image_mean, image_std)
])

# Load dataset using ImageFolder with the augmented transformer
# Note: We do not use augmentation for validation or testing
train_dataset_aug = torchvision.datasets.ImageFolder(os.path.join(DATA_PATH, 'train_11k'),
                                                     transform=transformer_augmented)

train_aug_batch_gen = torch.utils.data.DataLoader(train_dataset_aug,
                                                  batch_size=BATCH_SIZE,
                                                  shuffle=True,
                                                  num_workers=NUM_WORKERS)
###Output
_____no_output_____
###Markdown
Let's look at some image examples
###Code
plot_from_loader(train_aug_batch_gen)
###Output
_____no_output_____
###Markdown
Note that we did not change test_dataset, as we do not need to augment image data in it. Let's retrain our model, saving it to another variable Training
###Code
model_cnn_aug = MyModel(3).to(device)
opt = torch.optim.Adam(model_cnn_aug.parameters(), lr=1e-3)
# augmentation makes training harder, so we allow twice as many epochs
train_loss, val_loss, val_accuracy, best_model_cnn_aug = train_model(
    model_cnn_aug, train_aug_batch_gen, val_loader, loss_fn, opt, 2 * EPOCH_NUM)
###Output
_____no_output_____
###Markdown
Evaluation
###Code
val_stats = test_model(best_model_cnn_aug, val_loader, 'validation')
test_stats = test_model(best_model_cnn_aug, test_loader, 'test')

# test_model returns plain accuracy values, so we compare them directly
if val_stats > 0.9 and test_stats > 0.9:
print('You have achieved the baseline for this task.')
else:
print('Train for some more time or change augmentation scheme.')
###Output
_____no_output_____ |
examples/02_model_hybrid/ncf_deep_dive.ipynb | ###Markdown
Copyright (c) Microsoft Corporation. All rights reserved.Licensed under the MIT License. Neural Collaborative Filtering (NCF)This notebook serves as an introduction to Neural Collaborative Filtering (NCF), which is an innovative algorithm based on deep neural networks to tackle the key problem in recommendation — collaborative filtering — on the basis of implicit feedback. 0 Global Settings and Imports
###Code
import sys
import os
import shutil
import papermill as pm
import scrapbook as sb
import pandas as pd
import numpy as np
import tensorflow as tf
tf.get_logger().setLevel('ERROR') # only show error messages
from recommenders.utils.timer import Timer
from recommenders.models.ncf.ncf_singlenode import NCF
from recommenders.models.ncf.dataset import Dataset as NCFDataset
from recommenders.datasets import movielens
from recommenders.datasets.python_splitters import python_chrono_split
from recommenders.evaluation.python_evaluation import (rmse, mae, rsquared, exp_var, map_at_k, ndcg_at_k, precision_at_k,
recall_at_k, get_top_k_items)
from recommenders.utils.constants import SEED as DEFAULT_SEED
print("System version: {}".format(sys.version))
print("Pandas version: {}".format(pd.__version__))
print("Tensorflow version: {}".format(tf.__version__))
# top k items to recommend
TOP_K = 10
# Select MovieLens data size: 100k, 1m, 10m, or 20m
MOVIELENS_DATA_SIZE = '100k'
# Model parameters
EPOCHS = 100
BATCH_SIZE = 256
SEED = DEFAULT_SEED # Set None for non-deterministic results
###Output
_____no_output_____
###Markdown
1 Matrix factorization algorithmNCF is a neural matrix factorization model, which ensembles Generalized Matrix Factorization (GMF) and Multi-Layer Perceptron (MLP) to unify the strengths of linearity of MF and non-linearity of MLP for modelling the user–item latent structures. NCF can be demonstrated as a framework for GMF and MLP, which is illustrated as below: This figure shows how to utilize latent vectors of items and users, and then how to fuse outputs from GMF Layer (left) and MLP Layer (right). We will introduce this framework and show how to learn the model parameters in following sections. 1.1 The GMF modelIn ALS, the ratings are modeled as follows:$$\hat { r } _ { u , i } = q _ { i } ^ { T } p _ { u }$$GMF introduces a neural CF layer as the output layer of standard MF. In this way, MF can be easily generalizedand extended. For example, if we allow the edge weights of this output layer to be learnt from data without the uniform constraint, it will result in a variant of MF that allows varying importance of latent dimensions. And if we use a non-linear function for activation, it will generalize MF to a non-linear setting which might be more expressive than the linear MF model. GMF can be shown as follows:$$\hat { r } _ { u , i } = a _ { o u t } \left( h ^ { T } \left( q _ { i } \odot p _ { u } \right) \right)$$where $\odot$ is element-wise product of vectors. Additionally, ${a}_{out}$ and ${h}$ denote the activation function and edge weights of the output layer respectively. MF can be interpreted as a special case of GMF. Intuitively, if we use an identity function for aout and enforce h to be a uniform vector of 1, we can exactly recover the MF model. 1.2 The MLP modelNCF adopts two pathways to model users and items: 1) element-wise product of vectors, 2) concatenation of vectors. To learn interactions after concatenating of users and items latent features, the standard MLP model is applied. In this sense, we can endow the model a large level of flexibility and non-linearity to learn the interactions between $p_{u}$ and $q_{i}$. The details of MLP model are:For the input layer, there is concatention of user and item vectors:$$z _ { 1 } = \phi _ { 1 } \left( p _ { u } , q _ { i } \right) = \left[ \begin{array} { c } { p _ { u } } \\ { q _ { i } } \end{array} \right]$$So for the hidden layers and output layer of MLP, the details are:$$\phi _ { l } \left( z _ { l } \right) = a _ { o u t } \left( W _ { l } ^ { T } z _ { l } + b _ { l } \right) , ( l = 2,3 , \ldots , L - 1 )$$and:$$\hat { r } _ { u , i } = \sigma \left( h ^ { T } \phi \left( z _ { L - 1 } \right) \right)$$where ${ W }_{ l }$, ${ b }_{ l }$, and ${ a }_{ out }$ denote the weight matrix, bias vector, and activation function for the $l$-th layer’s perceptron, respectively. For activation functions of MLP layers, one can freely choose sigmoid, hyperbolic tangent (tanh), and Rectifier (ReLU), among others. Because we have a binary classification task, the activation function of the output layer is defined as sigmoid $\sigma(x)=\frac{1}{1+e^{-x}}$ to restrict the predicted score to be in (0,1). 1.3 Fusion of GMF and MLPTo provide more flexibility to the fused model, we allow GMF and MLP to learn separate embeddings, and combine the two models by concatenating their last hidden layer. 
We get $\phi^{GMF}$ from GMF:$$\phi _ { u , i } ^ { G M F } = p _ { u } ^ { G M F } \odot q _ { i } ^ { G M F }$$and obtain $\phi^{MLP}$ from MLP:$$\phi _ { u , i } ^ { M L P } = a _ { o u t } \left( W _ { L } ^ { T } \left( a _ { o u t } \left( \ldots a _ { o u t } \left( W _ { 2 } ^ { T } \left[ \begin{array} { c } { p _ { u } ^ { M L P } } \\ { q _ { i } ^ { M L P } } \end{array} \right] + b _ { 2 } \right) \ldots \right) \right) + b _ { L }\right.$$Lastly, we fuse output from GMF and MLP:$$\hat { r } _ { u , i } = \sigma \left( h ^ { T } \left[ \begin{array} { l } { \phi ^ { G M F } } \\ { \phi ^ { M L P } } \end{array} \right] \right)$$This model combines the linearity of MF and non-linearity of DNNs for modelling user–item latent structures. 1.4 Objective FunctionWe define the likelihood function as:$$P \left( \mathcal { R } , \mathcal { R } ^ { - } | \mathbf { P } , \mathbf { Q } , \Theta \right) = \prod _ { ( u , i ) \in \mathcal { R } } \hat { r } _ { u , i } \prod _ { ( u , j ) \in \mathcal { R } ^{ - } } \left( 1 - \hat { r } _ { u , j } \right)$$Where $\mathcal{R}$ denotes the set of observed interactions, and $\mathcal{ R } ^ { - }$ denotes the set of negative instances. $\mathbf{P}$ and $\mathbf{Q}$ denotes the latent factor matrix for users and items, respectively; and $\Theta$ denotes the model parameters. Taking the negative logarithm of the likelihood, we obtain the objective function to minimize for NCF method, which is known as [binary cross-entropy loss](https://en.wikipedia.org/wiki/Cross_entropy):$$L = - \sum _ { ( u , i ) \in \mathcal { R } \cup { \mathcal { R } } ^ { - } } r _ { u , i } \log \hat { r } _ { u , i } + \left( 1 - r _ { u , i } \right) \log \left( 1 - \hat { r } _ { u , i } \right)$$The optimization can be done by performing Stochastic Gradient Descent (SGD), which is described in the [Surprise SVD deep dive notebook](../02_model/surprise_svd_deep_dive.ipynb). Our SGD method is very similar to the SVD algorithm's. 2 TensorFlow implementation of NCFWe will use the MovieLens dataset, which is composed of integer ratings from 1 to 5.We convert MovieLens into implicit feedback, and evaluate under our *leave-one-out* evaluation protocol.You can check the details of implementation in `recommenders/models/ncf` 3 TensorFlow NCF movie recommender 3.1 Load and split dataTo evaluate the performance of item recommendation, we adopt the leave-one-out evaluation.For each user, we held out his/her last interaction as the test set and utilized the remaining data for training. Since it is too time-consuming to rank all items for every user during evaluation, we followed the common strategy that randomly samples 100 items that are not interacted by the user, ranking the test item among the 100 items. Our test samples will be constructed by `NCFDataset`.We also show an alternative evaluation method, splitting the data chronologically using `python_chrono_split` to achieve a 75/25% training and test split.
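As a quick aside before loading the data: to make the GMF scoring function from Section 1.1 concrete, here is a tiny NumPy sketch of $\hat{r}_{u,i} = \sigma(h^T(p_u \odot q_i))$ (illustrative only; in the real model below the embeddings and $h$ are learned):

import numpy as np

rng = np.random.default_rng(0)
n_factors = 4
p_u = rng.normal(size=n_factors)   # user latent vector (random stand-in for a learned embedding)
q_i = rng.normal(size=n_factors)   # item latent vector
h = rng.normal(size=n_factors)     # edge weights of the output layer

sigmoid = lambda x: 1.0 / (1.0 + np.exp(-x))
r_hat = sigmoid(h @ (p_u * q_i))   # element-wise product, weighted sum, then sigmoid
print(r_hat)                       # a predicted score in (0, 1)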
###Code
df = movielens.load_pandas_df(
size=MOVIELENS_DATA_SIZE,
header=["userID", "itemID", "rating", "timestamp"]
)
df.head()
train, test = python_chrono_split(df, 0.75)
###Output
_____no_output_____
###Markdown
Filter out any users or items in the test set that do not appear in the training set.
###Code
test = test[test["userID"].isin(train["userID"].unique())]
test = test[test["itemID"].isin(train["itemID"].unique())]
###Output
_____no_output_____
###Markdown
Create a test set containing the last interaction for each user as for the leave-one-out evaluation.
###Code
leave_one_out_test = test.groupby("userID").last().reset_index()
###Output
_____no_output_____
###Markdown
Write datasets to csv files.
###Code
train_file = "./train.csv"
test_file = "./test.csv"
leave_one_out_test_file = "./leave_one_out_test.csv"
train.to_csv(train_file, index=False)
test.to_csv(test_file, index=False)
leave_one_out_test.to_csv(leave_one_out_test_file, index=False)
###Output
_____no_output_____
###Markdown
3.2 Functions of NCF Dataset Important functions of the Dataset class for NCF:`train_loader(batch_size, shuffle_size)`, generate training batches of size `batch_size`. Positive examples are loaded from the training file and negative samples are added in memory. 'shuffle_size' determines the number of rows that are read into memory before the examples are shuffled. By default, the function will attempt to load all data before performing the shuffle. If memory constraints are encountered when using large datasets, try reducing 'shuffle_size'.`test_loader()`, generate test batch by every positive test instance, (eg. \[1, 2, 1\] is a positive user & item pair in test set (\[userID, itemID, rating\] for this tuple). This function returns data like \[\[1, 2, 1\], \[1, 3, 0\], \[1,6, 0\], ...\], ie. following our *leave-one-out* evaluation protocol.
###Code
data = NCFDataset(train_file=train_file, test_file=leave_one_out_test_file, seed=SEED, overwrite_test_file_full=True)
###Output
Indexing ./train.csv ...
Indexing ./leave_one_out_test.csv ...
Indexing ./leave_one_out_test_full.csv ...
###Markdown
3.3 Train NCF based on TensorFlowThe NCF has a lot of parameters. The most important ones are:`n_factors`, which controls the dimension of the latent space. Usually, the quality of the training set predictions grows with as n_factors gets higher.`layer_sizes`, sizes of input layer (and hidden layers) of MLP, input type is list.`n_epochs`, which defines the number of iteration of the SGD procedure.Note that both parameter also affect the training time.`model_type`, we can train single `"MLP"`, `"GMF"` or combined model `"NCF"` by changing the type of model.We will here set `n_factors` to `4`, `layer_sizes` to `[16,8,4]`, `n_epochs` to `100`, `batch_size` to 256. To train the model, we simply need to call the `fit()` method.
###Code
model = NCF (
n_users=data.n_users,
n_items=data.n_items,
model_type="NeuMF",
n_factors=4,
layer_sizes=[16,8,4],
n_epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=1e-3,
verbose=10,
seed=SEED
)
with Timer() as train_time:
model.fit(data)
print("Took {} seconds for training.".format(train_time.interval))
###Output
Took 615.3995804620008 seconds for training.
###Markdown
3.4 Prediction and Evaluation 3.4.1 PredictionNow that our model is fitted, we can call `predict` to get some `predictions`. `predict` returns an internal object Prediction which can be easily converted back to a dataframe:
###Code
predictions = [[row.userID, row.itemID, model.predict(row.userID, row.itemID)]
for (_, row) in test.iterrows()]
predictions = pd.DataFrame(predictions, columns=['userID', 'itemID', 'prediction'])
predictions.head()
###Output
_____no_output_____
###Markdown
3.4.2 Generic Evaluation We remove rated movies from the top-k recommendations. To compute ranking metrics, we need predictions on all user-item pairs. We do, however, remove the items already watched by the user, since we choose not to recommend them again.
###Code
with Timer() as test_time:
users, items, preds = [], [], []
item = list(train.itemID.unique())
for user in train.userID.unique():
user = [user] * len(item)
users.extend(user)
items.extend(item)
preds.extend(list(model.predict(user, item, is_list=True)))
all_predictions = pd.DataFrame(data={"userID": users, "itemID":items, "prediction":preds})
merged = pd.merge(train, all_predictions, on=["userID", "itemID"], how="outer")
all_predictions = merged[merged.rating.isnull()].drop('rating', axis=1)
print("Took {} seconds for prediction.".format(test_time.interval))
eval_map = map_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K)
eval_ndcg = ndcg_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K)
eval_precision = precision_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K)
eval_recall = recall_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K)
print("MAP:\t%f" % eval_map,
"NDCG:\t%f" % eval_ndcg,
"Precision@K:\t%f" % eval_precision,
"Recall@K:\t%f" % eval_recall, sep='\n')
###Output
MAP: 0.048144
NDCG: 0.198384
Precision@K: 0.176246
Recall@K: 0.098700
###Markdown
3.4.3 "Leave-one-out" EvaluationWe implement the functions to repoduce the leave-one-out evaluation protocol mentioned in original NCF paper.For each item in test data, we randomly samples 100 items that are not interacted by the user, ranking the test item among the 101 items (1 positive item and 100 negative items). The performance of a ranked list is judged by **Hit Ratio (HR)** and **Normalized Discounted Cumulative Gain (NDCG)**. Finally, we average the values of those ranked lists to obtain the overall HR and NDCG on test data.We truncated the ranked list at 10 for both metrics. As such, the HR intuitively measures whether the test item is present on the top-10 list, and the NDCG accounts for the position of the hit by assigning higher scores to hits at top ranks.
###Code
k = TOP_K
ndcgs = []
hit_ratio = []
for b in data.test_loader():
user_input, item_input, labels = b
output = model.predict(user_input, item_input, is_list=True)
output = np.squeeze(output)
rank = sum(output >= output[0])
if rank <= k:
ndcgs.append(1 / np.log(rank + 1))
hit_ratio.append(1)
else:
ndcgs.append(0)
hit_ratio.append(0)
eval_ndcg = np.mean(ndcgs)
eval_hr = np.mean(hit_ratio)
print("HR:\t%f" % eval_hr)
print("NDCG:\t%f" % eval_ndcg)
###Output
HR: 0.506893
NDCG: 0.401163
###Markdown
3.5 Pre-trainingTo get better performance of NeuMF, we can adopt pre-training strategy. We first train GMF and MLP with random initializations until convergence. Then use their model parameters as the initialization for the corresponding parts of NeuMF’s parameters. Please pay attention to the output layer, where we concatenate weights of the two models with$$h ^ { N C F } \leftarrow \left[ \begin{array} { c } { \alpha h ^ { G M F } } \\ { ( 1 - \alpha ) h ^ { M L P } } \end{array} \right]$$where $h^{GMF}$ and $h^{MLP}$ denote the $h$ vector of the pretrained GMF and MLP model, respectively; and $\alpha$ is ahyper-parameter determining the trade-off between the two pre-trained models. We set $\alpha$ = 0.5. 3.5.1 Training GMF and MLP model`model.save`, we can set the `dir_name` to store the parameters of GMF and MLP
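A small sketch of the weight fusion above (illustrative only; the `alpha` argument of `model.load` used below corresponds to this trade-off, and the vectors here are made-up values):

import numpy as np

alpha = 0.5
h_gmf = np.array([0.2, -0.1, 0.4, 0.3])   # pretrained GMF output weights (made-up)
h_mlp = np.array([0.1, 0.5, -0.2, 0.0])   # pretrained MLP output weights (made-up)
h_ncf = np.concatenate([alpha * h_gmf, (1 - alpha) * h_mlp])
print(h_ncf)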
###Code
model = NCF (
n_users=data.n_users,
n_items=data.n_items,
model_type="GMF",
n_factors=4,
layer_sizes=[16,8,4],
n_epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=1e-3,
verbose=10,
seed=SEED
)
with Timer() as train_time:
model.fit(data)
print("Took {} seconds for training.".format(train_time.interval))
model.save(dir_name=".pretrain/GMF")
model = NCF (
n_users=data.n_users,
n_items=data.n_items,
model_type="MLP",
n_factors=4,
layer_sizes=[16,8,4],
n_epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=1e-3,
verbose=10,
seed=SEED
)
with Timer() as train_time:
model.fit(data)
print("Took {} seconds for training.".format(train_time.interval))
model.save(dir_name=".pretrain/MLP")
###Output
Took 507.5963159920029 seconds for training.
###Markdown
3.5.2 Load pre-trained GMF and MLP model for NeuMF`model.load`, we can set the `gmf_dir` and `mlp_dir` to store the parameters for NeuMF.
###Code
model = NCF (
n_users=data.n_users,
n_items=data.n_items,
model_type="NeuMF",
n_factors=4,
layer_sizes=[16,8,4],
n_epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=1e-3,
verbose=10,
seed=SEED
)
model.load(gmf_dir=".pretrain/GMF", mlp_dir=".pretrain/MLP", alpha=0.5)
with Timer() as train_time:
model.fit(data)
print("Took {} seconds for training.".format(train_time.interval))
###Output
Took 616.8741841240007 seconds for training.
###Markdown
3.5.3 Compare with not pre-trained NeuMFYou can use beforementioned evaluation methods to evaluate the pre-trained `NCF` Model. Usually, we will find the performance of pre-trained NCF is better than the not pre-trained.
###Code
with Timer() as test_time:
users, items, preds = [], [], []
item = list(train.itemID.unique())
for user in train.userID.unique():
user = [user] * len(item)
users.extend(user)
items.extend(item)
preds.extend(list(model.predict(user, item, is_list=True)))
all_predictions = pd.DataFrame(data={"userID": users, "itemID":items, "prediction":preds})
merged = pd.merge(train, all_predictions, on=["userID", "itemID"], how="outer")
all_predictions = merged[merged.rating.isnull()].drop('rating', axis=1)
print("Took {} seconds for prediction.".format(test_time.interval))
eval_map2 = map_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K)
eval_ndcg2 = ndcg_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K)
eval_precision2 = precision_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K)
eval_recall2 = recall_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K)
print("MAP:\t%f" % eval_map2,
"NDCG:\t%f" % eval_ndcg2,
"Precision@K:\t%f" % eval_precision2,
"Recall@K:\t%f" % eval_recall2, sep='\n')
# Record results with papermill for tests
sb.glue("map", eval_map)
sb.glue("ndcg", eval_ndcg)
sb.glue("precision", eval_precision)
sb.glue("recall", eval_recall)
sb.glue("map2", eval_map2)
sb.glue("ndcg2", eval_ndcg2)
sb.glue("precision2", eval_precision2)
sb.glue("recall2", eval_recall2)
###Output
_____no_output_____
###Markdown
3.5.4 Delete pre-trained directory
###Code
save_dir = ".pretrain"
if os.path.exists(save_dir):
shutil.rmtree(save_dir)
print("Did \'%s\' exist?: %s" % (save_dir, os.path.exists(save_dir)))
###Output
Did '.pretrain' exist?: False
###Markdown
3.4.2 Generic Evaluation We remove rated movies from the top-k recommendations. To compute ranking metrics, we need predictions on all user-item pairs. We do, however, remove the items already watched by the user, since we choose not to recommend them again.
###Code
with Timer() as test_time:
users, items, preds = [], [], []
item = list(train.itemID.unique())
for user in train.userID.unique():
user = [user] * len(item)
users.extend(user)
items.extend(item)
preds.extend(list(model.predict(user, item, is_list=True)))
all_predictions = pd.DataFrame(data={"userID": users, "itemID":items, "prediction":preds})
merged = pd.merge(train, all_predictions, on=["userID", "itemID"], how="outer")
all_predictions = merged[merged.rating.isnull()].drop('rating', axis=1)
print("Took {} seconds for prediction.".format(test_time.interval))
eval_map = map_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K)
eval_ndcg = ndcg_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K)
eval_precision = precision_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K)
eval_recall = recall_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K)
print("MAP:\t%f" % eval_map,
"NDCG:\t%f" % eval_ndcg,
"Precision@K:\t%f" % eval_precision,
"Recall@K:\t%f" % eval_recall, sep='\n')
###Output
MAP: 0.046273
NDCG: 0.190750
Precision@K: 0.173277
Recall@K: 0.096688
###Markdown
3.4.3 "Leave-one-out" EvaluationWe implement the functions to repoduce the leave-one-out evaluation protocol mentioned in original NCF paper.For each item in test data, we randomly samples 100 items that are not interacted by the user, ranking the test item among the 101 items (1 positive item and 100 negative items). The performance of a ranked list is judged by **Hit Ratio (HR)** and **Normalized Discounted Cumulative Gain (NDCG)**. Finally, we average the values of those ranked lists to obtain the overall HR and NDCG on test data.We truncated the ranked list at 10 for both metrics. As such, the HR intuitively measures whether the test item is present on the top-10 list, and the NDCG accounts for the position of the hit by assigning higher scores to hits at top ranks.**Note 1:** In exact leave-one-out evaluation protocol, we select only one of the latest items interacted with a user as test data for each user. But in this notebook, to compare with other algorithms, we select latest 25% dataset as test data. So this is an artificial "leave-one-out" evaluation only showing how to use `test_loader` and how to calculate metrics like the original paper. You can reproduce the real leave-one-out evaluation by changing the way of splitting data.**Note 2:** Because of sampling 100 negative items for each positive test item,
###Code
k = TOP_K
ndcgs = []
hit_ratio = []
for b in data.test_loader():
user_input, item_input, labels = b
output = model.predict(user_input, item_input, is_list=True)
output = np.squeeze(output)
rank = sum(output >= output[0])
if rank <= k:
ndcgs.append(1 / np.log(rank + 1))
hit_ratio.append(1)
else:
ndcgs.append(0)
hit_ratio.append(0)
eval_ndcg = np.mean(ndcgs)
eval_hr = np.mean(hit_ratio)
print("HR:\t%f" % eval_hr)
print("NDCG:\t%f" % eval_ndcg)
###Output
HR: 0.488564
NDCG: 0.383339
###Markdown
3.5 Pre-trainingTo get better performance of NeuMF, we can adopt pre-training strategy. We first train GMF and MLP with random initializations until convergence. Then use their model parameters as the initialization for the corresponding parts of NeuMF’s parameters. Please pay attention to the output layer, where we concatenate weights of the two models with$$h ^ { N C F } \leftarrow \left[ \begin{array} { c } { \alpha h ^ { G M F } } \\ { ( 1 - \alpha ) h ^ { M L P } } \end{array} \right]$$where $h^{GMF}$ and $h^{MLP}$ denote the $h$ vector of the pretrained GMF and MLP model, respectively; and $\alpha$ is ahyper-parameter determining the trade-off between the two pre-trained models. We set $\alpha$ = 0.5. 3.5.1 Training GMF and MLP model`model.save`, we can set the `dir_name` to store the parameters of GMF and MLP
###Code
model = NCF (
n_users=data.n_users,
n_items=data.n_items,
model_type="GMF",
n_factors=4,
layer_sizes=[16,8,4],
n_epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=1e-3,
verbose=10,
seed=SEED
)
with Timer() as train_time:
model.fit(data)
print("Took {} seconds for training.".format(train_time.interval))
model.save(dir_name=".pretrain/GMF")
model = NCF (
n_users=data.n_users,
n_items=data.n_items,
model_type="MLP",
n_factors=4,
layer_sizes=[16,8,4],
n_epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=1e-3,
verbose=10,
seed=SEED
)
with Timer() as train_time:
model.fit(data)
print("Took {} seconds for training.".format(train_time.interval))
model.save(dir_name=".pretrain/MLP")
###Output
Took 566.8783325639997 seconds for training.
###Markdown
3.5.2 Load pre-trained GMF and MLP model for NeuMF`model.load`, we can set the `gmf_dir` and `mlp_dir` to store the parameters for NeuMF.
###Code
model = NCF (
n_users=data.n_users,
n_items=data.n_items,
model_type="NeuMF",
n_factors=4,
layer_sizes=[16,8,4],
n_epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=1e-3,
verbose=10,
seed=SEED
)
model.load(gmf_dir=".pretrain/GMF", mlp_dir=".pretrain/MLP", alpha=0.5)
with Timer() as train_time:
model.fit(data)
print("Took {} seconds for training.".format(train_time.interval))
###Output
Took 655.1110815689999 seconds for training.
###Markdown
3.5.3 Compare with NeuMF trained without pre-training You can use the aforementioned evaluation methods to evaluate the pre-trained `NCF` model. Usually, we find that the pre-trained NCF performs better than the one trained from scratch.
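After running the next cell, the two NeuMF runs can be placed side by side for a quick comparison (an illustrative sketch, not part of the original notebook; `eval_ndcg` is omitted because it was reassigned by the leave-one-out cell above):

```python
comparison = pd.DataFrame(
    {
        "metric": ["MAP@K", "Precision@K", "Recall@K"],
        "NeuMF": [eval_map, eval_precision, eval_recall],
        "NeuMF (pre-trained)": [eval_map2, eval_precision2, eval_recall2],
    }
)
print(comparison)
```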
###Code
with Timer() as test_time:
users, items, preds = [], [], []
item = list(train.itemID.unique())
for user in train.userID.unique():
user = [user] * len(item)
users.extend(user)
items.extend(item)
preds.extend(list(model.predict(user, item, is_list=True)))
all_predictions = pd.DataFrame(data={"userID": users, "itemID":items, "prediction":preds})
merged = pd.merge(train, all_predictions, on=["userID", "itemID"], how="outer")
all_predictions = merged[merged.rating.isnull()].drop('rating', axis=1)
print("Took {} seconds for prediction.".format(test_time.interval))
eval_map2 = map_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K)
eval_ndcg2 = ndcg_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K)
eval_precision2 = precision_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K)
eval_recall2 = recall_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K)
print("MAP:\t%f" % eval_map2,
"NDCG:\t%f" % eval_ndcg2,
"Precision@K:\t%f" % eval_precision2,
"Recall@K:\t%f" % eval_recall2, sep='\n')
# Record results with scrapbook for tests
sb.glue("map", eval_map)
sb.glue("ndcg", eval_ndcg)
sb.glue("precision", eval_precision)
sb.glue("recall", eval_recall)
sb.glue("map2", eval_map2)
sb.glue("ndcg2", eval_ndcg2)
sb.glue("precision2", eval_precision2)
sb.glue("recall2", eval_recall2)
###Output
_____no_output_____
###Markdown
3.5.4 Delete pre-trained directory
###Code
save_dir = ".pretrain"
if os.path.exists(save_dir):
shutil.rmtree(save_dir)
print("Did \'%s\' exist?: %s" % (save_dir, os.path.exists(save_dir)))
###Output
Did '.pretrain' exist?: False
###Markdown
Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License. Neural Collaborative Filtering (NCF) This notebook serves as an introduction to Neural Collaborative Filtering (NCF), a deep-neural-network-based algorithm that addresses the key problem in recommendation, collaborative filtering, on the basis of implicit feedback. 0 Global Settings and Imports
###Code
import sys
import os
import shutil
import papermill as pm
import scrapbook as sb
import pandas as pd
import numpy as np
import tensorflow as tf
tf.get_logger().setLevel('ERROR') # only show error messages
from recommenders.utils.timer import Timer
from recommenders.models.ncf.ncf_singlenode import NCF
from recommenders.models.ncf.dataset import Dataset as NCFDataset
from recommenders.datasets import movielens
from recommenders.datasets.python_splitters import python_chrono_split
from recommenders.evaluation.python_evaluation import (rmse, mae, rsquared, exp_var, map_at_k, ndcg_at_k, precision_at_k,
recall_at_k, get_top_k_items)
from recommenders.utils.constants import SEED as DEFAULT_SEED
print("System version: {}".format(sys.version))
print("Pandas version: {}".format(pd.__version__))
print("Tensorflow version: {}".format(tf.__version__))
# top k items to recommend
TOP_K = 10
# Select MovieLens data size: 100k, 1m, 10m, or 20m
MOVIELENS_DATA_SIZE = '100k'
# Model parameters
EPOCHS = 100
BATCH_SIZE = 256
SEED = DEFAULT_SEED # Set None for non-deterministic results
###Output
_____no_output_____
###Markdown
1 Matrix factorization algorithm NCF is a neural matrix factorization model that combines Generalized Matrix Factorization (GMF) and a Multi-Layer Perceptron (MLP), unifying the strengths of the linearity of MF and the non-linearity of the MLP for modelling the user–item latent structures. NCF can be viewed as a framework covering GMF and MLP, illustrated below: This figure shows how the latent vectors of items and users are used, and how the outputs of the GMF layer (left) and the MLP layer (right) are fused. We will introduce this framework and show how to learn the model parameters in the following sections. 1.1 The GMF model In ALS, the ratings are modeled as follows: $$\hat { r } _ { u , i } = q _ { i } ^ { T } p _ { u }$$ GMF introduces a neural CF layer as the output layer of standard MF. In this way, MF can be easily generalized and extended. For example, if we allow the edge weights of this output layer to be learnt from data without the uniform constraint, we obtain a variant of MF that allows varying importance of the latent dimensions. And if we use a non-linear activation function, MF is generalized to a non-linear setting which might be more expressive than the linear MF model. GMF can be written as follows: $$\hat { r } _ { u , i } = a _ { o u t } \left( h ^ { T } \left( q _ { i } \odot p _ { u } \right) \right)$$ where $\odot$ is the element-wise product of vectors. Additionally, ${a}_{out}$ and ${h}$ denote the activation function and the edge weights of the output layer, respectively. MF can be interpreted as a special case of GMF: if we use an identity function for $a_{out}$ and enforce $h$ to be a uniform vector of 1, we exactly recover the MF model. 1.2 The MLP model NCF adopts two pathways to model users and items: 1) the element-wise product of vectors, and 2) the concatenation of vectors. To learn interactions after concatenating the user and item latent features, a standard MLP is applied. In this sense, we endow the model with a large degree of flexibility and non-linearity to learn the interactions between $p_{u}$ and $q_{i}$. The details of the MLP model are as follows. For the input layer, the user and item vectors are concatenated: $$z _ { 1 } = \phi _ { 1 } \left( p _ { u } , q _ { i } \right) = \left[ \begin{array} { c } { p _ { u } } \\ { q _ { i } } \end{array} \right]$$ For the hidden layers and the output layer of the MLP: $$\phi _ { l } \left( z _ { l } \right) = a _ { o u t } \left( W _ { l } ^ { T } z _ { l } + b _ { l } \right) , \quad ( l = 2,3 , \ldots , L - 1 )$$ and: $$\hat { r } _ { u , i } = \sigma \left( h ^ { T } \phi \left( z _ { L - 1 } \right) \right)$$ where ${ W }_{ l }$, ${ b }_{ l }$, and ${ a }_{ out }$ denote the weight matrix, bias vector, and activation function of the $l$-th layer's perceptron, respectively. For the activation functions of the MLP layers, one can freely choose sigmoid, hyperbolic tangent (tanh), or Rectifier (ReLU), among others. Because this is a binary classification task, the activation function of the output layer is defined as the sigmoid $\sigma(x)=\frac{1}{1+e^{-x}}$ to restrict the predicted score to lie in (0,1). 1.3 Fusion of GMF and MLP To provide more flexibility to the fused model, we allow GMF and MLP to learn separate embeddings, and combine the two models by concatenating their last hidden layers.
We get $\phi^{GMF}$ from GMF: $$\phi _ { u , i } ^ { G M F } = p _ { u } ^ { G M F } \odot q _ { i } ^ { G M F }$$ and obtain $\phi^{MLP}$ from MLP: $$\phi _ { u , i } ^ { M L P } = a _ { o u t } \left( W _ { L } ^ { T } \left( a _ { o u t } \left( \ldots a _ { o u t } \left( W _ { 2 } ^ { T } \left[ \begin{array} { c } { p _ { u } ^ { M L P } } \\ { q _ { i } ^ { M L P } } \end{array} \right] + b _ { 2 } \right) \ldots \right) \right) + b _ { L } \right)$$ Lastly, we fuse the outputs of GMF and MLP: $$\hat { r } _ { u , i } = \sigma \left( h ^ { T } \left[ \begin{array} { l } { \phi ^ { G M F } } \\ { \phi ^ { M L P } } \end{array} \right] \right)$$ This model combines the linearity of MF and the non-linearity of DNNs for modelling user–item latent structures. 1.4 Objective Function We define the likelihood function as: $$P \left( \mathcal { R } , \mathcal { R } ^ { - } | \mathbf { P } , \mathbf { Q } , \Theta \right) = \prod _ { ( u , i ) \in \mathcal { R } } \hat { r } _ { u , i } \prod _ { ( u , j ) \in \mathcal { R } ^{ - } } \left( 1 - \hat { r } _ { u , j } \right)$$ where $\mathcal{R}$ denotes the set of observed interactions and $\mathcal{ R } ^ { - }$ denotes the set of negative instances. $\mathbf{P}$ and $\mathbf{Q}$ denote the latent factor matrices for users and items, respectively, and $\Theta$ denotes the model parameters. Taking the negative logarithm of the likelihood, we obtain the objective function to minimize for the NCF method, which is the [binary cross-entropy loss](https://en.wikipedia.org/wiki/Cross_entropy): $$L = - \sum _ { ( u , i ) \in \mathcal { R } \cup { \mathcal { R } } ^ { - } } r _ { u , i } \log \hat { r } _ { u , i } + \left( 1 - r _ { u , i } \right) \log \left( 1 - \hat { r } _ { u , i } \right)$$ The optimization can be done with Stochastic Gradient Descent (SGD), which is described in the [Surprise SVD deep dive notebook](../02_model/surprise_svd_deep_dive.ipynb); our SGD method is very similar to the SVD algorithm's. 2 TensorFlow implementation of NCF We will use the MovieLens dataset, which is composed of integer ratings from 1 to 5. We convert MovieLens into implicit feedback and evaluate under our *leave-one-out* evaluation protocol. You can check the implementation details in `recommenders/models/ncf`. 3 TensorFlow NCF movie recommender 3.1 Load and split data To evaluate the performance of item recommendation, we adopted the leave-one-out evaluation. For each user, we hold out his/her latest interaction as the test set and use the remaining data for training; we use `python_chrono_split` to achieve this. And since it is too time-consuming to rank all items for every user during evaluation, we follow the common strategy of randomly sampling 100 items that the user has not interacted with, and ranking the test item among these 100 items. Our test samples will be constructed by `NCFDataset`.
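To make these formulas concrete, here is a small self-contained NumPy sketch (our illustration with random numbers, not the library implementation) of the GMF score, a one-hidden-layer MLP branch, the NeuMF fusion, and the binary cross-entropy objective:

```python
import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

rng = np.random.default_rng(42)
n_factors = 4

# Latent vectors for one (user, item) pair; in NCF these are learned embeddings.
p_u, q_i = rng.normal(size=n_factors), rng.normal(size=n_factors)

# GMF branch: phi_GMF = p_u ⊙ q_i, scored as sigma(h_gmf^T phi_GMF)
h_gmf = rng.normal(size=n_factors)
phi_gmf = p_u * q_i
r_hat_gmf = sigmoid(h_gmf @ phi_gmf)

# MLP branch (a single hidden layer for brevity): z_1 = [p_u ; q_i], ReLU activation
W2 = rng.normal(size=(2 * n_factors, n_factors))
b2 = np.zeros(n_factors)
phi_mlp = np.maximum(0.0, np.concatenate([p_u, q_i]) @ W2 + b2)

# NeuMF fusion: r_hat = sigma(h^T [phi_GMF ; phi_MLP])
h = rng.normal(size=phi_gmf.size + phi_mlp.size)
r_hat = sigmoid(h @ np.concatenate([phi_gmf, phi_mlp]))

# Binary cross-entropy for one observed interaction (r=1) and one sampled negative (r=0)
r = np.array([1.0, 0.0])
r_hat_pair = np.array([r_hat, 0.3])  # 0.3 stands in for the negative item's score
loss = -np.sum(r * np.log(r_hat_pair) + (1 - r) * np.log(1 - r_hat_pair))
print(r_hat_gmf, r_hat, loss)
```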
###Code
df = movielens.load_pandas_df(
size=MOVIELENS_DATA_SIZE,
header=["userID", "itemID", "rating", "timestamp"]
)
df.head()
train, test = python_chrono_split(df, 0.75)
###Output
_____no_output_____
###Markdown
3.2 Functions of the NCF Dataset The important functions of the `Dataset` class for NCF are: `negative_sampling()`, which samples negative user-item pairs for every positive instance, controlled by the parameter `n_neg`; `train_loader(batch_size, shuffle=True)`, which generates training batches of size `batch_size` and optionally shuffles the training set; and `test_loader()`, which generates one test batch per positive test instance. For example, if \[1, 2, 1\] (i.e. \[userID, itemID, rating\]) is a positive user-item pair in the test set, the function returns something like \[\[1, 2, 1\], \[1, 3, 0\], \[1, 6, 0\], ...\], following our *leave-one-out* evaluation protocol.
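After running the next cell, which constructs `data`, the `test_loader` described above can be inspected as follows (a sketch, not an original notebook cell; it relies on the same unpacking pattern used in the "leave-one-out" evaluation later in this notebook):

```python
for user_input, item_input, labels in data.test_loader():
    # each batch corresponds to one positive test instance: the first entry is
    # the positive item (label 1), followed by the sampled negative items (label 0)
    print(len(user_input), labels[0])
    break
```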
###Code
data = NCFDataset(train=train, test=test, seed=SEED)
###Output
_____no_output_____
###Markdown
3.3 Train NCF based on TensorFlow NCF has many parameters. The most important ones are: `n_factors`, which controls the dimension of the latent space (the quality of the training-set predictions usually grows as `n_factors` gets higher); `layer_sizes`, a list with the sizes of the input and hidden layers of the MLP; `n_epochs`, which defines the number of iterations of the SGD procedure (note that both `n_factors` and `n_epochs` also affect the training time); and `model_type`, which lets us train a single `"MLP"` or `"GMF"` model or the combined `"NeuMF"` model. We will here set `n_factors` to `4`, `layer_sizes` to `[16,8,4]`, `n_epochs` to `100`, and `batch_size` to `256`. To train the model, we simply call the `fit()` method.
###Code
model = NCF (
n_users=data.n_users,
n_items=data.n_items,
model_type="NeuMF",
n_factors=4,
layer_sizes=[16,8,4],
n_epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=1e-3,
verbose=10,
seed=SEED
)
with Timer() as train_time:
model.fit(data)
print("Took {} seconds for training.".format(train_time.interval))
###Output
Took 663.2377220259996 seconds for training.
###Markdown
3.4 Prediction and Evaluation 3.4.1 PredictionNow that our model is fitted, we can call `predict` to score user–item pairs. The predicted scores can easily be collected into a dataframe:
###Code
predictions = [[row.userID, row.itemID, model.predict(row.userID, row.itemID)]
for (_, row) in test.iterrows()]
predictions = pd.DataFrame(predictions, columns=['userID', 'itemID', 'prediction'])
predictions.head()
###Output
_____no_output_____
###Markdown
3.4.2 Generic EvaluationWe remove rated movies from the top-k recommendations. To compute ranking metrics, we need predictions for all user–item pairs, and we then remove the items already watched by the user, since we choose not to recommend them again.
###Code
with Timer() as test_time:
users, items, preds = [], [], []
item = list(train.itemID.unique())
for user in train.userID.unique():
user = [user] * len(item)
users.extend(user)
items.extend(item)
preds.extend(list(model.predict(user, item, is_list=True)))
all_predictions = pd.DataFrame(data={"userID": users, "itemID":items, "prediction":preds})
merged = pd.merge(train, all_predictions, on=["userID", "itemID"], how="outer")
all_predictions = merged[merged.rating.isnull()].drop('rating', axis=1)
print("Took {} seconds for prediction.".format(test_time.interval))
eval_map = map_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K)
eval_ndcg = ndcg_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K)
eval_precision = precision_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K)
eval_recall = recall_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K)
print("MAP:\t%f" % eval_map,
"NDCG:\t%f" % eval_ndcg,
"Precision@K:\t%f" % eval_precision,
"Recall@K:\t%f" % eval_recall, sep='\n')
###Output
MAP: 0.046273
NDCG: 0.190750
Precision@K: 0.173277
Recall@K: 0.096688
###Markdown
3.4.3 "Leave-one-out" EvaluationWe implement the functions to reproduce the leave-one-out evaluation protocol described in the original NCF paper.For each item in the test data, we randomly sample 100 items that the user has not interacted with, and rank the test item among the 101 items (1 positive item and 100 negative items). The performance of a ranked list is judged by **Hit Ratio (HR)** and **Normalized Discounted Cumulative Gain (NDCG)**. Finally, we average the values over those ranked lists to obtain the overall HR and NDCG on the test data.We truncate the ranked list at 10 for both metrics. As such, the HR intuitively measures whether the test item is present in the top-10 list, and the NDCG accounts for the position of the hit by assigning higher scores to hits at top ranks.**Note 1:** In the exact leave-one-out evaluation protocol, only each user's single latest interaction is held out as test data. But in this notebook, to compare with other algorithms, we select the latest 25% of the dataset as test data. So this is an artificial "leave-one-out" evaluation that only shows how to use `test_loader` and how to calculate the metrics as in the original paper. You can reproduce the real leave-one-out evaluation by changing the way the data is split.**Note 2:** Because 100 negative items are sampled for each positive test item, the resulting HR and NDCG values depend on that random sample and may vary slightly between runs.
###Code
k = TOP_K
ndcgs = []
hit_ratio = []
for b in data.test_loader():
user_input, item_input, labels = b
output = model.predict(user_input, item_input, is_list=True)
output = np.squeeze(output)
rank = sum(output >= output[0])  # the first element of each test batch is the positive test item
if rank <= k:
ndcgs.append(1 / np.log(rank + 1))
hit_ratio.append(1)
else:
ndcgs.append(0)
hit_ratio.append(0)
eval_ndcg = np.mean(ndcgs)
eval_hr = np.mean(hit_ratio)
print("HR:\t%f" % eval_hr)
print("NDCG:\t%f" % eval_ndcg)
###Output
HR: 0.488564
NDCG: 0.383339
###Markdown
3.5 Pre-trainingTo get better performance from NeuMF, we can adopt a pre-training strategy. We first train GMF and MLP with random initializations until convergence, and then use their model parameters as the initialization for the corresponding parts of NeuMF's parameters. Pay particular attention to the output layer, where we concatenate the weights of the two models with$$h^{NCF} \leftarrow \left[\begin{array}{c} \alpha h^{GMF} \\ (1-\alpha) h^{MLP} \end{array}\right]$$where $h^{GMF}$ and $h^{MLP}$ denote the $h$ vector of the pre-trained GMF and MLP model, respectively, and $\alpha$ is a hyper-parameter determining the trade-off between the two pre-trained models. We set $\alpha = 0.5$. 3.5.1 Training GMF and MLP modelWith `model.save`, we can set the `dir_name` in which to store the parameters of GMF and MLP
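As a rough illustration of this weight-fusion step (a sketch only, not the library's internal code; the weight vectors are made-up):

```python
import numpy as np

alpha = 0.5
h_gmf = np.array([0.2, -0.1, 0.4, 0.3])   # hypothetical pre-trained GMF output weights
h_mlp = np.array([0.1, 0.5, -0.2, 0.0])   # hypothetical pre-trained MLP output weights

# h_NCF <- [alpha * h_GMF ; (1 - alpha) * h_MLP]
h_ncf = np.concatenate([alpha * h_gmf, (1 - alpha) * h_mlp])
print(h_ncf.shape)  # (8,)
```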
###Code
model = NCF (
n_users=data.n_users,
n_items=data.n_items,
model_type="GMF",
n_factors=4,
layer_sizes=[16,8,4],
n_epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=1e-3,
verbose=10,
seed=SEED
)
with Timer() as train_time:
model.fit(data)
print("Took {} seconds for training.".format(train_time.interval))
model.save(dir_name=".pretrain/GMF")
model = NCF (
n_users=data.n_users,
n_items=data.n_items,
model_type="MLP",
n_factors=4,
layer_sizes=[16,8,4],
n_epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=1e-3,
verbose=10,
seed=SEED
)
with Timer() as train_time:
model.fit(data)
print("Took {} seconds for training.".format(train_time.interval))
model.save(dir_name=".pretrain/MLP")
###Output
Took 566.8783325639997 seconds for training.
###Markdown
3.5.2 Load pre-trained GMF and MLP model for NeuMFWith `model.load`, we can set `gmf_dir` and `mlp_dir` to load the stored GMF and MLP parameters into NeuMF.
###Code
model = NCF (
n_users=data.n_users,
n_items=data.n_items,
model_type="NeuMF",
n_factors=4,
layer_sizes=[16,8,4],
n_epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=1e-3,
verbose=10,
seed=SEED
)
model.load(gmf_dir=".pretrain/GMF", mlp_dir=".pretrain/MLP", alpha=0.5)
with Timer() as train_time:
model.fit(data)
print("Took {} seconds for training.".format(train_time.interval))
###Output
Took 655.1110815689999 seconds for training.
###Markdown
3.5.3 Compare with the non-pre-trained NeuMFYou can use the aforementioned evaluation methods to evaluate the pre-trained `NCF` model. Usually, we find that the pre-trained NCF performs better than the non-pre-trained one.
###Code
with Timer() as test_time:
users, items, preds = [], [], []
item = list(train.itemID.unique())
for user in train.userID.unique():
user = [user] * len(item)
users.extend(user)
items.extend(item)
preds.extend(list(model.predict(user, item, is_list=True)))
all_predictions = pd.DataFrame(data={"userID": users, "itemID":items, "prediction":preds})
merged = pd.merge(train, all_predictions, on=["userID", "itemID"], how="outer")
all_predictions = merged[merged.rating.isnull()].drop('rating', axis=1)
print("Took {} seconds for prediction.".format(test_time.interval))
eval_map2 = map_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K)
eval_ndcg2 = ndcg_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K)
eval_precision2 = precision_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K)
eval_recall2 = recall_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K)
print("MAP:\t%f" % eval_map2,
"NDCG:\t%f" % eval_ndcg2,
"Precision@K:\t%f" % eval_precision2,
"Recall@K:\t%f" % eval_recall2, sep='\n')
# Record results with papermill for tests
sb.glue("map", eval_map)
sb.glue("ndcg", eval_ndcg)
sb.glue("precision", eval_precision)
sb.glue("recall", eval_recall)
sb.glue("map2", eval_map2)
sb.glue("ndcg2", eval_ndcg2)
sb.glue("precision2", eval_precision2)
sb.glue("recall2", eval_recall2)
###Output
_____no_output_____
###Markdown
3.5.4 Delete pre-trained directory
###Code
save_dir = ".pretrain"
if os.path.exists(save_dir):
shutil.rmtree(save_dir)
print("Did \'%s\' exist?: %s" % (save_dir, os.path.exists(save_dir)))
###Output
Did '.pretrain' exist?: False
###Markdown
Copyright (c) Microsoft Corporation. All rights reserved.Licensed under the MIT License. Neural Collaborative Filtering (NCF)This notebook serves as an introduction to Neural Collaborative Filtering (NCF), which is an innovative algorithm based on deep neural networks to tackle the key problem in recommendation — collaborative filtering — on the basis of implicit feedback. 0 Global Settings and Imports
###Code
import sys
sys.path.append("../../")
import os
import shutil
import papermill as pm
import scrapbook as sb
import pandas as pd
import numpy as np
import tensorflow as tf
tf.get_logger().setLevel('ERROR') # only show error messages
from reco_utils.common.timer import Timer
from reco_utils.recommender.ncf.ncf_singlenode import NCF
from reco_utils.recommender.ncf.dataset import Dataset as NCFDataset
from reco_utils.dataset import movielens
from reco_utils.dataset.python_splitters import python_chrono_split
from reco_utils.evaluation.python_evaluation import (rmse, mae, rsquared, exp_var, map_at_k, ndcg_at_k, precision_at_k,
recall_at_k, get_top_k_items)
from reco_utils.common.constants import SEED as DEFAULT_SEED
print("System version: {}".format(sys.version))
print("Pandas version: {}".format(pd.__version__))
print("Tensorflow version: {}".format(tf.__version__))
# top k items to recommend
TOP_K = 10
# Select MovieLens data size: 100k, 1m, 10m, or 20m
MOVIELENS_DATA_SIZE = '100k'
# Model parameters
EPOCHS = 100
BATCH_SIZE = 256
SEED = DEFAULT_SEED # Set None for non-deterministic results
###Output
_____no_output_____
###Markdown
1 Matrix factorization algorithmNCF is a new neural matrix factorization model that ensembles Generalized Matrix Factorization (GMF) and a Multi-Layer Perceptron (MLP) to unify the strengths of the linearity of MF and the non-linearity of the MLP for modelling the user–item latent structures. NCF can be seen as a framework over GMF and MLP, illustrated below: This figure shows how to utilize the latent vectors of items and users, and then how to fuse the outputs from the GMF Layer (left) and the MLP Layer (right). We will introduce this framework and show how to learn the model parameters in the following sections. 1.1 The GMF modelIn ALS, the ratings are modeled as follows:$$\hat{r}_{u,i} = q_i^T p_u$$GMF introduces a neural CF layer as the output layer of standard MF. In this way, MF can be easily generalized and extended. For example, if we allow the edge weights of this output layer to be learnt from the data without the uniform constraint, it results in a variant of MF that allows varying importance of the latent dimensions. And if we use a non-linear activation function, it generalizes MF to a non-linear setting which might be more expressive than the linear MF model. GMF can be written as follows:$$\hat{r}_{u,i} = a_{out}\left(h^T\left(q_i \odot p_u\right)\right)$$where $\odot$ is the element-wise product of vectors. Additionally, ${a}_{out}$ and ${h}$ denote the activation function and the edge weights of the output layer, respectively. MF can be interpreted as a special case of GMF. Intuitively, if we use an identity function for $a_{out}$ and enforce $h$ to be a uniform vector of 1, we exactly recover the MF model. 1.2 The MLP modelNCF adopts two pathways to model users and items: 1) the element-wise product of vectors, 2) the concatenation of vectors. To learn interactions after concatenating the user and item latent features, the standard MLP model is applied. In this sense, we endow the model with a large degree of flexibility and non-linearity to learn the interactions between $p_{u}$ and $q_{i}$. The details of the MLP model are as follows. For the input layer, there is a concatenation of the user and item vectors:$$z_1 = \phi_1\left(p_u, q_i\right) = \left[\begin{array}{c} p_u \\ q_i \end{array}\right]$$For the hidden layers and output layer of the MLP, we have:$$\phi_l\left(z_l\right) = a_{out}\left(W_l^T z_l + b_l\right), \quad (l = 2, 3, \ldots, L-1)$$and:$$\hat{r}_{u,i} = \sigma\left(h^T \phi\left(z_{L-1}\right)\right)$$where ${W}_{l}$, ${b}_{l}$, and ${a}_{out}$ denote the weight matrix, bias vector, and activation function for the $l$-th layer's perceptron, respectively. For the activation functions of the MLP layers, one can freely choose sigmoid, hyperbolic tangent (tanh), and Rectifier (ReLU), among others. Because this is a binary classification task, the activation function of the output layer is defined as the sigmoid $\sigma(x)=\frac{1}{1+e^{-x}}$ to restrict the predicted score to be in (0,1). 1.3 Fusion of GMF and MLPTo provide more flexibility to the fused model, we allow GMF and MLP to learn separate embeddings, and combine the two models by concatenating their last hidden layers. 
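Before moving on to the fusion, here is a tiny NumPy sketch of the GMF and MLP scoring rules from sections 1.1 and 1.2; the latent vectors and layer parameters are made-up values, purely for illustration:

```python
import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

p_u = np.array([0.1, 0.7, -0.3, 0.5])   # hypothetical user latent factors
q_i = np.array([0.4, 0.2, 0.6, -0.1])   # hypothetical item latent factors

# GMF (1.1): with identity a_out and h = 1 this recovers plain MF
h = np.ones(4)
gmf_score = sigmoid(h @ (p_u * q_i))

# MLP (1.2): concatenate, pass through one hidden layer, then the output layer
rng = np.random.default_rng(0)
z1 = np.concatenate([p_u, q_i])                  # input layer: [p_u ; q_i]
W2, b2 = rng.normal(size=(8, 4)), np.zeros(4)    # made-up layer parameters
phi2 = np.maximum(0, W2.T @ z1 + b2)             # ReLU activation
h_mlp = rng.normal(size=4)
mlp_score = sigmoid(h_mlp @ phi2)

print(round(gmf_score, 4), round(mlp_score, 4))
```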
We get $\phi^{GMF}$ from GMF:$$\phi_{u,i}^{GMF} = p_u^{GMF} \odot q_i^{GMF}$$and obtain $\phi^{MLP}$ from MLP:$$\phi_{u,i}^{MLP} = a_{out}\left(W_L^T\left(a_{out}\left(\ldots a_{out}\left(W_2^T\left[\begin{array}{c} p_u^{MLP} \\ q_i^{MLP} \end{array}\right] + b_2\right)\ldots\right)\right) + b_L\right)$$Lastly, we fuse the outputs from GMF and MLP:$$\hat{r}_{u,i} = \sigma\left(h^T\left[\begin{array}{l} \phi^{GMF} \\ \phi^{MLP} \end{array}\right]\right)$$This model combines the linearity of MF and the non-linearity of DNNs for modelling user–item latent structures. 1.4 Objective FunctionWe define the likelihood function as:$$P\left(\mathcal{R}, \mathcal{R}^- | \mathbf{P}, \mathbf{Q}, \Theta\right) = \prod_{(u,i) \in \mathcal{R}} \hat{r}_{u,i} \prod_{(u,j) \in \mathcal{R}^-} \left(1 - \hat{r}_{u,j}\right)$$where $\mathcal{R}$ denotes the set of observed interactions, and $\mathcal{R}^-$ denotes the set of negative instances. $\mathbf{P}$ and $\mathbf{Q}$ denote the latent factor matrices for users and items, respectively, and $\Theta$ denotes the model parameters. Taking the negative logarithm of the likelihood, we obtain the objective function to minimize for the NCF method, which is known as the [binary cross-entropy loss](https://en.wikipedia.org/wiki/Cross_entropy):$$L = -\sum_{(u,i) \in \mathcal{R} \cup \mathcal{R}^-} r_{u,i} \log \hat{r}_{u,i} + \left(1 - r_{u,i}\right) \log\left(1 - \hat{r}_{u,i}\right)$$The optimization can be done by performing Stochastic Gradient Descent (SGD), which is described in the [Surprise SVD deep dive notebook](../02_model/surprise_svd_deep_dive.ipynb). Our SGD method is very similar to the SVD algorithm's. 2 TensorFlow implementation of NCFWe will use the MovieLens dataset, which is composed of integer ratings from 1 to 5.We convert MovieLens into implicit feedback, and evaluate under our *leave-one-out* evaluation protocol.You can check the details of the implementation in `reco_utils/recommender/ncf` 3 TensorFlow NCF movie recommender 3.1 Load and split dataTo evaluate the performance of item recommendation, we adopted the leave-one-out evaluation.For each user, we held out his/her latest interaction as the test set and utilized the remaining data for training. We use `python_chrono_split` to achieve this. And since it is too time-consuming to rank all items for every user during evaluation, we followed the common strategy of randomly sampling 100 items that the user has not interacted with and ranking the test item among these sampled items. Our test samples will be constructed by `NCFDataset`.
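A minimal sketch of this final fusion step (illustrative only; the $\phi$ vectors and $h$ are made-up placeholders rather than values from a trained model):

```python
import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

phi_gmf = np.array([0.2, -0.4, 0.1, 0.3])   # hypothetical GMF branch output
phi_mlp = np.array([0.5, 0.0, -0.2, 0.1])   # hypothetical last MLP layer output
h = np.array([0.3, 0.1, -0.2, 0.4, 0.2, 0.1, 0.0, -0.3])  # output-layer weights

# r_hat = sigma(h^T [phi_GMF ; phi_MLP])
r_hat = sigmoid(h @ np.concatenate([phi_gmf, phi_mlp]))
print(r_hat)
```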
###Code
df = movielens.load_pandas_df(
size=MOVIELENS_DATA_SIZE,
header=["userID", "itemID", "rating", "timestamp"]
)
df.head()
train, test = python_chrono_split(df, 0.75)
###Output
_____no_output_____
###Markdown
3.2 Functions of NCF Dataset The Dataset class for NCF, whose important functions are:`negative_sampling()`, which samples a negative user & item pair for every positive instance, controlled by the parameter `n_neg`.`train_loader(batch_size, shuffle=True)`, which generates training batches of size `batch_size`; we can also set whether to `shuffle` the training set.`test_loader()`, which generates a test batch for every positive test instance (e.g. \[1, 2, 1\] is a positive user & item pair in the test set, where the tuple is \[userID, itemID, rating\]). This function returns batches like \[\[1, 2, 1\], \[1, 3, 0\], \[1, 6, 0\], ...\], i.e. following our *leave-one-out* evaluation protocol.
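Once the `data` object is constructed in the next cell, these loaders are typically consumed along the following lines; this is a sketch based on the description above, and the exact batch structure is assumed to match the evaluation loop used later in the notebook:

```python
# Sketch only: assumed usage of the NCFDataset loaders described above.
# negative_sampling() is described as drawing n_neg negatives per positive instance.
for batch in data.train_loader(batch_size=256, shuffle=True):
    user_batch, item_batch, label_batch = batch  # assumed unpacking, as in the test loop below
    break  # one batch is enough for this illustration

for batch in data.test_loader():
    user_batch, item_batch, label_batch = batch  # first entry is the positive test item
    break
```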
###Code
data = NCFDataset(train=train, test=test, seed=SEED)
###Output
_____no_output_____
###Markdown
3.3 Train NCF based on TensorFlowThe NCF model has a lot of parameters. The most important ones are:`n_factors`, which controls the dimension of the latent space. Usually, the quality of the training set predictions grows as `n_factors` gets higher.`layer_sizes`, the sizes of the input layer (and hidden layers) of the MLP, given as a list.`n_epochs`, which defines the number of iterations of the SGD procedure.Note that these parameters also affect the training time.`model_type`: we can train a single `"MLP"` or `"GMF"` model, or the combined `"NCF"` model, by changing the model type.We will here set `n_factors` to `4`, `layer_sizes` to `[16,8,4]`, `n_epochs` to `100`, and `batch_size` to 256. To train the model, we simply need to call the `fit()` method.
###Code
model = NCF (
n_users=data.n_users,
n_items=data.n_items,
model_type="NeuMF",
n_factors=4,
layer_sizes=[16,8,4],
n_epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=1e-3,
verbose=10,
seed=SEED
)
with Timer() as train_time:
model.fit(data)
print("Took {} seconds for training.".format(train_time.interval))
###Output
Took 663.2377220259996 seconds for training.
###Markdown
3.4 Prediction and Evaluation 3.4.1 PredictionNow that our model is fitted, we can call `predict` to score user–item pairs. The predicted scores can easily be collected into a dataframe:
###Code
predictions = [[row.userID, row.itemID, model.predict(row.userID, row.itemID)]
for (_, row) in test.iterrows()]
predictions = pd.DataFrame(predictions, columns=['userID', 'itemID', 'prediction'])
predictions.head()
###Output
_____no_output_____
###Markdown
3.4.2 Generic EvaluationWe remove rated movies from the top-k recommendations. To compute ranking metrics, we need predictions for all user–item pairs, and we then remove the items already watched by the user, since we choose not to recommend them again.
###Code
with Timer() as test_time:
users, items, preds = [], [], []
item = list(train.itemID.unique())
for user in train.userID.unique():
user = [user] * len(item)
users.extend(user)
items.extend(item)
preds.extend(list(model.predict(user, item, is_list=True)))
all_predictions = pd.DataFrame(data={"userID": users, "itemID":items, "prediction":preds})
merged = pd.merge(train, all_predictions, on=["userID", "itemID"], how="outer")
all_predictions = merged[merged.rating.isnull()].drop('rating', axis=1)
print("Took {} seconds for prediction.".format(test_time.interval))
eval_map = map_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K)
eval_ndcg = ndcg_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K)
eval_precision = precision_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K)
eval_recall = recall_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K)
print("MAP:\t%f" % eval_map,
"NDCG:\t%f" % eval_ndcg,
"Precision@K:\t%f" % eval_precision,
"Recall@K:\t%f" % eval_recall, sep='\n')
###Output
MAP: 0.046273
NDCG: 0.190750
Precision@K: 0.173277
Recall@K: 0.096688
###Markdown
3.4.3 "Leave-one-out" EvaluationWe implement the functions to reproduce the leave-one-out evaluation protocol described in the original NCF paper.For each item in the test data, we randomly sample 100 items that the user has not interacted with, and rank the test item among the 101 items (1 positive item and 100 negative items). The performance of a ranked list is judged by **Hit Ratio (HR)** and **Normalized Discounted Cumulative Gain (NDCG)**. Finally, we average the values over those ranked lists to obtain the overall HR and NDCG on the test data.We truncate the ranked list at 10 for both metrics. As such, the HR intuitively measures whether the test item is present in the top-10 list, and the NDCG accounts for the position of the hit by assigning higher scores to hits at top ranks.**Note 1:** In the exact leave-one-out evaluation protocol, only each user's single latest interaction is held out as test data. But in this notebook, to compare with other algorithms, we select the latest 25% of the dataset as test data. So this is an artificial "leave-one-out" evaluation that only shows how to use `test_loader` and how to calculate the metrics as in the original paper. You can reproduce the real leave-one-out evaluation by changing the way the data is split.**Note 2:** Because 100 negative items are sampled for each positive test item, the resulting HR and NDCG values depend on that random sample and may vary slightly between runs.
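As a toy illustration of these metrics (not output from this notebook): if the positive test item is ranked 3rd among its 101 candidates and we truncate at $k = 10$, the list contributes a hit and an NDCG term of $1/\log(3+1)$ under the formula used in the cell below:

```python
import numpy as np

k = 10
rank = 3                       # hypothetical rank of the positive item among 101 candidates
hit = 1 if rank <= k else 0
ndcg = 1 / np.log(rank + 1) if rank <= k else 0
print(hit, round(ndcg, 4))     # 1, ~0.7213
```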
###Code
k = TOP_K
ndcgs = []
hit_ratio = []
for b in data.test_loader():
user_input, item_input, labels = b
output = model.predict(user_input, item_input, is_list=True)
output = np.squeeze(output)
rank = sum(output >= output[0])  # the first element of each test batch is the positive test item
if rank <= k:
ndcgs.append(1 / np.log(rank + 1))
hit_ratio.append(1)
else:
ndcgs.append(0)
hit_ratio.append(0)
eval_ndcg = np.mean(ndcgs)
eval_hr = np.mean(hit_ratio)
print("HR:\t%f" % eval_hr)
print("NDCG:\t%f" % eval_ndcg)
###Output
HR: 0.488564
NDCG: 0.383339
###Markdown
3.5 Pre-trainingTo get better performance from NeuMF, we can adopt a pre-training strategy. We first train GMF and MLP with random initializations until convergence, and then use their model parameters as the initialization for the corresponding parts of NeuMF's parameters. Pay particular attention to the output layer, where we concatenate the weights of the two models with$$h^{NCF} \leftarrow \left[\begin{array}{c} \alpha h^{GMF} \\ (1-\alpha) h^{MLP} \end{array}\right]$$where $h^{GMF}$ and $h^{MLP}$ denote the $h$ vector of the pre-trained GMF and MLP model, respectively, and $\alpha$ is a hyper-parameter determining the trade-off between the two pre-trained models. We set $\alpha = 0.5$. 3.5.1 Training GMF and MLP modelWith `model.save`, we can set the `dir_name` in which to store the parameters of GMF and MLP
###Code
model = NCF (
n_users=data.n_users,
n_items=data.n_items,
model_type="GMF",
n_factors=4,
layer_sizes=[16,8,4],
n_epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=1e-3,
verbose=10,
seed=SEED
)
with Timer() as train_time:
model.fit(data)
print("Took {} seconds for training.".format(train_time.interval))
model.save(dir_name=".pretrain/GMF")
model = NCF (
n_users=data.n_users,
n_items=data.n_items,
model_type="MLP",
n_factors=4,
layer_sizes=[16,8,4],
n_epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=1e-3,
verbose=10,
seed=SEED
)
with Timer() as train_time:
model.fit(data)
print("Took {} seconds for training.".format(train_time.interval))
model.save(dir_name=".pretrain/MLP")
###Output
Took 566.8783325639997 seconds for training.
###Markdown
3.5.2 Load pre-trained GMF and MLP model for NeuMFWith `model.load`, we can set `gmf_dir` and `mlp_dir` to load the stored GMF and MLP parameters into NeuMF.
###Code
model = NCF (
n_users=data.n_users,
n_items=data.n_items,
model_type="NeuMF",
n_factors=4,
layer_sizes=[16,8,4],
n_epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=1e-3,
verbose=10,
seed=SEED
)
model.load(gmf_dir=".pretrain/GMF", mlp_dir=".pretrain/MLP", alpha=0.5)
with Timer() as train_time:
model.fit(data)
print("Took {} seconds for training.".format(train_time.interval))
###Output
Took 655.1110815689999 seconds for training.
###Markdown
3.5.3 Compare with the non-pre-trained NeuMFYou can use the aforementioned evaluation methods to evaluate the pre-trained `NCF` model. Usually, we find that the pre-trained NCF performs better than the non-pre-trained one.
###Code
with Timer() as test_time:
users, items, preds = [], [], []
item = list(train.itemID.unique())
for user in train.userID.unique():
user = [user] * len(item)
users.extend(user)
items.extend(item)
preds.extend(list(model.predict(user, item, is_list=True)))
all_predictions = pd.DataFrame(data={"userID": users, "itemID":items, "prediction":preds})
merged = pd.merge(train, all_predictions, on=["userID", "itemID"], how="outer")
all_predictions = merged[merged.rating.isnull()].drop('rating', axis=1)
print("Took {} seconds for prediction.".format(test_time.interval))
eval_map2 = map_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K)
eval_ndcg2 = ndcg_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K)
eval_precision2 = precision_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K)
eval_recall2 = recall_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K)
print("MAP:\t%f" % eval_map2,
"NDCG:\t%f" % eval_ndcg2,
"Precision@K:\t%f" % eval_precision2,
"Recall@K:\t%f" % eval_recall2, sep='\n')
# Record results with papermill for tests
sb.glue("map", eval_map)
sb.glue("ndcg", eval_ndcg)
sb.glue("precision", eval_precision)
sb.glue("recall", eval_recall)
sb.glue("map2", eval_map2)
sb.glue("ndcg2", eval_ndcg2)
sb.glue("precision2", eval_precision2)
sb.glue("recall2", eval_recall2)
###Output
_____no_output_____
###Markdown
3.5.4 Delete pre-trained directory
###Code
save_dir = ".pretrain"
if os.path.exists(save_dir):
shutil.rmtree(save_dir)
print("Did \'%s\' exist?: %s" % (save_dir, os.path.exists(save_dir)))
###Output
Did '.pretrain' exist?: False
|
frameworks/tensorflow/Onnx_Tensorflow.ipynb | ###Markdown
Reference * API Tutorial: https://github.com/onnx/tutorials* Convert TensorFlow models to ONNX: https://github.com/onnx/tensorflow-onnx* Tensorflow Backend and Frontend for ONNX: https://github.com/onnx/onnx-tensorflow Onnx Preparation
###Code
!pip install onnx-tf
!pip install tf2onnx
import onnx
import onnx_tf
import tf2onnx
import os
import sys
import tensorflow as tf
import cv2
import numpy as np
import json
import codecs
from collections import OrderedDict
import matplotlib.pyplot as plt
from PIL import Image
!pip list | grep 'onnx'
!pip list | grep 'onnx-tf'
!pip list | grep 'tf2onnx'
!pip list | grep 'tensorflow'
!pip list | grep 'opencv-python'
!pip list | grep 'numpy'
!pip list | grep 'matplotlib'
###Output
onnx 1.3.0
onnx-tf 1.2.0
tf2onnx 0.3.1
onnx-tf 1.2.0
tf2onnx 0.3.1
tensorflow 1.11.0
opencv-python 3.4.3.18
numpy 1.15.2
matplotlib 3.0.0
###Markdown
TensorflowHere we use a classification model as the example (https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_1.0_224_frozen.tgz). For details on how to generate a frozen model, please refer to `freeze_graph.py`. ```shpython freeze_graph.py \ --input_graph output/graph.pbtxt \ --input_checkpoint ./output-16640 \ --output_graph /tmp/out \ --output_node_names ```
###Code
frozen_model_path = '/notebooks/Google_Drive_Devops_Sync/sophia/tmp/mobilenet_v1_1.0_224/frozen_graph.pb'
label_path = '/notebooks/Google_Drive_Devops_Sync/sophia/tmp/mobilenet_v1_1.0_224/labels.txt'
assert os.path.exists(frozen_model_path), "Tensorflow frozen model does not exist."
assert os.path.exists(label_path), "Label file does not exist."
###Output
_____no_output_____
###Markdown
Load the graph
###Code
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(frozen_model_path, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
###Output
_____no_output_____
###Markdown
Show all operations
###Code
def show_operation_names(graph, count=10):
with graph.as_default():
with tf.Session() as sess:
opts = tf.get_default_graph().get_operations()
for opt in opts[:count]:
for output in opt.outputs: print(output.name)
print("...")
for opt in opts[-count:]:
for output in opt.outputs: print(output.name)
show_operation_names(detection_graph, 5)
###Output
input:0
MobilenetV1/Conv2d_0/weights:0
MobilenetV1/Conv2d_0/weights/read:0
MobilenetV1/MobilenetV1/Conv2d_0/convolution:0
MobilenetV1/Conv2d_0/BatchNorm/beta:0
...
MobilenetV1/Predictions/Reshape/shape:0
MobilenetV1/Predictions/Reshape:0
MobilenetV1/Predictions/Softmax:0
MobilenetV1/Predictions/Shape:0
MobilenetV1/Predictions/Reshape_1:0
###Markdown
Load Label
###Code
def loadLabel(labelPath):
tmp = []
labels = OrderedDict()
with codecs.open(labelPath,"r","utf-8") as fin:
for line in fin:
tmp = line.strip().split(':')
labels[tmp[0]] = tmp[1]
return labels
labels = loadLabel(label_path)
[(str(label) + ":" + labels[str(label)]) for label in range(10)]
###Output
_____no_output_____
###Markdown
Inference
###Code
def inference_single_image(image, graph):
with graph.as_default():
with tf.Session() as sess:
# handle input and output tensor
opts = tf.get_default_graph().get_operations()
all_tensorflow_names = { output.name for opt in opts for output in opt.outputs }
tensor_dict = {}
for key in ['MobilenetV1/Predictions/Reshape_1']:
tensor_name = key + ':0'
if tensor_name in all_tensorflow_names:
tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(tensor_name)
# run for single image
# input
image_tensor = tf.get_default_graph().get_tensor_by_name('input:0')
# inference
output_dict = sess.run(tensor_dict, feed_dict={image_tensor: np.expand_dims(image, 0)})
# the predictions are already float32; no dtype conversion is actually performed here
output_dict['MobilenetV1/Predictions/Reshape_1'] = output_dict['MobilenetV1/Predictions/Reshape_1']
return output_dict
def single_image(imagePath):
image_path = imagePath
if not os.path.exists(image_path): raise FileNotFoundError("{} not found.".format(image_path))
image = cv2.imread(image_path)
image = cv2.resize(image, (224, 224), interpolation=cv2.INTER_CUBIC)
image = image[:,:,::-1]
plt.imshow(image)
output_dict = inference_single_image(image, detection_graph)
return output_dict
#image_path = '/Users/jiankaiwang/devops/Fruit_Recognition/eval/qnap_fruit_val_00003.JPEG'
image_path = '/notebooks/Google_Drive_Devops_Sync/sophia/tmp/mobilenet_v1_1.0_224/test.jpg'
output_dict = single_image(image_path)
print(output_dict)
cls_idx = int(np.argmax(output_dict['MobilenetV1/Predictions/Reshape_1'], axis=1))
print(cls_idx, output_dict['MobilenetV1/Predictions/Reshape_1'][0][cls_idx], labels[str(cls_idx)])
r, c = np.where(output_dict['MobilenetV1/Predictions/Reshape_1'] > 0.05)
for idx in range(len(r)):
print(output_dict['MobilenetV1/Predictions/Reshape_1'][r[idx]][c[idx]], labels[str(c[idx])])
###Output
0.16483758 digital clock
0.27690268 fire screen, fireguard
0.39241496 shower curtain
###Markdown
Tensorflow to Onnx
###Code
from onnx_tf.frontend import tensorflow_graph_to_onnx_model
with tf.gfile.GFile(frozen_model_path, "rb") as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
# ignore_unimplemented: onnx did't implementation the whole tensorflow operations
onnx_model = tensorflow_graph_to_onnx_model(graph_def, \
"MobilenetV1/Predictions/Softmax", \
ignore_unimplemented=True, \
opset=0)
with open("/notebooks/Google_Drive_Devops_Sync/sophia/tmp/mobilenet_v1_1.0_224/mobilenet.onnx", "wb") as fout:
fout.write(onnx_model.SerializeToString())
###Output
/usr/local/lib/python3.5/dist-packages/onnx_tf/handlers/handler.py:35: UserWarning: Pack doesn't have ONNX_OP. Please use Handler.onnx_op decorator to register ONNX_OP.
cls.__name__))
/usr/local/lib/python3.5/dist-packages/onnx_tf/common/handler_helper.py:38: UserWarning: Unknown op Undefined in domain `ai.onnx`. Can't check specification by ONNX. Please set should_check flag to False when call make_node method in handler.
"ai.onnx"))
/usr/local/lib/python3.5/dist-packages/onnx_tf/handlers/handler.py:35: UserWarning: Unpack doesn't have ONNX_OP. Please use Handler.onnx_op decorator to register ONNX_OP.
cls.__name__))
/usr/local/lib/python3.5/dist-packages/onnx_tf/common/exception.py:15: UserWarning: Rsqrt is not implemented.
self._func(self.get_message(*args, **kwargs))
/usr/local/lib/python3.5/dist-packages/onnx_tf/handlers/frontend_handler.py:106: UserWarning: Skipped check for Rsqrt.
warnings.warn("Skipped check for {}.".format(node.op_type))
/usr/local/lib/python3.5/dist-packages/onnx_tf/common/exception.py:15: UserWarning: Relu6 is not implemented.
self._func(self.get_message(*args, **kwargs))
/usr/local/lib/python3.5/dist-packages/onnx_tf/handlers/frontend_handler.py:106: UserWarning: Skipped check for Relu6.
warnings.warn("Skipped check for {}.".format(node.op_type))
/usr/local/lib/python3.5/dist-packages/onnx_tf/common/exception.py:15: UserWarning: DepthwiseConv2dNative is not implemented.
self._func(self.get_message(*args, **kwargs))
/usr/local/lib/python3.5/dist-packages/onnx_tf/handlers/frontend_handler.py:106: UserWarning: Skipped check for DepthwiseConv2dNative.
warnings.warn("Skipped check for {}.".format(node.op_type))
###Markdown
In the above warning messages, ops `Rsqrt`, `Relu6`, `DepthwiseConv2dNative`, ... are not implemented by ONNX, so the ONNX model would lose the corresponding operations even though it is transformed. The ONNX community does not plan to implement all of them, because there are too many operations in TensorFlow. Inference from ONNX model Here we use the official ONNX model downloaded from https://github.com/onnx/models/tree/master/models/image_classification/mobilenet. Convert it and run inference on an image.
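Before converting, it can help to inspect which op types the frozen graph actually uses, so you can anticipate which ones the converter may skip. A small sketch using the `graph_def` loaded above:

```python
from collections import Counter

# Count the op types used in the frozen TensorFlow graph loaded above (graph_def).
op_counts = Counter(node.op for node in graph_def.node)
for op, count in op_counts.most_common():
    print(op, count)
```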
###Code
from onnx_tf.backend import prepare
onnx_model_path = '/notebooks/Google_Drive_Devops_Sync/sophia/tmp/mobilenetv2-1.0/mobilenetv2-1.0.onnx'
assert os.path.exists(onnx_model_path), "ONNX model is not found."
model = onnx.load(onnx_model_path) # Load the ONNX file
tf_rep = prepare(model) # Import the ONNX model to Tensorflow
###Output
/usr/local/lib/python3.5/dist-packages/onnx_tf/common/handler_helper.py:74: UserWarning: Unknown op ConstantLike in domain `ai.onnx`.
handler.ONNX_OP, handler.DOMAIN or "ai.onnx"))
/usr/local/lib/python3.5/dist-packages/onnx_tf/common/handler_helper.py:71: UserWarning: Fail to get since_version of Expand in domain `` with max_inclusive_version=7. Set to 1.
handler.ONNX_OP, handler.DOMAIN, version))
###Markdown
Show all operations
###Code
print(tf_rep.inputs) # Input nodes to the model
print('-----')
print(tf_rep.outputs) # Output nodes from the model
print('-----')
print(tf_rep.tensor_dict) # All nodes in the model
from IPython.display import display
test_img = "/notebooks/Google_Drive_Devops_Sync/sophia/tmp/mobilenetv2-1.0/test.png"
assert os.path.exists(test_img), "Test image is not found."
img = Image.open(test_img).resize((224, 224))
display(img)
img = np.asarray(img)
img = np.swapaxes(img, 1, 2)
img = np.swapaxes(img, 0, 1)
img = img.reshape(1, 3, 224, 224) # in order to meet the requirement of input's shape
print(img.shape)
output_cls = tf_rep.run(img)
print(output_cls["mobilenetv20_output_flatten0_reshape0"].shape)
output_cls_idx = np.argmax(output_cls["mobilenetv20_output_flatten0_reshape0"], axis=1)
print(output_cls_idx)
print(output_cls["mobilenetv20_output_flatten0_reshape0"][0][output_cls_idx])
###Output
[470]
[185.9536]
###Markdown
Onnx to Tensorflow
###Code
import onnx
from onnx_tf.backend import prepare
def onnx2pb(onnx_input_path, pb_output_path):
onnx_model = onnx.load(onnx_input_path) # load onnx model
tf_exp = prepare(onnx_model) # prepare tf representation
tf_exp.export_graph(pb_output_path) # export the model
onnx_input_path = '/notebooks/Google_Drive_Devops_Sync/sophia/tmp/mobilenetv2-1.0/mobilenetv2-1.0.onnx'
pb_output_path = '/notebooks/Google_Drive_Devops_Sync/sophia/tmp/mobilenetv2-1.0/mobilenetv2-1.0.pb'
onnx2pb(onnx_input_path, pb_output_path)
###Output
/usr/local/lib/python3.5/dist-packages/onnx_tf/common/handler_helper.py:74: UserWarning: Unknown op ConstantLike in domain `ai.onnx`.
handler.ONNX_OP, handler.DOMAIN or "ai.onnx"))
/usr/local/lib/python3.5/dist-packages/onnx_tf/common/handler_helper.py:71: UserWarning: Fail to get since_version of Expand in domain `` with max_inclusive_version=7. Set to 1.
handler.ONNX_OP, handler.DOMAIN, version))
###Markdown
Test PB model from onnx
###Code
onnx_pb_graph = tf.Graph()
with onnx_pb_graph.as_default():
onnx_pb_graph_def = tf.GraphDef()
with tf.gfile.GFile(pb_output_path, 'rb') as fid:
serialized_graph = fid.read()
onnx_pb_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(onnx_pb_graph_def, name='')
show_operation_names(onnx_pb_graph, 5)
def general_inference_single_image(image, graph, input_layer="input:0", output_layer=['MobilenetV1/Predictions/Reshape_1']):
with graph.as_default():
with tf.Session() as sess:
# handle input and output tensor
opts = tf.get_default_graph().get_operations()
all_tensorflow_names = { output.name for opt in opts for output in opt.outputs }
tensor_dict = {}
for key in output_layer:
tensor_name = key + ':0'
if tensor_name in all_tensorflow_names:
tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(tensor_name)
assert len(tensor_dict.keys()) > 0, "No output layer is found."
# run for single image
# input
image_tensor = tf.get_default_graph().get_tensor_by_name(input_layer)
# inference
output_dict = sess.run(tensor_dict, feed_dict={image_tensor: np.expand_dims(image, 0)})
return output_dict
def single_image(imagePath, used_graph, input_layer, output_layer):
image_path = imagePath
if not os.path.exists(image_path): raise FileNotFoundError("{} not found.".format(image_path))
image = cv2.imread(image_path)
image = cv2.resize(image, (224, 224), interpolation=cv2.INTER_CUBIC)
image = image[:,:,::-1]
plt.imshow(image)
print(image.shape)
image = np.swapaxes(image, 1, 2)
image = np.swapaxes(image, 0, 1)
image = image.reshape(3, 224, 224)
print(image.shape)
output_dict = general_inference_single_image(image, used_graph, input_layer, output_layer)
return output_dict
onnx_pb_output_dict = single_image(test_img, onnx_pb_graph, "data:0", ["mobilenetv20_output_flatten0_reshape0"])
print(onnx_pb_output_dict["mobilenetv20_output_flatten0_reshape0"].shape)
onnx_pb_output_dict_idx = np.argmax(onnx_pb_output_dict["mobilenetv20_output_flatten0_reshape0"], axis=1)
print(onnx_pb_output_dict_idx)
print(onnx_pb_output_dict["mobilenetv20_output_flatten0_reshape0"][0][onnx_pb_output_dict_idx])
###Output
(1, 1000)
[470]
[185.9536]
|
P_menindee_lakes.ipynb | ###Markdown
Curlew Sandpipers at the Menindee lakesThe Menindee Lakes is a chain of shallow ephemeral freshwater lakes connected to the Darling River to form a storage system. The lakes lie in the far west region of New South Wales, Australia, near the town of Menindee. In the past few years, the Darling River System along with the Menindee Lakes have been severely impacted by extreme drought conditions. This has resulted in particularly low and prolonged storage inflows, lower storage releases, and high evaporation rates. [Source: MDBA](https://www.mdba.gov.au/river-murray-system/running-river-murray/menindee-lakes-facts)The Menindee Lakes are an integral breeding ground for the hundreds of bird, fish and reptile species in the Murray-Darling Basin, one of the richest ecologies in the country. Lake Menindee has completely dried up several times in the past few years. (Source: ABC News) Your task:You are an ecologist working for Wildaroo, a not-for-profit organisation that protects endangered species across Australia. You have been tasked with drafting an action plan to protect the Curlew Sandpipers, a species of bird commonly found in the Menindee lakes. The number of birds spotted has been declining in recent years due to the lack of water in the region. Wildaroo wants to start an intervention to increase the numbers of Sandpipers in the area.Your mission is to identify the areas that are most affected by drought and decide the best locations where your organisation should deploy birdfeeders to support the existing population. The ideal place should be close to wet areas, which are the natural habitat of Curlew Sandpipers. Load packagesYou start by loading the usual Python libraries for this project.
###Code
%matplotlib inline
import datacube
import matplotlib.pyplot as plt
import numpy as np
import xarray as xr
###Output
_____no_output_____
###Markdown
Load dataYou request Water Observations from Space (WOfS) data for the first 6 months of the year.
###Code
dc_menindee = datacube.Datacube(app="Menindee_Lakes")
query = {'lat': (-32.55, -32.25),
'lon': (142.15, 142.45),
'time':('2019-01-01', '2019-06-01')}
menindee_19 = dc_menindee.load(product='wofs_albers', **query)
menindee_19
###Output
_____no_output_____
###Markdown
Interpreting WOfSYou want to understand what values are contained in this dataset and use `unique`, a numpy function that returns the unique values contained in an array:
###Code
np.unique(menindee_19.water.data)
###Output
_____no_output_____
###Markdown
There is a special function in DEA that displays the interpretation of mask bitflags in a collection. So, you import the required masking utility from the Datacube library.
###Code
from datacube.utils import masking
masking.describe_variable_flags(menindee_19, with_pandas=True)
###Output
_____no_output_____
###Markdown
Creating the mask and computing the median water contentWet areas are represented using the value `128`, as opposed to dry areas, which are represented by the value `0`. You represent the median water content over the first 6 months.
###Code
m2019 = menindee_19.water.where(menindee_19.water == 128).median(dim='time')
m2019.plot()
###Output
_____no_output_____
###Markdown
Replicate for 2001
###Code
query = {'lat': (-32.55, -32.25),
'lon': (142.15, 142.45),
'time':('2001-01-01', '2001-06-01')}
menindee_01 = dc_menindee.load(product='wofs_albers', **query)
menindee_01
m2001 = menindee_01.water.where(menindee_01.water == 128).median(dim='time')
m2001.plot()
###Output
_____no_output_____
###Markdown
Representing changes in water using a Semaphore plotYou want to create a plot to represent changes in the water between the years 2001 and 2019. The plot maps the intersection of the water areas of both years with the following mapping:* If a pixel contains water in both years -> Blue* If a pixel had water before but doesn't have it now -> Red (water lost)* If a pixel didn't have water but has water now -> Green (new water)* If a pixel never had water -> Transparent (RGB supports a 4th channel called Alpha for transparency)_Hint: This is an example of what a semaphore plot looks like for the years 2016-2019 at this location:_ Coding the semaphore plotFor creating this plot you'll need to:1. Transform `m2001` and `m2019` to boolean types (water/dry) using a threshold.2. Use numpy logical functions to compute the boolean values of the different components Red, Green, Blue, Alpha. _Hint: use `np.logical_not()` and `np.logical_and()` to compute the components._3. Stack the components along a 3rd dimension making use of `np.dstack`4. Convert the resulting array into `float64` type5. Plot using matplotlib. A sketch of one possible approach is shown below.
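A minimal sketch of one possible solution, assuming `m2001` and `m2019` computed above share the same grid and contain NaN wherever no water was observed:

```python
# 1. boolean water masks (NaN means the pixel was never classified as wet)
wet_2001 = ~np.isnan(m2001.values)
wet_2019 = ~np.isnan(m2019.values)

# 2. the four components of the semaphore image
red = np.logical_and(wet_2001, np.logical_not(wet_2019))    # water lost
green = np.logical_and(np.logical_not(wet_2001), wet_2019)  # new water
blue = np.logical_and(wet_2001, wet_2019)                   # water in both years
alpha = np.logical_or(wet_2001, wet_2019)                   # transparent if never wet

# 3. & 4. stack into an RGBA array and convert to float64
semaphore = np.dstack([red, green, blue, alpha]).astype(np.float64)

# 5. plot with matplotlib
plt.figure(figsize=(8, 8))
plt.imshow(semaphore)
plt.show()
```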
###Code
### Your code goes here
###Output
_____no_output_____ |
notebooks/Dataset Statistics.ipynb | ###Markdown
Dataset Statistics Check dataset existence
###Code
# check data existence
from pathlib import Path
data_folder = Path.cwd().parent.joinpath("data/processed")
def check_data_existence(folder):
file_count = len(list(folder.glob("e*_ann.json")))
if file_count == 0:
raise Exception("Processed Data does not exist.")
else:
print("{} files exist.".format(file_count))
check_data_existence(data_folder)
###Output
230 files exist.
###Markdown
Read data to DataFrame
###Code
import json
import pandas as pd
companies = []
sentences = []
entities = []
for f in data_folder.glob("e*_ann.json"):
with f.open(encoding="utf-8") as j:
d = json.load(j)
# company infos
company_info = d["header"]
companies.append(company_info)
# sentences
company_code = company_info["document_id"]
for s in d["sentences"]:
line = {
"company": company_code,
"sentence": s["sentence"],
"entities": len(s["opinions"])
}
sentences.append(line)
# entities
for o in s["opinions"]:
entities.append(o)
companies = pd.DataFrame(companies)
sentences = pd.DataFrame(sentences)
entities = pd.DataFrame(entities)
companies.head(5)
sentences.head(5)
entities.head(5)
###Output
_____no_output_____
###Markdown
Show Statistics
###Code
%matplotlib inline
###Output
_____no_output_____
###Markdown
Data distribution
###Code
translation = """
水産・農林業 Fishery, Agriculture & Forestry
鉱業 Mining
建設業 Construction
食料品 Foods
繊維製品 Textiles and Apparels
パルプ・紙 Pulp and Paper
化学 Chemicals
医薬品 Pharmaceutical
石油・石炭製品 Oil and Coal Products
ゴム製品 Rubber Products
ガラス・土石製品 Glass and Ceramics Products
鉄鋼 Iron and Steel
非鉄金属 Nonferrous Metals
金属製品 Metal Products
機械 Machinery
電気機器 Electric Appliances
輸送用機器 Transportation Equipment
精密機器 Precision Instruments
その他製品 Other Products
電気・ガス業 Electric Power and Gas
陸運業 Land Transportation
海運業 Marine Transportation
空運業 Air Transportation
倉庫・運輸関連業 Warehousing and Harbor Transportation
情報・通信業 Information & Communication
卸売業 Wholesale Trade
小売業 Retail Trade
銀行業 Banks
証券、商品先物取引業 Securities and Commodities Futures
保険業 Insurance
その他金融業 Other Financing Business
不動産業 Real Estate
サービス業 Services
"""
translation_list = [t.split("\t") for t in translation.split("\n") if t]
translation_list = dict(translation_list )
companies["category33_en"] = companies["category33"].apply(lambda c: translation_list[c])
companies.groupby(["category33"]).count()["edi_id"].sort_values(ascending=False).plot(kind="bar", figsize=(15,5))
companies.groupby(["category33_en"]).count()["edi_id"].sort_values(ascending=False).plot(kind="bar", figsize=(15,5))
###Output
_____no_output_____
###Markdown
Label distribution
###Code
print("{} entities are annotated.".format(len(entities)))
entities.groupby(["category"]).count()["target"].sort_values(ascending=False).plot(kind="bar")
(entities.groupby(["category"]).count()["target"].sort_values(ascending=False).cumsum() * 100 / len(entities)).plot.line(secondary_y=True, style="g", rot=90)
entities.groupby(["polarity"]).count()["target"].plot.bar()
entities.groupby(["polarity", "category"]).count()["target"].divide(entities.groupby(["category"]).count()["target"]).unstack("polarity").plot.bar(stacked=True)
###Output
_____no_output_____
###Markdown
Sentence distribution
###Code
print("The sentences that have entities are {}.".format(len(sentences[sentences["entities"] > 0])))
print("The number of sentences are {}.".format(len(sentences)))
sentences[sentences["entities"] > 0].groupby(["entities"]).count()["company"].plot.bar()
###Output
_____no_output_____ |
ML_pandas/ML_pandas2.ipynb | ###Markdown
Starting with this code, loading in a CSV to a dataframe can be as simple as:
###Code
import pandas as pd
import matplotlib.pyplot as plt
df = pd.read_csv('ZILLOW-C252_MSPFCO.csv')
print(df.head())
###Output
Date Value
0 2016-07-31 807.9414
1 2016-06-30 797.5535
2 2016-05-31 809.5157
3 2016-04-30 818.5681
4 2016-03-31 800.9157
###Markdown
Notice that we have no decent index again. We can fix that like we did before doing:
###Code
df=df.set_index('Date')
###Output
_____no_output_____
###Markdown
Now, let's say we want to send this back to a CSV, we can do:
###Code
df.to_csv('newcsv2.csv')
###Output
_____no_output_____
###Markdown
With pandas we can quickly make a plot of the data.
###Code
df.plot()
plt.show()
###Output
_____no_output_____
###Markdown
We notice that it is plotting from the most recent data to the oldest data. This is not how we would like such data to be presented. We can quickly flip the dataset using pandas indexing. Let's start by looking at the 25 most recent data points
###Code
print(df[:25])
###Output
Value
Date
2016-07-31 807.9414
2016-06-30 797.5535
2016-05-31 809.5157
2016-04-30 818.5681
2016-03-31 800.9157
2016-02-29 771.4921
2016-01-31 747.5905
2015-12-31 745.8960
2015-11-30 786.6922
2015-10-31 744.3815
2015-09-30 719.1234
2015-08-31 736.4989
2015-07-31 736.1203
2015-06-30 734.6554
2015-05-31 761.6679
2015-04-30 730.1186
2015-03-31 762.7666
2015-02-28 704.9493
2014-12-31 654.5523
2014-11-30 648.7748
2014-10-31 673.1492
2014-09-30 662.6267
2014-08-31 630.2345
2014-07-31 629.4594
2014-06-30 651.3413
###Markdown
We can notice that this is the value reported at the end of each month. Let's say we only care about the value reported at the end of every third month. We could use indexing like this:
###Code
print(df[:25:3])
###Output
Value
Date
2016-07-31 807.9414
2016-04-30 818.5681
2016-01-31 747.5905
2015-10-31 744.3815
2015-07-31 736.1203
2015-04-30 730.1186
2014-12-31 654.5523
2014-09-30 662.6267
2014-06-30 651.3413
###Markdown
Alternatively, we could look at the data provided with year spacing.
###Code
print(df[::12])
###Output
Value
Date
2016-07-31 807.9414
2015-07-31 736.1203
2014-06-30 651.3413
2013-06-30 615.2628
2012-06-30 541.8987
2011-06-30 484.4020
2010-06-30 543.6748
2009-06-30 572.6300
2008-06-30 590.9401
2007-06-30 594.4301
2006-06-30 652.5614
2005-06-30 624.5646
2004-06-30 549.3914
2003-06-30 400.8241
2002-06-30 360.6274
2001-06-30 321.8108
2000-06-30 270.7787
1999-06-30 244.5099
1998-06-30 217.9388
1997-05-31 192.2811
1996-05-31 174.5560
###Markdown
Finally, we could use this indexing to flip our data and create a sensible plot.
###Code
df[::-1].plot()
plt.show()
###Output
_____no_output_____
###Markdown
We can extract this data in a variety of ways. This is a Pandas tutorial, so if we can use Pandas, we shall. Let's check out read_html from Pandas. It's not being called "experimental" anymore, but I would still label this as experimental. The standard and quality of the other IO modules are very high and dependable. This read_html is not quite up to par, but I still say it's very impressive and useful code, and just plain cool. The way it works is that you simply feed in a URL, and Pandas will extract the dataframe-worthy data from tables into dataframes. This means that, unlike the other typical methods you will usually use, read_html ends up reading into a list of dataframes. This isn't the only difference, but it is one to keep in mind. First, in order to use read_html, we need html5lib. Open up cmd.exe or your terminal and do: pip install html5lib. Now, we can make our first attempt by doing:
###Code
fiddy_states = pd.read_html('https://simple.wikipedia.org/wiki/List_of_U.S._states')
print(fiddy_states)
###Output
[ 0 1 2 3 4
0 Sl no. Abbreviations State Name Capital Became a State
1 1 AL Alabama Montgomery December 14, 1819
2 2 AK Alaska Juneau January 3, 1959
3 3 AZ Arizona Phoenix February 14, 1912
4 4 AR Arkansas Little Rock June 15, 1836
5 5 CA California Sacramento September 9, 1850
6 6 CO Colorado Denver August 1, 1876
7 7 CT Connecticut Hartford January 9, 1788
8 8 DE Delaware Dover December 7, 1787
9 9 FL Florida Tallahassee March 3, 1845
10 10 GA Georgia Atlanta January 2, 1788
11 11 HI Hawaii Honolulu August 21, 1959
12 12 ID Idaho Boise July 3, 1890
13 13 IL Illinois Springfield December 3, 1818
14 14 IN Indiana Indianapolis December 11, 1816
15 15 IA Iowa Des Moines December 28, 1846
16 16 KS Kansas Topeka January 29, 1861
17 17 KY Kentucky Frankfort June 1, 1792
18 18 LA Louisiana Baton Rouge April 30, 1812
19 19 ME Maine Augusta March 15, 1820
20 20 MD Maryland Annapolis April 28, 1788
21 21 MA Massachusetts Boston February 6, 1788
22 22 MI Michigan Lansing January 26, 1837
23 23 MN Minnesota Saint Paul May 11, 1858
24 24 MS Mississippi Jackson December 10, 1817
25 25 MO Missouri Jefferson City August 10, 1821
26 26 MT Montana Helena November 8, 1889
27 27 NE Nebraska Lincoln March 1, 1867
28 28 NV Nevada Carson City October 31, 1864
29 29 NH New Hampshire Concord June 21, 1788
30 30 NJ New Jersey Trenton December 18, 1787
31 31 NM New Mexico Santa Fe January 6, 1912
32 32 NY New York Albany July 26, 1788
33 33 NC North Carolina Raleigh November 21, 1789
34 34 ND North Dakota Bismarck November 2, 1889
35 35 OH Ohio Columbus March 1, 1803
36 36 OK Oklahoma Oklahoma City November 16, 1907
37 37 OR Oregon Salem February 14, 1859
38 38 PA Pennsylvania Harrisburg December 12, 1787
39 39 RI Rhode Island Providence May 19, 1790
40 40 SC South Carolina Columbia May 23, 1788
41 41 SD South Dakota Pierre November 2, 1889
42 42 TN Tennessee Nashville June 1, 1796
43 43 TX Texas Austin December 29, 1845
44 44 UT Utah Salt Lake City January 4, 1896
45 45 VT Vermont Montpelier March 4, 1791
46 46 VA Virginia Richmond June 25, 1788
47 47 WA Washington Olympia November 11, 1889
48 48 WV West Virginia Charleston June 20, 1863
49 49 WI Wisconsin Madison May 29, 1848
50 50 WY Wyoming Cheyenne July 10, 1890, 0 \
0 vtePolitical divisions of the United States
1 States
2 Federal district
3 Insular areas
4 Outlying islands
1
0 NaN
1 Alabama Alaska Arizona Arkansas California Col...
2 Washington, D.C.
3 American Samoa Guam Northern Mariana Islands P...
4 Baker Island Howland Island Jarvis Island John... ]
###Markdown
That's more output than I am going to post here, but you get the idea. At least some of this data is what we want, and it looks like the first dataframe is off to a good start. So let's do:
###Code
print(fiddy_states[0])
###Output
0 1 2 3 4
0 Sl no. Abbreviations State Name Capital Became a State
1 1 AL Alabama Montgomery December 14, 1819
2 2 AK Alaska Juneau January 3, 1959
3 3 AZ Arizona Phoenix February 14, 1912
4 4 AR Arkansas Little Rock June 15, 1836
5 5 CA California Sacramento September 9, 1850
6 6 CO Colorado Denver August 1, 1876
7 7 CT Connecticut Hartford January 9, 1788
8 8 DE Delaware Dover December 7, 1787
9 9 FL Florida Tallahassee March 3, 1845
10 10 GA Georgia Atlanta January 2, 1788
11 11 HI Hawaii Honolulu August 21, 1959
12 12 ID Idaho Boise July 3, 1890
13 13 IL Illinois Springfield December 3, 1818
14 14 IN Indiana Indianapolis December 11, 1816
15 15 IA Iowa Des Moines December 28, 1846
16 16 KS Kansas Topeka January 29, 1861
17 17 KY Kentucky Frankfort June 1, 1792
18 18 LA Louisiana Baton Rouge April 30, 1812
19 19 ME Maine Augusta March 15, 1820
20 20 MD Maryland Annapolis April 28, 1788
21 21 MA Massachusetts Boston February 6, 1788
22 22 MI Michigan Lansing January 26, 1837
23 23 MN Minnesota Saint Paul May 11, 1858
24 24 MS Mississippi Jackson December 10, 1817
25 25 MO Missouri Jefferson City August 10, 1821
26 26 MT Montana Helena November 8, 1889
27 27 NE Nebraska Lincoln March 1, 1867
28 28 NV Nevada Carson City October 31, 1864
29 29 NH New Hampshire Concord June 21, 1788
30 30 NJ New Jersey Trenton December 18, 1787
31 31 NM New Mexico Santa Fe January 6, 1912
32 32 NY New York Albany July 26, 1788
33 33 NC North Carolina Raleigh November 21, 1789
34 34 ND North Dakota Bismarck November 2, 1889
35 35 OH Ohio Columbus March 1, 1803
36 36 OK Oklahoma Oklahoma City November 16, 1907
37 37 OR Oregon Salem February 14, 1859
38 38 PA Pennsylvania Harrisburg December 12, 1787
39 39 RI Rhode Island Providence May 19, 1790
40 40 SC South Carolina Columbia May 23, 1788
41 41 SD South Dakota Pierre November 2, 1889
42 42 TN Tennessee Nashville June 1, 1796
43 43 TX Texas Austin December 29, 1845
44 44 UT Utah Salt Lake City January 4, 1896
45 45 VT Vermont Montpelier March 4, 1791
46 46 VA Virginia Richmond June 25, 1788
47 47 WA Washington Olympia November 11, 1889
48 48 WV West Virginia Charleston June 20, 1863
49 49 WI Wisconsin Madison May 29, 1848
50 50 WY Wyoming Cheyenne July 10, 1890
###Markdown
Yep, that's looking good, we want column 0. So, we want to iterate through column 0 of fiddy_states[0]. Remember, right now, fiddy_states is a list of dataframes, and fiddy_states[0] is the first dataframe. To reference column 0 then, we do fiddy_states[0][0]. One is a list index, which returns a dataframe. The other is a column within the dataframe. Next, we notice the first item in column 0 is the word "abbreviation," which we don't want. We can correct the header and indexing as so:
###Code
##Use only the first Table
fiddy_states=fiddy_states[0]
###Correct the header
fiddy_states.columns = fiddy_states.iloc[0]
fiddy_states=fiddy_states.drop(fiddy_states.index[0])
###Correct the indexing
fiddy_states=fiddy_states.set_index('Sl no.')
print(fiddy_states)
###Output
0 Abbreviations State Name Capital Became a State
Sl no.
1 AL Alabama Montgomery December 14, 1819
2 AK Alaska Juneau January 3, 1959
3 AZ Arizona Phoenix February 14, 1912
4 AR Arkansas Little Rock June 15, 1836
5 CA California Sacramento September 9, 1850
6 CO Colorado Denver August 1, 1876
7 CT Connecticut Hartford January 9, 1788
8 DE Delaware Dover December 7, 1787
9 FL Florida Tallahassee March 3, 1845
10 GA Georgia Atlanta January 2, 1788
11 HI Hawaii Honolulu August 21, 1959
12 ID Idaho Boise July 3, 1890
13 IL Illinois Springfield December 3, 1818
14 IN Indiana Indianapolis December 11, 1816
15 IA Iowa Des Moines December 28, 1846
16 KS Kansas Topeka January 29, 1861
17 KY Kentucky Frankfort June 1, 1792
18 LA Louisiana Baton Rouge April 30, 1812
19 ME Maine Augusta March 15, 1820
20 MD Maryland Annapolis April 28, 1788
21 MA Massachusetts Boston February 6, 1788
22 MI Michigan Lansing January 26, 1837
23 MN Minnesota Saint Paul May 11, 1858
24 MS Mississippi Jackson December 10, 1817
25 MO Missouri Jefferson City August 10, 1821
26 MT Montana Helena November 8, 1889
27 NE Nebraska Lincoln March 1, 1867
28 NV Nevada Carson City October 31, 1864
29 NH New Hampshire Concord June 21, 1788
30 NJ New Jersey Trenton December 18, 1787
31 NM New Mexico Santa Fe January 6, 1912
32 NY New York Albany July 26, 1788
33 NC North Carolina Raleigh November 21, 1789
34 ND North Dakota Bismarck November 2, 1889
35 OH Ohio Columbus March 1, 1803
36 OK Oklahoma Oklahoma City November 16, 1907
37 OR Oregon Salem February 14, 1859
38 PA Pennsylvania Harrisburg December 12, 1787
39 RI Rhode Island Providence May 19, 1790
40 SC South Carolina Columbia May 23, 1788
41 SD South Dakota Pierre November 2, 1889
42 TN Tennessee Nashville June 1, 1796
43 TX Texas Austin December 29, 1845
44 UT Utah Salt Lake City January 4, 1896
45 VT Vermont Montpelier March 4, 1791
46 VA Virginia Richmond June 25, 1788
47 WA Washington Olympia November 11, 1889
48 WV West Virginia Charleston June 20, 1863
49 WI Wisconsin Madison May 29, 1848
50 WY Wyoming Cheyenne July 10, 1890
###Markdown
Now we can isolate any part of this table. We are interested in the Abbreviations and can look at them using the header.
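For example, a quick way (not shown in the original tutorial) to pull that column out as a plain Python list, so it can be iterated over later:
###Code
# A small added sketch, not part of the original tutorial: collect the state
# abbreviations into an ordinary Python list for later iteration.
state_abbv = list(fiddy_states["Abbreviations"])
print(state_abbv[:5])
###Output
_____no_output_____
###Markdown
Printing the column directly also works: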
###Code
print(fiddy_states["Abbreviations"])
###Output
Sl no.
1 AL
2 AK
3 AZ
4 AR
5 CA
6 CO
7 CT
8 DE
9 FL
10 GA
11 HI
12 ID
13 IL
14 IN
15 IA
16 KS
17 KY
18 LA
19 ME
20 MD
21 MA
22 MI
23 MN
24 MS
25 MO
26 MT
27 NE
28 NV
29 NH
30 NJ
31 NM
32 NY
33 NC
34 ND
35 OH
36 OK
37 OR
38 PA
39 RI
40 SC
41 SD
42 TN
43 TX
44 UT
45 VT
46 VA
47 WA
48 WV
49 WI
50 WY
Name: Abbreviations, dtype: object
|
src/awesome_python/algorithm/staic_learn/第15章 奇异值分解/15.SVD.ipynb | ###Markdown
Chapter 15: Singular Value Decomposition 1. The singular value decomposition (SVD) of an $m \times n$ real matrix $A$ expresses it as a product of three real matrices, $$A = U \Sigma V ^ { T }$$ where $U$ is an orthogonal matrix of order $m$, $V$ is an orthogonal matrix of order $n$, and $\Sigma$ is an $m \times n$ rectangular diagonal matrix $$\Sigma = \operatorname { diag } ( \sigma _ { 1 } , \sigma _ { 2 } , \cdots , \sigma _ { p } ) , \quad p = \operatorname { min } \{ m , n \}$$ whose diagonal entries are non-negative and satisfy $\sigma _ { 1 } \geq \sigma _ { 2 } \geq \cdots \geq \sigma _ { p } \geq 0$. 2. The SVD of any real matrix always exists, but it is not unique. 3. SVD includes the compact SVD and the truncated SVD: the compact SVD has the same rank as the original matrix, while the truncated SVD has lower rank than the original matrix. 4. SVD has a clear geometric interpretation: it corresponds to three consecutive linear transformations, a rotation, a scaling, and another rotation, where the first and third rotations are taken with respect to orthonormal bases of the corresponding spaces. 5. If $A = U \Sigma V ^ { T }$ is an SVD of $A$, then $$\left. \begin{array} { l } { A ^ { T } A = V ( \Sigma ^ { T } \Sigma ) V ^ { T } } \\ { A A ^ { T } = U ( \Sigma \Sigma ^ { T } ) U ^ { T } } \end{array} \right.$$ that is, the eigendecompositions of the symmetric matrices $A^TA$ and $AA^T$ can be written in terms of the SVD factors of $A$. 6. The SVD of $A$ can be computed from the eigenvalues and eigenvectors of $A^TA$: the eigenvectors of $A^TA$ form the columns of the orthogonal matrix $V$; the singular values $\sigma _ { i }$ are the square roots of the eigenvalues $\lambda _ { j }$ of $A^TA$, i.e. $$\sigma _ { j } = \sqrt { \lambda _ { j } } , \quad j = 1,2 , \cdots , n$$ and, sorted in descending order, they form the diagonal of $\Sigma$; the left singular vectors corresponding to the positive singular values are computed next and extended with an orthonormal basis of the null space of $A^T$ to form the columns of the orthogonal matrix $U$. 7. The Frobenius norm of $A = [ a _ { i j } ] _ { m \times n }$ is defined as $$\| A \| _ { F } = ( \sum _ { i = 1 } ^ { m } \sum _ { j = 1 } ^ { n } ( a _ { i j } ) ^ { 2 } ) ^ { \frac { 1 } { 2 } }$$ Within the set of $m \times n$ matrices of rank at most $k$ there exists a best approximation $X$ of $A$ in the Frobenius-norm sense, and the matrix $A_k$ obtained from the rank-$k$ truncated SVD attains this optimum; SVD therefore gives the optimal matrix approximation in the Frobenius-norm, i.e. squared-loss, sense. 8. Any real matrix $A$ can be written as the outer-product expansion $$A = \sigma _ { 1 } u _ { 1 } v _ { 1 } ^ { T } + \sigma _ { 2 } u _ { 2 } v _ { 2 } ^ { T } + \cdots + \sigma _ { n } u _ { n } v _ { n } ^ { T }$$ where each $u _ { k } v _ { k } ^ { T }$ is the $m \times n$ outer product of the column vector $u _ { k }$ and the row vector $v _ { k } ^ { T }$, $\sigma _ { k }$ is a singular value, and $u _ { k } , v _ { k } ^ { T } , \sigma _ { k }$ are obtained from the SVD of $A$. --- Any $m$ x $n$ matrix can be expressed as the product of three matrices (a factorization): an orthogonal matrix of order $m$, an $m$ x $n$ rectangular diagonal matrix with **non-negative** diagonal entries arranged in **descending** order, and an orthogonal matrix of order $n$; this is called the singular value decomposition of the matrix. The SVD of a matrix always exists but is not unique. SVD can be viewed as a method of matrix data compression: the factorization approximates the original matrix, and the approximation is optimal in the squared-loss sense. The singular value decomposition of a non-zero $m$ x $n$ **real matrix** $A, A\in R^{m\times n}$ expresses it as the product of three real matrices: $A = U\Sigma V^{T}$, where $U$ is an orthogonal matrix of order $m$, $V$ is an orthogonal matrix of order $n$, and $\Sigma$ is an $m$ x $n$ rectangular diagonal matrix whose non-negative diagonal entries are arranged in descending order. The column vectors of $U$ are called the left singular vectors, and the column vectors of $V$ the right singular vectors. SVD does not require $A$ to be square; in fact, it can be regarded as a generalization of the diagonalization of square matrices. The **compact SVD** has the same rank as the original matrix, while the **truncated SVD** has lower rank than the original matrix. ---------------------------------------------------------------------------------------------------------------------------------
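As a small numerical illustration of point 7 (added here, not part of the original chapter notes), the rank-$k$ truncation attains the Eckart-Young bound: the Frobenius error equals the square root of the sum of the discarded squared singular values.
###Code
# Added illustration (assumption: any small matrix works; M below is arbitrary).
import numpy as np
M = np.array([[3., 1., 1.], [-1., 3., 1.]])
U, s, Vt = np.linalg.svd(M, full_matrices=False)
k = 1
M_k = (U[:, :k] * s[:k]) @ Vt[:k, :]      # rank-k truncated reconstruction
print(np.linalg.norm(M - M_k))            # Frobenius-norm approximation error
print(np.sqrt(np.sum(s[k:] ** 2)))        # equals the line above
###Output
_____no_output_____
###Markdown
The cell below implements the same decomposition from scratch via the eigendecomposition of $A^TA$.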
###Code
# Implement singular value decomposition: input a numpy matrix, output U, sigma, V
# https://zhuanlan.zhihu.com/p/54693391
import numpy as np
# Rebuild the matrix from the result of the factorization
def rebuildMatrix(U, sigma, V):
a = np.dot(U, sigma)
a = np.dot(a, np.transpose(V))
return a
# Sort the eigenvalues and eigenvectors by eigenvalue magnitude, in descending order
def sortByEigenValue(Eigenvalues, EigenVectors):
index = np.argsort(-1 * Eigenvalues)
Eigenvalues = Eigenvalues[index]
EigenVectors = EigenVectors[:, index]
return Eigenvalues, EigenVectors
# Perform singular value decomposition of a matrix
def SVD(matrixA, NumOfLeft=None):
    # NumOfLeft is the number of singular values to keep, i.e. the width of the middle square matrix
    # First compute transpose(A)*A
    matrixAT_matrixA = np.dot(np.transpose(matrixA), matrixA)
    # Then compute the right singular vectors
    lambda_V, X_V = np.linalg.eig(matrixAT_matrixA)
    lambda_V, X_V = sortByEigenValue(lambda_V, X_V)
    # Compute the singular values
    sigmas = lambda_V
    sigmas = list(map(lambda x: np.sqrt(x)
                      if x > 0 else 0, sigmas))  # tiny eigenvalues can come out slightly negative in floating point
    sigmas = np.array(sigmas)
    sigmasMatrix = np.diag(sigmas)
    if NumOfLeft == None:
        rankOfSigmasMatrix = len(list(filter(lambda x: x > 0,
                                             sigmas)))  # number of eigenvalues greater than 0
    else:
        rankOfSigmasMatrix = NumOfLeft
    sigmasMatrix = sigmasMatrix[0:rankOfSigmasMatrix, :]  # drop singular values whose eigenvalue is 0
    # Compute the left singular vectors (X_U holds the left, not the right, singular vectors)
    X_U = np.zeros(
        (matrixA.shape[0], rankOfSigmasMatrix))  # initialize the left singular vector matrix, truncated directly here
    for i in range(rankOfSigmasMatrix):
        X_U[:, i] = np.transpose(np.dot(matrixA, X_V[:, i]) / sigmas[i])
    # Truncate the right singular vectors and the singular value matrix
    X_V = X_V[:, 0:NumOfLeft]
    sigmasMatrix = sigmasMatrix[0:rankOfSigmasMatrix, 0:rankOfSigmasMatrix]
    #print(rebuildMatrix(X_U, sigmasMatrix, X_V))
    return X_U, sigmasMatrix, X_V
A = np.array([[1, 1, 1, 2, 2], [0, 0, 0, 3, 3], [0, 0, 0, 1, 1], [1, 1, 1, 0, 0],
[2, 2, 2, 0, 0], [5, 5, 5, 0, 0], [1, 1, 1, 0, 0]])
A
X_U, sigmasMatrix, X_V = SVD(A, NumOfLeft=3)
X_U
sigmasMatrix
X_V
# rebuild from U, sigma, V
rebuildMatrix(X_U, sigmasMatrix, X_V)
###Output
_____no_output_____
###Markdown
same as A.
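As an extra check (added here, not part of the original notebook), the result can be compared against numpy's built-in SVD; the leading singular values should agree with the diagonal of `sigmasMatrix`.
###Code
# Added sanity check (assumption: A from the cells above is still in scope).
U_ref, s_ref, Vt_ref = np.linalg.svd(A, full_matrices=False)
print(np.round(s_ref, 4))                        # compare with the diagonal of sigmasMatrix
print(np.allclose(A, (U_ref * s_ref) @ Vt_ref))  # numpy's factors reproduce A exactly
###Output
_____no_output_____
###Markdown
Using `full_matrices=False` keeps the factor shapes consistent with the compact form used above.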
###Code
from PIL import Image
import requests
from io import BytesIO
url = 'https://images.mulberry.com/i/mulberrygroup/RL5792_000N651_L/small-hampstead-deep-amber-small-classic-grain-ayers/small-hampstead-deep-amber-small-classic-grain-ayers?v=3&w=304'
response = requests.get(url)
img = Image.open(BytesIO(response.content))
img
###Output
_____no_output_____ |
nbs/12a_examples.glue-benchmark-sweeps.ipynb | ###Markdown
GLUE: hyperparameter search
###Code
from transformers import AutoModelForSequenceClassification
from fastai.text.all import *
from fastai.callback.wandb import *
from fasthugs.learner import TransLearner
from fasthugs.data import TransformersTextBlock, TextGetter, get_splits
from datasets import load_dataset, concatenate_datasets
import wandb
import gc
%env WANDB_ENTITY=fastai_community
%env WANDB_PROJECT=glue-benchmark
###Output
env: WANDB_ENTITY=fastai_community
env: WANDB_PROJECT=glue-benchmark
###Markdown
Setup. Let's define the main settings for the run in one place:
###Code
ds_name = 'glue'
model_name = "distilroberta-base"
max_len = 512
bs = 32
val_bs = bs*2
n_epoch = 4
lr = 2e-5
opt_func = Adam
diff_lr_decay_factor = 0
GLUE_TASKS = ["cola", "mnli", "mnli-mm", "mrpc", "qnli", "qqp", "rte", "sst2", "stsb", "wnli"]
def validate_task():
assert task in GLUE_TASKS
from fastai.metrics import MatthewsCorrCoef, F1Score, PearsonCorrCoef, SpearmanCorrCoef
glue_metrics = {
'cola':[MatthewsCorrCoef()],
'sst2':[accuracy],
'mrpc':[F1Score(), accuracy],
'stsb':[PearsonCorrCoef(), SpearmanCorrCoef()],
'qqp' :[F1Score(), accuracy],
'mnli':[accuracy],
'qnli':[accuracy],
'rte' :[accuracy],
'wnli':[accuracy],
}
glue_textfields = {
'cola':['sentence', None],
'sst2':['sentence', None],
'mrpc':['sentence1', 'sentence2'],
'stsb':['sentence1', 'sentence2'],
'qqp' :['question1', 'question2'],
'mnli':['premise', 'hypothesis'],
'qnli':['question', 'sentence'],
'rte' :['sentence1', 'sentence2'],
'wnli':['sentence1', 'sentence2'],
}
glue_num_labels = {'mnli':3, 'stsb':1}
#collapse_input
def layerwise_splitter(model):
emb = L(model.base_model.embeddings)
layers = L(model.base_model.encoder.layer.children())
clf = L(m for m in list(model.children())[1:] if params(m))
groups = emb + layers + clf
return groups.map(params)
###Output
_____no_output_____
###Markdown
Running a GLUE task
###Code
task = 'sst2'
validate_task()
ds = load_dataset(ds_name, task)
valid_ = 'validation-matched' if task=='mnli' else 'validation'
len(ds['train']), len(ds[valid_])
train_idx, valid_idx = get_splits(ds, valid=valid_)
train_ds = concatenate_datasets([ds['train'], ds[valid_]])
train_ds[0]
###Output
_____no_output_____
###Markdown
Here I use the number of characters as a proxy for the length of the tokenized text, to speed up `dls` creation.
###Code
lens = train_ds.map(lambda s: {'len': sum([len(s[i]) for i in glue_textfields[task] if i])},
remove_columns=train_ds.column_names, num_proc=2, keep_in_memory=True)
train_lens = lens.select(train_idx)['len']
valid_lens = lens.select(valid_idx)['len']
dblock = DataBlock(blocks = [TransformersTextBlock(pretrained_model_name=model_name), CategoryBlock()],
get_x=TextGetter(*glue_textfields[task]),
get_y=ItemGetter('label'),
splitter=IndexSplitter(valid_idx))
%%time
dl_kwargs=[{'res':train_lens}, {'val_res':valid_lens}]
dls = dblock.dataloaders(train_ds, bs=bs, val_bs=val_bs, dl_kwargs=dl_kwargs)
dls.show_batch(max_n=4)
###Output
_____no_output_____
###Markdown
Single run
###Code
WANDB_NAME = f'{ds_name}-{task}-{model_name}'
GROUP = f'{ds_name}-{task}-{model_name}-{lr:.0e}'
if diff_lr_decay_factor: GROUP += f"diff_lr_{diff_lr_decay_factor}"
NOTES = f'finetuning {model_name} with {opt_func.__name__} lr={lr:.0e}'
TAGS =[model_name, ds_name, opt_func.__name__]
#hide_output
wandb.init(reinit=True, project="fasthugs", entity="fastai_community",
name=WANDB_NAME, group=GROUP, notes=NOTES, tags=TAGS);
#hide_output
model = AutoModelForSequenceClassification.from_pretrained(model_name, num_labels=glue_num_labels.get(task, 2))
metrics = glue_metrics[task]
learn = TransLearner(dls, model, metrics=metrics, opt_func=opt_func, splitter=layerwise_splitter).to_fp16()
# learn.summary()
if diff_lr_decay_factor != 0:
k = len(layerwise_splitter(model))
lr = slice(lr*diff_lr_decay_factor**k,lr)
metric_to_monitor = metrics[0].name if isinstance(metrics[0], Metric) else metrics[0].__name__
cbs = [WandbCallback(log_preds=False, log_model=False), SaveModelCallback(monitor=metric_to_monitor)]
learn.fit_one_cycle(4, lr, cbs=cbs)
learn.show_results()
# test_dl = dls.test_dl(ds['test'])
# preds = learn.get_preds(dl=test_dl)
del learn
gc.collect()
torch.cuda.empty_cache()
###Output
_____no_output_____
###Markdown
Sweeps
###Code
wandb.login()
def train():
with wandb.init() as run:
cfg = run.config
model = AutoModelForSequenceClassification.from_pretrained(model_name, num_labels=glue_num_labels.get(task, 2))
metrics = glue_metrics[task]
k = len(layerwise_splitter(model))
if cfg.diff_lr_decay_factor: lr = slice(cfg.lr*cfg.diff_lr_decay_factor**k,cfg.lr)
learn = TransLearner(dls, model, metrics=metrics, opt_func=Adam, splitter=layerwise_splitter).to_fp16()
learn.fit_one_cycle(n_epoch, cfg.lr, wd=cfg.wd, cbs=[WandbCallback(log_preds=False, log_model=False)])
del learn
gc.collect()
torch.cuda.empty_cache()
torch.cuda.synchronize()
metrics = glue_metrics[task]
metric_to_monitor = metrics[0].name if isinstance(metrics[0], Metric) else metrics[0].__name__
sweep_name = f"glue-{task}-sweep"
sweep_config = {
"project":"glue-benchmark",
"entity": "fastai_cimmunity",
"name": sweep_name,
"method": "random",
"parameters": {
"lr": {"values":[1e-5,2e-5,3e-5,5e-5, 1e-4, 3e-4]},
"wd": {"values":[0.,1e-2,5e-2]},
"diff_lr_decay_factor":{"values":[0., 0.9, 0.8, 0.7, 0.6]}
},
"metric":{"goal": "maximise", "name": metric_to_monitor},
"early_terminate": {"type": "hyperband", "s": 2, "eta": 3, "max_iter": 40}
}
sweep_id = wandb.sweep(sweep_config, project='glue-benchmark', entity="fastai_community")
wandb.agent(sweep_id, function=train)
wandb.finish()
###Output
_____no_output_____
###Markdown
Another task example: MultiNLI
###Code
task = 'mnli'
validate_task()
ds = load_dataset(ds_name, task)
train_idx, valid_idx = get_splits(ds, valid='validation_matched')
train_ds = concatenate_datasets([ds['train'], ds['validation_matched']])
train_ds[0]
lens = train_ds.map(lambda s: {'len': len(s['premise'])+len(s['hypothesis'])}, remove_columns=train_ds.column_names, num_proc=4, keep_in_memory=True)
train_lens = lens.select(train_idx)['len']
valid_lens = lens.select(valid_idx)['len']
dblock = DataBlock(blocks = [TransformersTextBlock(pretrained_model_name=model_name),
CategoryBlock()],
get_x=TextGetter('premise', 'hypothesis'),
get_y=ItemGetter('label'),
splitter=IndexSplitter(valid_idx))
%%time
dl_kwargs=[{'res':train_lens}, {'val_res':valid_lens}]
dls = dblock.dataloaders(train_ds, bs=bs, val_bs=val_bs, dl_kwargs=dl_kwargs, num_workers=4)
dls.show_batch(max_n=4)
lr=5e-5
wd=0.01
WANDB_NAME = f'{ds_name}-{task}-{model_name}'
GROUP = f'{ds_name}-{task}-{model_name}-{lr:.0e}'
NOTES = f'finetuning {model_name} with Adam lr={lr:.0e}'
TAGS =[model_name, ds_name, 'adam']
#hide_output
wandb.init(reinit=True, project="glue-benchmark", entity="fastai_community",
name=WANDB_NAME, group=GROUP, notes=NOTES, tags=TAGS);
#hide_output
model = AutoModelForSequenceClassification.from_pretrained(model_name, num_labels=3)
metrics = glue_metrics[task]
learn = TransLearner(dls, model, metrics=metrics).to_fp16()
metric_to_monitor = metrics[0].name if isinstance(metrics[0], Metric) else metrics[0].__name__
cbs = [WandbCallback(log_preds=False, log_model=False)]
# cbs += [SaveModelCallback(monitor=metric_to_monitor)]
learn.fit_one_cycle(4, lr, wd=wd, cbs=cbs)
learn.show_results()
valid_mm_dl = dls.test_dl(ds['validation_mismatched'], with_labels=True)
learn.validate(dl=valid_mm_dl)
###Output
_____no_output_____ |
estar/examples/ESTAR_synth_wave.ipynb | ###Markdown
We define a function that evaluates the values of the equation terms. This implementation uses tensors of token values on the grid and obtains the term values through element-wise multiplication of those tensors. The resulting tensor is then reshaped into a vector to be used as a feature in the regression.
###Code
def derivative_evaluator(term, normalize, eval_params):
'''
Example of the evaluator of token values, appropriate for case of derivatives with pre-calculated values, defined on grid, that take form of tensors
Parameters
----------
term : term.Term, or numpy.ndarray
Object for term of the equation, or its gene, for which the evaluation is done; necessary for the evaluation.
eval_params : dict
Dictionary, containing parameters of the evaluator: in this example, they are
'token matrices' : list/numpy.martix of token (derivatives) values on the grid, 'parameter_indexes' : dictionary of orders of token parameters during the encoding.
In simplest case of only power parameter: 'parameter_indexes':{'power':0}.
Returns
----------
value : numpy.ndarray
Vector of the evaluation of the token values, that shall be used as target, or feature during the LASSO regression.
'''
assert 'token_matrices' in eval_params and 'parameter_indexes' in eval_params
if type(term) == Term:
term = term.gene
token_matrices = eval_params['token_matrices']
value = np.copy(token_matrices[0])
for var_idx in np.arange(term.shape[0]):
power = (term[var_idx + eval_params['parameter_indexes']['power']])
value *= eval_params['token_matrices'][int(var_idx / (float(eval_params['parameter_indexes']['power']+1)))] ** int(power)
if normalize:
value = normalize_ts(value)
value = value.reshape(np.prod(value.shape))
return value
###Output
_____no_output_____
###Markdown
We preprocess the data and compute the derivative values on the grid. The computations are rather heavy, so they are parallelized, but they can still take a relatively long time, especially on a PC. The preprocessing results are saved to a separate file so they can be reused in repeated runs of the main algorithm, which saves time. The solution of the wave equation is used as the example. In this example we consider the problem of discovering an equation from synthetic data obtained from the solution of the wave equation: $\frac{\partial^2 u}{\partial t^2} = \frac{\partial^2 u}{\partial x_1^2} + \frac{\partial^2 u}{\partial x_2^2}$, which describes the evolution of a quantity $u$ in a two-dimensional domain. The data for this experiment can be downloaded from: https://drive.google.com/open?id=1joW0zTwkSGLJVpyWxDqoSMzTvRItX24J
###Code
op_file_name = path + '/Preprocessing/Derivatives.npy'
filename = path + '/Preprocessing/wave_HP.npy'
poolsize = 4
if 'npy' in filename:
field = np.load(filename)
else:
shape = (201, 201, 201)
field = np.loadtxt(filename)
field = field.reshape(shape)
field = np.transpose(field, (2, 0, 1))
Preprocess_derivatives(field, op_file_name, mp_poolsize=poolsize)
###Output
Executing on grid with uniform nodes:
Start: 2020-03-03 18:33:33.448178 ; Finish: 2020-03-03 19:19:32.216328
Preprocessing runtime: 0:45:58.768150
###Markdown
We load the grid-node values of the original function and its derivatives and assemble them into a tensor for further use. We also set the boundaries of the domain over which the equation will be computed (in this example, we trim the beginning and end of the time series plus 15 elements from each spatial boundary so that only the higher-quality derivatives are used).
###Code
u_initial = np.load('Preprocessing/Wave_HP/wave_HP.npy') # set the path to the file with the original field here
u_initial = np.transpose(u_initial, (2, 0, 1))
print(u_initial.shape)
derivatives = np.load('Preprocessing/Wave_HP/Derivatives.npy') # set the path to the file with the derivatives here
variables = np.ones((2 + derivatives.shape[1], ) + u_initial.shape)
variables[1, :] = u_initial
for i_outer in range(0, derivatives.shape[1]):
variables[i_outer+2] = derivatives[:, i_outer].reshape(variables[i_outer+2].shape)
skipped_elems = 15
timeslice = (skipped_elems, -skipped_elems)
variables = variables[:, timeslice[0]:timeslice[1], skipped_elems:-skipped_elems, skipped_elems:-skipped_elems]
###Output
(101, 101, 101)
###Markdown
We obtain the token names for the derivatives using the **Define_Derivatives()** function. It returns the token names in the order: 1, u, $\frac{\partial u}{\partial x_1}$, $\frac{\partial^2 u}{\partial x_1^2}$, ... , $\frac{\partial u}{\partial x_2}$, $\frac{\partial^2 u}{\partial x_2^2}$, ... Next we set the token parameters: in this example, the only parameter is the power of the token used in a term. For example, if 'power' = 2 for the token $\frac{\partial u}{\partial x_1}$, the term will contain $ (\frac{\partial u}{\partial x_1})^2 $. We also define the terms that will be present in every equation: the constant and the original function.
###Code
token_names = Define_Derivatives(u_initial.ndim, max_order = 2)
print(token_names)
token_parameters = collections.OrderedDict([('power', (0, 3))])
basic_terms = [{'1':{'power':1}},
{'1':{'power':1}, 'u':{'power':1}}]
###Output
('1', 'u', 'du/dx1', 'd^2u/dx1^2', 'du/dx2', 'd^2u/dx2^2', 'du/dx3', 'd^2u/dx3^2')
###Markdown
We define the object that trains the equation:
###Code
Trainer = Equation_Trainer(tokens = token_names, token_params = token_parameters,
evaluator = derivative_evaluator,
evaluator_params = {'token_matrices':variables, 'parameter_indexes':{'power':0}},
basic_terms = basic_terms)
###Output
_____no_output_____
###Markdown
Since we do not know which value of the parameter $\alpha$, the LASSO regularization coefficient, yields the correct equation structure, we run the search over a grid of model hyperparameters. In this example the grid is built over a single parameter ($\alpha$), but in general grids over several parameters at once are allowed. For the remaining model hyperparameters we set the corresponding single values. If every parameter is given a single value, they can all be passed directly to the Train method.
###Code
Trainer.Parameters_grid(('alpha', 'a_proc', 'r_crossover', 'r_param_mutation', 'r_mutation',
'mut_chance', 'pop_size', 'eq_len', 'max_factors'),
((0.01, 0.16, 4), 0.2, 0.6, 0.8, 0.5, 0.8, 20, 6, 2))
###Output
_____no_output_____
###Markdown
We start the training and obtain the desired equation in symbolic form.
###Code
Trainer.Train(epochs = 50)
###Output
Using parameters from grid
Achieved best fitness: 0.0050783488205190215 with alpha = 0.01
Discovered equation:
- { d^2u/dx1^2 : {'power': 1.0}} + -0.0001963014668564108 * { u : {'power': 1.0}} + 1.003430266231036 * { d^2u/dx2^2 : {'power': 1.0}} + 0.000465301995916656 * { du/dx1 : {'power': 1.0}} + 1.003430266231037 * { d^2u/dx3^2 : {'power': 1.0}} = 0
Achieved best fitness: 0.004485088661452463 with alpha = 0.06
Discovered equation:
- { d^2u/dx1^2 : {'power': 1.0}} + 1.0013594947213769 * { d^2u/dx3^2 : {'power': 1.0}} + -0.00020581754275531233 * { u : {'power': 1.0}} + 1.001359494721376 * { d^2u/dx2^2 : {'power': 1.0}} = 0
Achieved best fitness: 0.0038249581919527357 with alpha = 0.10999999999999999
Discovered equation:
- { d^2u/dx1^2 : {'power': 1.0}} + 1.0935985825740213 * { d^2u/dx2^2 : {'power': 1.0}} + 1.0935985825740218 * { d^2u/dx3^2 : {'power': 1.0}} = 0
Achieved best fitness: 0.003611457117138705 with alpha = 0.16
Discovered equation:
- { d^2u/dx1^2 : {'power': 1.0}} + 1.0935985825740213 * { d^2u/dx2^2 : {'power': 1.0}} + 1.0935985825740218 * { d^2u/dx3^2 : {'power': 1.0}} = 0
###Markdown
For most of the studied interval of $\alpha$ we obtain equation structures of the form $\frac{\partial^2 u}{\partial t^2} = a_1 \frac{\partial^2 u}{\partial x_1^2} + a_2 \frac{\partial^2 u}{\partial x_2^2}$, where the coefficients $a_1$ and $a_2$ differ from the true values of 1 because of errors, mostly related to the numerical solution of the original differential equation and to the errors in computing the derivatives. For too-low values of $\alpha$, the vector of equation coefficients is not sparse enough, and extra terms appear in the equation, although with negligible weights.
###Code
Trainer.Parameters_grid(('alpha', 'a_proc', 'r_crossover', 'r_param_mutation', 'r_mutation',
'mut_chance', 'pop_size', 'eq_len', 'max_factors'),
((0.3, 0.4, 2), 0.2, 0.6, 0.8, 0.5, 0.8, 20, 6, 2))
Trainer.Train(epochs = 50)
###Output
Using parameters from grid
Achieved best fitness: 0.00227045231823162 with alpha = 0.3
Discovered equation:
- { du/dx1 : {'power': 1.0} d^2u/dx1^2 : {'power': 1.0}} + -0.0004731879609027912 * { u : {'power': 1.0}} + -0.6650816595841387 * { d^2u/dx2^2 : {'power': 1.0}} + -0.6650816595841381 * { d^2u/dx3^2 : {'power': 1.0}} = 0
Achieved best fitness: 0.002505082208075159 with alpha = 0.4
Discovered equation:
- { d^2u/dx1^2 : {'power': 1.0}} + 1.0935985825740213 * { d^2u/dx2^2 : {'power': 1.0}} + 1.0935985825740218 * { d^2u/dx3^2 : {'power': 1.0}} = 0
###Markdown
Note that, because of the stochastic way in which candidate equations are constructed and evolved, there are cases when the algorithm does not converge to the best solution and instead remains in some local optimum of the fitness function. Thus, for $\alpha = 0.3$, an incorrect equation is obtained. An indicator of this is the comparatively low value of the fitness function, which reflects the "quality" of the equation and is exceeded by the "correct" structures even at larger values of the regularization coefficient. Details on how the fitness function is defined are given in the wiki section of the project's GitHub page and in the corresponding papers. To avoid such local optima, it is recommended to run the algorithm several times on the same data and select the result with the maximum fitness value.
###Code
Trainer.Parameters_grid(('alpha', 'a_proc', 'r_crossover', 'r_param_mutation', 'r_mutation',
'mut_chance', 'pop_size', 'eq_len', 'max_factors'),
((2, 3, 2), 0.2, 0.6, 0.8, 0.5, 0.8, 20, 6, 2))
Trainer.Train(epochs = 50)
###Output
Using parameters from grid
Achieved best fitness: 0.0006686093848979459 with alpha = 2.0
Discovered equation:
- { du/dx1 : {'power': 1.0}} = 0
Achieved best fitness: 0.0006686093848979459 with alpha = 3.0
Discovered equation:
- { du/dx1 : {'power': 1.0}} = 0
|
Atividades/Regression/Ex-Regression.ipynb | ###Markdown
Loading Data
###Code
# https://developers.google.com/machine-learning/crash-course/california-housing-data-description
dataset_path = "../datasets/housing/housing.csv"
housing = pd.read_csv(dataset_path)
housing.head()
###Output
_____no_output_____
###Markdown
Prepare the data for ML: shuffle and split the dataset into training & test sets
###Code
from sklearn.model_selection import StratifiedShuffleSplit
housing["income_cat"] = np.ceil(housing["median_income"] / 1.5)
housing["income_cat"].where(housing["income_cat"] < 5, 5.0, inplace=True)
split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
for train_index, test_index in split.split(housing, housing["income_cat"]):
strat_train_set = housing.loc[train_index]
strat_test_set = housing.loc[test_index]
for set_ in (strat_train_set, strat_test_set):
set_.drop("income_cat", axis=1, inplace=True)
print(len(strat_train_set), "train +", len(strat_test_set), "test")
###Output
16512 train + 4128 test
###Markdown
Create Train set and Labels
###Code
# train
housing = strat_train_set.drop("median_house_value", axis=1) # drop labels for training set
housing_labels = strat_train_set["median_house_value"].copy()
###Output
_____no_output_____
###Markdown
Create pipelines
###Code
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import Imputer
from combined_attributes import CombinedAttributesAdder
num_pipeline = Pipeline([
('imputer', Imputer(strategy="median")),
('attribs_adder', CombinedAttributesAdder()),
('std_scaler', StandardScaler()),
])
from future_encoders import ColumnTransformer
from future_encoders import OneHotEncoder
num_attribs = list(housing.drop('ocean_proximity', axis=1))
cat_attribs = ["ocean_proximity"]
full_pipeline = ColumnTransformer([
("num", num_pipeline, num_attribs),
("cat", OneHotEncoder(), cat_attribs),
])
###Output
_____no_output_____
###Markdown
Prepare data
###Code
# prepared train
housing_prepared = full_pipeline.fit_transform(housing)
housing_prepared.shape
housing_labels.shape
housing_prepared
from sklearn.linear_model import LinearRegression
lin_reg = LinearRegression()
lin_reg.fit(housing_prepared, housing_labels)
some_data = housing.iloc[:5]
some_labels = housing_labels.iloc[:5]
some_data_prepared = full_pipeline.transform(some_data)
pred = lin_reg.predict(some_data_prepared)
print("Predictions: ", pred)
print("Labels: ", list(some_labels))
from sklearn.metrics import mean_squared_error
housing_predictions = lin_reg.predict(housing_prepared)
lin_mse = mean_squared_error(housing_labels, housing_predictions)
lin_rmse = np.sqrt(lin_mse)
lin_rmse
from sklearn.metrics import mean_absolute_error
lin_mae = mean_absolute_error(housing_labels, housing_predictions)
lin_mae
from sklearn.tree import DecisionTreeRegressor
tree_reg = DecisionTreeRegressor(random_state = 42)
tree_reg.fit(housing_prepared, housing_labels)
housing_predictions = tree_reg.predict(housing_prepared)
tree_mse = mean_squared_error(housing_labels, housing_predictions)
tree_rmse = np.sqrt(tree_mse)
tree_rmse
def displayscores(scores):
print("Score: ", scores)
print("Mean: ", scores.mean())
print("Standard: ", scores.std())
from sklearn.model_selection import cross_val_score
lin_scores = cross_val_score(lin_reg, housing_prepared, housing_labels, scoring="neg_mean_squared_error", cv=10)
lin_rmse_scores = np.sqrt(-lin_scores)
displayscores(lin_rmse_scores)
tree_scores = cross_val_score(tree_reg, housing_prepared, housing_labels, scoring="neg_mean_squared_error", cv=10)
tree_rmse_scores = np.sqrt(-tree_scores)
displayscores(tree_rmse_scores)
from sklearn.ensemble import RandomForestRegressor
forest_reg = RandomForestRegressor(random_state=42)
forest_reg.fit(housing_prepared, housing_labels)
housing_predictions = forest_reg.predict(housing_prepared)
forest_mse = mean_squared_error(housing_labels, housing_predictions)
forest_rmse = np.sqrt(forest_mse)
forest_rmse
forest_scores = cross_val_score(forest_reg, housing_prepared, housing_labels, scoring="neg_mean_squared_error", cv=10)
forest_rmse_scores = np.sqrt(-forest_scores)
displayscores(forest_rmse_scores)
from sklearn.model_selection import GridSearchCV
param_grid = [
{'n_estimators': [3, 10, 30], 'max_features': [2, 4, 6, 8]},
{'bootstrap': [False], 'n_estimators': [3, 10, 30], 'max_features': [2, 4, 6, 8]}
]
forest_reg = RandomForestRegressor(random_state=42)
grid_search = GridSearchCV(forest_reg, param_grid, cv=5, scoring="neg_mean_squared_error", return_train_score=True)
grid_search.fit(housing_prepared, housing_labels)
grid_search.best_params_
grid_search.best_estimator_
cvres = grid_search.cv_results_
for mean_score, params in zip(cvres['mean_test_score'], cvres['params']):
print(np.sqrt(-mean_score), params)
X_test = strat_test_set.drop("median_house_value", axis=1)
y_test = strat_test_set["median_house_value"].copy()
X_test_prepared = full_pipeline.transform(X_test)
final_model = grid_search.best_estimator_
final_predictions = final_model.predict(X_test_prepared)
final_predictions
final_mse = mean_squared_error(y_test, final_predictions)
final_rmse = np.sqrt(final_mse)
final_rmse
###Output
_____no_output_____ |
workshop/notebooks/02a_image_manipulation_nibabel.ipynb | ###Markdown
Using Python for neuroimaging dataThe primary goal of this section is to become familiar with loading, modifying, saving, and visualizing neuroimages in Python. A secondary goal is to develop a conceptual understanding of the data structures involved, to facilitate diagnosing problems in data or analysis pipelines.To these ends, we'll be exploring two libraries: [nibabel](http://nipy.org/nibabel/) and [nilearn](https://nilearn.github.io/). Each of these projects has excellent documentation. While this should get you started, it is well worth your time to look through these sites.This notebook only covers nibabel, see the notebook [`02b_image_manipulation_nilearn.ipynb`](02b_image_manipulation_nilearn.ipynb) for more information about nilearn. NibabelNibabel is a low-level Python library that gives access to a variety of imaging formats, with a particular focus on providing a common interface to the various **volumetric** formats produced by scanners and used in common neuroimaging toolkits. - NIfTI-1 - NIfTI-2 - SPM Analyze - FreeSurfer .mgh/.mgz files - Philips PAR/REC - Siemens ECAT - DICOM (limited support)It also supports **surface** file formats - GIFTI - FreeSurfer surfaces, labels and annotations**Connectivity** - CIFTI-2**Tractography** - TrackViz .trk filesAnd a number of related formats.**Note:** Almost all of these can be loaded through the `nibabel.load` interface. Setup
###Code
# Image settings
from nilearn import plotting
import pylab as plt
%matplotlib inline
import numpy as np
import nibabel as nb
###Output
_____no_output_____
###Markdown
Loading and inspecting images in `nibabel`
###Code
# Load a functional image of subject 01
img = nb.load('/data/ds000114/sub-01/ses-test/func/sub-01_ses-test_task-fingerfootlips_bold.nii.gz')
# Let's look at the header of this file
print(img)
###Output
_____no_output_____
###Markdown
This data-affine-header structure is common to volumetric formats in nibabel, though the details of the header will vary from format to format. Access specific parameters. If you're interested in specific parameters, you can access them very easily, as the following examples show.
###Code
data = img.get_fdata()
data.shape
affine = img.affine
affine
header = img.header['pixdim']
header
###Output
_____no_output_____
###Markdown
Note that the `'pixdim'` field above contains the voxel resolution (`4., 4., 3.999`), as well as the TR (`2.5`). Aside: why not just `img.data`? Working with neuroimages can use a lot of memory, so nibabel works hard to be memory efficient. If it can read some data while leaving the rest on disk, it will. `img.get_fdata()` reflects that it's doing some work behind the scenes. Quirk: `img.get_data_dtype()` shows the type of the data on disk, while `img.get_fdata().dtype` shows the type of the data that you're working with. These are not always the same, and not being clear on this [has caused problems](https://github.com/nipy/nibabel/issues/490). Further, modifying one does not update the other. This is especially important to keep in mind later when saving files.
###Code
print((data.dtype, img.get_data_dtype()))
###Output
_____no_output_____
###Markdown
Data. The data is a simple numpy array. It has a shape, it can be sliced and generally manipulated as you would any array.
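For instance (an added sketch, not in the original notebook), ordinary numpy operations apply directly to this 4D BOLD array:
###Code
# Added example: the mean signal over time for every voxel, using plain numpy.
mean_over_time = data.mean(axis=-1)   # collapse the last (time) dimension
print(mean_over_time.shape)
###Output
_____no_output_____
###Markdown
A single slice of one volume can be displayed with matplotlib: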
###Code
plt.imshow(data[:, :, data.shape[2] // 2, 0].T, cmap='Greys_r')
print(data.shape)
###Output
_____no_output_____
###Markdown
Exercise 1: Load the T1 data from subject 1. Plot the image using the same volume indexing as before. Also, print the shape of the data.
###Code
# Work on solution here
t1 = nb.load('/data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz')
data = t1.get_fdata()
plt.imshow(data[:, :, data.shape[2] // 2].T, cmap='Greys_r')
print(data.shape)
###Output
_____no_output_____
###Markdown
`img.orthoview()`. Nibabel has its own viewer, which can be accessed through **`img.orthoview()`**. This viewer scales voxels to reflect their size, and labels orientations. **Warning:** `img.orthoview()` may not work properly on OS X. Sidenote to plotting with `orthoview()`: as with other figures, if you initialized `matplotlib` with `%matplotlib inline`, the output figure will be static. If you use `orthoview()` in a normal IPython console, it will create an interactive window, and you can click to select different slices, similar to `mricron`. To get a similar experience in a jupyter notebook, use `%matplotlib notebook`. But don't forget to close figures afterward again or use `%matplotlib inline` again, otherwise you cannot plot any other figures.
###Code
%matplotlib notebook
img.orthoview()
###Output
_____no_output_____
###Markdown
Affine. The affine is a 4 x 4 numpy array. This describes the transformation from the voxel space (indices [i, j, k]) to the reference space (distance in mm (x, y, z)). It can be used, for instance, to discover the voxel that contains the origin of the image:
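The forward direction works too (an added example, not in the original notebook): applying the affine to a homogeneous voxel index gives its position in millimetres.
###Code
# Added example: map an arbitrary voxel index (i, j, k) to mm coordinates.
i, j, k = 10, 20, 15                      # hypothetical voxel index, chosen for illustration
x_mm, y_mm, z_mm, _ = affine.dot([i, j, k, 1])
print((x_mm, y_mm, z_mm))
###Output
_____no_output_____
###Markdown
Going the other way, the inverse affine maps a world coordinate back to voxel indices: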
###Code
x, y, z, _ = np.linalg.pinv(affine).dot(np.array([0, 0, 0, 1])).astype(int)
print("Affine:")
print(affine)
print
print("Center: ({:d}, {:d}, {:d})".format(x, y, z))
###Output
_____no_output_____
###Markdown
The affine also encodes the axis orientation and voxel sizes:
###Code
nb.aff2axcodes(affine)
nb.affines.voxel_sizes(affine)
nb.aff2axcodes(affine)
nb.affines.voxel_sizes(affine)
t1.orthoview()
###Output
_____no_output_____
###Markdown
Header. The header is a nibabel structure that stores all of the metadata of the image. You can query it directly, if necessary:
###Code
t1.header['descrip']
###Output
_____no_output_____
###Markdown
But it also provides interfaces for the more common information, such as `get_zooms()`, `get_xyzt_units()`, `get_qform()`, and `get_sform()`.
###Code
t1.header.get_zooms()
t1.header.get_xyzt_units()
t1.header.get_qform()
t1.header.get_sform()
###Output
_____no_output_____
###Markdown
Normally, we're not particularly interested in the header or the affine. But it's important to know they're there. And especially, to remember to copy them when making new images, so that derivatives stay aligned with the original image. `nib-ls`. Nibabel comes packaged with a command-line tool to print common metadata about any (volumetric) neuroimaging format nibabel supports. By default, it shows (on-disk) data type, dimensions and voxel sizes.
###Code
!nib-ls /data/ds000114/sub-01/ses-test/func/sub-01_ses-test_task-fingerfootlips_bold.nii.gz
###Output
_____no_output_____
###Markdown
We can also inspect header fields by name, for instance, `descrip`:
###Code
!nib-ls -H descrip /data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz
###Output
_____no_output_____
###Markdown
Creating and saving images. Suppose we want to save space by rescaling our image to a smaller datatype, such as an unsigned byte. To do this, we first need to take the data, change its datatype and save this new data in a new NIfTI image with the same header and affine as the original image.
###Code
# First, we need to load the image and get the data
img = nb.load('/data/ds000114/sub-01/ses-test/func/sub-01_ses-test_task-fingerfootlips_bold.nii.gz')
data = img.get_fdata()
# Now we force the values to be between 0 and 255
# and change the datatype to unsigned 8-bit
rescaled = ((data - data.min()) * 255. / (data.max() - data.min())).astype(np.uint8)
# Now we can save the changed data into a new NIfTI file
new_img = nb.Nifti1Image(rescaled, affine=img.affine, header=img.header)
nb.save(new_img, '/tmp/rescaled_image.nii.gz')
###Output
_____no_output_____
###Markdown
Let's look at the datatypes of the data array, as well as of the nifti image:
###Code
print((new_img.get_fdata().dtype, new_img.get_data_dtype()))
###Output
_____no_output_____
###Markdown
That's not optimal. Our data array has the correct type, but the on-disk format is determined by the header, so saving it with `img.header` will not do what we want. Also, let's take a look at the size of the original and new file.
###Code
orig_filename = img.get_filename()
!du -hL /tmp/rescaled_image.nii.gz $orig_filename
###Output
_____no_output_____
###Markdown
So, let's correct the header issue with the `set_data_dtype()` function:
###Code
img.set_data_dtype(np.uint8)
# Save image again
new_img = nb.Nifti1Image(rescaled, affine=img.affine, header=img.header)
nb.save(new_img, '/tmp/rescaled_image.nii.gz')
print((new_img.get_fdata().dtype, new_img.get_data_dtype()))
###Output
_____no_output_____
###Markdown
Perfect! Now the data types are correct. And if we look at the size of the image we even see that it got a bit smaller.
###Code
!du -hL /tmp/rescaled_image.nii.gz
###Output
###Output
_____no_output_____ |
ExerciciosDeDecisao - Feito.ipynb | ###Markdown
Write a program that asks for a value and shows on the screen whether the value is positive or negative.
###Code
num1 = input("Digite um valor ")
if int(num1) > 0:
print("O numero {0} é positivo".format(num1))
else :
print("O numero {0} é negativo".format(num1))
###Output
_____no_output_____
###Markdown
Faça um Programa que verifique se uma letra digitada é "F" ou "M". Conforme a letra escrever: F - Feminino, M - Masculino, Sexo Inválido.
###Code
letra = input("Digite o sexo F-Feminino M-Masculino ").upper()
if letra =="F":
print("O sexo é Feminino")
elif letra == "M":
print("O sexo é Masculino")
else :
print("O sexo é invalido")
###Output
_____no_output_____
###Markdown
Write a program that checks whether a typed letter is a vowel or a consonant.
###Code
vogal = ('A','E','I','O','U')
print(vogal)
letra = input("Digite uma letra ").upper()
if len(letra) != 1:
    print("você digitou uma palavra")
elif letra not in vogal:
    print("A letra é uma consoante")
else:
    print("A letra é uma vogal")
###Output
_____no_output_____
###Markdown
Write a program that reads a student's two partial grades. The program must compute the student's average and display:
* the message "Aprovado" (passed), if the average is greater than or equal to seven;
* the message "Reprovado" (failed), if the average is less than seven;
* the message "Aprovado com Distinção" (passed with distinction), if the average is exactly ten.
###Code
var1 = input("Digite sua nota referente a p1 ")
var2 = input("Digite sua nota referente a p2 ")
media = (int(var1) + int(var2)) / 2
if media >= 7 and media < 10:
print("Você foi Aprovado")
elif media == 10:
print("Você foi Aprovado com a maior nota meus parabêns")
elif media < 7:
print("Você foi Reprovado")
else:
print("Você digitou algum numero errado sua media foi {0} os numeros digitados são {1} {2}".format(media,var1,var2))
###Output
_____no_output_____
###Markdown
Write a program that reads three numbers and shows the largest of them.
###Code
var1 = input("Digite um numero ")
var2 = input("Digite qualquer valor ")
var3 = input("Digite qualquer valor ")
if (var1 < var2) and (var1 < var3):
print("O valor {0} é o maior numero".format(var1))
elif var2 < var3:
print("O valor {0} é o maior numero".format(var2))
else:
print("O valor {0} é o maior numero".format(var3))
###Output
_____no_output_____
###Markdown
Write a program that reads three numbers and shows the largest and the smallest of them.
###Code
var1 = input("Digite um numero ")
var2 = input("Digite qualquer valor ")
var3 = input("Digite qualquer valor ")
if (var1 < var2) and (var1 < var3):
print("O valor {0} é o maior numero".format(var1))
if var2 > var3:
print("O valor {0} é o menor numero".format(var3))
else:
print("O valor {0} é o menor numero".format(var2))
elif var2 < var3:
print("O valor {0} é o maior numero".format(var2))
if var1 > var3:
print("O valor {0} é o menor numero".format(var3))
else:
print("O valor {0} é o menor numero".format(var1))
else:
print("O valor {0} é o maior numero".format(var3))
if var1 < var2:
print("O valor {0} é o menor numero".format(var1))
else:
print("O valor {0} é o menor numero".format(var2))
###Output
_____no_output_____
###Markdown
Write a program that asks for the prices of three products and says which product you should buy, given that the decision is always for the cheapest one.
###Code
var1 = input("Informe o valor do produto ")
var2 = input("Informe o valor de outro produto ")
var3 = input("Informe o valor de outro produto ")
if (var1 < var2) and (var1 < var3):
print("O valor {0} é o mais barato".format(var1))
elif var2 < var3:
print("O valor {0} é o mais barato".format(var2))
else:
print("O valor {0} é o mais barato".format(var3))
###Output
_____no_output_____
###Markdown
Write a program that reads three numbers and shows them in descending order.
###Code
var1 = input("Digite um numero ")
var2 = input("Digite qualquer valor ")
var3 = input("Digite qualquer valor ")
if (var1 > var2) and (var1 > var3):
print("O valor {0} é o maior".format(var1))
if var2 > var3:
print("O valor {0} é o medio".format(var2))
print("O valor {0} é o menor".format(var3))
else:
print("O valor {0} é o medio".format(var3))
print("O valor {0} é o menor".format(var2))
elif var2 > var3:
print("O valor {0} é o maior".format(var2))
if var3 > var1:
print("O valor {0} é o medio".format(var3))
print("O valor {0} é o menor".format(var1))
else:
print("O valor {0} é o medio".format(var1))
print("O valor {0} é o menor".format(var3))
else:
print("O valor {0} é o maior".format(var3))
if var2 > var1:
print("O valor {0} é o medio".format(var2))
print("O valor {0} é o menor".format(var1))
else:
print("O valor {0} é o medio".format(var1))
print("O valor {0} é o menor".format(var2))
###Output
_____no_output_____
###Markdown
Write a program that asks which shift you study in. Ask the user to type M (morning), V (afternoon) or N (evening). Print the message "Bom Dia!", "Boa Tarde!", "Boa Noite!" or "Valor Inválido!", as appropriate.
###Code
letra = input("Digite o periodo da sua turma M-matutino V-vespertino ou N-noturno ").upper()
if letra =="M":
print("Bom Dia!")
elif letra == "V":
print("Boa Tarde!")
elif letra == "N":
print("Boa Noite")
else :
print("Valor invalido")
###Output
_____no_output_____
###Markdown
The Tabajara Organizations decided to give their employees a raise, and you were hired to develop the program that computes the adjustments.
Write a program that receives an employee's salary and adjusts it according to the following criteria, based on the current salary:
* salaries up to R$ 280.00 (inclusive): 20% raise;
* salaries between R$ 280.00 and R$ 700.00: 15% raise;
* salaries between R$ 700.00 and R$ 1500.00: 10% raise;
* salaries of R$ 1500.00 and above: 5% raise.
After the raise is applied, display on the screen:
* the salary before the adjustment;
* the raise percentage applied;
* the amount of the raise;
* the new salary, after the raise.
###Code
sal = float(input("Digite seu salario, obs utilize ponto para marca as casas apos a virgula "))
if 280.00 >= sal :
aum = sal * 0.20
print("O seu salario era {0} o percentual aplicado é 20% o valor do aumento referente ao seu salario é {1}, o novo salario é {2}".format(sal, aum, sal + aum))
elif sal <= 700.00:
    aum = sal * 0.15
    print("O seu salario era {0} o percentual aplicado é 15% o valor do aumento referente ao seu salario é {1}, o novo salario é {2}".format(sal, aum, sal + aum))
elif sal <= 1500.00:
aum = sal * 0.10
print("O seu salario era {0} o percentual aplicado é 10% o valor do aumento referente ao seu salario é {1}, o novo salario é {2}".format(sal, aum, sal + aum))
else :
aum = sal * 0.05
print("O seu salario era {0} o percentual aplicado é 5% o valor do aumento referente ao seu salario é {1}, o novo salario é {2}".format(sal, aum, sal + aum))
###Output
_____no_output_____
###Markdown
Write a program to compute a payslip, knowing that the deductions are the income tax (IR), which depends on the gross salary (see the table below), and 3% for the union, and that the FGTS corresponds to 11% of the gross salary but is not deducted (the employer deposits it). The net salary is the gross salary minus the deductions. The program must ask the user for their hourly rate and the number of hours worked in the month.
IR deduction:
* gross salary up to 900 (inclusive) - exempt
* gross salary up to 1500 (inclusive) - 5% deduction
* gross salary up to 2500 (inclusive) - 10% deduction
* gross salary above 2500 - 20% deduction. Print the information on screen, laid out as in the example below. In the example the hourly rate is 5 and the number of hours is 220.

###Code
hora_trab = float(input("Digite suas horas trabalhadas no mes em questão "))
valor_hora = float(input("Digite o valor da sua hora trabalhada "))
sal_brut = hora_trab * valor_hora
if sal_brut <= 900:
inss = sal_brut * 0.10
fgts = sal_brut * 0.11
sindicato = sal_brut * 0.03
desc = inss + sindicato
sal_liq = sal_brut - desc
print("Seu salario bruto é {0}".format(sal_brut))
print("Você é isento de imposto de rende pois sua renda é inferior a R$900,00")
print("você pagara ao inss um total de R${0}".format(inss))
print("Sua empresa pagara um valor de R${0} ao inss".format(fgts))
print("Você pagara ao sindicado um valor de {0}")
print("Seus descontos somados são {0}".format(desc))
print("O seu salario liquido é {0}".format(sal_liq))
elif sal_brut <= 1500:
ir = sal_brut * 0.05
inss = sal_brut * 0.10
fgts = sal_brut * 0.11
sindicato = sal_brut * 0.03
desc = ir + inss + sindicato
sal_liq = sal_brut - desc
print("Seu salario bruto é {0}".format(sal_brut))
print("Você pagara de imposto de renda {0}".format(ir))
print("você pagara ao inss um total de R${0}".format(inss))
print("Sua empresa pagara um valor de R${0} ao inss".format(fgts))
print("Você pagara ao sindicado um valor de {0}".format(sindicato))
print("Seus descontos somados são {0}".format(desc))
print("O seu salario liquido é {0}".format(sal_liq))
elif sal_brut <= 2500:
ir = sal_brut * 0.10
inss = sal_brut * 0.10
fgts = sal_brut * 0.11
sindicato = sal_brut * 0.03
desc = ir + inss + sindicato
sal_liq = sal_brut - desc
print("Seu salario bruto é {0}".format(sal_brut))
print("Você pagara de imposto de renda {0}".format(ir))
print("você pagara ao inss um total de R${0}".format(inss))
print("Sua empresa pagara um valor de R${0} ao inss".format(fgts))
print("Você pagara ao sindicado um valor de {0}".format(sindicato))
print("Seus descontos somados são {0}".format(desc))
print("O seu salario liquido é {0}".format(sal_liq))
else:
    ir = sal_brut * 0.20
inss = sal_brut * 0.10
fgts = sal_brut * 0.11
sindicato = sal_brut * 0.03
desc = ir + inss + sindicato
sal_liq = sal_brut - desc
print("Seu salario bruto é {0}".format(sal_brut))
print("Você pagara de imposto de renda {0}".format(ir))
print("você pagara ao inss um total de R${0}".format(inss))
print("Sua empresa pagara um valor de R${0} ao inss".format(fgts))
print("Você pagara ao sindicado um valor de {0}".format(sindicato))
print("Seus descontos somados são {0}".format(desc))
print("O seu salario liquido é {0}".format(sal_liq))
###Output
_____no_output_____
###Markdown
Faça um Programa que leia um número e exiba o dia correspondente da semana. (1-Domingo, 2- Segunda, etc.), se digitar outro valor deve aparecer valor inválido.
###Code
dia = int(input("Digite de 1 - 7 para os dias da semana, obs domingo começa 1 e sabado começa com 7 "))
if dia == 1:
print("Bem Vindo ao Domingo")
elif dia == 2:
print("Bem Vindo a Segunda-Feira")
elif dia == 3:
print("Bem Vindo a Terça-Feira")
elif dia == 4:
print("Bem Vindo a Quarta-Feira")
elif dia == 5:
print("Bem Vindo a Quinta-Feira")
elif dia == 6:
print("Bem Vindo a Sexta-Feira")
elif dia == 7:
print("Bem Vindo a Sabado")
else:
print("Você digitou algum numero errado")
###Output
_____no_output_____
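###Markdown
As an aside, the same 1-7 mapping can also be written with a dictionary lookup instead of the if/elif chain; the sketch below assumes the same 1 = Domingo ... 7 = Sabado convention.
###Code
# Minimal sketch: dictionary-based weekday lookup.
dias = {1: "Domingo", 2: "Segunda-Feira", 3: "Terça-Feira", 4: "Quarta-Feira",
        5: "Quinta-Feira", 6: "Sexta-Feira", 7: "Sabado"}
dia = int(input("Digite de 1 a 7 para o dia da semana "))
if dia in dias:
    print("Bem Vindo a " + dias[dia])
else:
    print("Você digitou algum numero errado")
###Output
_____no_output_____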
###Markdown
Faça um programa que lê as duas notas parciais obtidas por um aluno numa disciplina ao longo de um semestre, e calcule a sua média. A atribuição de conceitos obedece à tabela abaixo:

###Code
p1 = float(input("Digite o valor da sua P1 "))
p2 = float(input("Digite o valor da sua P2 "))
media = (p1 + p2) / 2
if 9.0 <= media <= 10:
print("Sua media foi {0} seu conceito é A você está Aprovado".format(media))
elif 7.5 <= media < 9.0:
print("Sua media foi {0} seu conceito é B você está Aprovado".format(media))
elif 6.0 <= media < 7.5:
print("Sua media foi {0} seu conceito é C você está Aprovado".format(media))
elif 4.0 <= media < 6.0:
print("Sua media foi {0} seu conceito é D você está Reprovado".format(media))
elif 0 <= media < 4.0:
print("Sua media foi {0} seu conceito é E você está Reprovado".format(media))
else:
print("Você digitou algum numero errado")
###Output
_____no_output_____
###Markdown

###Code
var1 = float(input("Digite o valor do lado do triangulo "))
var2 = float(input("Digite o valor do lado do triangulo "))
var3 = float(input("Digite o valor do lado do triangulo "))
if var1 >= var2 + var3 or var2 >= var1 + var3 or var3 >= var1 + var2:
print("Você digitou algo errado pois não se forma um triangulo")
else:
if var1 == var2 == var3:
print("O Triangulo e Equilatero todos os seus lados são iguais")
elif var1 == var2 or var2 == var3 or var1 == var3:
print("O Triangulo e Isósceles tem pelo menos dois lados iguais")
else:
print("Os triangulo e Escaleno nenhum lado e igual")
###Output
_____no_output_____
###Markdown

###Code
import math
a = float(input("Digite o valor do coeficiente a "))
if a != 0:
    b = float(input("Digite o valor do coeficiente b "))
    c = float(input("Digite o valor do coeficiente c "))
    delta = pow(b,2) - 4 * a * c
    if delta == 0:
        calc = -b / (2 * a)
        print("O valor de delta é zero possui só uma raiz que é {0}".format(calc))
    elif delta > 0:
        calc1 = (-b + math.sqrt(delta)) / (2 * a)
        calc2 = (-b - math.sqrt(delta)) / (2 * a)
print("O valor de delta é positivo possui duas raizes a positiva é {0} e sua negativa é {1}".format(calc1,calc2))
else:
print("O valor de delta é negativo, impossibilitando o calculo da raiz o valor de delta ficou {0}".format(delta))
else:
print("O valor de A e igual a zero portando não é uma equação do segundo grau")
###Output
_____no_output_____
###Markdown
Faça um Programa que peça um número correspondente a um determinado ano e em seguida informe se este ano é ou não bissexto.
###Code
var = int(input("Digite o ano por favor "))
if var % 400 == 0 or (var % 4 == 0 and var % 100 != 0):
print("O ano {0} é bisexto!".format(var))
else:
print("Não é bisexto")
###Output
_____no_output_____
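###Markdown
As an aside, the Gregorian leap-year rule is also available in the standard library, so the test above can be cross-checked with calendar.isleap.
###Code
# Minimal sketch using the standard library; same semantics as the corrected test above.
import calendar

for ano in (1900, 2000, 2020, 2021):
    print(ano, calendar.isleap(ano))
###Output
_____no_output_____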
###Markdown
Faça um Programa que peça uma data no formato dd-mm-aaaa e determine se a mesma é uma data válida.
###Code
def calendario(data):
# faz o split e transforma em números
dia, mes, ano = map(int, data.split('/'))
# mês ou ano inválido (só considera do ano 1 em diante), retorna False
if mes < 1 or mes > 12 or ano <= 0:
return False
# verifica qual o último dia do mês
if mes in (1, 3, 5, 7, 8, 10, 12):
ultimo_dia = 31
elif mes == 2:
# verifica se é ano bissexto
if (ano % 4 == 0) and (ano % 100 != 0 or ano % 400 == 0):
ultimo_dia = 29
else:
ultimo_dia = 28
else:
ultimo_dia = 30
# verifica se o dia é válido
if dia < 1 or dia > ultimo_dia:
return False
return print("Data Valida")
data = input("Digite sua data no formato dd/mm/aaaa ")
calendario(data)
###Output
_____no_output_____
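###Markdown
As an aside, an alternative to the manual month/day table above is to let datetime.strptime do the validation and catch the ValueError it raises for impossible dates; the sketch below assumes the same dd/mm/aaaa format.
###Code
# Minimal sketch: validation via datetime.strptime.
from datetime import datetime

def data_valida(data):
    try:
        datetime.strptime(data, "%d/%m/%Y")
        return True
    except ValueError:
        return False

print(data_valida("29/02/2020"), data_valida("31/04/2021"))
###Output
_____no_output_____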
###Markdown

###Code
var = input("Digite um numero, obs menor que 1000 ")
if len(var) <= 3:
if len(var) == 3:
cent = var[0]
dez = var[1]
unid = var [2]
print("O valor do seu numero em centena é {0} em dezenas {1} e em unidades é {2}".format(cent,dez,unid))
elif len(var) == 2:
dez = var[0]
unid = var [1]
print("O valor do seu numero em dezenas é {0} e em unidades é {1}".format(dez,unid))
else:
unid = var [0]
print("O valor do seu numero em unidades é {0}".format(unid))
else:
    print("Você digitou um numero de 1000 ou maior")
###Output
_____no_output_____
###Markdown

###Code
p1 = float(input("Digite o valor da sua P1 "))
p2 = float(input("Digite o valor da sua P2 "))
media = (p1 + p2) / 2
if 0 <= media <= 10:
    if media == 10:
        print("Sua media foi {0} você está Aprovado com distinção".format(media))
    elif media >= 7:
        print("Sua media foi {0} você está Aprovado".format(media))
    else:
        print("Sua media foi {0} você está Reprovado".format(media))
else:
print("Você digitou algum numero errado")
###Output
_____no_output_____
###Markdown

###Code
valor = int(input("Digite o quanto deseja sacar "))
if 600 >= valor >= 10:
nota100 = 0
nota50 = 0
nota10 = 0
nota5 = 0
nota1 = 0
    while valor > 0:
        if valor >= 100:
            nota100 += 1
            valor = valor - 100
        elif valor >= 50:
            nota50 += 1
            valor = valor - 50
        elif valor >= 10:
            nota10 += 1
            valor = valor - 10
        elif valor >= 5:
            nota5 += 1
            valor = valor - 5
        else:
            nota1 += 1
            valor = valor - 1
    print("Notas a serem retiradas: {0} de 100 reais, {1} de 50 reais, {2} de 10 reais, {3} de 5 reais e {4} de 1 real".format(nota100,nota50,nota10,nota5,nota1))
else:
print("O valor que deseja sacar e muito alto ou muito baixo")
###Output
_____no_output_____
###Markdown
Faça um Programa que peça um número inteiro e determine se ele é par ou impar. Dica: utilize o operador módulo (resto da divisão).
###Code
def imparPar(a):
calc = a % 2
if calc == 0:
return print("Par")
else:
return print("Impar")
a = int(input("Digite qualquer valor "))
imparPar(a)
###Output
_____no_output_____
###Markdown
Faça um Programa que peça um número e informe se o número é inteiro ou decimal. Dica: utilize uma função de arredondamento.
###Code
a = float(input("Digite qualquer valor "))
if round(a) == a:
print ("Esse numero é inteiro")
else:
print ("Esse numero é um decimal")
###Output
Digite qualquer valor 10.5
Esse numero é um decimal
###Markdown

###Code
valor1 = float(input("Digite um valor "))
valor2 = float(input("Digite um valor "))
operacao = str(input("Digite qual operação você dejesa fazer A-Multiplicação B-Soma C-Subtração D-Divisão ")).upper()
if operacao == "A" or operacao == "*":
calc = valor1 * valor2
elif operacao == "B" or operacao == "+":
calc = valor1 + valor2
elif operacao == "C" or operacao == "-":
calc = valor1 - valor2
elif operacao == "D" or operacao == "/":
if valor2 != 0:
calc = valor1 / valor2
else:
print("não é possivel dividir por zero")
else:
print("você digitou alguma operação invalida")
print(" ")
print("Escolha uma das opções a baixo")
print("A) Par ou Ímpar")
print("B) Postivo ou Negativo")
letra = str(input("C)Inteiro ou decimal ")).upper()
print("O valor da operação é {0}".format(calc))
if letra == "A":
if (calc % 2) == 0:
print("O valor é par")
else:
print("O valor é impar")
elif letra == "B":
if calc > 0:
print("O resultado da operação é positivo")
    elif calc == 0:
print("O resultado da operação é neutro")
else:
print("O resultado da operação é negativa")
elif letra == "C":
if round(calc) == calc:
print ("Esse numero é inteiro")
else:
print ("Esse numero é um decimal")
###Output
Digite um valor 10
Digite um valor 10
Digite qual operação você dejesa fazer A-Multiplicação B-Soma C-Subtração D-Divisão -
Escolha uma das opções a baixo
A) Par ou Ímpar
B) Postivo ou Negativo
C)Inteiro ou decimal b
O valor da operação é 0.0
O resultado da operação é negativa
###Markdown

###Code
a = []
print("vamos lhe fazer algumas perguntas digite S para sim e N para não")
a.append(input("Você telefonou para a vítima?").upper())
a.append(input("Você esteve no local do crime?").upper())
a.append(input("Você mora perto da vítima?").upper())
a.append(input("Você devia para a vítima?").upper())
a.append(input("Você já trabalhou com a vítima?").upper())
soma = 0
for i in range(5):
if a[i] == "S":
soma +=1
if soma == 2:
print("Suspeito")
elif soma == 3 or soma == 4:
print("Cúmplice")
elif soma == 5:
print("Assassino")
else:
print("Inocente")
###Output
_____no_output_____
###Markdown

###Code
combust = input("Digite qual combustível G - Gasolina ou A - Álcool ").lower()
litros = int(input("Quantos litros deu "))
print(combust)
if combust == "a":
if litros <= 20:
saldo = litros * 1.90
desc = saldo - (saldo * 0.03)
else:
saldo = litros * 1.90
desc = saldo - (saldo * 0.05)
print("Você comprou em {0} litros e o valor do álcool e 1.90 seu valor é {1}".format(litros,desc))
elif combust == "g":
if litros <= 20:
saldo = litros * 2.50
desc = saldo - (saldo * 0.04)
else:
saldo = litros * 2.50
desc = saldo - (saldo * 0.06)
print("Você comprou em {0} litros e o valor do Gasolina e 2.50 seu valor é {1}".format(litros,desc))
else:
print("Você digitou algo errado")
###Output
_____no_output_____
###Markdown

###Code
vezes = True
i = 0
valor = 0
valor1 = 0
while vezes == True:
fruta = str(input("Digite a fruta que deseja M para morango A para maça").upper())
kg = int(input("Quantos kilos você quer"))
    if fruta == "M":
if kg < 5:
valor = 2.50 * kg
else:
if kg > 8:
desc = 1
valor = 2.20 * kg
else:
if kg < 5:
valor1 = 1.80 * kg
else:
if kg > 8:
desc = 1
valor1 = 1.50 * kg
t = str(input("deseja comprar mais frutas S-Sim e N-Não").upper())
if t == "N" or i == 2:
vezes = False
print(i)
break
i +=1
print(i)
if i != 1:
soma = valor + valor1
soma = soma - (soma * 0.10)
print("Você comprou {0} é teve um desconto de 10%".format(soma))
else:
    if valor == 0:
valor = valor1
print("Você comprou {0}".format(valor))
###Output
_____no_output_____
###Markdown

###Code
carne = str(input("Qual carne deseja compra lembrando que você só pode comprar uma tipo de carne F-File duplo A-Alcatra P-Picanha ").lower())
kg = int(input("Digite a quantidade de carne "))
cartao = str(input("Você vai pagar no T-Cartao Tabajara ou a D-dinheiro ").lower())
if(cartao == "t"):
if(carne =="f"):
if(kg < 5):
calc = kg * 4.90
desc = calc - (calc * 0.10)
print("Você comprou a carne File Duplo seu total é {0} R$ como você vai pagar no cartão tabajara você tem um desconto de 10 porcento seu valor é {1}R$".format(calc,desc))
else:
calc = kg * 5.80
desc = calc - (calc * 0.10)
print("Você comprou a carne File Duplo seu total é {0} R$ como você vai pagar no cartão tabajara você tem um desconto de 10 porcento seu valor é {1}R$".format(calc,desc))
    elif(carne =="a"):
if(kg < 5):
calc = kg * 5.90
desc = calc - (calc * 0.10)
print("Você comprou a carne Alcatra seu total é {0} R$ como você vai pagar no cartão tabajara você tem um desconto de 10 porcento seu valor é {1}R$".format(calc,desc))
else:
calc = kg * 6.80
desc = calc - (calc * 0.10)
print("Você comprou a carne Alcatra seu total é {0} R$ como você vai pagar no cartão tabajara você tem um desconto de 10 porcento seu valor é {1}R$".format(calc,desc))
elif(carne =="p"):
if(kg < 5):
calc = kg * 6.90
desc = calc - (calc * 0.10)
print("Você comprou a carne Picanha seu total é {0} R$ como você vai pagar no cartão tabajara você tem um desconto de 10 porcento seu valor é {1}R$".format(calc,desc))
else:
calc = kg * 7.80
desc = calc - (calc * 0.10)
print("Você comprou a carne Picanha seu total é {0} R$ como você vai pagar no cartão tabajara você tem um desconto de 10 porcento seu valor é {1}R$".format(calc,desc))
else:
print("Você digitou um numero errado")
else:
if(carne == "f"):
if(kg < 5):
calc = kg * 4.90
print("Você comprou a carne File Duplo seu total é {0} R$".format(calc))
else:
calc = kg * 5.80
print("Você comprou a carne File Duplo seu total é {0} R$".format(calc))
elif(carne =="a"):
if(kg < 5):
calc = kg * 5.90
print("Você comprou a carne Alcatra seu total é {0} R$".format(calc))
else:
calc = kg * 6.80
desc = calc - (calc * 0.10)
print("Você comprou a carne Alcatra seu total é {0} R$".format(calc))
elif(carne =="p"):
if(kg < 5):
calc = kg * 6.90
print("Você comprou a carne Picanha seu total é {0} R$".format(calc))
else:
calc = kg * 7.80
print("Você comprou a carne Picanha seu total é {0} R$".format(calc))
else:
print("Você digitou um numero errado")
###Output
_____no_output_____ |
ProcTrack-BarPlots.ipynb | ###Markdown
Create Target of Interest
###Code
net.create_target(target_name='Creative Cloud.exe',
new_node=False)
net.assign_attribute('targetIDX')
df = net._return_df()
df
df2 = df[df['EventCls'] == 'Registry'].copy()
df2
import seaborn as sb; import numpy as np
df2['Dur'] = df2['Dur'].astype(float)
sb.boxplot(x='Oper', y='Dur', hue='Cat', data=df2, showfliers=False)
net._unique('Oper')
net._return_df()
net.plot_network(save=True)
## Add the labels to the nodes
labels = dict(zip(GA.nodes(),df['ProcName']))
GA = nx.relabel_nodes(GA,labels)
from Sandy.graphing import graph
#bb = nx.betweenness_centrality(GA)
attr = dict(zip(GA.nodes(),df['Target']))
nx.set_node_attributes(GA, attr,'Time')
## Add the colormapping for attributes
#attr2 = dict(zip(GA.nodes(),df['Size']))
#nx.set_node_attributes(GA, attr2,'Size')
n_colors, c_map, pal = graph.create_color_map(GA,'Time',sb_palette="RdBu_r")
#n_colors2, c_map2, pal = graph.create_color_map(GA,'Size',sb_palette="Greys")
import seaborn as sb
nx.info(GA)
attributes = [GA.nodes[label]['Time'] for label in GA.nodes()]
attributes_unique = list(set(attributes))
palette = sb.color_palette("RdBu_r", 2).as_hex()
color_map = dict(zip(attributes_unique, palette))
node_colors = [color_map[attribute] for attribute in attributes]
fig = plt.figure()
layout = nx.spring_layout(GA)
nx.draw_networkx_nodes(GA,pos=layout,node_size=20,alpha=1,node_color=node_colors)
nx.draw_networkx_edges(GA,pos=layout,width=0.8,style='dotted',edge_color='red')
#edge_cmap=plt.cm.Blues)
nx.draw_networkx_labels(GA,pos=layout,font_size=4)
plt.show()
fig.savefig('testTarget.png',format='png', dpi=1000, bbox_inches = 'tight')
###Output
_____no_output_____ |
docs/notebooks/examples/1D_simulation(macro_amorphous)/plot_0_protein_GB1.ipynb | ###Markdown
Protein GB1, ¹³C and ¹⁵N (I=1/2)¹³C/¹⁵N (I=1/2) spinning sideband simulation. The following is the spinning sideband simulation of a macromolecule, protein GB1. The$^{13}\text{C}$ and $^{15}\text{N}$ CSA tensor parameters were obtainedfrom Hung `et al.` [f1]_, which consists of 42 $^{13}\text{C}\alpha$,44 $^{13}\text{CO}$, and 44 $^{15}\text{NH}$ tensors. In the followingexample, instead of creating 130 spin systems, we download the spin systems froma remote file and load it directly to the Simulator object.
###Code
import matplotlib.pyplot as plt
from mrsimulator import Simulator
from mrsimulator.methods import BlochDecaySpectrum
from mrsimulator import signal_processing as sp
###Output
_____no_output_____
###Markdown
Create the Simulator object and load the spin systems from an external file.
###Code
sim = Simulator()
file_ = "https://sandbox.zenodo.org/record/687656/files/protein_GB1_15N_13CA_13CO.mrsys"
sim.load_spin_systems(file_) # load the spin systems.
print(f"number of spin systems = {len(sim.spin_systems)}")
all_sites = sim.sites().to_pd()
all_sites.head()
###Output
_____no_output_____
###Markdown
Create a $^{13}\text{C}$ Bloch decay spectrum method.
###Code
method_13C = BlochDecaySpectrum(
channels=["13C"],
magnetic_flux_density=11.74, # in T
rotor_frequency=3000, # in Hz
spectral_dimensions=[
dict(
count=8192,
spectral_width=5e4, # in Hz
reference_offset=2e4, # in Hz
label=r"$^{13}$C resonances",
)
],
)
###Output
_____no_output_____
###Markdown
Since the spin systems contain both $^{13}\text{C}$ and $^{15}\text{N}$sites, let's also create a $^{15}\text{N}$ Bloch decay spectrum method.
###Code
method_15N = BlochDecaySpectrum(
channels=["15N"],
magnetic_flux_density=11.74, # in T
rotor_frequency=3000, # in Hz
spectral_dimensions=[
dict(
count=8192,
spectral_width=4e4, # in Hz
reference_offset=7e3, # in Hz
label=r"$^{15}$N resonances",
)
],
)
###Output
_____no_output_____
###Markdown
Add the methods to the Simulator object and run the simulation
###Code
# Add the methods.
sim.methods = [method_13C, method_15N]
# Run the simulation.
sim.run()
# Get the simulation data from the respective methods.
data_13C = sim.methods[0].simulation # method at index 0 is 13C Bloch decay method.
data_15N = sim.methods[1].simulation # method at index 1 is 15N Bloch decay method.
###Output
_____no_output_____
###Markdown
Add post-simulation signal processing.
###Code
processor = sp.SignalProcessor(
operations=[sp.IFFT(), sp.apodization.Exponential(FWHM="10 Hz"), sp.FFT()]
)
# apply post-simulation processing to data_13C
processed_data_13C = processor.apply_operations(data=data_13C).real
# apply post-simulation processing to data_15N
processed_data_15N = processor.apply_operations(data=data_15N).real
###Output
_____no_output_____
###Markdown
The plot of the simulation after signal processing.
###Code
fig, ax = plt.subplots(
1, 2, subplot_kw={"projection": "csdm"}, sharey=True, figsize=(9, 4)
)
ax[0].plot(processed_data_13C, color="black", linewidth=0.5)
ax[0].invert_xaxis()
ax[1].plot(processed_data_15N, color="black", linewidth=0.5)
ax[1].set_ylabel(None)
ax[1].invert_xaxis()
plt.tight_layout()
plt.show()
###Output
_____no_output_____ |
6 objectSwarmObserverAgents_AESOP_turtleLib_NetworkX/oligopoly/readingCsvOutput_par_corr_BWter.ipynb | ###Markdown
select rows (time steps) in the database, activating the cell below before running the whole program: [a:b] => from a+1 to b; [:b] => from init to b; [a:] => from a+1 to end
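A minimal sketch of that convention, using a throwaway toy frame (not the model output): with positional slicing the right bound is excluded, so df[a:b] keeps rows a+1..b in 1-based terms.
###Code
# Hypothetical toy frame, only to illustrate the [a:b] slicing convention described above.
import pandas as pd
toy = pd.DataFrame({'step': range(1, 11)})
print(toy[0:4]) # steps 1..4
print(toy[:4]) # same: from the start up to step 4
print(toy[4:]) # steps 5..10
###Output
_____no_output_____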
###Code
#ts_df =ts_df [0:45]
#str_df=str_df[0:45]
###Output
_____no_output_____
###Markdown
***Parameters***
###Code
par_df.astype(str,errors='ignore')
###Output
_____no_output_____
###Markdown
***Modified parameters***
###Code
modPars_df.astype(str,errors='ignore')
###Output
_____no_output_____
###Markdown
***Time series, data collected at the end of each period***
###Code
if len(ts_df.columns) == 6:
ts_df.columns = \
['unempl.','totalProfit','totalProd.','plannedP.','price','wage']
# to have shorter names
if len(ts_df.columns) == 8:
ts_df.columns = \
['unempl.','totalProfit','totalProd.','plannedP.', 'cQ','hPSd','price','wage']
# to have shorter names
ts_df
ts_df.describe()
ts_df.corr(method="pearson").style.format("{:.2}")
###Output
_____no_output_____
###Markdown
The partial_corr source comes from [https://gist.github.com/fabianp/9396204419c7b638d38f](https://gist.github.com/fabianp/9396204419c7b638d38f). At [http://en.wikipedia.org/wiki/Partial_correlationUsing_linear_regression](http://en.wikipedia.org/wiki/Partial_correlationUsing_linear_regression) there is an explanation of why the data matrix is augmented with a column of ones: it allows for a constant (intercept) term in the regression.
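A minimal sketch of why that column of ones matters, with made-up numbers: without it least squares is forced through the origin, with it the first coefficient becomes the intercept.
###Code
# Hypothetical toy data, only to illustrate the intercept column that partial_corr relies on below.
import numpy as np
x = np.arange(5, dtype=float)
y = 3.0 + 2.0 * x
X_aug = np.hstack((np.ones((x.shape[0], 1)), x.reshape(-1, 1)))
coef, *_ = np.linalg.lstsq(X_aug, y, rcond=None)
print(coef) # approximately [3., 2.]: intercept and slope
###Output
_____no_output_____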
###Code
wn.filterwarnings(action="ignore") # to eliminate a warning about
#LAPACK lib
np.set_printoptions(precision=2,suppress=True)
ts=ts_df.values
ts_int = np.hstack((np.ones((ts.shape[0],1)), ts))
out1=partial_corr.partial_corr(ts_int)[1:, 1:]
out1
ts=ts_df.drop(columns="plannedP.").values
ts_int = np.hstack((np.ones((ts.shape[0],1)), ts))
out2=partial_corr.partial_corr(ts_int)[1:, 1:]
out2
ts=ts_df.drop(columns="totalProd.").values
ts_int = np.hstack((np.ones((ts.shape[0],1)), ts))
out3=partial_corr.partial_corr(ts_int)[1:, 1:]
out3
ts2_df=ts_df
if len(ts_df.columns) == 6:
ts2_df.columns = \
['unempl','totalProfit','totalProd','plannedP','price','wage']
if len(ts_df.columns) == 8:
ts2_df.columns = \
['unempl','totalProfit','totalProd','plannedP','cQ','hPSd','price','wage']
result = sm.ols(formula="totalProfit ~ price + wage + totalProd + unempl", \
data=ts2_df).fit()
print (result.summary())
###Output
_____no_output_____
###Markdown
***Structural infos, data collected at the beginning of each period***
###Code
str_df
###Output
_____no_output_____
###Markdown
levels of grayhttps://en.wikipedia.org/wiki/Shades_of_gray
###Code
myPlot = ts_df.plot(figsize=(11,8),secondary_y=['hPriceSd', 'price','wage'],marker="*",
color=["OrangeRed","LawnGreen","Blue","Violet","lightblue","Pink","Gray","Brown"])
myPlot.set_ylabel('unemployed, totalProfit, totalProduction, plannedProduction, consumptionQ')
myPlot.right_ax.set_ylabel('hPriceSd, price, wage')
myPlot.legend(loc='upper left') #, bbox_to_anchor=(-0.35, 0.5)
myPlot.axes.right_ax.legend(loc='lower right') #, bbox_to_anchor=(1.1, 0.5)
myPlot = ts_df.plot(figsize=(11,8),secondary_y=['hPriceSd', 'price','wage'],marker="",
color=["lightgray","Black","Black","Black","Gray","lightgray","lightgray","lightgray"],
style=['-', '--', '-.', ':','-', '--', '-.'],
linewidth=1.)
myPlot.set_ylabel('unemployed, totalProfit, totalProduction, plannedProduction, consumptionQ')
myPlot.right_ax.set_ylabel('hPriceSd, price, wage')
myPlot.legend(loc='upper left') #, bbox_to_anchor=(-0.35, 0.5)
myPlot.axes.right_ax.legend(loc='lower right') #, bbox_to_anchor=(1.1, 0.5)
myPlot = ts_df.plot(figsize=(11,8),secondary_y=['hPriceSd', 'price','wage'],marker="",
color=["silver","Black","Black","Black","Gray","slategray","slategray","slategray"],
style=['-', '--', '-.', ':','-', '--', '-.'],
linewidth=2.)
myPlot.set_ylabel('unemployed, totalProfit, totalProduction, plannedProduction, consumptionQ')
myPlot.right_ax.set_ylabel('hPriceSd, price, wage')
myPlot.legend(loc='upper left') #, bbox_to_anchor=(-0.35, 0.5)
myPlot.axes.right_ax.legend(loc='lower right') #, bbox_to_anchor=(1.1, 0.5)
str_df.plot(figsize=(11,8),secondary_y='workers',marker="*",color=["r","b"])
str_df.plot(figsize=(11,8),secondary_y='workers',marker="*",color=["black",
"lightgrey"])
str_df.plot(figsize=(11,8),linewidth=2.0,secondary_y='workers',marker="*",color=["black",
"gray"])
###Output
_____no_output_____
###Markdown
Best solutions to produce a LaTeX table from these data (the example is related to the ts_df.corr table): corr=ts_df.corr(method='pearson'); print(corr.to_latex()), using print() to have the output nicely formatted; copy and paste it into LaTeX and the result works. The output is then wrapped within: \begin{table}[htbp] ... output above ... \label{a label} \caption{a caption} \end{table}. We also add size specifications (\footnotesize in this case) and the usual [htbp] specification with \begin{table}[htbp]. Other solutions: 1. online [http://www.tablesgenerator.com](http://www.tablesgenerator.com), reading the csv file; 2. using a converter such as [http://html2latex.sourceforge.net](http://html2latex.sourceforge.net). The first method is applied in the cells below.
###Code
corr=ts_df.corr(method='pearson')
def ff(x):
return '%1.2f' % x
if len(ts_df.columns) == 6:
print ("\\begin{table}[!htbp]\n{\\footnotesize \center")
if len(ts_df.columns) == 8:
print ("\\begin{table}[!htbp]\n{\\tiny \center")
print (corr.to_latex(formatters=[ff,ff,ff,ff,ff,ff,ff,ff]))
print("}\n\\caption{Correlations among the time series of the model,"+\
" with xxx}")
print("\\label{correlations xxx}\n\\end{table}")
ou=out1
if len(ts_df.columns) == 6:
names=['unempl.','totalProfit','totalProd.','plannedP.','price','wage']
if len(ts_df.columns) == 8:
names=['unempl.','totalProfit','totalProd.','plannedP.','cQ','hPSd','price','wage']
if len(ts_df.columns) == 6:
print ("\\begin{table}[!htbp]\n{\\footnotesize \center")
if len(ts_df.columns) == 8:
print ("\\begin{table}[!htbp]\n{\\tiny \center")
if len(ts_df.columns) == 6:
print ("\\begin{tabular}{lrrrrrr}\n\\toprule\n"+\
"{} & unempl. & totalProfit & totalProd. & plannedP. & price & wage \\\\"+\
"\n\\midrule")
if len(ts_df.columns) == 8:
print ("\\begin{tabular}{lrrrrrrrr}\n\\toprule\n"+\
"{} & unempl. & totalProfit & totalProd. & plannedP. & cQ & hPSd & price & wage \\\\"+\
"\n\\midrule")
for i in range(len(ou)):
print(names[i], end="")
for j in range(len(ou[i])):
print(" & %.2f" % ou[i,j], end="")
print(" \\\\")
print("\\bottomrule\n\\end{tabular}")
print("}\n\\caption{Partial correlations among the time series of the model,"+\
" with xxx}")
print("\\label{partial correlations xxx}\n\\end{table}")
ou=out2
if len(ts_df.columns) == 6:
names=['unempl.','totalProfit','totalProd.','price','wage']
if len(ts_df.columns) == 8:
names=['unempl.','totalProfit','totalProd.','cQ','hPSd','price','wage']
print ("\\begin{table}[!htbp]\n{\\footnotesize \center")
if len(ts_df.columns) == 6:
print ("\\begin{tabular}{lrrrrr}\n\\toprule\n"+\
"{} & unempl. & totalProfit & totalProd. & price & wage \\\\"+\
"\n\\midrule")
if len(ts_df.columns) == 8:
print ("\\begin{tabular}{lrrrrrrr}\n\\toprule\n"+\
"{} & unempl. & totalProfit & totalProd. & cQ & hPSd & price & wage \\\\"+\
"\n\\midrule")
for i in range(len(ou)):
print(names[i], end="")
for j in range(len(ou[i])):
print(" & %.2f" % ou[i,j], end="")
print(" \\\\")
print("\\bottomrule\n\\end{tabular}")
print("}\n\\caption{Partial correlations (no plannedProduction) among the time series of the model,"+\
" with xxx}")
print("\\label{partial correlations (no plannedP.) xxx}\n\\end{table}")
ou=out3
if len(ts_df.columns) == 6:
names=['unempl.','totalProfit','plannedP.','price','wage']
if len(ts_df.columns) == 8:
names=['unempl.','totalProfit','plannedP.','cQ','hPSd','price','wage']
print ("\\begin{table}[!htbp]\n{\\footnotesize \center")
if len(ts_df.columns) == 6:
print ("\\begin{tabular}{lrrrrr}\n\\toprule\n"+\
"{} & unempl. & totalProfit & plannedP. & price & wage \\\\"+\
"\n\\midrule")
if len(ts_df.columns) == 8:
print ("\\begin{tabular}{lrrrrrrr}\n\\toprule\n"+\
"{} & unempl. & totalProfit & plannedP. & cQ & hPSd & price & wage \\\\"+\
"\n\\midrule")
for i in range(len(ou)):
print(names[i], end="")
for j in range(len(ou[i])):
print(" & %.2f" % ou[i,j], end="")
print(" \\\\")
print("\\bottomrule\n\\end{tabular}")
print("}\n\\caption{Partial correlations (no totalProduction) among the time series of the model,"+\
" with xxx}")
print("\\label{partial correlations (no totalProd.) xxx}\n\\end{table}")
###Output
_____no_output_____
###Markdown
*Data from each firm in each period*
###Code
if firms: print(firms_df.describe())
else: print('no data for each firm in each period')
###Output
_____no_output_____
###Markdown
*Managing parameter list*
###Code
ctitle=""
if len(par_df.columns)==2: ctitle=par_df.columns[0]
if len(par_df.columns)==3: ctitle=par_df.columns[1]
if len(ts_df.columns) == 6:
parList=par_df[ctitle].tolist()
valList=par_df["Values"].tolist()
if len(ts_df.columns) == 8:
parList=par_df["Parameter internal names"].tolist()
valList=par_df["Values"].tolist()
# both parList are generated by the 'print' of parameters.py
###Output
_____no_output_____
###Markdown
**dictionary of values** ***d_val***: it comes from the file \*_par.csv produced by the 'print' of parameters.py. **NB** the different versions of the model have different parameter output sequences; the main difference is between the 6 time series case and the 8 time series case in file \*_ts.csv, emerging above. \[zip() takes iterables (can be zero or more), makes an iterator that aggregates elements based on the iterables passed, and returns an iterator of tuples\]
###Code
d_val=dict(zip(parList,valList))
d_val
###Output
_____no_output_____
###Markdown
**dictionary of positions** ***d_pos***: the dict of positions (file parPos.csv) comes from manual work based on the table of parameter definitions of appendix B of the book; the goal is to retrieve the parameters of a specific experiment in dict d_val and assign their values to the correct position in the rows of the parameter value table of Appendix B (the table of values for the different experiments). The vector (row) is pre-filled with '-' signs for values not existent in the specific experiment. The case of the par 'checkResConsUnsoldProd' is handled in a special way: the parameter 'checkResConsUnsoldProd' (not affecting the model, but only working on its output) appears 20180829 in the 28ter experiment; in the first commit, of 20180830, the name is checkResCons, but quite immediately became checkResConsUnsoldProd; the commit of 20181013 signals that we have the output from parameters.py (experiment 80 is of 20181009, so without that output); all the experiments from 28ter to 80 have implicitly 'checkResConsUnsoldProd' set to True. The 'w' case is corrected to 'Q'. To check for the consistency of the dictionaries, we list unfound parameters in ***d_val*** when searching for values (the master dict is ***d_pos***)
###Code
labelsPositions_df= pd.read_csv('labelsPositions.csv')
#labelsPositions_df
parList2=labelsPositions_df["name"].tolist()
posList=labelsPositions_df["position"].tolist()
d_pos=dict(zip(parList2,posList))
#d_pos
row=['-']*53 # 52 parameters; pos. 0 is unused
row[44]='51' # as default value for the par 'startHayekianMarket' for old
# SMAC versions where it was not defined
for _ in range(len(parList)):
if parList[_]=='w': row[d_pos['Q']]=d_val[parList[_]]
if parList[_] in d_pos: row[d_pos[parList[_]]]=d_val[parList[_]]
else: print('not found:',parList[_])
###Output
_____no_output_____
###Markdown
the parameter checkResConsUnsoldProd (not affecting the model, but only working on its output) appears 20180829 in the 28ter experiment; in the first commit, of 20180830, the name is checkResCons, but quite immediately became checkResConsUnsoldProd; the commit of 20181013 signals that we have the output from parameters.py (experiment 80 is of 20181009, so without that output); all the experiments from 28ter to 80 have internally checkResConsUnsoldProd set to True. So from >= 20180829 to <= 20181009 the value of checkResConsUnsoldProd is True. 1535414400 is equivalent to 08/28/2018 @ 12:00am (UTC); 1539129600 is equivalent to 10/10/2018 @ 12:00am (UTC)
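As a quick cross-check of those two epoch values, the standard library converts them back to UTC dates (sketch below).
###Code
# Minimal sketch: confirming the two unix timestamps quoted above.
from datetime import datetime, timezone
for ts in (1535414400, 1539129600):
    print(ts, datetime.fromtimestamp(ts, tz=timezone.utc))
###Output
_____no_output_____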
###Code
import platform
def creation_date(path_to_file):
"""
Try to get the date that a file was created, falling back to when it was
last modified if that isn't possible.
See http://stackoverflow.com/a/39501288/1709587 for explanation.
"""
if platform.system() == 'Windows':
return os.path.getctime(path_to_file)
else: #MacOs
stat = os.stat(path_to_file)
try:
return stat.st_birthtime
except AttributeError:
# We're probably on Linux. No easy way to get creation dates here,
# so we'll settle for when its content was last modified.
return stat.st_mtime
#converter https://www.unixtimestamp.com
fileTime=creation_date("./"+nameFilePar)
if fileTime >= 1535414400 and fileTime <= 1539129600:
row[8]='True'
#row
#for i in range(1,len(row)-1):
# print(row[i],"& ",end='')
#print(row[-1])
for i in range(1,26):
print(row[i],"& ",end='')
print(row[26])
for i in range(27,len(row)-1):
print(row[i],"& ",end='')
if '[' in row[-1]: row[-1]=row[-1][1:5] # [1:5] is to avoid the [ ] output
print(row[-1])
###Output
_____no_output_____ |
phase3/3.1/3_model_training.ipynb | ###Markdown
cross validation
###Code
days = range(1,32)
month = 1
for day in days:
filename = trips_cluster_data_path + f'/trips_2019_{month}_{day}.csv'
print(filename)
dfs = []
day_len = []
day_trip_count = []
days = range(1,32)
# days = range(1,2)
months = range(1,4)
# months = range(1,2)
for month in months:
for day in days:
try:
sp_seq_df = pd.read_csv(trips_cluster_data_path + f'/lebeled/trips_2019_{month}_{day}.csv', parse_dates=['time_stamp'])
except:
pass
sp_seq_df = sp_seq_df[sp_seq_df['cluster']>-1].sort_values(by=['trip_id', 'time_stamp'])
trip_count = sp_seq_df.trip_id.value_counts()
trip_count = trip_count[trip_count >= 12]
sp_seq_df = sp_seq_df[sp_seq_df.trip_id.isin(trip_count.index)]
sp_seq_df['month'] = month
sp_seq_df['day'] = day
day_len.append(len(sp_seq_df))
day_trip_count.append(len(trip_count))
dfs.append(sp_seq_df)
print(day_len)
print(day_trip_count)
df = pd.concat(dfs, ignore_index=True)
df
df = df[['trip_id', 'sp', 'day', 'month','route_cluster']]
df.head()
###Output
_____no_output_____
###Markdown
get rid of mini cluster
###Code
import collections
import pickle
pkl_filename = train_path+"/QBcluster_q1_treshold12.pkl"
with open(pkl_filename, 'rb') as file:
clusters = pickle.load(file)
print("Nb. clusters:", len(clusters))
print("Cluster sizes:", map(len, clusters))
print("Small clusters:", collections.Counter(clusters < 2)[True], end=' /')
print("Small clusters:", clusters < 2)
print("Streamlines indices of the first cluster:\n", clusters[0].indices)
# print("Centroid of the last cluster:\n", clusters[-1].centroid)
is_mini_cluster = (clusters == 1)
mini_cluster = []
for ci in range(0, len(is_mini_cluster)):
if is_mini_cluster[ci]:
mini_cluster.append(ci)
print(len(mini_cluster))
df.loc[df.route_cluster.isin(mini_cluster), 'route_cluster'] = -1
df.loc[df.route_cluster == -1].sp.value_counts()
###Output
_____no_output_____
###Markdown
X and Label shuffle
###Code
state_names = []
uniq_r_cluster = df.route_cluster.unique()
for cluster in uniq_r_cluster:
state_names.append(str(cluster))
state_names[:5]
X = []
labels = []
for month in months:
for day in days:
day_df = df[(df.day == day) & (df.month == month)]
uniq_trips = day_df.trip_id.unique()
for trip_id in uniq_trips:
trip = day_df[day_df.trip_id==trip_id]
X.append(trip.sp.to_numpy().astype(str))
# labels.append(['None-start']+[str(l) for l in trip.route_cluster.to_list()])
labels.append([str(l) for l in trip.route_cluster.to_list()])
print(X[0][:5])
print(labels[0][:5])
print(len(X))
print(len(labels))
import random
random.seed(42)
X2 = X.copy()
labels2 = labels.copy()
temp = list(zip(X2, labels2))
random.shuffle(temp)
X2, labels2 = zip(*temp)
X2 = list(X2)
labels2 = list(labels2)
print(X[0][:5])
print(X2[0][:5])
print(labels[0][:5])
print(labels2[0][:5])
###Output
_____no_output_____
###Markdown
train and test 10-fold cross validation
###Code
from collections import Counter
import pickle
# pkl_filename = data_path+"/osm_json/dbscan.pkl"
# with open(pkl_filename, 'rb') as file:
# db = pickle.load(file)
# sp_list = db.labels_
# sp_list = set(map(str, sp_list))
def score(model, seq, label):
p = []
prob,path = model.viterbi(seq)
for s in path[1:]:
p.append(s[1].name)
# c = Counter(p)
# tp = 0
# if c[label]:
# tp = c[label]
# total = sum(c.values())
tp = 0
for _state_name, _label in zip(p, label):
if _state_name == _label:
tp+=1
total = len(label)
# print("best prob : {}% ".format(np.exp(prob)*100))
# print("beat path : {}".format(p))
# print("labels : {}".format(label))
# print("sequen : {}".format(seq))
return tp/total
from pomegranate import *
total = len(X2)
fold = int(total/10)
start = 0
starts = [] # [0, 952, 1904, 2856, 3808, 4760, 5712, 6664, 7616, 8568]
for i in range(10):
starts.append(start)
start+=fold
accuracy_train = []
accuracy_test = []
for start in starts: # 10
X_train = X2[:start]+X2[start+fold:]
X_test = X2[start:start+fold]
labels_train = labels2[:start]+labels2[start+fold:]
labels_test = labels2[start:start+fold]
    # this line is just to be on the safe side
uniq_sp = set()
for seq in X_train:
for sp in seq:
uniq_sp.add(sp)
uniq_label = set()
# for label in labels_train:
for label in labels_train:
for uniq in list(set(label)):
uniq_label.add(uniq)
new_label = []
for label2 in labels_train:
new_label.append(['None-start']+label2)
labels_train = new_label
unseen_sp = sp_list - uniq_sp
# print(unseen_sp)
X_train.append(np.array(list(unseen_sp)))
labels_train.append(['None-start'] + ['-1'] * len(unseen_sp))
model = HiddenMarkovModel.from_samples(
DiscreteDistribution,
n_components=len(uniq_label),
X=X_train,
labels=labels_train,
algorithm='labeled',
state_names=list(uniq_label),
inertia=0.001,
max_iterations=10,
n_jobs=-1
)
accuracy_train_percent = []
for percent in [0.25, 0.5, 0.75, 0.9]:
accu = 0
for seq, label in zip(X_train, labels_train):
sc = score(model, np.array(seq[:int(len(seq)*percent)]), label[1:])
if sc > 0:
# accu += sc
accu += 1
train_score = accu/len(X_train)
accuracy_train_percent.append(train_score)
accuracy_train.append(accuracy_train_percent)
accuracy_test_percent = []
for percent in [0.25, 0.5, 0.75, 0.9]:
accu = 0
for seq, label in zip(X_test, labels_test):
sc = score(model, np.array(seq[:int(len(seq)*percent)]), label[1:])
if sc > 0:
# accu += sc
accu += 1
test_score = accu/len(X_test)
accuracy_test_percent.append(test_score)
accuracy_test.append(accuracy_test_percent)
print(f'Score {start} - {start+fold}')
print(f'Train score')
print(f'\t25% trip traverse : {accuracy_train_percent[0]}')
print(f'\t50% trip traverse : {accuracy_train_percent[1]}')
print(f'\t75% trip traverse : {accuracy_train_percent[2]}')
print(f'\t90% trip traverse : {accuracy_train_percent[3]}')
print(f'Test score')
print(f'\t25% trip traverse : {accuracy_test_percent[0]}')
print(f'\t50% trip traverse : {accuracy_test_percent[1]}')
print(f'\t75% trip traverse : {accuracy_test_percent[2]}')
print(f'\t90% trip traverse : {accuracy_test_percent[3]}')
print('all score')
print('train score: ')
for i in range(4):
sum = 0
for fold in accuracy_train:
sum += fold[i]
print(f'\ttrip traverse {i} : {sum/10}')
print('test score: ')
for i in range(4):
sum = 0
for fold in accuracy_test:
sum += fold[i]
print(f'\ttrip traverse {i} : {sum/10}')
###Output
_____no_output_____ |
notebook/pretraining_model.ipynb | ###Markdown
Model config
###Code
optimizer_config = {
"SGD": {
"learning_rate": 1e-1,
"end_learning_rate": 1e-3
},
"Adam": {
"learning_rate": 1e-3,
"end_learning_rate": 1e-5,
"weight_decay": 0.01,
"epsilon": 1e-8
},
"RAdam": {
"learning_rate": 1e-3,
"end_learning_rate": 2e-5,
"weight_decay": 0.01,
"epsilon": 1e-8
}
}
model_config = {
"epochs": 3,
"num_train_epochs": 3,
"per_gpu_train_batch_size": 16,
"per_gpu_eval_batch_size": 32,
"batch_size": 128,
"max_tokens_length": 512,
"threshold": 0.5,
"optimizer_method": "Adam",
"learning_rate": optimizer_config['Adam']['learning_rate'],
"end_learning_rate": optimizer_config['Adam']['end_learning_rate'],
"weight_decay": optimizer_config['Adam']['weight_decay'],
"epsilon": optimizer_config['Adam']['epsilon'],
"lsm": 0.0,
"hidden_dropout_prob": 0.1,
"max_grad_norm": 1,
"use_warmup": True,
"n_gpu": torch.cuda.device_count(),
"gradient_accumulation_steps": 8,
"output_dir": str(pretrain_model_path / run_id),
"max_steps": 125000,
"logging_steps": 100,
"save_steps": 25000,
"evaluate_during_training": False,
"save_total_limit": 3,
"seed": 9527,
}
###Output
_____no_output_____
###Markdown
Data loader pipeline
###Code
#torch.multiprocessing.set_start_method('spawn')
#datasets.config.IN_MEMORY_MAX_SIZE
@dataclass(eq=False)
class GenerateDatasets:
#files_list: str = field(
# default=None, metadata={"help": "The files list of data path"}
#)
data_path: str = field(
default=None, metadata={"help": "The prefix path of files location"}
)
regex_file_format: str = field(
default='*.parquet', metadata={"help": "The files format."}
)
batch_size: int = field(
default=128, metadata={"help": "Batch size"}
)
is_training: bool = field(
default=True, metadata={"help": "Is use training mode to create data pipeline"}
)
device: str = field(
default='cpu', metadata={"help": "Which device to use [cpu, cuda]"}
)
cache_data_path: str = field(
default=None, metadata={"help": "The path to cache data."}
)
use_streaming_mode: bool = field(
default=False, metadata={"help": "Use streaming mode to download data."}
)
def __post_init__(self):
self.get_files_list = glob.glob(os.path.join(str(self.data_path), self.regex_file_format))
#self.get_files_list = '/home/jupyter/gogolook/data/jp_data/valid_pretraining_data/valid_all-maxseq512_BG.parquet'
self.encoding_columns = ['input_ids', 'token_type_ids', 'attention_mask']
self.target_columns = ['masked_lm_labels', 'next_sentence_labels']
def __call__(self, **kwargs):
        # the data is already on the device (cuda), so using pin_memory again raises an error
# RuntimeError: cannot pin 'torch.cuda.LongTensor' only dense CPU tensors can be pinned
dataset = load_dataset('parquet', data_files=self.get_files_list, cache_dir=self.cache_data_path, split='train')
dataset.set_format(type='torch', columns=self.encoding_columns + self.target_columns) # , device=self.device
#dataset = dataset.rename_column(self.target_column, 'labels')
if self.is_training:
drop_last = True
else:
drop_last = False
#dataloader = torch.utils.data.DataLoader(
# dataset,
# batch_size=self.batch_size,
# pin_memory=True,
# shuffle=True,
# drop_last=drop_last,
# num_workers=multiprocessing.cpu_count())
return dataset # dataloader
get_train_dataset = GenerateDatasets(
data_path=training_data_path,
batch_size=model_config['batch_size'],
is_training=True,
device=device,
cache_data_path=cache_data_path)
get_valid_dataset = GenerateDatasets(
data_path=training_data_path,
batch_size=model_config['batch_size'],
is_training=False,
device=device,
cache_data_path=cache_data_path)
train_dataset = get_train_dataset()
valid_dataset = get_valid_dataset()
###Output
WARNING:datasets.builder:Using custom data configuration default-597f3863485f3654
WARNING:datasets.builder:Reusing dataset parquet (/home/jupyter/gogolook/data/cache_data_dir/parquet/default-597f3863485f3654/0.0.0/03dda9603b6ba3760d9d286684a3d7d8ec00448c154f765795485acd3229ecba)
WARNING:datasets.builder:Using custom data configuration default-597f3863485f3654
WARNING:datasets.builder:Reusing dataset parquet (/home/jupyter/gogolook/data/cache_data_dir/parquet/default-597f3863485f3654/0.0.0/03dda9603b6ba3760d9d286684a3d7d8ec00448c154f765795485acd3229ecba)
###Markdown
Testing load from gcs
###Code
#import gcsfs
#from datasets import load_from_disk
#gcs = gcsfs.GCSFileSystem(project='data-research-216307')
#gcs_files_list = gcs.glob('gs://gogolook-ml-data-production/serve-dev/sms/data/experimental_jp_data/train_pretraining_data/*.parquet')
#gcs_files_list = [ "gs://" + path for path in gcs_files_list]
#dataset = load_from_disk(dataset_path="gs://gogolook-ml-data-production/serve-dev/sms/data/experimental_jp_data/train_pretraining_data/", fs=gcs)
# saves encoded_dataset to your s3 bucket
#train_dataset.save_to_disk('gcs://gogolook-ml-data-production/serve-dev/sms/data/experimental_jp_data/preprocessing_dataset', fs=gcs)
#train_dataset.save_to_disk('/home/jupyter/gogolook/data/jp_data/preprocessing_dataset/')
###Output
_____no_output_____
###Markdown
Streaming test
###Code
'''
get_files_list = glob.glob(os.path.join(str(experiment_train_data_path), "*.parquet"))
dataset = load_dataset('parquet', data_files=get_files_list[0], cache_dir=cache_data_path, split='train', streaming=True)
map_dataset = dataset.map(lambda example: (example["input_ids"], example["token_type_ids"], example["attention_mask"]), batched=True, batch_size=64)
shuffled_dataset = map_dataset.shuffle(buffer_size=100, seed=seed)
torch_dataset = shuffled_dataset.with_format("torch")
assert isinstance(torch_dataset, torch.utils.data.IterableDataset)
#sampler = torch.utils.data.Sampler(torch_dataset)
#batch_sampler = torch.utils.data.BatchSampler(sampler, 64, False)
def worker_init_fn(_):
worker_info = torch.utils.data.get_worker_info()
dataset = worker_info.dataset
worker_id = worker_info.id
split_size = len(dataset.data) // worker_info.num_workers
dataset.data = dataset.data[worker_id * split_size:(worker_id + 1) * split_size]
def worker_init_fn(worker_id):
... worker_info = torch.utils.data.get_worker_info()
... dataset = worker_info.dataset # the dataset copy in this worker process
... overall_start = dataset.start
... overall_end = dataset.end
... # configure the dataset to only process the split workload
... per_worker = int(math.ceil((overall_end - overall_start) / float(worker_info.num_workers)))
... worker_id = worker_info.id
... dataset.start = overall_start + worker_id * per_worker
... dataset.end = min(dataset.start + per_worker, overall_end)
dataloader = torch.utils.data.DataLoader(
torch_dataset,
batch_size=128,
pin_memory=True,
drop_last=False,
num_workers=multiprocessing.cpu_count())
def worker_init_fn(_):
worker_info = torch.utils.data.get_worker_info()
dataset = worker_info.dataset
worker_id = worker_info.id
split_size = 64 // worker_info.num_workers
dataset.data = dataset.data[worker_id * split_size:(worker_id + 1) * split_size]
dataloader = torch.utils.data.DataLoader(torch_dataset, batch_size=64, worker_init_fn=worker_init_fn, num_workers=multiprocessing.cpu_count())
'''
#get_train_dataset.get_files_list
#get_valid_dataset.get_files_list
#model_config['training_steps'] = len(train_dataloader) * model_config['epochs']
#if model_config['use_warmup']:
# model_config['warmup_steps'] = int(len(train_dataloader) * model_config['epochs'] * 0.1)
# model_config['decay_steps'] = len(train_dataloader) * model_config['epochs']
#else:
# model_config['warmup_steps'] = None
# model_config['decay_steps'] = None
torch.cuda.empty_cache()
albert_config = AlbertConfig.from_json_file(albert_zh_path / 'albert_config' / 'albert_config_tiny.json')
pretrained_model_name_or_path = 'voidful/albert_chinese_tiny'
albert_pretrain_model = AlbertForPreTraining.from_pretrained(
pretrained_model_name_or_path,
config=albert_config,
cache_dir=cache_models_path)
albert_pretrain_model.resize_token_embeddings(corpus_size)
albert_config
if model_config["n_gpu"] > 1:
print("Let's use", torch.cuda.device_count(), "GPUs!")
#device_ids = [idx for idx in range(torch.cuda.device_count())]
#albert_pretrain_model = nn.DataParallel(albert_pretrain_model, device_ids=device_ids)
###Output
Let's use 4 GPUs!
###Markdown
Define Optimizer
###Code
model_params = list(albert_pretrain_model.named_parameters())
no_decay = ["bias", "gamma", "beta", "LayerNorm.weight"]
optimizer_grounded_parameters_by_name = [
{'params': [n for n, p in model_params if not any(nd in n for nd in no_decay)],
'weight_decay_rate': 0.0 },
{'params': [n for n, p in model_params if any(nd in n for nd in no_decay)],
'weight_decay_rate': 0.0 }
]
optimizer_grounded_parameters_by_name
from torch.optim.lr_scheduler import _LRScheduler
class PolynomialDecay(_LRScheduler):
def __init__(self, optimizer, decay_steps, end_learning_rate=0.0001, power=0.5, cycle=False, last_epoch=-1, verbose=False):
if decay_steps <= 1.:
raise ValueError('max_decay_steps should be greater than 1.')
self.decay_steps = decay_steps
self.end_learning_rate = end_learning_rate
self.power = power
self.cycle = cycle
super(PolynomialDecay, self).__init__(optimizer, last_epoch, verbose)
def get_lr(self):
if not self._get_lr_called_within_step:
warnings.warn("To get the last learning rate computed by the scheduler, "
"please use `get_last_lr()`.")
#dtype = initial_learning_rate.dtype
#end_learning_rate = math_ops.cast(self.end_learning_rate, dtype)
#power = math_ops.cast(self.power, dtype)
#global_step_recomp = math_ops.cast(step, dtype)
#decay_steps_recomp = math_ops.cast(self.decay_steps, dtype)
global_step_recomp = self.last_epoch
decay_steps_recomp = self.decay_steps
if self.cycle:
if global_step_recomp == 0:
multiplier = 1.0
else:
multiplier = math.ceil(global_step_recomp / self.decay_steps)
decay_steps_recomp = decay_steps_recomp * multiplier
else:
global_step_recomp = min(global_step_recomp, decay_steps_recomp)
p = global_step_recomp / decay_steps_recomp
        ic(self.last_epoch, self.optimizer.param_groups[0]['lr'], p)
return [((group['lr'] - self.end_learning_rate) * math.pow(1 - p, self.power) + self.end_learning_rate) for group in self.optimizer.param_groups]
    def _get_closed_form_lr(self):
        p = min(self.last_epoch, self.decay_steps) / self.decay_steps
        return [(base_lr - self.end_learning_rate) * math.pow(1 - p, self.power) + self.end_learning_rate for base_lr in self.base_lrs]
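# Editor's sketch (commented out, not part of the original training code): how this
# PolynomialDecay scheduler would be wired to a throwaway optimizer. The _demo_* names
# are hypothetical and only illustrate the call pattern.
# _demo_model = torch.nn.Linear(4, 2)
# _demo_opt = torch.optim.SGD(_demo_model.parameters(), lr=1e-1)
# _demo_sched = PolynomialDecay(_demo_opt, decay_steps=1000, end_learning_rate=1e-3, power=0.5)
# for _ in range(5):
#     _demo_opt.step()
#     _demo_sched.step()  # lr follows the polynomial decay toward end_learning_rate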
from transformers import (
AdamW,
get_linear_schedule_with_warmup,
)
def get_optimizer(config: dict, model: PreTrainedModel, num_training_steps: int):
model_params = list(model.named_parameters())
no_decay = ["bias", "gamma", "beta", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{'params': [p for n, p in model_params if not any(nd in n for nd in no_decay)],
'weight_decay_rate': 1e-2 },
{'params': [p for n, p in model_params if any(nd in n for nd in no_decay)],
'weight_decay_rate': 0.0 }
]
optimizer = AdamW(optimizer_grouped_parameters, lr=config["learning_rate"], eps=config["epsilon"])
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=int(num_training_steps * 0.1), num_training_steps=num_training_steps
)
return optimizer, scheduler
#optimizer = torch.optim.Adam(
# params=optimizer_grounded_parameters,
# lr=model_config["learning_rate"],
# betas=(0.9, 0.98),
# weight_decay=config["weight_decay"],
# eps=config["adam_epsilon"])
#scheduler = LinearWarmupCosineAnnealingLR(
# optimizer,
# warmup_epochs=model_config['warmup_steps'],
# max_epochs=model_config['training_steps'],
# eta_min=model_config["end_learning_rate"])
#optimizer = optim.SGD(sms_model.parameters(), lr=model_config['init_learning_rate'], weight_decay=1e-4)
#optimizer = optim.SGD(filter(lambda p: p.requires_grad, sms_model.parameters()), lr=model_config['init_learning_rate'], weight_decay=1e-4)
#scheduler = CyclicLR(
# optimizer,
# base_lr=1e-5,
# max_lr=model_config['init_learning_rate'],
# step_size_up=model_config['training_steps'] * 1,
# mode='triangular2',
# scale_mode='cycle',
# cycle_momentum=False
#)
#if model_config["use_multi_gpus"]:
#optimizer = nn.DataParallel(optimizer, device_ids=device_ids)
# Define Metrics
from torchmetrics import MetricCollection
metric_collection = MetricCollection([
torchmetrics.Accuracy(num_classes=2, average='macro', multiclass=True, dist_sync_on_step=True, mdmc_average='global').to(device),
torchmetrics.Precision(num_classes=2, average='macro', multiclass=True, dist_sync_on_step=True, mdmc_average='global').to(device),
torchmetrics.Recall(num_classes=2, average='macro', multiclass=True, dist_sync_on_step=True, mdmc_average='global').to(device),
torchmetrics.F1(num_classes=2, average='macro', multiclass=True, dist_sync_on_step=True, mdmc_average='global').to(device)
], prefix='Train_')
val_metric_collection = MetricCollection([
torchmetrics.Accuracy(num_classes=2, average='macro', multiclass=True, dist_sync_on_step=True, mdmc_average='global').to(device),
torchmetrics.Precision(num_classes=2, average='macro', multiclass=True, dist_sync_on_step=True, mdmc_average='global').to(device),
torchmetrics.Recall(num_classes=2, average='macro', multiclass=True, dist_sync_on_step=True, mdmc_average='global').to(device),
torchmetrics.F1(num_classes=2, average='macro', multiclass=True, dist_sync_on_step=True, mdmc_average='global').to(device),
], prefix='Val_')
###Output
_____no_output_____
###Markdown
Training model
###Code
def set_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
def _sorted_checkpoints(config, checkpoint_prefix="checkpoint", use_mtime=False) -> List[str]:
if not os.path.isdir(config["output_dir"]):
os.makedirs(config["output_dir"], exist_ok=True)
ordering_and_checkpoint_path = []
glob_checkpoints = glob.glob(os.path.join(config["output_dir"], "{}-*".format(checkpoint_prefix)))
for path in glob_checkpoints:
if use_mtime:
ordering_and_checkpoint_path.append((os.path.getmtime(path), path))
else:
regex_match = re.match(".*{}-([0-9]+)".format(checkpoint_prefix), path)
if regex_match and regex_match.groups():
ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path))
checkpoints_sorted = sorted(ordering_and_checkpoint_path)
checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted]
return checkpoints_sorted
def _rotate_checkpoints(config, checkpoint_prefix="checkpoint", use_mtime=False) -> None:
if not config["save_total_limit"]:
return
if config["save_total_limit"] <= 0:
return
# Check if we should delete older checkpoint(s)
checkpoints_sorted = _sorted_checkpoints(config, checkpoint_prefix, use_mtime)
if len(checkpoints_sorted) <= config["save_total_limit"]:
return
number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - config["save_total_limit"])
checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete]
for checkpoint in checkpoints_to_be_deleted:
logger.info("Deleting older checkpoint [{}] due to args.save_total_limit".format(checkpoint))
shutil.rmtree(checkpoint)
###Output
_____no_output_____
###Markdown
Init wandb
###Code
#wandb.tensorboard.patch(root_logdir=str(tensorboard_path / run_id))
wandb.init(
project=project,
group=group_tag,
job_type=job_type,
name=run_id,
notes=method_tag,
tags=addition_tag,
sync_tensorboard=True,
config={**model_config},
reinit=True
)
wandb_config = wandb.config
###Output
[34m[1mwandb[0m: Currently logged in as: [33myuyuliao20[0m (use `wandb login --relogin` to force relogin)
[34m[1mwandb[0m: wandb version 0.12.1 is available! To upgrade, please run:
[34m[1mwandb[0m: $ pip install wandb --upgrade
###Markdown
Define training steps
###Code
from tqdm import tqdm, trange, tqdm_notebook
def training_step(
config: dict,
train_dataset: torch.utils.data.Dataset,
eval_dataset: torch.utils.data.Dataset,
model: PreTrainedModel,
device: str,
init_wandb: object
):
if args.local_rank in [-1, 0]:
tb_writer = SummaryWriter()
train_batch_size = config["per_gpu_train_batch_size"] * max(1, config["n_gpu"])
ic(train_batch_size)
train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
train_dataloader = DataLoader(
train_dataset,
sampler=train_sampler,
batch_size=train_batch_size,
pin_memory=True,
drop_last=True,
num_workers=multiprocessing.cpu_count())
if config["max_steps"] > 0:
t_total = config["max_steps"]
config["num_train_epochs"] = config["max_steps"] // (len(train_dataloader) // config["gradient_accumulation_steps"]) + 1
else:
t_total = len(train_dataloader) // config["gradient_accumulation_steps"] * config["num_train_epochs"]
optimizer, scheduler = get_optimizer(config, model, t_total)
if config["n_gpu"] > 1:
model = torch.nn.DataParallel(model)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
)
# Train !
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", config["num_train_epochs"])
logger.info(" Instantaneous batch size per GPU = %d", config["per_gpu_train_batch_size"])
logger.info(
" Total train batch size (w. parallel, distributed & accumulation) = %d",
train_batch_size * config["gradient_accumulation_steps"]* (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
)
logger.info(" Gradient Accumulation steps = %d", config["gradient_accumulation_steps"])
logger.info(" Total optimization steps = %d", t_total)
set_seed(config["seed"])
global_step = 0
epochs_trained = 0
train_loss, logging_loss = 0.0, 0.0
#train_iterator = trange(
# epochs_trained, int(config["num_train_epochs"]), desc="Epoch", disable=args.local_rank not in [-1, 0]
#)
train_iterator = tqdm_notebook(range(
epochs_trained, int(config["num_train_epochs"])), desc="Epoch", disable=args.local_rank not in [-1, 0]
)
scaler = GradScaler()
model.train()
for epoch in train_iterator:
#epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
epoch_iterator = tqdm_notebook(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
if args.local_rank != -1:
train_sampler.set_epoch(epoch)
for step, batch in enumerate(epoch_iterator):
input_ids = batch['input_ids'].to(device)
attention_mask = batch['attention_mask'].to(device)
token_type_ids = batch['token_type_ids'].to(device)
mlm_labels = batch['masked_lm_labels'].to(device)
sop_labels = batch['next_sentence_labels'].to(device)
with autocast():
# Forward pass
outputs = model(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
labels=mlm_labels,
sentence_order_label=sop_labels
)
assert outputs.prediction_logits.dtype is torch.float16
if config["n_gpu"] > 1:
loss = outputs.loss.mean() # mean() to average on multi-gpu parallel training
if config["gradient_accumulation_steps"] > 1:
loss = loss / config["gradient_accumulation_steps"]
assert loss.dtype is torch.float32
scaler.scale(loss).backward()
train_loss += loss.item()
if (step + 1) % config["gradient_accumulation_steps"] == 0:
#torch.nn.utils.clip_grad_norm_(model.parameters(), config["max_grad_norm"])
# Backward pass
# Zero gradients, perform a backward pass, and update the weights.
scaler.step(optimizer)
scaler.update()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
if (args.local_rank in [-1, 0]) and (config["logging_steps"] > 0) and (global_step % config["logging_steps"] == 0):
ic(global_step % config["logging_steps"])
# Log metrics
if (args.local_rank == -1 and config["evaluate_during_training"]):
# Only evaluate when single GPU otherwise metrics may not average well
results = evaluate_step(
config=config,
model=model,
dataset=eval_dataset,
device=device,
init_wandb=init_wandb
)
for key, value in results.items():
tb_writer.add_scalar("eval_{}".format(key), value, global_step)
#last_lr = optimizer.param_groups[0]['lr']
#last_lr = scheduler.optimizer.param_groups[0]["lr"]
print("=== Sent event to wandb===")
init_wandb.log({'lr': scheduler.get_last_lr()[0]}, step=global_step)
show_logs(
_wandb=init_wandb,
loss=(train_loss - logging_loss) / config["logging_steps"],
step=global_step
)
tb_writer.add_scalar("lr", scheduler.get_last_lr()[0], global_step)
tb_writer.add_scalar("loss", (train_loss - logging_loss) / config["logging_steps"], global_step)
logging_loss = train_loss
if (args.local_rank in [-1, 0]) and (config["save_steps"] > 0) and (global_step % config["save_steps"] == 0):
checkpoint_prefix = "checkpoint"
# Save model checkpoint
output_dir = os.path.join(config["output_dir"], "{}-{}".format(checkpoint_prefix, global_step))
os.makedirs(output_dir, exist_ok=True)
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
model_to_save.save_pretrained(output_dir)
logger.info("Saving model checkpoint to %s", output_dir)
_rotate_checkpoints(config, checkpoint_prefix)
if config["max_steps"]> 0 and global_step > config["max_steps"]:
epoch_iterator.close()
break
if config["max_steps"] > 0 and global_step > config["max_steps"]:
train_iterator.close()
break
if args.local_rank in [-1, 0]:
tb_writer.close()
return global_step, train_loss / global_step
def evaluate_step(
config: dict,
model: PreTrainedModel,
dataset: torch.utils.data.Dataset,
device: str,
init_wandb: object,
prefix: Optional[str]="") -> dict:
eval_output_dir = config["output_dir"]
if args.local_rank in [-1, 0]:
os.makedirs(eval_output_dir, exist_ok=True)
eval_batch_size = config["per_gpu_eval_batch_size"] * max(1, config["n_gpu"])
eval_sampler = SequentialSampler(dataset)
eval_dataloader = DataLoader(
dataset,
sampler=eval_sampler,
batch_size=eval_batch_size,
pin_memory=True,
drop_last=False,
num_workers=multiprocessing.cpu_count())
if config["n_gpu"] > 1:
model = torch.nn.DataParallel(model)
# Eval!
logger.info("***** Running evaluation {} *****".format(prefix))
logger.info(" Num examples = %d", len(dataset))
logger.info(" Batch size = %d", eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
model.eval()
for batch in tqdm_notebook(eval_dataloader, desc="Evaluating"):
input_ids = batch['input_ids'].to(device)
attention_mask = batch['attention_mask'].to(device)
token_type_ids = batch['token_type_ids'].to(device)
mlm_labels = batch['masked_lm_labels'].to(device)
sop_labels = batch['next_sentence_labels'].to(device)
with torch.no_grad():
outputs = model(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
labels=mlm_labels,
sentence_order_label=sop_labels
)
loss = outputs.loss.mean()
eval_loss += loss.item()
nb_eval_steps += 1
eval_loss = eval_loss / nb_eval_steps
perplexity = torch.exp(torch.tensor(eval_loss))
result = {
"loss": eval_loss,
"perplexity": perplexity
}
show_logs(init_wandb, eval_loss, nb_eval_steps, prefix="Eval", perplexity=perplexity.item())
return result
'''
def training_step(model, input_ids, attention_mask, token_type_ids, mlm_labels, sop_labels, scaler, use_multi_gpus=False):
with autocast():
# Forward pass
outputs = model(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
labels=mlm_labels,
sentence_order_label=sop_labels
)
assert outputs.prediction_logits.dtype is torch.float16
loss = outputs.loss.mean()
assert loss.dtype is torch.float32
# Backward pass
# Zero gradients, perform a backward pass, and update the weights.
optimizer.zero_grad()
scaler.scale(loss).backward()
#if use_multi_gpus:
# scaler.step(optimizer.module)
#else:
scaler.step(optimizer)
scaler.update()
#torch.nn.utils.clip_grad_norm_(optimizer_grounded_parameters, max_norm=0.5)
#if epoch > swa_start:
# swa_model.update_parameters(model)
# swa_scheduler.step()
#else:
# scheduler.step()
return loss
@torch.no_grad()
def validataion_step(model, input_ids, attention_mask, token_type_ids, mlm_labels, sop_labels):
with autocast():
outputs = model(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
labels=mlm_labels,
sentence_order_label=sop_labels
)
loss = outputs.loss.mean()
return loss
'''
###Output
_____no_output_____
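The loop in `training_step` combines automatic mixed precision with gradient accumulation. A minimal, standalone sketch of that pattern (toy model and `accum_steps` are illustrative stand-ins for the ALBERT model and `config["gradient_accumulation_steps"]`; assumes a CUDA device):
```python
import torch
from torch.cuda.amp import autocast, GradScaler

demo_model = torch.nn.Linear(16, 2).cuda()
demo_opt = torch.optim.AdamW(demo_model.parameters(), lr=1e-4)
demo_scaler = GradScaler()
accum_steps = 4  # stand-in for config["gradient_accumulation_steps"]

for step in range(100):
    xb = torch.randn(8, 16, device="cuda")
    yb = torch.randint(0, 2, (8,), device="cuda")
    with autocast():                              # forward pass runs in float16 where safe
        loss = torch.nn.functional.cross_entropy(demo_model(xb), yb)
    loss = loss / accum_steps                     # scale so accumulated grads match one large batch
    demo_scaler.scale(loss).backward()            # backward on the scaled loss
    if (step + 1) % accum_steps == 0:
        demo_scaler.step(demo_opt)                # unscales grads; skips the step on inf/nan
        demo_scaler.update()
        demo_opt.zero_grad()
```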
###Markdown
Training
###Code
print('[RUN ID]: {}'.format(run_id))
torch.cuda.empty_cache()
use_epoch_tracking = False
use_step_tracking = True
#wandb.watch(sms_model, log="all", log_freq=1000)
def show_logs(_wandb, loss, step, is_epoch=False, prefix='Train', **kwargs):
loss = float(loss)
if is_epoch:
_wandb.log({"epoch": step, f"{prefix}_loss": loss}, step=step)
else:
_wandb.log({f"{prefix}_step_loss": loss}, step=step)
#print(f"{prefix} loss after " + str(example_ct).zfill(5) + f" examples: {loss:.3f}")
if "perplexity" in kwargs.keys():
_wandb.log({f"{prefix}_perplexity": kwargs["perplexity"]}, step=step)
def save_model(model, save_model_path):
logging.info("[INFO] Start to save model ...")
#if not save_model_path.parent.exists():
# save_model_path.parent.mkdir()
torch.save(model.state_dict(), save_model_path)
start_time = time.time()
albert_pretrain_model.to(device)
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() # Non-master processes wait here so that only the first process downloads the model & vocab
logger.info("Training/evaluation parameters %s", args)
global_step, tr_loss = training_step(
config=model_config,
train_dataset=train_dataset,
eval_dataset=valid_dataset,
model=albert_pretrain_model,
device=device,
init_wandb=wandb)
ic(" global_step = %s, average loss = %s", global_step, tr_loss)
end_time = time.time()
each_steps_compute_time = (end_time - start_time)
print(each_steps_compute_time)
'''
print('[RUN ID]: {}'.format(run_id))
torch.cuda.empty_cache()
use_epoch_tracking = False
use_step_tracking = True
#wandb.watch(sms_model, log="all", log_freq=1000)
def show_logs(loss, step, is_epoch=False, prefix='Train', **kwargs):
loss = float(loss)
if is_epoch:
wandb.log({"epoch": step, f"{prefix}_loss": loss}, step=step)
else:
wandb.log({f"{prefix}_step_loss": loss}, step=step)
#print(f"{prefix} loss after " + str(example_ct).zfill(5) + f" examples: {loss:.3f}")
if "perplexity" in kwargs.keys():
wandb.log({f"{prefix}_perplexity": kwargs["perplexity"]}, step=step)
def save_model(model, save_model_path):
logging.info("[INFO] Start to save model ...")
#if not save_model_path.parent.exists():
# save_model_path.parent.mkdir()
torch.save(model.state_dict(), save_model_path)
# Creates a GradScaler once at the beginning of training.
scaler = GradScaler()
for epoch in tqdm(range(model_config['epochs'])): # model_config['epochs']
start_time = time.time()
train_batch_loss = 0
valid_batch_loss = 0
train_perplexity = 0
valid_perplexity = 0
# Training Step
albert_pretrain_model = albert_pretrain_model.train()
for step, train_batch in tqdm(enumerate(train_dataloader),
dynamic_ncols=False,
bar_format="{n_fmt}/{total_fmt}{bar} ETA: {remaining}s - {desc}",
total=len(train_dataloader),
leave=True,
unit='steps'):
input_ids = train_batch['input_ids'].to(device)
attention_mask = train_batch['attention_mask'].to(device)
token_type_ids = train_batch['token_type_ids'].to(device)
mlm_labels = train_batch['masked_lm_labels'].to(device)
sop_labels = train_batch['next_sentence_labels'].to(device)
train_loss = training_step(
model=albert_pretrain_model,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
mlm_labels=mlm_labels,
sop_labels=sop_labels,
scaler=scaler,
use_multi_gpus=model_config["use_multi_gpus"]
)
scheduler.step()
train_batch_loss += train_loss.item()
train_perplexity += torch.exp(train_loss)
#if model_config["use_multi_gpus"]:
# last_lr = optimizer.module.param_groups[0]['lr']
#else:
#last_lr = optimizer.param_groups[0]['lr']
last_lr = scheduler.optimizer.param_groups[0]["lr"]
if use_step_tracking:
record_step = (step + 1) + (len(train_dataloader)) * epoch
wandb.log({'learning_rate': last_lr}, step=record_step)
show_logs(
train_batch_loss / record_step,
record_step,
perplexity=train_perplexity.item() / record_step)
save_model_checkpoint_path = str(save_model_path / f'{wandb.run.name}_{epoch}_model_weight.pt')
save_model(albert_pretrain_model, save_model_checkpoint_path)
if use_epoch_tracking:
train_epoch_loss = train_batch_loss / step
wandb.log({'learning_rate': last_lr}, step=epoch)
show_log(train_epoch_loss, epoch, is_epoch=True)
end_time = time.time()
each_steps_compute_time = (end_time - start_time)
print(each_steps_compute_time)
'''
wandb.finish()
###Output
_____no_output_____
###Markdown
Save model
###Code
save_models_path = main_model_path / wandb.run.name
if not save_models_path.exists():
save_models_path.mkdir()
torch.save({
'epoch': epoch,
'model_state_dict': albert_pretrain_model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
}, str(save_models_path / 'jp_pretrain_model.pt'))
torch.save(albert_pretrain_model.state_dict(), str(save_models_path / 'jp_pretrain_model_weight.pt'))
checkpoint = torch.load(str(save_models_path / 'jp_pretrain_model_weight.pt'))
albert_pretrain_model.load_state_dict(checkpoint)
albert_pretrain_model.module.state_dict().keys()
checkpoint.keys()
assert 1 == 2 # deliberate stop so the exploratory "Test" cells below are not executed by "Run All"
###Output
_____no_output_____
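Note that the state-dict keys inspected above carry a `module.` prefix when the weights were saved from a `DataParallel`-wrapped model. A hedged sketch of loading such a checkpoint into an un-wrapped model (`bare_model` below is hypothetical, not defined in this notebook):
```python
# Strip the DataParallel "module." prefix before loading into a bare model.
state_dict = torch.load(str(save_models_path / 'jp_pretrain_model_weight.pt'), map_location='cpu')
cleaned = {(k[len('module.'):] if k.startswith('module.') else k): v
           for k, v in state_dict.items()}
# bare_model.load_state_dict(cleaned)   # bare_model: an un-wrapped AlbertForPreTraining instance
```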
###Markdown
Test
###Code
print('[RUN ID]: {}'.format(run_id))
torch.cuda.empty_cache()
use_epoch_tracking = False
use_step_tracking = True
#wandb.watch(sms_model, log="all", log_freq=1000)
def show_logs(loss, step, is_epoch=False, prefix='Train', **kwargs):
loss = float(loss)
if is_epoch:
wandb.log({"epoch": step, f"{prefix}_loss": loss}, step=step)
else:
wandb.log({f"{prefix}_step_loss": loss}, step=step)
#print(f"{prefix} loss after " + str(example_ct).zfill(5) + f" examples: {loss:.3f}")
if "perplexity" in kwargs.keys():
wandb.log({f"{prefix}_perplexity": kwargs["perplexity"]}, step=step)
# Creates a GradScaler once at the beginning of training.
scaler = GradScaler()
for epoch in tqdm(range(model_config['epochs'])): # model_config['epochs']
start_time = time.time()
train_batch_loss = 0
valid_batch_loss = 0
train_perplexity = 0
valid_perplexity = 0
# Training Step
albert_pretrain_model = albert_pretrain_model.train()
for step, train_batch in tqdm(enumerate(train_dataloader),
dynamic_ncols=False,
bar_format="{n_fmt}/{total_fmt}{bar} ETA: {remaining}s - {desc}",
total=len(train_dataloader),
leave=True,
unit='steps'):
input_ids = train_batch['input_ids'].to(device)
attention_mask = train_batch['attention_mask'].to(device)
token_type_ids = train_batch['token_type_ids'].to(device)
mlm_labels = train_batch['masked_lm_labels'].to(device)
sop_labels = train_batch['next_sentence_labels'].to(device)
train_loss = training_step(
model=albert_pretrain_model,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
mlm_labels=mlm_labels,
sop_labels=sop_labels,
scaler=scaler,
use_multi_gpus=model_config["use_multi_gpus"]
)
scheduler.step()
train_batch_loss += train_loss.item()
train_perplexity += torch.exp(train_loss)
#if model_config["use_multi_gpus"]:
# last_lr = optimizer.module.param_groups[0]['lr']
#else:
#last_lr = optimizer.param_groups[0]['lr']
last_lr = scheduler.optimizer.param_groups[0]["lr"]
if use_step_tracking:
record_step = (step + 1) * (epoch + 1)
wandb.log({'learning_rate': last_lr}, step=record_step)
show_logs(
train_batch_loss / record_step,
record_step,
perplexity=train_perplexity.item() / record_step)
if use_epoch_tracking:
train_epoch_loss = train_batch_loss / step
wandb.log({'learning_rate': last_lr}, step=epoch)
show_logs(train_epoch_loss, epoch, is_epoch=True)
#train_metric_records = metric_collection.compute()
#wandb.log(train_metric_records, step=epoch)
# Validation Step
albert_pretrain_model = albert_pretrain_model.eval()
for step, valid_batch in tqdm(enumerate(val_dataloader),
dynamic_ncols=False,
bar_format="{n_fmt}/{total_fmt}{bar} ETA: {remaining}s - {desc}",
total=len(val_dataloader),
leave=True,
unit='steps'):
input_ids = valid_batch['input_ids'].to(device)
attention_mask = valid_batch['attention_mask'].to(device)
token_type_ids = valid_batch['token_type_ids'].to(device)
mlm_labels = valid_batch['masked_lm_labels'].to(device)
sop_labels = valid_batch['next_sentence_labels'].to(device)
valid_loss = validataion_step(
model=albert_pretrain_model,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
mlm_labels=mlm_labels,
sop_labels=sop_labels
)
valid_batch_loss += valid_loss.item()
valid_perplexity += torch.exp(valid_loss)
if use_step_tracking:
record_step = (step + 1) * (epoch + 1)
show_logs(
valid_batch_loss / record_step,
record_step,
prefix='valid',
perplexity=valid_perplexity.item() / record_step)
#sk_metrics = sklearn_metrics(val_outputs, labels, 'train')
#ic(sk_metrics)
#ic(val_metric_collection(outputs, labels).compute())
#ic(val_metric_collection(outputs, labels))
if use_epoch_tracking:
valid_epoch_loss = valid_batch_loss / step
show_logs(valid_epoch_loss, epoch, is_epoch=True, prefix='Val')
#val_metric_records = val_metric_collection.compute()
#wandb.log(val_metric_records, step=epoch)
loss_template = ("Epoch {}/{} - {:.0f}s {:.0f}ms/step - lr:{:} - loss: {:.6f} - val_loss: {:.6f}")
#metrics_template = (
# """
# categorical_accuracy: {:.4f} - f1_score: {:.4f} - multi_precision: {:.4f} - multi_recall: {:.4f}
# val_categorical_accuracy: {:.4f} - val_f1_score: {:.4f} - val_multi_precision: {:.4f} - val_multi_recall: {:.4f}
# """
#)
end_time = time.time()
each_steps_compute_time = (end_time - start_time)
print(loss_template.format(
epoch,
model_config['epochs'],
each_steps_compute_time,
each_steps_compute_time * 1000 / model_config['training_steps'],
last_lr,
train_epoch_loss,
valid_epoch_loss)
)
#print(metrics_template.format(
# train_metric_records['Train_Accuracy'],
# train_metric_records['Train_F1'],
# train_metric_records['Train_Precision'],
# train_metric_records['Train_Recall'],
# val_metric_records['Val_Accuracy'],
# val_metric_records['Val_F1'],
# val_metric_records['Val_Precision'],
# val_metric_records['Val_Recall']
#))
if use_epoch_tracking:
metric_collection.reset()
val_metric_collection.reset()
%matplotlib inline
import math
import warnings
import matplotlib.pyplot as plt
from torch.optim.lr_scheduler import _LRScheduler
from torch import nn
from torch import cuda
from torch import optim
from torch.optim.swa_utils import AveragedModel, SWALR
from torch.optim.lr_scheduler import CosineAnnealingLR, CyclicLR
from pl_bolts.optimizers.lr_scheduler import LinearWarmupCosineAnnealingLR
class NeuralNetwork(nn.Module):
def __init__(self):
super(NeuralNetwork, self).__init__()
self.flatten = nn.Flatten()
self.linear_relu_stack = nn.Sequential(
nn.Linear(28*28, 512),
nn.ReLU(),
nn.Linear(512, 512),
nn.ReLU(),
nn.Linear(512, 10),
nn.ReLU()
)
def forward(self, x):
x = self.flatten(x)
logits = self.linear_relu_stack(x)
return logits
net = NeuralNetwork()
optimizer = optim.SGD(net.parameters(), lr = 1e-2)
lambda1 = lambda epoch: 0.2 if epoch % 5 == 0 else 1
lambda2 = lambda epoch: 0.2
#scheduler = optim.lr_scheduler.MultiplicativeLR(optimizer, lr_lambda = lambda2)
#scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[5,10,15], gamma=0.1)
#scheduler = optim.lr_scheduler.ExponentialLR(optimizer, gamma = 0.9)
#scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda step: PolynomialDecay(step))
class PolynomialDecay(_LRScheduler):
def __init__(self, optimizer, decay_steps, end_learning_rate=0.0001, power=0.5, cycle=False, last_epoch=-1, verbose=False):
if decay_steps <= 1.:
raise ValueError('decay_steps should be greater than 1.')
self.decay_steps = decay_steps
self.end_learning_rate = end_learning_rate
self.power = power
self.cycle = cycle
super(PolynomialDecay, self).__init__(optimizer, last_epoch, verbose)
def get_lr(self):
if not self._get_lr_called_within_step:
warnings.warn("To get the last learning rate computed by the scheduler, "
"please use `get_last_lr()`.")
#dtype = initial_learning_rate.dtype
#end_learning_rate = math_ops.cast(self.end_learning_rate, dtype)
#power = math_ops.cast(self.power, dtype)
#global_step_recomp = math_ops.cast(step, dtype)
#decay_steps_recomp = math_ops.cast(self.decay_steps, dtype)
global_step_recomp = self.last_epoch
decay_steps_recomp = self.decay_steps
if self.cycle:
if global_step_recomp == 0:
multiplier = 1.0
else:
multiplier = math.ceil(global_step_recomp / self.decay_steps)
decay_steps_recomp = decay_steps_recomp * multiplier
else:
global_step_recomp = min(global_step_recomp, decay_steps_recomp)
p = global_step_recomp / decay_steps_recomp
#c(self.last_epoch, optimizer.param_groups[0]['lr'], p)
return [((group['lr'] - self.end_learning_rate) * math.pow(1 - p, self.power) + self.end_learning_rate) for group in self.optimizer.param_groups]
def _get_closed_form_lr(self):
p = min(self.last_epoch, self.decay_steps) / self.decay_steps
return [(base_lr - self.end_learning_rate) * math.pow(1 - p, self.power) + self.end_learning_rate for base_lr in self.base_lrs]
def polynomial_decay_scale_fun(global_steps, initial_learning_rate=1e-2, decay_steps=100, power=0.5, end_learning_rate=1e-5, cycle=False):
if cycle:
if global_steps == 0:
multiplier = 1.0
else:
multiplier = math.ceil(global_steps / decay_steps)
decay_steps = decay_steps * multiplier
else:
global_steps = min(global_steps, decay_steps)
p = global_steps / decay_steps
#ic(global_steps, p)
return (initial_learning_rate - end_learning_rate) * math.pow(1 - p, power) + end_learning_rate
#optimizer = optim.SGD(net.parameters(), lr=1e-2)
optimizer = optim.Adam(net.parameters(), lr=1e-3)
#scheduler = PolynomialDecay(optimizer, decay_steps=1000, end_learning_rate=1e-5)
scheduler = LinearWarmupCosineAnnealingLR(
optimizer,
warmup_epochs=model_config['warmup_steps'],
max_epochs=model_config['training_steps'],
eta_min=model_config["end_learning_rate"])
#scheduler = optim.lr_scheduler.CyclicLR(
# optimizer,
# base_lr=1e-5,
# max_lr=1e-2,
# step_size_up=20,
# scale_fn=polynomial_decay_scale_fun,
# mode='triangular2',
# scale_mode='cycle',
# cycle_momentum=False)
iteration = model_config['epochs']
scheduler_lr_list = []
for epoch in range(1, iteration):
scheduler.step()
#print(epoch, scheduler.get_last_lr()[0])
scheduler_lr_list.append(scheduler.get_last_lr()[0])
plt.xlabel('Training Iterations')
plt.ylabel('Learning Rate')
plt.title("CLR - 'triangular' Policy")
plt.plot(range(1, iteration), scheduler_lr_list)
###Output
_____no_output_____ |
translation_final_lin.ipynb | ###Markdown
Project for Machine Learning
- Key words: `NMT`, `Transformer`, `PyTorch`, `Multi30k`
###Code
import math
import time
import torch
import torchtext
import torch.nn as nn
from torch.utils.data import DataLoader
from torchtext.data.utils import get_tokenizer
from torch.optim.lr_scheduler import StepLR,LambdaLR
import matplotlib.pyplot as plt
import numpy as np
from utils import *
from my_transformer_lin import *
%matplotlib inline
SEED = 42
np.random.seed(SEED)
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
torch.backends.cudnn.deterministic = True
# torch.use_deterministic_algorithms(True)
###Output
UsageError: Line magic function `%` not found.
###Markdown
Data Preprocessing
###Code
pth_base = "./.data/multi30k/task1/raw/"
train_pths = ('train.de', 'train.en')
val_pths = ('val.de', 'val.en')
test_pths = ('test_2016_flickr.de', 'test_2016_flickr.en')
train_filepaths = [(pth_base + pth) for pth in train_pths]
val_filepaths = [(pth_base + pth) for pth in val_pths]
test_filepaths = [(pth_base + pth) for pth in test_pths]
de_tokenizer = get_tokenizer('spacy', language='de_core_news_sm')
en_tokenizer = get_tokenizer('spacy', language='en_core_web_sm')
de_vocab = build_vocab(train_filepaths[0], de_tokenizer, min_freq=3)
en_vocab = build_vocab(train_filepaths[1], en_tokenizer, min_freq=3)
train_data = data_process(train_filepaths, de_vocab, en_vocab, de_tokenizer, en_tokenizer)
val_data = data_process(val_filepaths, de_vocab, en_vocab, de_tokenizer, en_tokenizer)
test_data = data_process(test_filepaths, de_vocab, en_vocab, de_tokenizer, en_tokenizer)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(device)
print("train size:", len(train_data))
print("val size:", len(val_data))
print("test size:", len(test_data))
print("de vocab size:", len(de_vocab))
print("en vocab size:", len(en_vocab))
###Output
cpu
train size: 29000
val size: 1014
test size: 1000
de vocab size: 5374
en vocab size: 4555
###Markdown
Hyper-parameter Tuning
###Code
SRC_VOCAB_SIZE = len(de_vocab)
TGT_VOCAB_SIZE = len(en_vocab)
BATCH_SIZE = 128
NUM_ENCODER_LAYERS = 3 # no help, 3 is better
NUM_DECODER_LAYERS = 3 # no help, 3 is better
EMB_SIZE = 256
FFN_HID_DIM = 512
NHEAD = 8 # no help, hard converge
DROPOUT = 0.1
NUM_EPOCHS = 50
LEARNING_RATE = 0.0001
# LR_STEP = 30
# warmup_steps = 4000
model_name = "./models/transformer-6-3-1"
PAD_IDX = de_vocab['<pad>']
BOS_IDX = de_vocab['<bos>']
EOS_IDX = de_vocab['<eos>']
train_iter = DataLoader(train_data, batch_size=BATCH_SIZE, shuffle=True, collate_fn=get_collate_fn(PAD_IDX,BOS_IDX,EOS_IDX))
valid_iter = DataLoader(val_data, batch_size=BATCH_SIZE, shuffle=True, collate_fn=get_collate_fn(PAD_IDX,BOS_IDX,EOS_IDX))
test_iter = DataLoader(test_data, batch_size=BATCH_SIZE, shuffle=True, collate_fn=get_collate_fn(PAD_IDX,BOS_IDX,EOS_IDX))
###Output
_____no_output_____
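`get_collate_fn` comes from the local `utils` module, which is not shown here. As an assumption about what it likely does (not the actual code), a typical collate function for this setup wraps each sentence pair with BOS/EOS tokens and pads the batch:
```python
import torch
from torch.nn.utils.rnn import pad_sequence

def example_collate_fn(pad_idx, bos_idx, eos_idx):
    # Illustrative only: assumes each dataset item is a (de_tensor, en_tensor) pair of token ids.
    def collate(batch):
        de_batch, en_batch = [], []
        for de_item, en_item in batch:
            de_batch.append(torch.cat([torch.tensor([bos_idx]), de_item, torch.tensor([eos_idx])]))
            en_batch.append(torch.cat([torch.tensor([bos_idx]), en_item, torch.tensor([eos_idx])]))
        # pad_sequence defaults to (seq_len, batch) layout, which matches a vanilla Transformer.
        return pad_sequence(de_batch, padding_value=pad_idx), pad_sequence(en_batch, padding_value=pad_idx)
    return collate
```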
###Markdown
Model Setup
###Code
transformer = MyTf(NUM_ENCODER_LAYERS, NUM_DECODER_LAYERS,
EMB_SIZE, NHEAD, SRC_VOCAB_SIZE, TGT_VOCAB_SIZE, PAD_IDX,
FFN_HID_DIM, DROPOUT)
transformer = transformer.to(device)
# lrate = lambda step_num: EMB_SIZE**-0.5 * np.minimum(step_num**-0.5,step_num*warmup_steps**-1.5)
# scheduler = StepLR(optimizer, step_size=LR_STEP, gamma=0.1)
loss_fn = torch.nn.CrossEntropyLoss(ignore_index=PAD_IDX)
optimizer = torch.optim.Adam(transformer.parameters(), lr=LEARNING_RATE, betas=(0.9, 0.98), eps=1e-9)
print(f'The model has {count_parameters(transformer):,} trainable parameters')
###Output
The model has 7,667,147 trainable parameters
###Markdown
Train and Evaluate
###Code
train_loss_curve = []
val_loss_curve = []
min_val_loss = 999
steps = 1
a = (torch.triu(torch.ones((1, 1))) == 1).transpose(0, 1)
torch.save(transformer, model_name+"-best.pth.tar")
for epoch in range(1, NUM_EPOCHS+1):
start_time = time.time()
train_loss = train(transformer, train_iter, optimizer, loss_fn, device)
end_time = time.time()
val_loss = evaluate(transformer, valid_iter, loss_fn, device)
# scheduler.step()
if val_loss < min_val_loss:
min_val_loss = val_loss
transformer.eval()
torch.save(transformer, model_name+"-best.pth.tar")
if epoch % 30 == 0:
transformer.eval()
torch.save(transformer, model_name+"-ckpt-"+str(epoch)+".pth.tar")
train_loss_curve.append(train_loss)
val_loss_curve.append(val_loss)
print((f"Epoch: {epoch}, Train loss: {train_loss:.3f}, Val loss: {val_loss:.3f}, Epoch time = {(end_time - start_time):.3f}s"))
print("min val loss:",min_val_loss)
plt.plot(train_loss_curve)
plt.plot(val_loss_curve)
plt.grid()
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.legend(("train loss","val loss"))
plt.savefig("./images/" + model_name.split(sep="/")[-1] + ".png")
plt.show()
translate(transformer, "eine gruppe von menschen steht vor einem iglu .", de_vocab, en_vocab,de_tokenizer, BOS_IDX, EOS_IDX, device)
###Output
_____no_output_____
###Markdown
Save the Model
###Code
transformer.eval()
torch.save(transformer, model_name + ".pth.tar")
###Output
_____no_output_____
###Markdown
Calculate the BLEU Score
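Before scoring the full test set, it helps to see the input format that `torchtext`'s `bleu_score` expects: candidates as lists of tokens, and references as lists of lists of tokens (one inner list per reference translation). A small self-contained example with made-up sentences:
```python
from torchtext.data.metrics import bleu_score

candidates = [['a', 'man', 'is', 'riding', 'a', 'horse', '.']]
references = [[['a', 'man', 'is', 'riding', 'a', 'brown', 'horse', '.']]]
print(f"toy BLEU = {bleu_score(candidates, references) * 100:.2f}")
```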
###Code
'''load reference'''
with open(test_filepaths[0], 'r', encoding='utf8') as f:
test_data_ = f.readlines()
'''make predictions'''
predictions = []
for data in test_data_:
temp_trans = translate(transformer, data.lower(), de_vocab, en_vocab, de_tokenizer, BOS_IDX, EOS_IDX, device)
predictions.append(temp_trans[1:-3]+" . \n")
'''update predictions.txt'''
with open("predictions.txt",'w+') as f:
f.writelines(predictions)
'''eliminate <unk>'''
# for i,pre in enumerate(predictions):
# predictions[i] = pre.replace("<unk>"," ")
# '''update predictions.txt'''
# with open("predictions.txt",'w+') as f:
# f.writelines(predictions)
! perl ./multi-bleu.perl -lc reference.txt < predictions.txt
with open(model_name + ".txt",'w+') as f:
f.writelines(predictions)
from torchtext.data.metrics import bleu_score
references_corpus = []
candidate_corpus = []
'''update reference.txt'''
with open(test_filepaths[1], 'r', encoding='utf8') as f:
reference = f.readlines()
for i in range(len(reference)):
reference[i] = " ".join(en_tokenizer(reference[i])).lower()
for pred,ref in zip(predictions, reference):
temp = pred.rstrip(" \n").split(" ")
candidate_corpus.append(temp)
temp = ref.rstrip(" \n").split(" ")
references_corpus.append([temp])
bleu_torchtext = bleu_score(candidate_corpus, references_corpus)
print(f'BLEU score = {bleu_torchtext*100:.2f}')
###Output
_____no_output_____ |
sklearn/LSTM Time Series Forecasting.ipynb | ###Markdown
LSTM Time Series Forecasting
Examples of LSTM time series forecasting. Here are some articles if you are interested in learning more:
* How to Develop LSTM Models for Time Series Forecasting

Imports
###Code
from numpy.random import seed
from numpy.random import randn
from numpy import array
from math import sin, fabs, sqrt
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense,LSTM, ConvLSTM2D, Flatten
import matplotlib.pyplot as plt
%matplotlib inline
###Output
_____no_output_____
###Markdown
Dataset Generation
We generate a dataset to be used in the examples
###Code
seed(1)
no = 400
# Generate univariate observations
x = []
y = []
for i in range(0,no):
x.append(sin(i/15) * 20 + sin(i/3.5) * 5 + randn() * 2 + 30)
y.append(i)
# Plot
plt.figure(figsize=(18,5))
plt.ylim(0,60)
plt.scatter(y,x,s=6)
plt.show()
# Settings
n_steps = 4
xt = x[n_steps:]
yt = y[n_steps:]
###Output
_____no_output_____
###Markdown
Useful functions
###Code
def calc_error(xhat):
# Calculate error
mse = 0
mae = 0
for v,vhat in zip(xt, xhat):
mae += fabs(v-vhat)
mse += (v-vhat)**2
mae /= len(xt)
mse /= len(xt)
print("Result: mae={0:.3f}, mse={1:.3f}, rmse={2:.3f}".format(mae, mse, sqrt(mse)))
def plot_result(xhat):
# Plot
plt.figure(figsize=(18,5))
plt.ylim(0,60)
plt.plot(yt, xhat, c="red")
plt.scatter(yt, xt, c="blue", s=6)
plt.show()
def predict(model):
X_test = Xs.reshape((Xs.shape[0], Xs.shape[1], n_features))
xhat = model.predict(X_test).flatten()
return xhat
###Output
_____no_output_____
###Markdown
Data Preparation
###Code
# split a univariate sequence into samples
def split_sequence(sequence, n_steps):
X, y = list(), list()
for i in range(len(sequence)):
# find the end of this pattern
end_ix = i + n_steps
# check if we are beyond the sequence
if end_ix > len(sequence)-1:
break
# gather input and output parts of the pattern
seq_x, seq_y = sequence[i:end_ix], sequence[end_ix]
X.append(seq_x)
y.append(seq_y)
return array(X), array(y)
# split into samples
Xs, y = split_sequence(x, n_steps)
# reshape from [samples, timesteps] into [samples, timesteps, features]
n_features = 1
X = Xs.reshape((Xs.shape[0], Xs.shape[1], n_features))
###Output
_____no_output_____
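For intuition, here is what `split_sequence` produces on a tiny series with a window of 2 (values chosen only for illustration):
```python
# Tiny worked example of the sliding window above.
demo_X, demo_y = split_sequence([10, 20, 30, 40, 50], n_steps=2)
print(demo_X)  # [[10 20] [20 30] [30 40]]
print(demo_y)  # [30 40 50]
```
Each row of `demo_X` is a window of `n_steps` past values, and the matching entry of `demo_y` is the value that immediately follows it.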
###Markdown
Vanilla LSTM
###Code
# define model
model = Sequential()
model.add(LSTM(50, activation='relu', input_shape=(n_steps, n_features)))
model.add(Dense(1))
model.compile(optimizer='adam', loss='mse')
# fit model
model.fit(X, y, epochs=200, verbose=0)
# make predictions
xhat = predict(model)
# Show results
calc_error(xhat)
plot_result(xhat)
###Output
Result: mae=2.289, mse=8.054, rmse=2.838
###Markdown
Stacked LSTM
###Code
# define model
model = Sequential()
model.add(LSTM(50, activation='relu', return_sequences=True, input_shape=(n_steps, n_features)))
model.add(LSTM(50, activation='relu'))
model.add(Dense(1))
model.compile(optimizer='adam', loss='mse')
# fit model
model.fit(X, y, epochs=200, verbose=0)
# make predictions
xhat = predict(model)
# Show results
calc_error(xhat)
plot_result(xhat)
###Output
Result: mae=2.238, mse=7.804, rmse=2.794
###Markdown
ConvLSTM
###Code
n_seq = 2
n_step = 2
X = Xs.reshape((Xs.shape[0], n_seq, 1, n_step, n_features))
# define model
model = Sequential()
model.add(ConvLSTM2D(filters=64, kernel_size=(1,2), activation='relu', input_shape=(n_seq, 1, n_step, n_features)))
model.add(Flatten())
model.add(Dense(1))
model.compile(optimizer='adam', loss='mse')
# fit model
model.fit(X, y, epochs=200, verbose=0)
# make predictions
xhat = model.predict(X).flatten()
# Show results
calc_error(xhat)
plot_result(xhat)
###Output
Result: mae=2.293, mse=8.033, rmse=2.834
###Markdown
Forecasting
Evaluates how well forecasting works.
###Code
Xs, y = split_sequence(x, n_steps)
# reshape from [samples, timesteps] into [samples, timesteps, features]
n_features = 1
X = Xs.reshape((Xs.shape[0], Xs.shape[1], n_features))
X_train = X[:300]
y_train = y[:300]
# define model
model = Sequential()
model.add(LSTM(50, activation='relu', input_shape=(n_steps, n_features)))
model.add(Dense(1))
model.compile(optimizer='adam', loss='mse')
# fit model
model.fit(X_train, y_train, epochs=200, verbose=0)
# Index in the series where forecasting starts
start = 310
# Number of forecasts to evaluate
no_f = 60
# Forecasts
xhat = []
# Metrics
mae = 0
mse = 0
# Iterate over number of forecasts
for i in range(0, no_f):
# Cut out input
Xn = X[start+i-1:start+i]
# Make a one-step forecast
fval = model.predict(Xn).flatten()[0]
xhat.append(fval)
# Actual value
aval = x[start+i]
# Metrics
mae += fabs(fval-aval)
mse += (fval-aval)**2
# Metrics
mae /= no_f
mse /= no_f
print("Result: mae={0:.3f}, mse={1:.3f}, rmse={2:.3f}".format(mae, mse, sqrt(mse)))
# Plot
plt.figure(figsize=(18,5))
plt.ylim(0,60)
plt.plot(yt[start:start+no_f], xhat, c="red")
plt.scatter(yt[start:start+no_f], x[start:start+no_f], c="blue", s=6)
plt.show()
###Output
Result: mae=1.388, mse=3.277, rmse=1.810
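
The loop above makes one-step forecasts, always feeding in the actual observed history. For a true multi-step forecast you could instead feed each prediction back into the input window. A sketch of that recursive variant, using the model and variables already defined above:
```python
# Recursive multi-step forecast: start from the last observed window and
# append each prediction so it becomes part of the next input.
window = list(x[start - n_steps:start])
recursive_preds = []
for _ in range(no_f):
    Xn = array(window[-n_steps:]).reshape((1, n_steps, n_features))
    fval = model.predict(Xn).flatten()[0]
    recursive_preds.append(fval)
    window.append(fval)

plt.figure(figsize=(18, 5))
plt.ylim(0, 60)
plt.plot(yt[start:start + no_f], recursive_preds, c="green")
plt.scatter(yt[start:start + no_f], x[start:start + no_f], c="blue", s=6)
plt.show()
```
Errors compound in the recursive setting, so its MAE/MSE would normally be worse than the one-step results reported above.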
|
Probabilistic_Models/NLP_C2_probability_models_W1_Assignment_autocorrect.ipynb | ###Markdown
Assignment 1: Auto CorrectWelcome to the first assignment of Course 2. This assignment will give you a chance to brush up on your python and probability skills. In doing so, you will implement an auto-correct system that is very effective and useful. Outline- [0. Overview](0) - [0.1 Edit Distance](0-1)- [1. Data Preprocessing](1) - [1.1 Exercise 1](ex-1) - [1.2 Exercise 2](ex-2) - [1.3 Exercise 3](ex-3)- [2. String Manipulation](2) - [2.1 Exercise 4](ex-4) - [2.2 Exercise 5](ex-5) - [2.3 Exercise 6](ex-6) - [2.4 Exercise 7](ex-7)- [3. Combining the edits](3) - [3.1 Exercise 8](ex-8) - [3.2 Exercise 9](ex-9) - [3.3 Exercise 10](ex-10)- [4. Minimum Edit Distance](4) - [4.1 Exercise 11](ex-11)- [5. Backtrace (Optional)](5) 0. OverviewYou use autocorrect every day on your cell phone and computer. In this assignment, you will explore what really goes on behind the scenes. Of course, the model you are about to implement is not identical to the one used in your phone, but it is still quite good. By completing this assignment you will learn how to: - Get a word count given a corpus- Get a word probability in the corpus - Manipulate strings - Filter strings - Implement Minimum edit distance to compare strings and to help find the optimal path for the edits. - Understand how dynamic programming worksSimilar systems are used everywhere. - For example, if you type in the word **"I am lerningg"**, chances are very high that you meant to write **"learning"**, as shown in **Figure 1**. Figure 1 0.1 Edit DistanceIn this assignment, you will implement models that correct words that are 1 and 2 edit distances away. - We say two words are n edit distance away from each other when we need n edits to change one word into another. An edit could consist of one of the following options: - Delete (remove a letter): ‘hat’ => ‘at, ha, ht’- Switch (swap 2 adjacent letters): ‘eta’ => ‘eat, tea,...’- Replace (change 1 letter to another): ‘jat’ => ‘hat, rat, cat, mat, ...’- Insert (add a letter): ‘te’ => ‘the, ten, ate, ...’You will be using the four methods above to implement an Auto-correct. - To do so, you will need to compute probabilities that a certain word is correct given an input. This auto-correct you are about to implement was first created by [Peter Norvig](https://en.wikipedia.org/wiki/Peter_Norvig) in 2007. - His [original article](https://norvig.com/spell-correct.html) may be a useful reference for this assignment.The goal of our spell check model is to compute the following probability:$$P(c|w) = \frac{P(w|c)\times P(c)}{P(w)} \tag{Eqn-1}$$The equation above is [Bayes Rule](https://en.wikipedia.org/wiki/Bayes%27_theorem). - Equation 1 says that the probability of a word being correct $P(c|w) $is equal to the probability of having a certain word $w$, given that it is correct $P(w|c)$, multiplied by the probability of being correct in general $P(C)$ divided by the probability of that word $w$ appearing $P(w)$ in general.- To compute equation 1, you will first import a data set and then create all the probabilities that you need using that data set. Part 1: Data Preprocessing
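Before diving into the data preprocessing, here is a toy preview of how the pieces you build below fit together. It uses made-up numbers and the simplifying assumption that the likelihood term $P(w|c)$ is the same for every candidate, so ranking reduces to the word probability $P(c)$:
```python
# Toy illustration with made-up probabilities -- not part of the graded assignment code.
toy_probs = {'learning': 0.002, 'leaning': 0.0004, 'earning': 0.0007}
candidates = ['learning', 'leaning', 'earning']   # pretend these are edit candidates of 'lerningg'
best = max(candidates, key=lambda w: toy_probs.get(w, 0))
print(best)   # 'learning'
```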
###Code
import re
from collections import Counter
import numpy as np
import pandas as pd
###Output
_____no_output_____
###Markdown
As in any other machine learning task, the first thing you have to do is process your data set. - Many courses load in pre-processed data for you. - However, in the real world, when you build these NLP systems, you load the datasets and process them.- So let's get some real world practice in pre-processing the data!Your first task is to read in a file called **'shakespeare.txt'** which is found in your file directory. To look at this file you can go to `File ==> Open `. Exercise 1Implement the function `process_data` which 1) Reads in a corpus (text file)2) Changes everything to lowercase3) Returns a list of words. Options and Hints- If you would like more of a real-life practice, don't open the 'Hints' below (yet) and try searching the web to derive your answer.- If you want a little help, click on the green "General Hints" section by clicking on it with your mouse.- If you get stuck or are not getting the expected results, click on the green 'Detailed Hints' section to get hints for each step that you'll take to complete this function. General Hints General Hints to get started Python input and output Python 're' documentation Detailed Hints Detailed hints if you're stuck Use 'with' syntax to read a file Decide whether to use 'read()' or 'readline(). What's the difference? Choose whether to use either str.lower() or str.lowercase(). What is the difference? Use re.findall(pattern, string) Look for the "Raw String Notation" section in the Python 're' documentation to understand the difference between r'\W', r'\W' and '\\W'. For the pattern, decide between using '\s', '\w', '\s+' or '\w+'. What do you think are the differences?
###Code
# UNQ_C1 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# GRADED FUNCTION: process_data
def process_data(file_name):
"""
Input:
A file_name which is found in your current directory. You just have to read it in.
Output:
words: a list containing all the words in the corpus (text file you read) in lower case.
"""
words = [] # return this variable correctly
### START CODE HERE ###
with open(file_name,'r') as file:
for line in file:
wordlist=re.findall(r"\w+", line)
for word in wordlist:
words.append(word.lower())
### END CODE HERE ###
return words
###Output
_____no_output_____
###Markdown
Note, in the following cell, 'words' is converted to a python `set`. This eliminates any duplicate entries.
###Code
#DO NOT MODIFY THIS CELL
word_l = process_data('shakespeare.txt')
vocab = set(word_l) # this will be your new vocabulary
print(f"The first ten words in the text are: \n{word_l[0:10]}")
print(f"There are {len(vocab)} unique words in the vocabulary.")
###Output
The first ten words in the text are:
['o', 'for', 'a', 'muse', 'of', 'fire', 'that', 'would', 'ascend', 'the']
There are 6116 unique words in the vocabulary.
###Markdown
Expected Output```PythonThe first ten words in the text are: ['o', 'for', 'a', 'muse', 'of', 'fire', 'that', 'would', 'ascend', 'the']There are 6116 unique words in the vocabulary.``` Exercise 2Implement a `get_count` function that returns a dictionary- The dictionary's keys are words- The value for each word is the number of times that word appears in the corpus. For example, given the following sentence: **"I am happy because I am learning"**, your dictionary should return the following: Key Value I 2 am 2 happy 1 because 1 learning 1 **Instructions**: Implement a `get_count` which returns a dictionary where the key is a word and the value is the number of times the word appears in the list. Hints Try implementing this using a for loop and a regular dictionary. This may be good practice for similar coding interview questions You can also use defaultdict instead of a regualr dictionary, along with the for loop Otherwise, to skip using a for loop, you can use Python's Counter class
###Code
# UNQ_C2 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# UNIT TEST COMMENT: Candidate for Table Driven Tests
# GRADED FUNCTION: get_count
def get_count(word_l):
'''
Input:
word_l: a set of words representing the corpus.
Output:
word_count_dict: The wordcount dictionary where key is the word and value is its frequency.
'''
word_count_dict = {} # fill this with word counts
### START CODE HERE
for word in word_l:
word_count_dict[word]=(word_count_dict[word]+1 if word in word_count_dict.keys() else 1)
### END CODE HERE ###
return word_count_dict
#DO NOT MODIFY THIS CELL
word_count_dict = get_count(word_l)
print(f"There are {len(word_count_dict)} key values pairs")
print(f"The count for the word 'thee' is {word_count_dict.get('thee',0)}")
###Output
There are 6116 key values pairs
The count for the word 'thee' is 240
###Markdown
Expected Output```PythonThere are 6116 key values pairsThe count for the word 'thee' is 240``` Exercise 3Given the dictionary of word counts, compute the probability that each word will appear if randomly selected from the corpus of words.$$P(w_i) = \frac{C(w_i)}{M} \tag{Eqn-2}$$where $C(w_i)$ is the total number of times $w_i$ appears in the corpus.$M$ is the total number of words in the corpus.For example, the probability of the word 'am' in the sentence **'I am happy because I am learning'** is:$$P(am) = \frac{C(w_i)}{M} = \frac {2}{7} \tag{Eqn-3}.$$**Instructions:** Implement `get_probs` function which gives you the probability that a word occurs in a sample. This returns a dictionary where the keys are words, and the value for each word is its probability in the corpus of words. HintsGeneral advice Use dictionary.values() Use sum() The cardinality (number of words in the corpus should be equal to len(word_l). You will calculate this same number, but using the word count dictionary. If you're using a for loop: Use dictionary.keys() If you're using a dictionary comprehension: Use dictionary.items()
###Code
# UNQ_C3 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# GRADED FUNCTION: get_probs
def get_probs(word_count_dict):
'''
Input:
word_count_dict: The wordcount dictionary where key is the word and value is its frequency.
Output:
probs: A dictionary where keys are the words and the values are the probability that a word will occur.
'''
probs = {} # return this variable correctly
### START CODE HERE ###
M=0
for word in word_count_dict.keys():
M=M+word_count_dict[word]
for word in word_count_dict.keys():
probs[word]=word_count_dict[word]/M
### END CODE HERE ###
return probs
#DO NOT MODIFY THIS CELL
probs = get_probs(word_count_dict)
print(f"Length of probs is {len(probs)}")
print(f"P('thee') is {probs['thee']:.4f}")
###Output
Length of probs is 6116
P('thee') is 0.0045
###Markdown
Expected Output```PythonLength of probs is 6116P('thee') is 0.0045``` Part 2: String ManipulationsNow, that you have computed $P(w_i)$ for all the words in the corpus, you will write a few functions to manipulate strings so that you can edit the erroneous strings and return the right spellings of the words. In this section, you will implement four functions: * `delete_letter`: given a word, it returns all the possible strings that have **one character removed**. * `switch_letter`: given a word, it returns all the possible strings that have **two adjacent letters switched**.* `replace_letter`: given a word, it returns all the possible strings that have **one character replaced by another different letter**.* `insert_letter`: given a word, it returns all the possible strings that have an **additional character inserted**. List comprehensionsString and list manipulation in python will often make use of a python feature called [list comprehensions](https://docs.python.org/3/tutorial/datastructures.htmllist-comprehensions). The routines below will be described as using list comprehensions, but if you would rather implement them in another way, you are free to do so as long as the result is the same. Further, the following section will provide detailed instructions on how to use list comprehensions and how to implement the desired functions. If you are a python expert, feel free to skip the python hints and move to implementing the routines directly. Python List Comprehensions embed a looping structure inside of a list declaration, collapsing many lines of code into a single line. If you are not familiar with them, they seem slightly out of order relative to for loops. Figure 2 The diagram above shows that the components of a list comprehension are the same components you would find in a typical for loop that appends to a list, but in a different order. With that in mind, we'll continue the specifics of this assignment. We will be very descriptive for the first function, `deletes()`, and less so in later functions as you become familiar with list comprehensions. Exercise 4**Instructions for delete_letter():** Implement a `delete_letter()` function that, given a word, returns a list of strings with one character deleted. For example, given the word **nice**, it would return the set: {'ice', 'nce', 'nic', 'nie'}. **Step 1:** Create a list of 'splits'. This is all the ways you can split a word into Left and Right: For example, 'nice is split into : `[('', 'nice'), ('n', 'ice'), ('ni', 'ce'), ('nic', 'e'), ('nice', '')]`This is common to all four functions (delete, replace, switch, insert). Figure 3 **Step 2:** This is specific to `delete_letter`. Here, we are generating all words that result from deleting one character. This can be done in a single line with a list comprehension. You can make use of this type of syntax: `[f(a,b) for a, b in splits if condition]` For our 'nice' example you get: ['ice', 'nce', 'nie', 'nic'] Figure 4 Levels of assistanceTry this exercise with these levels of assistance. - We hope that this will make it both a meaningful experience but also not a frustrating experience. - Start with level 1, then move onto level 2, and 3 as needed. - Level 1. Try to think this through and implement this yourself. - Level 2. Click on the "Level 2 Hints" section for some hints to get started. - Level 3. If you would prefer more guidance, please click on the "Level 3 Hints" cell for step by step instructions. 
- If you are still stuck, look at the images in the "list comprehensions" section above. Level 2 Hints Use array slicing like my_string[0:2] Use list comprehensions or for loops Level 3 Hints splits: Use array slicing, like my_str[0:2], to separate a string into two pieces. Do this in a loop or list comprehension, so that you have a list of tuples. For example, "cake" can get split into "ca" and "ke". They're stored in a tuple ("ca","ke"), and the tuple is appended to a list. We'll refer to these as L and R, so the tuple is (L,R) When choosing the range for your loop, if you input the word "cans" and generate the tuple ('cans',''), make sure to include an if statement to check the length of that right-side string (R) in the tuple (L,R) deletes: Go through the list of tuples and combine the two strings together. You can use the + operator to combine two strings When combining the tuples, make sure that you leave out a middle character. Use array slicing to leave out the first character of the right substring.
###Code
# UNQ_C4 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# UNIT TEST COMMENT: Candidate for Table Driven Tests
# GRADED FUNCTION: deletes
def delete_letter(word, verbose=False):
'''
Input:
word: the string/word for which you will generate all possible words
in the vocabulary which have 1 missing character
Output:
delete_l: a list of all possible strings obtained by deleting 1 character from word
'''
delete_l = []
split_l = []
### START CODE HERE ###
split_l=[(word[:i],word[i:]) for i in range(len(word)+1)]
delete_l=[L+R[1:] for (L,R) in split_l if R]
### END CODE HERE ###
if verbose: print(f"input word {word}, \nsplit_l = {split_l}, \ndelete_l = {delete_l}")
return delete_l
delete_word_l = delete_letter(word="cans",
verbose=True)
###Output
input word cans,
split_l = [('', 'cans'), ('c', 'ans'), ('ca', 'ns'), ('can', 's'), ('cans', '')],
delete_l = ['ans', 'cns', 'cas', 'can']
###Markdown
Expected Output```CPPNote: You might get a slightly different result with split_linput word cans, split_l = [('', 'cans'), ('c', 'ans'), ('ca', 'ns'), ('can', 's')], delete_l = ['ans', 'cns', 'cas', 'can']``` Note 1- Notice how it has the extra tuple `('cans', '')`.- This will be fine as long as you have checked the size of the right-side substring in tuple (L,R).- Can you explain why this will give you the same result for the list of deletion strings (delete_l)?```CPPinput word cans, split_l = [('', 'cans'), ('c', 'ans'), ('ca', 'ns'), ('can', 's'), ('cans', '')], delete_l = ['ans', 'cns', 'cas', 'can']``` Note 2If you end up getting the same word as your input word, like this:```Pythoninput word cans, split_l = [('', 'cans'), ('c', 'ans'), ('ca', 'ns'), ('can', 's'), ('cans', '')], delete_l = ['ans', 'cns', 'cas', 'can', 'cans']```- Check how you set the `range`.- See if you check the length of the string on the right-side of the split.
###Code
# test # 2
print(f"Number of outputs of delete_letter('at') is {len(delete_letter('at'))}")
###Output
Number of outputs of delete_letter('at') is 2
###Markdown
Expected output```CPPNumber of outputs of delete_letter('at') is 2``` Exercise 5**Instructions for switch_letter()**: Now implement a function that switches two letters in a word. It takes in a word and returns a list of all the possible switches of two letters **that are adjacent to each other**. - For example, given the word 'eta', it returns {'eat', 'tea'}, but does not return 'ate'.**Step 1:** is the same as in delete_letter() **Step 2:** A list comprehension or for loop which forms strings by swapping adjacent letters. This is of the form: `[f(L,R) for L, R in splits if condition]` where 'condition' will test the length of R in a given iteration. See below. Figure 5 Levels of difficultyTry this exercise with these levels of difficulty. - Level 1. Try to think this through and implement this yourself.- Level 2. Click on the "Level 2 Hints" section for some hints to get started.- Level 3. If you would prefer more guidance, please click on the "Level 3 Hints" cell for step by step instructions. Level 2 Hints Use array slicing like my_string[0:2] Use list comprehensions or for loops To do a switch, think of the whole word as divided into 4 distinct parts. Write out 'cupcakes' on a piece of paper and see how you can split it into ('cupc', 'k', 'a', 'es') Level 3 Hints splits: Use array slicing, like my_str[0:2], to separate a string into two pieces. Splitting is the same as for delete_letter To perform the switch, go through the list of tuples and combine four strings together. You can use the + operator to combine strings The four strings will be the left substring from the split tuple, followed by the first (index 1) character of the right substring, then the zero-th character (index 0) of the right substring, and then the remaining part of the right substring. Unlike delete_letter, you will want to check that your right substring is at least a minimum length. To see why, review the previous hint bullet point (directly before this one).
###Code
# UNQ_C5 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# UNIT TEST COMMENT: Candidate for Table Driven Tests
# GRADED FUNCTION: switches
def switch_letter(word, verbose=False):
'''
Input:
word: input string
Output:
switches: a list of all possible strings with one adjacent charater switched
'''
switch_l = []
split_l = []
### START CODE HERE ###
split_l=[(word[:i],word[i:]) for i in range(len(word)+1)]
switch_l= [L+R[1]+R[0]+R[2:] for (L,R) in split_l if len(R)>1]
### END CODE HERE ###
if verbose: print(f"Input word = {word} \nsplit_l = {split_l} \nswitch_l = {switch_l}")
return switch_l
switch_word_l = switch_letter(word="eta",
verbose=True)
###Output
Input word = eta
split_l = [('', 'eta'), ('e', 'ta'), ('et', 'a'), ('eta', '')]
switch_l = ['tea', 'eat']
###Markdown
Expected output```PythonInput word = eta split_l = [('', 'eta'), ('e', 'ta'), ('et', 'a')] switch_l = ['tea', 'eat']``` Note 1You may get this:```PythonInput word = eta split_l = [('', 'eta'), ('e', 'ta'), ('et', 'a'), ('eta', '')] switch_l = ['tea', 'eat']```- Notice how it has the extra tuple `('eta', '')`.- This is also correct.- Can you think of why this is the case? Note 2If you get an error```PythonIndexError: string index out of range```- Please see if you have checked the length of the strings when switching characters.
###Code
# test # 2
print(f"Number of outputs of switch_letter('at') is {len(switch_letter('at'))}")
###Output
Number of outputs of switch_letter('at') is 1
###Markdown
Expected output```CPPNumber of outputs of switch_letter('at') is 1``` Exercise 6**Instructions for replace_letter()**: Now implement a function that takes in a word and returns a list of strings with one **replaced letter** from the original word. **Step 1:** is the same as in `delete_letter()`**Step 2:** A list comprehension or for loop which form strings by replacing letters. This can be of the form: `[f(a,b,c) for a, b in splits if condition for c in string]` Note the use of the second for loop. It is expected in this routine that one or more of the replacements will include the original word. For example, replacing the first letter of 'ear' with 'e' will return 'ear'.**Step 3:** Remove the original input letter from the output. Hints To remove a word from a list, first store its contents inside a set() Use set.discard('the_word') to remove a word in a set (if the word does not exist in the set, then it will not throw a KeyError. Using set.remove('the_word') throws a KeyError if the word does not exist in the set.
###Code
# UNQ_C6 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# UNIT TEST COMMENT: Candidate for Table Driven Tests
# GRADED FUNCTION: replaces
def replace_letter(word, verbose=False):
'''
Input:
word: the input string/word
Output:
replaces: a list of all possible strings where we replaced one letter from the original word.
'''
letters = 'abcdefghijklmnopqrstuvwxyz'
replace_l = []
split_l = []
### START CODE HERE ###
split_l=[(word[:i],word[i:]) for i in range(len(word)+1)]
replace_set=set([L+c+R[1:] for (L,R) in split_l if R for c in letters])
### END CODE HERE ###
# turn the set back into a list and sort it, for easier viewing
replace_l = sorted(list(replace_set))
if verbose: print(f"Input word = {word} \nsplit_l = {split_l} \nreplace_l {replace_l}")
return replace_l
replace_l = replace_letter(word='can',
verbose=True)
###Output
Input word = can
split_l = [('', 'can'), ('c', 'an'), ('ca', 'n'), ('can', '')]
replace_l ['aan', 'ban', 'caa', 'cab', 'cac', 'cad', 'cae', 'caf', 'cag', 'cah', 'cai', 'caj', 'cak', 'cal', 'cam', 'can', 'cao', 'cap', 'caq', 'car', 'cas', 'cat', 'cau', 'cav', 'caw', 'cax', 'cay', 'caz', 'cbn', 'ccn', 'cdn', 'cen', 'cfn', 'cgn', 'chn', 'cin', 'cjn', 'ckn', 'cln', 'cmn', 'cnn', 'con', 'cpn', 'cqn', 'crn', 'csn', 'ctn', 'cun', 'cvn', 'cwn', 'cxn', 'cyn', 'czn', 'dan', 'ean', 'fan', 'gan', 'han', 'ian', 'jan', 'kan', 'lan', 'man', 'nan', 'oan', 'pan', 'qan', 'ran', 'san', 'tan', 'uan', 'van', 'wan', 'xan', 'yan', 'zan']
###Markdown
Expected Output**: ```PythonInput word = can split_l = [('', 'can'), ('c', 'an'), ('ca', 'n')] replace_l ['aan', 'ban', 'caa', 'cab', 'cac', 'cad', 'cae', 'caf', 'cag', 'cah', 'cai', 'caj', 'cak', 'cal', 'cam', 'cao', 'cap', 'caq', 'car', 'cas', 'cat', 'cau', 'cav', 'caw', 'cax', 'cay', 'caz', 'cbn', 'ccn', 'cdn', 'cen', 'cfn', 'cgn', 'chn', 'cin', 'cjn', 'ckn', 'cln', 'cmn', 'cnn', 'con', 'cpn', 'cqn', 'crn', 'csn', 'ctn', 'cun', 'cvn', 'cwn', 'cxn', 'cyn', 'czn', 'dan', 'ean', 'fan', 'gan', 'han', 'ian', 'jan', 'kan', 'lan', 'man', 'nan', 'oan', 'pan', 'qan', 'ran', 'san', 'tan', 'uan', 'van', 'wan', 'xan', 'yan', 'zan']```- Note how the input word 'can' should not be one of the output words. Note 1If you get something like this:```PythonInput word = can split_l = [('', 'can'), ('c', 'an'), ('ca', 'n'), ('can', '')] replace_l ['aan', 'ban', 'caa', 'cab', 'cac', 'cad', 'cae', 'caf', 'cag', 'cah', 'cai', 'caj', 'cak', 'cal', 'cam', 'cao', 'cap', 'caq', 'car', 'cas', 'cat', 'cau', 'cav', 'caw', 'cax', 'cay', 'caz', 'cbn', 'ccn', 'cdn', 'cen', 'cfn', 'cgn', 'chn', 'cin', 'cjn', 'ckn', 'cln', 'cmn', 'cnn', 'con', 'cpn', 'cqn', 'crn', 'csn', 'ctn', 'cun', 'cvn', 'cwn', 'cxn', 'cyn', 'czn', 'dan', 'ean', 'fan', 'gan', 'han', 'ian', 'jan', 'kan', 'lan', 'man', 'nan', 'oan', 'pan', 'qan', 'ran', 'san', 'tan', 'uan', 'van', 'wan', 'xan', 'yan', 'zan']```- Notice how split_l has an extra tuple `('can', '')`, but the output is still the same, so this is okay. Note 2If you get something like this:```PythonInput word = can split_l = [('', 'can'), ('c', 'an'), ('ca', 'n'), ('can', '')] replace_l ['aan', 'ban', 'caa', 'cab', 'cac', 'cad', 'cae', 'caf', 'cag', 'cah', 'cai', 'caj', 'cak', 'cal', 'cam', 'cana', 'canb', 'canc', 'cand', 'cane', 'canf', 'cang', 'canh', 'cani', 'canj', 'cank', 'canl', 'canm', 'cann', 'cano', 'canp', 'canq', 'canr', 'cans', 'cant', 'canu', 'canv', 'canw', 'canx', 'cany', 'canz', 'cao', 'cap', 'caq', 'car', 'cas', 'cat', 'cau', 'cav', 'caw', 'cax', 'cay', 'caz', 'cbn', 'ccn', 'cdn', 'cen', 'cfn', 'cgn', 'chn', 'cin', 'cjn', 'ckn', 'cln', 'cmn', 'cnn', 'con', 'cpn', 'cqn', 'crn', 'csn', 'ctn', 'cun', 'cvn', 'cwn', 'cxn', 'cyn', 'czn', 'dan', 'ean', 'fan', 'gan', 'han', 'ian', 'jan', 'kan', 'lan', 'man', 'nan', 'oan', 'pan', 'qan', 'ran', 'san', 'tan', 'uan', 'van', 'wan', 'xan', 'yan', 'zan']```- Notice how there are strings that are 1 letter longer than the original word, such as `cana`.- Please check for the case when there is an empty string `''`, and if so, do not use that empty string when setting replace_l.
###Code
# test # 2
print(f"Number of outputs of switch_letter('at') is {len(switch_letter('at'))}")
###Output
Number of outputs of switch_letter('at') is 1
###Markdown
Expected output```CPPNumber of outputs of switch_letter('at') is 1``` Exercise 7**Instructions for insert_letter()**: Now implement a function that takes in a word and returns a list with a letter inserted at every offset.**Step 1:** is the same as in `delete_letter()`**Step 2:** This can be a list comprehension of the form: `[f(a,b,c) for a, b in splits if condition for c in string]`
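To make the comprehension form in Step 2 concrete, here is a tiny stand-alone example of the same pattern (purely illustrative; the splits below are made up):

```python
splits = [('', 'at'), ('a', 't'), ('at', '')]
# Outer loop over (L, R) pairs, inner loop over the characters to insert.
demo = [L + c + R for L, R in splits for c in 'xy']
print(demo)   # ['xat', 'yat', 'axt', 'ayt', 'atx', 'aty']
```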
###Code
# UNQ_C7 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# UNIT TEST COMMENT: Candidate for Table Driven Tests
# GRADED FUNCTION: inserts
def insert_letter(word, verbose=False):
'''
Input:
word: the input string/word
Output:
inserts: a set of all possible strings with one new letter inserted at every offset
'''
letters = 'abcdefghijklmnopqrstuvwxyz'
insert_l = []
split_l = []
### START CODE HERE ###
split_l=[(word[:i],word[i:]) for i in range(len(word)+1)]
insert_l=[L+c+R for (L,R) in split_l for c in letters]
### END CODE HERE ###
if verbose: print(f"Input word {word} \nsplit_l = {split_l} \ninsert_l = {insert_l}")
return insert_l
insert_l = insert_letter('at', True)
print(f"Number of strings output by insert_letter('at') is {len(insert_l)}")
###Output
Input word at
split_l = [('', 'at'), ('a', 't'), ('at', '')]
insert_l = ['aat', 'bat', 'cat', 'dat', 'eat', 'fat', 'gat', 'hat', 'iat', 'jat', 'kat', 'lat', 'mat', 'nat', 'oat', 'pat', 'qat', 'rat', 'sat', 'tat', 'uat', 'vat', 'wat', 'xat', 'yat', 'zat', 'aat', 'abt', 'act', 'adt', 'aet', 'aft', 'agt', 'aht', 'ait', 'ajt', 'akt', 'alt', 'amt', 'ant', 'aot', 'apt', 'aqt', 'art', 'ast', 'att', 'aut', 'avt', 'awt', 'axt', 'ayt', 'azt', 'ata', 'atb', 'atc', 'atd', 'ate', 'atf', 'atg', 'ath', 'ati', 'atj', 'atk', 'atl', 'atm', 'atn', 'ato', 'atp', 'atq', 'atr', 'ats', 'att', 'atu', 'atv', 'atw', 'atx', 'aty', 'atz']
Number of strings output by insert_letter('at') is 78
###Markdown
Expected output```PythonInput word at split_l = [('', 'at'), ('a', 't'), ('at', '')] insert_l = ['aat', 'bat', 'cat', 'dat', 'eat', 'fat', 'gat', 'hat', 'iat', 'jat', 'kat', 'lat', 'mat', 'nat', 'oat', 'pat', 'qat', 'rat', 'sat', 'tat', 'uat', 'vat', 'wat', 'xat', 'yat', 'zat', 'aat', 'abt', 'act', 'adt', 'aet', 'aft', 'agt', 'aht', 'ait', 'ajt', 'akt', 'alt', 'amt', 'ant', 'aot', 'apt', 'aqt', 'art', 'ast', 'att', 'aut', 'avt', 'awt', 'axt', 'ayt', 'azt', 'ata', 'atb', 'atc', 'atd', 'ate', 'atf', 'atg', 'ath', 'ati', 'atj', 'atk', 'atl', 'atm', 'atn', 'ato', 'atp', 'atq', 'atr', 'ats', 'att', 'atu', 'atv', 'atw', 'atx', 'aty', 'atz']Number of strings output by insert_letter('at') is 78``` Note 1If you get a split_l like this:```PythonInput word at split_l = [('', 'at'), ('a', 't')] insert_l = ['aat', 'bat', 'cat', 'dat', 'eat', 'fat', 'gat', 'hat', 'iat', 'jat', 'kat', 'lat', 'mat', 'nat', 'oat', 'pat', 'qat', 'rat', 'sat', 'tat', 'uat', 'vat', 'wat', 'xat', 'yat', 'zat', 'aat', 'abt', 'act', 'adt', 'aet', 'aft', 'agt', 'aht', 'ait', 'ajt', 'akt', 'alt', 'amt', 'ant', 'aot', 'apt', 'aqt', 'art', 'ast', 'att', 'aut', 'avt', 'awt', 'axt', 'ayt', 'azt']Number of strings output by insert_letter('at') is 52```- Notice that split_l is missing the extra tuple ('at', ''). For insertion, we actually **WANT** this tuple.- The function is not creating all the desired output strings.- Check the range that you use for the for loop. Note 2If you see this:```PythonInput word at split_l = [('', 'at'), ('a', 't'), ('at', '')] insert_l = ['aat', 'bat', 'cat', 'dat', 'eat', 'fat', 'gat', 'hat', 'iat', 'jat', 'kat', 'lat', 'mat', 'nat', 'oat', 'pat', 'qat', 'rat', 'sat', 'tat', 'uat', 'vat', 'wat', 'xat', 'yat', 'zat', 'aat', 'abt', 'act', 'adt', 'aet', 'aft', 'agt', 'aht', 'ait', 'ajt', 'akt', 'alt', 'amt', 'ant', 'aot', 'apt', 'aqt', 'art', 'ast', 'att', 'aut', 'avt', 'awt', 'axt', 'ayt', 'azt']Number of strings output by insert_letter('at') is 52```- Even though you may have fixed the split_l so that it contains the tuple `('at', '')`, notice that you're still missing some output strings. - Notice that it's missing strings such as 'ata', 'atb', 'atc' all the way to 'atz'.- To fix this, make sure that when you set insert_l, you allow the use of the empty string `''`.
###Code
# test # 2
print(f"Number of outputs of insert_letter('at') is {len(insert_letter('at'))}")
###Output
Number of outputs of insert_letter('at') is 78
###Markdown
Expected output```CPPNumber of outputs of insert_letter('at') is 78``` Part 3: Combining the editsNow that you have implemented the string manipulations, you will create two functions that, given a string, will return all the possible single and double edits on that string. These will be `edit_one_letter()` and `edit_two_letters()`. 3.1 Edit one letter Exercise 8**Instructions**: Implement the `edit_one_letter` function to get all the possible edits that are one edit away from a word. The edits consist of the replace, insert, delete, and optionally the switch operation. You should use the previous functions you have already implemented to complete this function. The 'switch' function is a less common edit function, so its use will be selected by an "allow_switches" input argument.Note that those functions return *lists* while this function should return a *python set*. Utilizing a set eliminates any duplicate entries. Hints Each of the functions returns a list. You can combine lists using the `+` operator. To get unique strings (avoid duplicates), you can use the set() function.
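The hints above about combining lists with `+` and de-duplicating with `set()` look like this in isolation (a toy example with made-up edit lists):

```python
combined = ['ta', 'at', 'a'] + ['a', 't']   # '+' concatenates the edit lists
unique_edits = set(combined)                # set() drops the duplicate 'a'
print(unique_edits)                         # e.g. {'ta', 'at', 'a', 't'} (set order is arbitrary)
```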
###Code
# UNQ_C8 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# UNIT TEST COMMENT: Candidate for Table Driven Tests
# GRADED FUNCTION: edit_one_letter
def edit_one_letter(word, allow_switches = True):
"""
Input:
        word: the string/word for which we will generate all possible words that are one edit away.
Output:
        edit_one_set: a set of words with one possible edit. Please return a set, not a list.
"""
edit_one_set = set()
### START CODE HERE ###
edit_one_list=[]
edit_one_list=delete_letter(word)
edit_one_list+=replace_letter(word)
edit_one_list+=insert_letter(word)
if allow_switches:
edit_one_list+=switch_letter(word)
if word in edit_one_list:
edit_one_list.remove(word)
edit_one_set=set(edit_one_list)
### END CODE HERE ###
return edit_one_set
tmp_word = "at"
tmp_edit_one_set = edit_one_letter(tmp_word)
# turn this into a list to sort it, in order to view it
tmp_edit_one_l = sorted(list(tmp_edit_one_set))
print(f"input word {tmp_word} \nedit_one_l \n{tmp_edit_one_l}\n")
print(f"The type of the returned object should be a set {type(tmp_edit_one_set)}")
print(f"Number of outputs from edit_one_letter('at') is {len(edit_one_letter('at'))}")
###Output
input word at
edit_one_l
['a', 'aa', 'aat', 'ab', 'abt', 'ac', 'act', 'ad', 'adt', 'ae', 'aet', 'af', 'aft', 'ag', 'agt', 'ah', 'aht', 'ai', 'ait', 'aj', 'ajt', 'ak', 'akt', 'al', 'alt', 'am', 'amt', 'an', 'ant', 'ao', 'aot', 'ap', 'apt', 'aq', 'aqt', 'ar', 'art', 'as', 'ast', 'ata', 'atb', 'atc', 'atd', 'ate', 'atf', 'atg', 'ath', 'ati', 'atj', 'atk', 'atl', 'atm', 'atn', 'ato', 'atp', 'atq', 'atr', 'ats', 'att', 'atu', 'atv', 'atw', 'atx', 'aty', 'atz', 'au', 'aut', 'av', 'avt', 'aw', 'awt', 'ax', 'axt', 'ay', 'ayt', 'az', 'azt', 'bat', 'bt', 'cat', 'ct', 'dat', 'dt', 'eat', 'et', 'fat', 'ft', 'gat', 'gt', 'hat', 'ht', 'iat', 'it', 'jat', 'jt', 'kat', 'kt', 'lat', 'lt', 'mat', 'mt', 'nat', 'nt', 'oat', 'ot', 'pat', 'pt', 'qat', 'qt', 'rat', 'rt', 'sat', 'st', 't', 'ta', 'tat', 'tt', 'uat', 'ut', 'vat', 'vt', 'wat', 'wt', 'xat', 'xt', 'yat', 'yt', 'zat', 'zt']
The type of the returned object should be a set <class 'set'>
Number of outputs from edit_one_letter('at') is 129
###Markdown
Expected Output```CPPinput word at edit_one_l ['a', 'aa', 'aat', 'ab', 'abt', 'ac', 'act', 'ad', 'adt', 'ae', 'aet', 'af', 'aft', 'ag', 'agt', 'ah', 'aht', 'ai', 'ait', 'aj', 'ajt', 'ak', 'akt', 'al', 'alt', 'am', 'amt', 'an', 'ant', 'ao', 'aot', 'ap', 'apt', 'aq', 'aqt', 'ar', 'art', 'as', 'ast', 'ata', 'atb', 'atc', 'atd', 'ate', 'atf', 'atg', 'ath', 'ati', 'atj', 'atk', 'atl', 'atm', 'atn', 'ato', 'atp', 'atq', 'atr', 'ats', 'att', 'atu', 'atv', 'atw', 'atx', 'aty', 'atz', 'au', 'aut', 'av', 'avt', 'aw', 'awt', 'ax', 'axt', 'ay', 'ayt', 'az', 'azt', 'bat', 'bt', 'cat', 'ct', 'dat', 'dt', 'eat', 'et', 'fat', 'ft', 'gat', 'gt', 'hat', 'ht', 'iat', 'it', 'jat', 'jt', 'kat', 'kt', 'lat', 'lt', 'mat', 'mt', 'nat', 'nt', 'oat', 'ot', 'pat', 'pt', 'qat', 'qt', 'rat', 'rt', 'sat', 'st', 't', 'ta', 'tat', 'tt', 'uat', 'ut', 'vat', 'vt', 'wat', 'wt', 'xat', 'xt', 'yat', 'yt', 'zat', 'zt']The type of the returned object should be a set Number of outputs from edit_one_letter('at') is 129``` Part 3.2 Edit two letters Exercise 9Now you can generalize this to implement to get two edits on a word. To do so, you would have to get all the possible edits on a single word and then for each modified word, you would have to modify it again. **Instructions**: Implement the `edit_two_letters` function that returns a set of words that are two edits away. Note that creating additional edits based on the `edit_one_letter` function may 'restore' some one_edits to zero or one edits. That is allowed here. This accounted for in get_corrections. Hints You will likely want to take the union of two sets. You can either use set.union() or use the '|' (or operator) to union two sets See the documentation Python sets for examples of using operators or functions of the Python set.
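The hint about taking the union of sets, shown on two small made-up sets:

```python
one_edit = {'at', 'ta'}
extra = {'at', 'aat', 'tta'}
print(one_edit | extra)            # union via the | operator
print(one_edit.union(extra))       # the same result via set.union
```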
###Code
# UNQ_C9 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# UNIT TEST COMMENT: Candidate for Table Driven Tests
# GRADED FUNCTION: edit_two_letters
def edit_two_letters(word, allow_switches = True):
'''
Input:
word: the input string/word
Output:
edit_two_set: a set of strings with all possible two edits
'''
edit_two_set = set()
### START CODE HERE ###
edit_one_set=edit_one_letter(word)
edit_two_list=[]
for tmpword in edit_one_set:
edit_two_list+=edit_one_letter(tmpword)
edit_two_set=set(edit_two_list)
### END CODE HERE ###
return edit_two_set
tmp_edit_two_set = edit_two_letters("a")
tmp_edit_two_l = sorted(list(tmp_edit_two_set))
print(f"Number of strings with edit distance of two: {len(tmp_edit_two_l)}")
print(f"First 10 strings {tmp_edit_two_l[:10]}")
print(f"Last 10 strings {tmp_edit_two_l[-10:]}")
print(f"The data type of the returned object should be a set {type(tmp_edit_two_set)}")
print(f"Number of strings that are 2 edit distances from 'at' is {len(edit_two_letters('at'))}")
###Output
Number of strings with edit distance of two: 2654
First 10 strings ['', 'a', 'aa', 'aaa', 'aab', 'aac', 'aad', 'aae', 'aaf', 'aag']
Last 10 strings ['zv', 'zva', 'zw', 'zwa', 'zx', 'zxa', 'zy', 'zya', 'zz', 'zza']
The data type of the returned object should be a set <class 'set'>
Number of strings that are 2 edit distances from 'at' is 7154
###Markdown
Expected Output```CPPNumber of strings with edit distance of two: 2654First 10 strings ['', 'a', 'aa', 'aaa', 'aab', 'aac', 'aad', 'aae', 'aaf', 'aag']Last 10 strings ['zv', 'zva', 'zw', 'zwa', 'zx', 'zxa', 'zy', 'zya', 'zz', 'zza']The data type of the returned object should be a set Number of strings that are 2 edit distances from 'at' is 7154``` Part 3-3: suggest spelling suggestionsNow you will use your `edit_two_letters` function to get a set of all the possible 2 edits on your word. You will then use those strings to get the most probable word you meant to type aka your typing suggestion. Exercise 10**Instructions**: Implement `get_corrections`, which returns a list of zero to n possible suggestion tuples of the form (word, probability_of_word). **Step 1:** Generate suggestions for a supplied word: You'll use the edit functions you have developed. The 'suggestion algorithm' should follow this logic: * If the word is in the vocabulary, suggest the word. * Otherwise, if there are suggestions from `edit_one_letter` that are in the vocabulary, use those. * Otherwise, if there are suggestions from `edit_two_letters` that are in the vocabulary, use those. * Otherwise, suggest the input word.* * The idea is that words generated from fewer edits are more likely than words with more edits.Note: - Edits of one or two letters may 'restore' strings to either zero or one edit. This algorithm accounts for this by preferentially selecting lower distance edits first. Short circuitIn Python, logical operations such as `and` and `or` have two useful properties. They can operate on lists and they have ['short-circuit' behavior](https://docs.python.org/3/library/stdtypes.html). Try these:
###Code
# example of logical operation on lists or sets
print( [] and ["a","b"] )
print( [] or ["a","b"] )
#example of Short circuit behavior
val1 = ["Most","Likely"] or ["Less","so"] or ["least","of","all"] # selects first, does not evalute remainder
print(val1)
val2 = [] or [] or ["least","of","all"] # continues evaluation until there is a non-empty list
print(val2)
###Output
[]
['a', 'b']
['Most', 'Likely']
['least', 'of', 'all']
###Markdown
The logical `or` could be used to implement the suggestion algorithm very compactly. Alternately, if/then constructs could be used. **Step 2**: Create a 'best_words' dictionary where the 'key' is a suggestion and the 'value' is the probability of that word in your vocabulary. If the word is not in the vocabulary, assign it a probability of 0.**Step 3**: Select the n best suggestions. There may be fewer than n. Hints edit_one_letter and edit_two_letters return *python sets*. Sets have a handy set.intersection feature To find the keys that have the highest values in a dictionary, you can use the Counter dictionary to create a Counter object from a regular dictionary. Then you can use Counter.most_common(n) to get the n most common keys. To find the intersection of two sets, you can use set.intersection or the & operator. If you are not as familiar with short circuit syntax (as shown above), feel free to use if else statements instead. To use an if statement to check of a set is empty, use 'if not x:' syntax
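For example, the suggestion logic described in Steps 1-3 could be written compactly with `or` and a `Counter`, roughly as below. This is a sketch of the same idea, not the graded cell, and it assumes `edit_one_letter`, `edit_two_letters`, `probs`, and `vocab` are defined as above:

```python
from collections import Counter

def get_corrections_sketch(word, probs, vocab, n=2):
    # Short-circuit: the first non-empty candidate set wins.
    suggestions = (
        ({word} & vocab)
        or (edit_one_letter(word) & vocab)
        or (edit_two_letters(word) & vocab)
        or {word}
    )
    best_words = Counter({w: probs.get(w, 0) for w in suggestions})
    return best_words.most_common(n)
```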
###Code
# UNQ_C10 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# UNIT TEST COMMENT: Candidate for Table Driven Tests
# GRADED FUNCTION: get_corrections
def get_corrections(word, probs, vocab, n=2, verbose = False):
'''
Input:
word: a user entered string to check for suggestions
probs: a dictionary that maps each word to its probability in the corpus
vocab: a set containing all the vocabulary
n: number of possible word corrections you want returned in the dictionary
Output:
n_best: a list of tuples with the most probable n corrected words and their probabilities.
'''
suggestions = []
n_best = []
### START CODE HERE ###
if(word in vocab):
suggestions.append(word)
n_best.append((word,probs[word]))
else:
tmp_edit_one_set = edit_one_letter(word)
tmp_edit_one_l = sorted(list(tmp_edit_one_set))
if(len(tmp_edit_one_l)>0):
for tmpword in tmp_edit_one_l:
if(tmpword in vocab):
suggestions.append(tmpword)
n_best.append((tmpword,probs[tmpword]))
else:
if(n>1):
tmp_edit_two_set = edit_two_letters(word)
tmp_edit_two_l = sorted(list(tmp_edit_two_set))
if(len(tmp_edit_two_l)>0):
for tmpword in tmp_edit_two_l:
if(tmpword in vocab):
suggestions.append(tmpword)
n_best.append((tmpword,probs[tmpword]))
if(len(suggestions)==0):
suggestions.append(word)
        n_best.append((word, 0))  # word not in the vocabulary, so assign it probability 0
### END CODE HERE ###
if verbose: print("entered word = ", word, "\nsuggestions = ", suggestions)
return n_best
# Test your implementation - feel free to try other words in my word
my_word = 'dys'
tmp_corrections = get_corrections(my_word, probs, vocab, 2, verbose=True) # keep verbose=True
for i, word_prob in enumerate(tmp_corrections):
print(f"word {i}: {word_prob[0]}, probability {word_prob[1]:.6f}")
# CODE REVIEW COMMENT: using "tmp_corrections" instead of "cors". "cors" is not defined
print(f"data type of corrections {type(tmp_corrections)}")
###Output
entered word = dys
suggestions = ['days', 'dye']
best = [('days', 0.0004103405826836274), ('dye', 1.865184466743761e-05)]
word 0: days, probability 0.000410
word 1: dye, probability 0.000019
data type of corrections <class 'list'>
###Markdown
Expected Output- Note: This expected output is for `my_word = 'dys'`. Also, keep `verbose=True````CPPentered word = dys suggestions = {'days', 'dye'}word 0: days, probability 0.000410word 1: dye, probability 0.000019data type of corrections ``` Part 4: Minimum Edit distanceNow that you have implemented your auto-correct, how do you evaluate the similarity between two strings? For example: 'waht' and 'what'Also how do you efficiently find the shortest path to go from the word, 'waht' to the word 'what'?You will implement a dynamic programming system that will tell you the minimum number of edits required to convert a string into another string. Part 4.1 Dynamic ProgrammingDynamic Programming breaks a problem down into subproblems which can be combined to form the final solution. Here, given a string source[0..i] and a string target[0..j], we will compute all the combinations of substrings[i, j] and calculate their edit distance. To do this efficiently, we will use a table to maintain the previously computed substrings and use those to calculate larger substrings.You have to create a matrix and update each element in the matrix as follows: $$\text{Initialization}$$\begin{align}D[0,0] &= 0 \\D[i,0] &= D[i-1,0] + del\_cost(source[i]) \tag{4}\\D[0,j] &= D[0,j-1] + ins\_cost(target[j]) \\\end{align} $$\text{Per Cell Operations}$$\begin{align} \\D[i,j] =min\begin{cases}D[i-1,j] + del\_cost\\D[i,j-1] + ins\_cost\\D[i-1,j-1] + \left\{\begin{matrix}rep\_cost; & if src[i]\neq tar[j]\\0 ; & if src[i]=tar[j]\end{matrix}\right.\end{cases}\tag{5}\end{align} So converting the source word **play** to the target word **stay**, using an input cost of one, a delete cost of 1, and replace cost of 2 would give you the following table: s t a y 0 1 2 3 4 p 1 2 3 4 5 l 2 3 4 5 6 a 3 4 5 4 5 y 4 5 6 5 4 The operations used in this algorithm are 'insert', 'delete', and 'replace'. These correspond to the functions that you defined earlier: insert_letter(), delete_letter() and replace_letter(). switch_letter() is not used here. The diagram below describes how to initialize the table. Each entry in D[i,j] represents the minimum cost of converting string source[0:i] to string target[0:j]. The first column is initialized to represent the cumulative cost of deleting the source characters to convert string "EER" to "". The first row is initialized to represent the cumulative cost of inserting the target characters to convert from "" to "NEAR". Figure 6 Initializing Distance Matrix Filling in the remainder of the table utilizes the 'Per Cell Operations' in the equation (5) above. Note, the diagram below includes in the table some of the 3 sub-calculations shown in light grey. Only 'min' of those operations is stored in the table in the `min_edit_distance()` function. Figure 7 Filling Distance Matrix Note that the formula for $D[i,j]$ shown in the image is equivalent to:\begin{align} \\D[i,j] =min\begin{cases}D[i-1,j] + del\_cost\\D[i,j-1] + ins\_cost\\D[i-1,j-1] + \left\{\begin{matrix}rep\_cost; & if src[i]\neq tar[j]\\0 ; & if src[i]=tar[j]\end{matrix}\right.\end{cases}\tag{5}\end{align}The variable `sub_cost` (for substitution cost) is the same as `rep_cost`; replacement cost. We will stick with the term "replace" whenever possible. Below are some examples of cells where replacement is used. This also shows the minimum path from the lower right final position where "EER" has been replaced by "NEAR" back to the start. This provides a starting point for the optional 'backtrace' algorithm below. 
Figure 8 Examples Distance Matrix Exercise 11 Again, the word "substitution" appears in the figure, but think of this as "replacement". **Instructions**: Implement the function below to get the minimum number of edits required given a source string and a target string. Hints The range(start, stop, step) function excludes 'stop' from its output
###Code
# UNQ_C11 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# GRADED FUNCTION: min_edit_distance
def min_edit_distance(source, target, ins_cost = 1, del_cost = 1, rep_cost = 2):
'''
Input:
source: a string corresponding to the string you are starting with
target: a string corresponding to the string you want to end with
ins_cost: an integer setting the insert cost
del_cost: an integer setting the delete cost
rep_cost: an integer setting the replace cost
Output:
D: a matrix of len(source)+1 by len(target)+1 containing minimum edit distances
med: the minimum edit distance (med) required to convert the source string to the target
'''
# use deletion and insert cost as 1
m = len(source)
n = len(target)
#initialize cost matrix with zeros and dimensions (m+1,n+1)
D = np.zeros((m+1, n+1), dtype=int)
### START CODE HERE (Replace instances of 'None' with your code) ###
# Fill in column 0, from row 1 to row m, both inclusive
for row in range(1,m+1): # Replace None with the proper range
D[row,0] = D[row-1,0]+del_cost
# Fill in row 0, for all columns from 1 to n, both inclusive
for col in range(1,n+1): # Replace None with the proper range
D[0,col] = D[0,col-1]+ins_cost
# Loop through row 1 to row m, both inclusive
for row in range(1,m+1):
# Loop through column 1 to column n, both inclusive
for col in range(1,n+1):
# Intialize r_cost to the 'replace' cost that is passed into this function
r_cost = rep_cost
# Check to see if source character at the previous row
# matches the target character at the previous column,
if source[row-1]==target[col-1]:
# Update the replacement cost to 0 if source and target are the same
r_cost = 0
# Update the cost at row, col based on previous entries in the cost matrix
# Refer to the equation calculate for D[i,j] (the minimum of three calculated costs)
D[row,col] = min(D[row-1,col]+del_cost,D[row,col-1]+ins_cost,D[row-1,col-1]+r_cost)
# Set the minimum edit distance with the cost found at row m, column n
med = D[m,n]
### END CODE HERE ###
return D, med
#DO NOT MODIFY THIS CELL
# testing your implementation
source = 'play'
target = 'stay'
matrix, min_edits = min_edit_distance(source, target)
print("minimum edits: ",min_edits, "\n")
idx = list('#' + source)
cols = list('#' + target)
df = pd.DataFrame(matrix, index=idx, columns= cols)
print(df)
###Output
minimum edits: 4
# s t a y
# 0 1 2 3 4
p 1 2 3 4 5
l 2 3 4 5 6
a 3 4 5 4 5
y 4 5 6 5 4
###Markdown
**Expected Results:** ```CPPminimum edits: 4 s t a y 0 1 2 3 4p 1 2 3 4 5l 2 3 4 5 6a 3 4 5 4 5y 4 5 6 5 4```
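As an optional sanity check (a sketch, not part of the graded code), the same recurrence can be written recursively with memoization; it should agree with the table-based `min_edit_distance` on small inputs:

```python
from functools import lru_cache

def med_recursive(source, target, ins_cost=1, del_cost=1, rep_cost=2):
    @lru_cache(maxsize=None)
    def d(i, j):
        if i == 0:                      # insert the first j target characters
            return j * ins_cost
        if j == 0:                      # delete the first i source characters
            return i * del_cost
        r = 0 if source[i - 1] == target[j - 1] else rep_cost
        return min(d(i - 1, j) + del_cost,
                   d(i, j - 1) + ins_cost,
                   d(i - 1, j - 1) + r)
    return d(len(source), len(target))

print(med_recursive('play', 'stay'))   # 4, matching the table above
print(med_recursive('eer', 'near'))    # 3
```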
###Code
#DO NOT MODIFY THIS CELL
# testing your implementation
source = 'eer'
target = 'near'
matrix, min_edits = min_edit_distance(source, target)
print("minimum edits: ",min_edits, "\n")
idx = list(source)
idx.insert(0, '#')
cols = list(target)
cols.insert(0, '#')
df = pd.DataFrame(matrix, index=idx, columns= cols)
print(df)
###Output
minimum edits: 3
# n e a r
# 0 1 2 3 4
e 1 2 1 2 3
e 2 3 2 3 4
r 3 4 3 4 3
###Markdown
**Expected Results** ```CPPminimum edits: 3 n e a r 0 1 2 3 4e 1 2 1 2 3e 2 3 2 3 4r 3 4 3 4 3``` We can now test several of our routines at once:
###Code
source = "eer"
targets = edit_one_letter(source,allow_switches = False) #disable switches since min_edit_distance does not include them
for t in targets:
_, min_edits = min_edit_distance(source, t,1,1,1) # set ins, del, sub costs all to one
if min_edits != 1: print(source, t, min_edits)
###Output
_____no_output_____
###Markdown
**Expected Results** ```CPP(empty)``` The 'replace()' routine uses all letters a-z, one of which returns the original word.
###Code
source = "eer"
targets = edit_two_letters(source,allow_switches = False) #disable switches since min_edit_distance does not include them
for t in targets:
_, min_edits = min_edit_distance(source, t,1,1,1) # set ins, del, sub costs all to one
if min_edits != 2 and min_edits != 1: print(source, t, min_edits)
###Output
eer erfe 3
eer jre 3
eer erne 3
eer bre 3
eer sre 3
eer ore 3
eer erte 3
eer xre 3
eer erwe 3
eer fre 3
eer erle 3
eer erqe 3
eer erze 3
eer erve 3
eer erie 3
eer zre 3
eer erce 3
eer lre 3
eer erae 3
eer erde 3
eer eroe 3
eer hre 3
eer wre 3
eer erue 3
eer mre 3
eer kre 3
eer dre 3
eer rre 3
eer tre 3
eer erhe 3
eer erge 3
eer eer 0
eer are 3
eer ire 3
eer yre 3
eer cre 3
eer pre 3
eer ure 3
eer nre 3
eer erbe 3
eer erse 3
eer erke 3
eer erxe 3
eer gre 3
eer erje 3
eer vre 3
eer qre 3
eer erye 3
eer erpe 3
eer erme 3
###Markdown
**Expected Results** ```CPPeer eer 0``` We have to allow single edits here because some two_edits will restore a single edit. Submission Make sure you submit your assignment before you modify anything below. Part 5: Optional - Backtrace Once you have computed your matrix using minimum edit distance, how would you find the shortest path from the top left corner to the bottom right corner? Note that you could use the backtrace algorithm. Try to find the shortest path given the matrix that your `min_edit_distance` function returned. You can use these [lecture slides on minimum edit distance](https://web.stanford.edu/class/cs124/lec/med.pdf) by Dan Jurafsky to learn about the algorithm for backtrace.
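One possible backtrace, shown as a sketch using the matrix conventions of `min_edit_distance` above (the tie-breaking order of diagonal, then delete, then insert is an arbitrary choice):

```python
def backtrace(source, target, D, ins_cost=1, del_cost=1, rep_cost=2):
    # Walk from the bottom-right cell of the cost matrix back to (0, 0),
    # recording which operation produced each cell.
    i, j = len(source), len(target)
    path = []
    while i > 0 or j > 0:
        r_cost = 0 if (i > 0 and j > 0 and source[i-1] == target[j-1]) else rep_cost
        if i > 0 and j > 0 and D[i, j] == D[i-1, j-1] + r_cost:
            path.append(('keep' if r_cost == 0 else 'replace', source[i-1], target[j-1]))
            i, j = i - 1, j - 1
        elif i > 0 and D[i, j] == D[i-1, j] + del_cost:
            path.append(('delete', source[i-1], ''))
            i -= 1
        else:
            path.append(('insert', '', target[j-1]))
            j -= 1
    return path[::-1]

matrix, _ = min_edit_distance('eer', 'near')
for step in backtrace('eer', 'near', matrix):
    print(step)   # insert 'n', keep 'e', replace 'e' -> 'a', keep 'r'
```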
###Code
# Experiment with back trace - insert your code here
###Output
_____no_output_____ |
examples/ike_simple.ipynb | ###Markdown
Cloud-optimized loading of NetCDF4/HDF5 in XarrayUsing the new Fsspec "ReferenceFileSystem" functionality
###Code
import xarray as xr
import fsspec
mapper = fsspec.get_mapper("reference://",
fo='s3://pangeo-data-uswest2/esip/adcirc/adcirc_01d_offsets.json',
target_options={'requester_pays': True},
remote_protocol='s3',
remote_options={'requester_pays': True})
ds = xr.open_dataset(mapper, engine="zarr", backend_kwargs={"consolidated": False})
ds.nbytes/1e9
ds.zeta.encoding
ds.zeta
###Output
_____no_output_____
###Markdown
Cloud-optimized loading of NetCDF4/HDF5 in XarrayUsing the new Fsspec "ReferenceFileSystem" functionality
###Code
import xarray as xr
import fsspec
mapper = fsspec.get_mapper("reference://",
references='s3://pangeo-data-uswest2/esip/adcirc/adcirc_01d_offsets.json',
ref_storage_args={'requester_pays': True},
target_protocol='s3',
target_options={'requester_pays': True})
ds = xr.open_zarr(mapper)
ds.nbytes/1e9
ds.zeta.encoding
ds.zeta
###Output
_____no_output_____ |
notebooks/zinb_regression.ipynb | ###Markdown
Regression
###Code
import pandas as pd
import numpy as np
import os
import statsmodels.api as sm
from google.colab import drive
from statsmodels.genmod import families
import statsmodels.discrete.count_model as reg_models
drive.mount('/content/drive')
root_path = "/content/drive/MyDrive/University/Dissertation"
regression_path = "/regression"
regression_file = "/regression_table_with_persp.csv"
reg_df = pd.read_csv(root_path + regression_path + regression_file,
parse_dates=['date'])
reg_df['hatebase_proportion'] = reg_df['tweets_containing_slurs'] / reg_df['total_tweets']
reg_df['perspective_proportion'] = reg_df['tweets_flagged_perspective'] / reg_df['total_perspective_tweets']
# Set to 0 where there's 0 tweets received
reg_df['hatebase_proportion'].fillna(0.0, inplace=True)
reg_df['perspective_proportion'].fillna(0.0, inplace=True)
reg_df['player_rating'].fillna(0.0, inplace=True)
reg_df['player_rating_in_previous_game'].fillna(0.0, inplace=True)
reg_df['club_coefficient'].fillna(0.0, inplace=True)
reg_df.head()
reg_df['day_of_week'] = reg_df['date'].dt.day_name()
reg_df["featured"] = reg_df["featured"].astype(int)
reg_df["featured_in_previous_game"] = reg_df["featured_in_previous_game"].astype(int)
reg_df["matchday"] = reg_df["matchday"].astype(int)
reg_df["red_card"] = reg_df["red_card"].astype(int)
reg_df["penalty"] = reg_df["penalty"].astype(int)
reg_df["penalty_outcome"] = reg_df["penalty_outcome"].astype(int)
reg_df = pd.get_dummies(reg_df, columns=['ethnicity'])
reg_df = pd.get_dummies(reg_df, columns=['result'])
reg_df = pd.get_dummies(reg_df, columns=['result_in_previous_game'])
reg_df = pd.get_dummies(reg_df, columns=['day_of_week'])
reg_df = pd.get_dummies(reg_df, columns=['country'])
reg_df.drop(columns=['country_ranking_points', 'club', 'name', 'date', 'opponent', 'round', 'ethnicity_white', 'result_D', 'result_W'], inplace=True)
print('Hatebase absolute: Mean='+str(np.mean(reg_df['tweets_containing_slurs'])) + ' Variance='+str(np.var(reg_df['tweets_containing_slurs'])))
print('Hatebase proportion: Mean='+str(np.mean(reg_df['hatebase_proportion'])) + ' Variance='+str(np.var(reg_df['hatebase_proportion'])))
print('Perspective absolute: Mean='+str(np.mean(reg_df['tweets_flagged_perspective'])) + ' Variance='+str(np.var(reg_df['tweets_flagged_perspective'])))
print('Perspective proportion: Mean='+str(np.mean(reg_df['perspective_proportion'])) + ' Variance='+str(np.var(reg_df['perspective_proportion'])))
num_obs = len(reg_df)
h_zeroes_abs = len(reg_df[reg_df['tweets_containing_slurs'] == 0])
h_zeroes_prop = len(reg_df[reg_df['hatebase_proportion'] == 0])
p_zeroes_abs = len(reg_df[reg_df['tweets_flagged_perspective'] == 0])
p_zeroes_prop = len(reg_df[reg_df['perspective_proportion'] == 0])
print(f'Total observations: {num_obs}')
print(f'Hatebase absolute: {h_zeroes_abs} zeroes, {100*(h_zeroes_abs / num_obs)}%')
print(f'Hatebase proportion: {h_zeroes_prop} zeroes, {100*(h_zeroes_prop / num_obs)}%')
print(f'Perspective absolute: {p_zeroes_abs} zeroes, {100*(p_zeroes_abs / num_obs)}%')
print(f'Perspective proportion: {p_zeroes_prop} zeroes, {100*(p_zeroes_prop / num_obs)}%')
X = reg_df[['club_coefficient', 'ethnicity_non_white', 'result_L', 'pen', 'day_of_week_Monday', 'day_of_week_Tuesday', 'day_of_week_Wednesday', 'day_of_week_Thursday', 'day_of_week_Friday', 'day_of_week_Saturday', 'day_of_week_Sunday']]
# X = reg_df[['club_coefficient', 'ethnicity_non_white', 'result_L', 'pen']]
X = sm.add_constant(X)
# ZINB for Hatebase absolute number of tweets (since mostly zeroes, and overdispersed)
h_abs_model=reg_models.ZeroInflatedNegativeBinomialP(reg_df['tweets_containing_slurs'], X)
h_abs_res = h_abs_model.fit_regularized()
print(h_abs_res.summary())
# ZIGP for Hatebase proportion of tweets (since mostly zeroes, underdispersed)
h_prop_model=reg_models.ZeroInflatedGeneralizedPoisson(reg_df['hatebase_proportion'], X)
h_prop_res = h_prop_model.fit_regularized()
print(h_prop_res.summary())
# ZINB for Perspective absolute number of tweets (since mostly zeroes, and overdispersed)
p_abs_model=reg_models.ZeroInflatedNegativeBinomialP(reg_df['tweets_flagged_perspective'], X)
p_abs_res = p_abs_model.fit_regularized()
print(p_abs_res.summary())
# ZIGP for Perspective proportion of tweets (since mostly zeroes, underdispersed)
p_prop_model=reg_models.ZeroInflatedGeneralizedPoisson(reg_df['perspective_proportion'], X)
p_prop_res = p_prop_model.fit_regularized()
print(p_prop_res.summary())
###Output
/usr/local/lib/python3.7/dist-packages/statsmodels/discrete/discrete_model.py:1396: RuntimeWarning: invalid value encountered in log
np.log(a1) - gammaln(endog + 1) - a2 / a1)
/usr/local/lib/python3.7/dist-packages/statsmodels/discrete/discrete_model.py:1684: RuntimeWarning: overflow encountered in exp
return np.exp(linpred)
/usr/local/lib/python3.7/dist-packages/statsmodels/discrete/discrete_model.py:1394: RuntimeWarning: invalid value encountered in multiply
a2 = mu + (a1 - 1) * endog
/usr/local/lib/python3.7/dist-packages/statsmodels/discrete/discrete_model.py:1394: RuntimeWarning: invalid value encountered in add
a2 = mu + (a1 - 1) * endog
/usr/local/lib/python3.7/dist-packages/statsmodels/discrete/discrete_model.py:1393: RuntimeWarning: overflow encountered in multiply
a1 = 1 + alpha * mu_p
/usr/local/lib/python3.7/dist-packages/statsmodels/discrete/discrete_model.py:1396: RuntimeWarning: invalid value encountered in true_divide
np.log(a1) - gammaln(endog + 1) - a2 / a1)
|
lessons/5 ETLPipelines/11_duplicatedata_exercise/11_duplicatedata_exercise-solution.ipynb | ###Markdown
Duplicate DataA data set might have duplicate data: in other words, the same record is represented multiple times. Sometimes, it's easy to find and eliminate duplicate data like when two records are exactly the same. At other times, like what was discussed in the video, duplicate data is hard to spot. Exercise 1From the World Bank GDP data, count the number of countries that have had a project totalamt greater than 1 billion dollars (1,000,000,000). To get the count, you'll have to remove duplicate data rows.
###Code
import pandas as pd
# read in the projects data set and do some basic wrangling
projects = pd.read_csv('../data/projects_data.csv', dtype=str)
projects.drop('Unnamed: 56', axis=1, inplace=True)
projects['totalamt'] = pd.to_numeric(projects['totalamt'].str.replace(',', ''))
projects['countryname'] = projects['countryname'].str.split(';', expand=True)[0]
projects['boardapprovaldate'] = pd.to_datetime(projects['boardapprovaldate'])
# TODO: filter the data frame for projects over 1 billion dollars
# TODO: count the number of unique countries in the results
projects[projects['totalamt'] > 1000000000]['countryname'].nunique()
###Output
_____no_output_____
###Markdown
Exercise 2 (challenge)This exercise is more challenging. The projects data set contains data about Yugoslavia, which was an Eastern European country until 1992. Yugoslavia eventually broke up into 7 countries: Bosnia and Herzegovina, Croatia, Kosovo, Macedonia, Montenegro, Serbia, and Slovenia.But the projects dataset has some ambiguity in how it treats Yugoslavia and the 7 countries that came from Yugoslavia. Your task is to find Yugoslavia projects that are probably represented multiple times in the data set.
###Code
# TODO: output all projects for the 'Socialist Federal Republic of Yugoslavia'
# HINT: You can use the exact country name or use the pandas str.contains() method to search for Yugoslavia
projects[projects['countryname'].str.contains('Yugoslavia')]
###Output
_____no_output_____
###Markdown
Yugoslavia officially ended on [April 27th, 1992](https://en.wikipedia.org/wiki/Yugoslavia). In the code cell below, filter for projects with a 'boardapprovaldate' prior to April 27th, 1992 **and** with 'countryname' Bosnia and Herzegovina, Croatia, Kosovo, Macedonia, Serbia **or** Slovenia. You'll see there are a total of 12 projects in the data set that match this criteria. Save the results in the republics variable
###Code
import datetime
# TODO: filter the projects data set for project boardapprovaldate prior to April 27th, 1992 AND with countryname
# of either 'Bosnia and Herzegovina', 'Croatia', 'Kosovo', 'Macedonia', 'Serbia', or 'Sovenia'. Store the
# results in the republics variable
republics = projects[(projects['boardapprovaldate'] < '1992, 4, 27') &
((projects['countryname'].str.contains('Bosnia')) |
(projects['countryname'].str.contains('Croatia')) |
(projects['countryname'].str.contains('Kosovo')) |
(projects['countryname'].str.contains('Macedonia')) |
(projects['countryname'].str.contains('Montenegro')) |
(projects['countryname'].str.contains('Serbia')) |
(projects['countryname'].str.contains('Slovenia')))][['regionname',
'countryname',
'lendinginstr',
'totalamt',
'boardapprovaldate',
'location',
'GeoLocID',
'GeoLocName',
'Latitude',
'Longitude',
'Country',
'project_name']].sort_values('boardapprovaldate')
republics
republics.countryname.unique()
###Output
_____no_output_____
###Markdown
Are these projects also represented in the data labeled Yugoslavia? In the code cell below, filter for Yugoslavia projects approved between February 1st, 1980 and May 23rd, 1989 which are the minimum and maximum dates in the results above. Store the results in the yugoslavia variable.The goal is to see if there are any projects represented more than once in the data set.
###Code
# Return Yugoslavia projects that might overlap with the other country projects
yugoslavia = projects[(projects['countryname'].str.contains('Yugoslavia')) &
(projects['boardapprovaldate'] >= '1980, 2, 1') &
(projects['boardapprovaldate'] <= '1989, 5, 23')][['regionname', 'countryname',
'lendinginstr',
'totalamt',
'boardapprovaldate',
'location',
'GeoLocID',
'GeoLocName',
'Latitude',
'Longitude',
'Country',
'project_name']].sort_values('boardapprovaldate')
yugoslavia.shape
###Output
_____no_output_____
###Markdown
And as a final step, try to see if there are any projects in the republics variable and yugoslavia variable that could be the same project. There are multiple ways to do that. As a suggestion, find unique dates in the republics variable. Then separately find unique dates in the yugoslavia variable. Concatenate (i.e. append) the results together. And then count the number of times each date occurs in this list. If a date occurs twice, that means the same boardapprovaldate appeared in both the Yugoslavia data as well as in the republics data. You should find that there are four suspicious cases:* July 26th, 1983* March 31st, 1987* October 13th, 1987* May 23rd, 1989
###Code
import numpy as np
# TODO: find the unique dates in the republics variable
republic_unique_dates = republics['boardapprovaldate'].unique()
# TODO: find the unique dates in the yugoslavia variable
yugoslavia_unique_dates = yugoslavia['boardapprovaldate'].unique()
# TODO: make a list of the results appending one list to the other
dates = np.append(republic_unique_dates, yugoslavia_unique_dates)
# TODO: print out the dates that appeared twice in the results
unique_dates, count = np.unique(dates, return_counts=True)
for i in range(len(unique_dates)):
if count[i] == 2:
print(unique_dates[i])
###Output
1983-07-26 00:00:00+00:00
1987-03-31 00:00:00+00:00
1987-10-13 00:00:00+00:00
1989-05-23 00:00:00+00:00
###Markdown
ConclusionOn July 26th, 1983, for example, projects were approved for Bosnia and Herzegovina, Croatia, Macedonia, Slovenia, and Yugoslavia. The code below shows the projects for that date. You'll notice that Yugoslavia had two projects, one of which was called "Power Transmission Project (03) Energy Managem...". The projects in the other countries were all called "POWER TRANS.III". This looks like a case of duplicate data. What you end up doing with this knowledge would depend on the context. For example, if you wanted to get a true count for the total number of projects in the data set, should all of these projects be counted as one project? Run the code cell below to see the projects in question.
###Code
import datetime
# run this code cell to see the duplicate data
pd.concat([yugoslavia[yugoslavia['boardapprovaldate'] == datetime.date(1983, 7, 26)], republics[republics['boardapprovaldate'] == datetime.date(1983, 7, 26)]])
###Output
_____no_output_____ |
Deep Learning/Assignments/Assignment 6/homework6.ipynb | ###Markdown
`X` is an `NxK` float matrix where each row (`X[i]`) corresponds to a data point.
###Code
def multivariate_gaussian(X, mean, cov):
d = X.shape[0]
left_term = (2*np.pi)**(-d/2)*np.linalg.det(cov)**(-0.5)
right_term = np.exp(-0.5*np.dot(np.dot((X-mean).T, np.linalg.inv(cov)), X-mean))
return left_term * right_term
def gmm(X, n_classes, n_iter):
n, d = X.shape
# Weights (pi)
pi = np.array([1./ n_classes] * n_classes)
# Mean (mu)
rand_idx = np.random.choice(n, n_classes, replace = False)
mean = np.array(X[rand_idx, :])
# Covariance (sigma)
cov = np.array([np.eye(d)] * n_classes)
# Responsiblities (gamma)
res = np.zeros((n, n_classes))
for _ in range(n_iter):
# E-Step
for i in range(n_classes):
for j in range(n):
res[j, i] = pi[i] * multivariate_gaussian(X[j], mean[i], cov[i])
res = (res.T / np.sum(res, axis = 1)).T # normalize
# M-Step
N_K = np.sum(res, axis=0)
for i in range(n_classes):
# Update mean
mean[i] = 1. / N_K[i] * np.sum(res[:, i].reshape(n, 1) * X , axis=0)
# Update covariance
cov[i] = (1. / N_K[i]) * np.dot((res[:, i].reshape(n,1) * (X - mean[i]) ).T, X- mean[i])
# Update weights
pi = N_K / np.sum(res)
# Pick prediction with largest probability
class_assignments = np.argmax(res, axis=1)
return class_assignments, mean, cov
# Hyper parameters
num_classes = 3
num_iteration = 2000
class_assignments, mean, cov = gmm(X, num_classes, num_iteration) # You may want to tune the number of iterations
###Output
_____no_output_____
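A quick way to check that EM has settled (a sketch, not part of the assignment) is to evaluate the data log-likelihood under the fitted mixture; it should be stable across reruns if `num_iteration` is large enough. The mixture weights below are re-estimated from the hard class assignments, which is an approximation of the weights EM maintains internally:

```python
# Approximate mixture weights from the hard assignments returned by gmm().
weights = np.bincount(class_assignments, minlength=num_classes) / len(X)
log_lik = 0.0
for x_i in X:
    # Mixture density at x_i, reusing the multivariate_gaussian helper defined above.
    p = sum(weights[k] * multivariate_gaussian(x_i, mean[k], cov[k])
            for k in range(num_classes))
    log_lik += np.log(p)
print('data log-likelihood:', log_lik)
```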
###Markdown
Visualization: a Cross Section
###Code
plt.figure(figsize=(9,4))
plt.subplot(121)
for k in range(3):
plt.scatter(X[class_assignments==k, 2], X[class_assignments==k, 1], s=2)
plt.subplot(122)
for k, class_name in enumerate(np.unique(Y)):
plt.scatter(X[Y==class_name, 2], X[Y==class_name, 1], s=2)
plt.savefig(f"figures/cs_{num_classes}_{num_iteration}.png")
###Output
_____no_output_____
###Markdown
Visualization: PCA Projection
###Code
evals, evecs = np.linalg.eigh(np.cov(X.T))
to_crd = lambda x: ((x-x.mean(axis=0))@evecs)[:,-2:]
crds = to_crd(X)
plt.figure(figsize=(9,4))
plt.subplot(121)
for k in range(3):
plt.scatter(crds[class_assignments==k, 0], crds[class_assignments==k, 1], s=2)
plt.scatter(to_crd(mean)[:,0], to_crd(mean)[:,1], s=30, marker='+')
plt.subplot(122)
for k in np.unique(Y):
plt.scatter(crds[Y==k, 0], crds[Y==k, 1], s=2)
plt.savefig(f"figures/pca_{num_classes}_{num_iteration}.png")
###Output
_____no_output_____ |
ingreso de datos.ipynb | ###Markdown
validacion de argumentos en una funcion
###Code
def validacion(x=None, y=None):
if x==None or y==None:
print("tienes q ingresar los dos datos")
return
return x*y
validacion()
validacion(1)
###Output
you need to enter both values
###Markdown
when we don't know the length of the argument list, we can do this....
###Code
def arg(*lista1):
for i in lista1:
print(i)
x=arg(1,5,6,8,12,14)
x
def diccionario(**dic):
for i in dic:
print(i)
w=diccionario(nombre="samir",edad=25,carrera="sistemas")
w
###Output
nombre
edad
carrera
###Markdown
THINGS THAT NEED TO BE DONE WHEN STARTING A SYSTEM FOR THE USER
###Code
try:
    c=float(input("enter your age"))
    print(c)
except:
    print("you did not enter what was requested")
while(True):
    try:
        c=int(input("enter your age"))
        print(c)
    except:
        print("ERROR, enter the requested value again")
    else:
        print("You logged in successfully")
        break
###Output
_____no_output_____ |
notebooks/Chapter_24/01_Bivariate_Normal_Distribution.ipynb | ###Markdown
Bivariate Normal Distribution The multivariate normal distribution is defined in terms of a mean vector and a covariance matrix. The units of covariance are often hard to understand, as they are the product of the units of the two variables.Normalizing the covariance so that it is easier to interpret is a good idea. As you have seen in exercises, for jointly distributed random variables $X$ and $Y$ the *correlation* between $X$ and $Y$ is defined as$$r_{X,Y} ~ = ~ \frac{Cov(X, Y)}{\sigma_X\sigma_Y} ~ = ~ E\Big{(} \frac{X-\mu_X}{\sigma_X} \cdot \frac{Y-\mu_Y}{\sigma_Y} \Big{)}~ = ~ E(X^*Y^*)$$where $X^*$ is $X$ in standard units and $Y^*$ is $Y$ in standard units. Properties of Correlation You showed all of these in exercises.- $r_{X,Y}$ depends only on standard units and hence is a pure number with no units- $r_{X,Y} = r_{Y,X}$- $-1 \le r_{X,Y} \le 1$ - If $Y = aX + b$ then $r_{X,Y}$ is $1$ or $-1$ according to whether the sign of $a$ is positive or negative. We say that $r_{X,Y}$ measures the *linear association* between $X$ and $Y$. Variance of a Sum Rewrite the formula for correlation to see that $$Cov(X, Y) ~ = ~ r_{X,Y}\sigma_X\sigma_Y$$So the variance of $X+Y$ is$$\sigma_{X+Y}^2 ~ = ~ \sigma_X^2 + \sigma_Y^2 + 2r_{X,Y}\sigma_X\sigma_Y$$Notice the parallel with the formula for the length of the sum of two vectors, with correlation playing the role of the cosine of the angle between two vectors. If the angle is 90 degrees, the the cosine is 0. This corresponds to correlation being zero and hence the random variables being uncorrelated. We will visualize this idea in the case where the joint distribution of $X$ and $Y$ is bivariate normal. Standard Bivariate Normal Distribution Let $X$ and $Z$ be independent standard normal variables, that is, bivariate normal random variables with mean vector $\mathbf{0}$ and covariance matrix equal to the identity. Now fix a number $\rho$ (that's the Greek letter rho, the lower case r) so that $-1 < \rho < 1$, and let$$\mathbf{A} ~ = ~ \begin{bmatrix}1 & 0 \\\rho & \sqrt{1 - \rho^2}\end{bmatrix}$$Define a new random variable $Y = \rho X + \sqrt{1-\rho^2}Z$, and notice that$$\begin{bmatrix}X \\Y\end{bmatrix} ~ = ~\begin{bmatrix}1 & 0 \\\rho & \sqrt{1 - \rho^2}\end{bmatrix}\begin{bmatrix}X \\Z\end{bmatrix}~ = ~ \mathbf{A}\begin{bmatrix}X \\Z\end{bmatrix}$$So $X$ and $Y$ have the bivariate normal distribution with mean vector $\mathbf{0}$ and covariance matrix$$\mathbf{AIA}^T ~ = ~ \begin{bmatrix}1 & 0 \\\rho & \sqrt{1 - \rho^2}\end{bmatrix}\begin{bmatrix}1 & \rho \\0 & \sqrt{1 - \rho^2}\end{bmatrix}~ = ~ \begin{bmatrix}1 & \rho \\\rho & 1\end{bmatrix}$$We say that $X$ and $Y$ have the *standard bivariate normal distribution with correlation $\rho$*.The graph below shows the empirical distribution of 1000 $(X, Y)$ points in the case $\rho = 0.6$. You can change the value of $rho$ and see how the scatter diagram changes. It will remind you of numerous such simulations in Data 8.
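The variance-of-a-sum formula above can be checked numerically with a quick simulation (a sketch only; it reuses the same `np` and `stats` imports as the cell below, and the numbers will vary slightly from run to run):

```python
# Simulate correlated X and Y and compare the two sides of the variance formula.
x = stats.norm.rvs(0, 1, size=100000)
z = stats.norm.rvs(0, 1, size=100000)
rho = 0.6
y = rho * x + np.sqrt(1 - rho**2) * z
lhs = np.var(x + y)
rhs = np.var(x) + np.var(y) + 2 * np.corrcoef(x, y)[0, 1] * np.std(x) * np.std(y)
print(lhs, rhs)   # both close to 1 + 1 + 2*0.6 = 3.2
```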
###Code
# Plotting parameters
plt.figure(figsize=(5, 5))
plt.axes().set_aspect('equal')
plt.xlabel('$X$')
plt.ylabel('$Y$', rotation=0)
plt.xticks(np.arange(-4, 4.1))
plt.yticks(np.arange(-4, 4.1))
# X, Z, and Y
x = stats.norm.rvs(0, 1, size=1000)
z = stats.norm.rvs(0, 1, size=1000)
rho = 0.6
y = rho*x + np.sqrt((1-rho**2))*z
plt.scatter(x, y, color='darkblue', s=10);
###Output
_____no_output_____
###Markdown
Correlation as a Cosine We have defined$$Y ~ = ~ \rho X + \sqrt{1 - \rho^2} Z$$where $X$ and $Z$ are i.i.d. standard normal.Let's understand this construction geometrically. A good place to start is the joint density of $X$ and $Z$, which has circular symmetry.
###Code
# HIDDEN
Plot_bivariate_normal([0, 0], [[1, 0], [0, 1]])
plt.xlabel('$X$')
plt.ylabel('$Z$')
plt.gca().set_zlabel('$f(x, z)$')
plt.title('Standard Bivariate Normal Distribution, Correlation = 0');
###Output
_____no_output_____
###Markdown
The $X$ and $Z$ axes are orthogonal. Let's see what happens if we twist them. Take any positive angle $\theta$ degrees and draw a new axis at angle $\theta$ to the original $X$ axis. Every point $(X, Z)$ has a *projection* onto this axis. The figure below shows the projection of the point $(X, Z) = (1, 2)$ onto the gold axis which is at an angle of $\theta$ degress to the $X$ axis. The blue segment is the value of $X$. You get that by dropping the perpendicular from $(1, 2)$ to the horizontal axis. That's called *projecting* $(1, 2)$ onto the horizontal axis. The red segment is the projection of $(1, 2)$ onto the gold axes, obtained by dropping the perpendicular from $(1, 2)$ to the gold axis.Vary the values of $\theta$ in the cell below to see how the projection changes as the gold axis rotates.
###Code
theta = 20
projection_1_2(theta)
###Output
_____no_output_____
###Markdown
Let $Y$ be the length of the red segment, and remember that $X$ is the length of the blue segment. When $\theta$ is very small, $Y$ is almost equal to $X$. When $\theta$ approaches 90 degrees, $Y$ is almost equal to $Z$.A little trigonometry shows that $Y ~ = ~ X \cos(\theta) + Z\sin(\theta)$.
###Code
projection_trig()
###Output
_____no_output_____
###Markdown
Thus$$Y ~ = ~ X\cos(\theta) + Z\sin(\theta) ~ = ~ \rho X + \sqrt{1 - \rho^2}Z$$where $\rho = \cos(\theta)$.The sequence of graphs below illustrates the transformation for $\theta = 30$ degrees.
###Code
theta = 30
projection_1_2(theta)
###Output
_____no_output_____
###Markdown
The bivariate normal distribution is the joint distribution of the blue and red lengths $X$ and $Y$ when the original point $(X, Z)$ has i.i.d. standard normal coordinates. This transforms the circular contours of the joint density surface of $(X, Z)$ into the elliptical contours of the joint density surface of $(X, Y)$.
###Code
cos(theta), (3**0.5)/2
rho = cos(theta)
Plot_bivariate_normal([0, 0], [[1, rho], [rho, 1]])
plt.title('Standard Bivariate Normal Distribution, Correlation = '+str(round(rho, 2)));
###Output
_____no_output_____
###Markdown
Small $\theta$ As we observed earlier, when $\theta$ is very small there is hardly any change in the position of the axis. So $X$ and $Y$ are almost equal.
###Code
theta = 2
projection_1_2(theta)
###Output
_____no_output_____
###Markdown
The bivariate normal density of $X$ and $Y$, therefore, is essentially confined to the $X = Y$ line. The correlation $\cos(\theta)$ is large because $\theta$ is small; it is more than 0.999. You can see the plotting function having trouble rendering this joint density surface.
###Code
rho = cos(theta)
rho
Plot_bivariate_normal([0, 0], [[1, rho], [rho, 1]])
###Output
_____no_output_____
###Markdown
Orthogonality and Independence When $\theta$ is 90 degrees, the gold axis is orthogonal to the $X$ axis and $Y$ is equal to $Z$ which is independent of $X$.
###Code
theta = 90
projection_1_2(theta)
###Output
_____no_output_____ |
DCGAN_model.ipynb | ###Markdown
Synthetic Image Generation with DCGANs in Keras 1. Import Libraries
###Code
%matplotlib inline
import tensorflow as tf
from tensorflow import keras
import numpy as np
import plot_utils
import matplotlib.pyplot as plt
from tqdm import tqdm
print('Tensorflow version:', tf.__version__)
###Output
Tensorflow version: 2.2.0
###Markdown
2. Loading data and Preprocessing the Data I'm using the built-in Fashion-MNIST data from `tf.keras.datasets`.
###Code
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.fashion_mnist.load_data()
x_train = x_train.astype(np.float32) / 255.0
x_test = x_test.astype(np.float32) / 255.0
plt.figure(figsize=(10,10))
for i in range(25):
plt.subplot(5,5,i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(x_train[i], cmap=plt.cm.binary)
plt.show()
###Output
_____no_output_____
###Markdown
3. Creating Batches of Training Data For now, experimenting with a batch size of 32.
###Code
batch_size = 32
# This dataset fills a buffer with buffer_size elements,
#then randomly samples elements from this buffer, replacing the selected elements with new elements.
dataset = tf.data.Dataset.from_tensor_slices(x_train).shuffle(1000)
#Combines consecutive elements of this dataset into batches.
dataset = dataset.batch(batch_size, drop_remainder=True).prefetch(1)
#Creates a Dataset that prefetches elements from this dataset
###Output
_____no_output_____
###Markdown
4. Building the Generator Network model for DCGAN using the Keras Sequential API
###Code
num_features = 100
generator = keras.models.Sequential([
keras.layers.Dense(7 * 7 * 128, input_shape=[num_features]),
keras.layers.Reshape([7, 7, 128]),
keras.layers.BatchNormalization(),
keras.layers.Conv2DTranspose(64, (5,5), (2,2), padding="same", activation="selu"),
keras.layers.BatchNormalization(),
keras.layers.Conv2DTranspose(1, (5,5), (2,2), padding="same", activation="tanh"),
])
#generate random Noise
noise = tf.random.normal(shape=[1, num_features])
generated_images = generator(noise, training=False)
plot_utils.show(generated_images, 1)
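# Sanity check (added illustration, not in the original notebook): the two stride-2
# Conv2DTranspose layers upsample 7x7 -> 14x14 -> 28x28, so a generated sample should
# match the 28x28x1 Fashion-MNIST image shape.
print(generated_images.shape)  # expect (1, 28, 28, 1)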
###Output
_____no_output_____
###Markdown
5. Discriminator Network model for DCGAN
###Code
discriminator = keras.models.Sequential([
keras.layers.Conv2D(64, (5,5), (2,2), padding="same", input_shape=[28, 28, 1]),
keras.layers.LeakyReLU(0.2),
keras.layers.Dropout(0.3),
keras.layers.Conv2D(128, (5,5), (2,2), padding="same"),
keras.layers.LeakyReLU(0.2),
keras.layers.Dropout(0.3),
keras.layers.Flatten(),
keras.layers.Dense(1, activation='sigmoid')
])
decision = discriminator(generated_images)
print(decision)
###Output
tf.Tensor([[0.5008253]], shape=(1, 1), dtype=float32)
###Markdown
6. Compiling the Deep Convolutional Generative Adversarial Network (DCGAN) model built with Keras. The discriminator is compiled on its own first; `discriminator.trainable` is then set to `False` before the stacked `gan` model is compiled, so that only the generator's weights are updated when `gan.train_on_batch` is called.
###Code
discriminator.compile(loss="binary_crossentropy", optimizer="rmsprop")
discriminator.trainable = False
gan = keras.models.Sequential([generator, discriminator])
gan.compile(loss="binary_crossentropy", optimizer="rmsprop")
###Output
_____no_output_____
###Markdown
7. Training Procedure is given below
###Code
from IPython import display
from tqdm import tqdm
seed = tf.random.normal(shape=[batch_size, 100])
from tqdm import tqdm
def train_dcgan(gan, dataset, batch_size, num_features, epochs=5):
generator, discriminator = gan.layers
for epoch in tqdm(range(epochs)):
print("Epoch {}/{}".format(epoch + 1, epochs))
for X_batch in dataset:
noise = tf.random.normal(shape=[batch_size, num_features])
generated_images = generator(noise)
X_fake_and_real = tf.concat([generated_images, X_batch], axis=0)
y1 = tf.constant([[0.]] * batch_size + [[1.]] * batch_size)
discriminator.trainable = True
discriminator.train_on_batch(X_fake_and_real, y1)
noise = tf.random.normal(shape=[batch_size, num_features])
y2 = tf.constant([[1.]] * batch_size)
discriminator.trainable = False
gan.train_on_batch(noise, y2)
# Produce images for the GIF as we go
display.clear_output(wait=True)
generate_and_save_images(generator, epoch + 1, seed)
display.clear_output(wait=True)
generate_and_save_images(generator, epochs, seed)
## Source https://www.tensorflow.org/tutorials/generative/dcgan#create_a_gif
def generate_and_save_images(model, epoch, test_input):
# Notice `training` is set to False.
# This is so all layers run in inference mode (batchnorm).
predictions = model(test_input, training=False)
fig = plt.figure(figsize=(10,10))
for i in range(25):
plt.subplot(5, 5, i+1)
plt.imshow(predictions[i, :, :, 0] * 127.5 + 127.5, cmap='binary')
plt.axis('off')
plt.savefig('image_at_epoch_{:04d}.png'.format(epoch))
plt.show()
###Output
_____no_output_____
###Markdown
8. Training a DCGAN model
###Code
x_train_dcgan = x_train.reshape(-1, 28, 28, 1) * 2. - 1.
batch_size = 32
dataset = tf.data.Dataset.from_tensor_slices(x_train_dcgan)
dataset = dataset.shuffle(1000)
dataset = dataset.batch(batch_size, drop_remainder=True).prefetch(1)
%%time
train_dcgan(gan, dataset, batch_size, num_features, epochs=10)
###Output
_____no_output_____
###Markdown
9. Generating Synthetic Images with DCGAN on Fashion-MNIST data
###Code
noise = tf.random.normal(shape=[batch_size, num_features])
generated_images = generator(noise)
plot_utils.show(generated_images, 8)
## Source: https://www.tensorflow.org/tutorials/generative/dcgan#create_a_gif
import imageio
import glob
anim_file = '/content/dcgan.gif'
with imageio.get_writer(anim_file, mode='I') as writer:
filenames = glob.glob('image*.png')
filenames = sorted(filenames)
last = -1
for i,filename in enumerate(filenames):
frame = 2*(i)
if round(frame) > round(last):
last = frame
else:
continue
image = imageio.imread(filename)
writer.append_data(image)
image = imageio.imread(filename)
writer.append_data(image)
import IPython
display.Image(filename=anim_file)
###Output
_____no_output_____ |
Extract_tag.ipynb | ###Markdown
Test cases
###Code
# test_case = ["I want a high school romance movie with a bit of fantasy adventure.",
# "I want a sport theme drama.",
# "I want a non-human Artificial Intelligence crime series.",
# "I want a show with lots of actions and comedy and little romance.",
# "I want a slice of life anime.",
# "I want a show with lots of actions, comedy and Science Fiction.",
# "I want an ojou-sama anime.",
# "I want a action, romance, comedy show.",
# "I want an action anime.",
# "I want a horror anime.",
test_case = ["I want a show with a lot of action and ninjas.",
"I want a show with fighting robots.",
"I want a show with crazy battles."]
###Output
_____no_output_____
###Markdown
A method to get the tags we needed
###Code
def seperate_genres(nlp):
""" Takes a Spacy core language model,
we use the model to process all the tag and category
we have
Arguments:
nlp -- a spacy core language model
Returns:
{dict} -- a dictionary of category as the key and the
set of tags associated with it as value.
"""
result = {"Genres": ["Action", "Adventure", "Comedy", "Drama", "Ecchi", "Fantasy", "Horror", "Mahou Shoujo",
"Mecha", "Music", "Mystery", "Psychological" ,"Romance", "Science Fiction", "Slice Of Life",
"Sports", "Supernatural", "Thriller"],
"Cast-Main Cast": ["Anti-Hero", "Ensemble Cast", "Female Protagonist", "Male Protagonist",
"Primarily Adult Cast", "Primarily Child Cast", "Primarily Female Cast",
"Primarily Male Cast"],
"Cast-Traits": ["Age Regression", "Agender", "Aliens", "Amnesia", "Angels", "Artificial Intelligence",
"Asexual", "Butler", "Centaur", "Chimera", "Chuunibyou", "Cosplay", "Crossdressing",
"Cyborg", "Delinquents", "Demons", "Detective", "Dinosaurs", "Dissociative Identities",
"Dragons", "Dullahan", "Elf", "Ghost", "Goblin", "Gods", "Gyaru", "Hikikomori", "Idol",
"Kemonomimi", "Kuudere", "Maids", "Mermaid", "Monster Boy", "Monster Girl", "Nekomimi",
"Ninja", "Nudity", "Nun", "Office Lady", "Oiran", "Ojou-Sama", "Pirates", "Robots",
"Samurai", "Shrine Maiden", "Skeleton", "Succubus", "Tanned Skin", "Teacher", "Tomboy"
"Transgender", "Tsundere", "Twins", "Vampire", "Vikings", "Villainess", "VTuber",
"Werewolf", "Witch", "Yandere", "Zombie"],
"Demographic": ["Josei", "Kids", "Seinen", "Shoujo", "Shounen"],
"Setting-Scene": ["Bar", "Circus", "College", "Dungeon", "Foreign", "Language Barrier", "Outdoor",
"Rural", "School", "School Club", "UrbanWork"],
"Setting-Time": ["Achronological Order", "Anachronism", "Dystopian", "Historical", "Time Skip"],
"Setting-Universe": ["Afterlife", "Alternate Universe", "Augmented Reality", "Post-Apocalyptic", "Space",
"Urban Fantasy", "Virtual World"],
"Technical": ["4-Koma", "Achromatic", "Advertisement", "Anthology", "CGI", "Episodic", "Flash",
"Full CGI", "Full Color", "No Dialogue", "POV", "Puppetry", "Rotoscoping", "Stop Motion"],
"Theme-Action": ["Archery", "Battle Royale", "Espionage", "Fugitive", "Guns", "Martial Arts",
"Swordplay"],
"Theme-Arts": ["Acting", "Calligraphy", "Classic Literature", "Drawing", "Fashion", "Food", "Makeup",
"Photography", "Rakugo", "Writing"],
"Theme-Arts-Music": ["Band", "Dancing", "Musical"],
"Theme-Comedy": ["Parody", "Satire", "Slapstick", "Surreal Comedy"],
"Theme-Drama": ["Bullying", "Coming Of Age", "Conspiracy", "Rehabilitation", "Revenge", "Suicide",
"Tragedy"],
"Theme-Fantasy": ["Body Swapping", "Cultivation", "Fairy Tale", "Henshin", "Isekai", "Kaiju", "Magic",
"Mythology", "Shapeshifting", "Steampunk", "Super Power", "Superhero", "Wuxia",
"Youkai"],
"Theme-Game": ["E-Sports", "Video Games"],
"Theme-Game-Card & Board Game": ["Card Battle", "Go", "Karuta", "Mahjong", "Poker", "Shogi"],
"Theme-Game-Sport": ["Airsoft", "American Football", "Athletics", "Badminton", "Baseball", "Basketball",
"Boxing", "Cheerleading", "Cycling", "Fencing", "Fishing", "Fitness", "Football",
"Golf", "Ice Skating", "Judo", "Lacrosse", "Parkour", "Rugby", "Scuba Diving",
"Skateboarding", "Sumo", "Surfing", "Swimming", "Table Tennis", "Tennis",
"Volleyball", "Wrestling"],
"Theme-Other": ["Adoption", "Animals", "Astronomy", "Autobiographical", "Biographical", "Body Horror",
"Cannibalism", "Chibi", "Cosmic Horror", "Crime", "Crossover", "Death Game", "Denpa",
"Drugs", "Economics", "Educational", "Environmental", "Ero Guro", "Gambling",
"Gender Bending", "Gore", "LGBTQ+ Themes", "Lost Civilization", "Medicine",
"Memory Manipulation", "Meta", "Noir", "Otaku Culture", "Pandemic", "Philosophy",
"Politics", "Reincarnation", "Religion", "Slavery", "Software Development",
"Survival", "Terrorism", "Torture", "War"],
"Theme-Other-Organisations": ["Assassins", "Cult", "Firefighters", "Gangs", "Mafia", "Military",
"Police", "Triads", "Yakuza"],
"Theme-Other-Vehicle": ["Aviation", "Cars", "Mopeds", "Motorcycles", "Ships", "Tanks", "Trains"],
"Theme-Romance": ["Age Gap", "Bisexual", "Boys' Love", "Female Harem", "Heterosexual", "Love Triangle",
"Male Harem", "Teens' Love", "Yuri"],
"Theme-Sci Fi": ["Cyberpunk", "Space Opera", "Time Manipulation", "Tokusatsu"],
"Theme-Sci Fi-Mecha": ["Real Robot", "Super Robot"],
"Theme-Slice Of Life": ["Agriculture", "Cute Boys Doing Cute Things", "Cute Girls Doing Cute Things",
"Family Life", "Iyashikei"]}
# These tags are exceptions so that when we tokenize them, lemmatize them, and combine them
# back, we don't add a space inside them
exceptions = ["Anti-Hero", "Ojou-Sama", "Post-Apocalyptic", "4-Koma", "E-Sports"]
exceptions_tag = [exception.lower() for exception in exceptions]
# we go over each tag, convert it to lower case and lemmatize it
for category, tags in result.items():
new_list = []
for tag in tags:
new_tag = ""
# convert them to lower case
tag = tag.lower()
nlp_new_tag = nlp(tag)
white_space = False
# we lemmatized them
for token in nlp_new_tag:
if white_space is True:
# the if and else statement is to make sure that we don't add space in the beginning
# of the word
new_tag += " " + token.lemma_
else:
# we process the first word of the tag without adding the space
new_tag += token.lemma_
if tag not in exceptions_tag:
white_space = True
# we add the lemmatized tag to the list and update the dict
new_list.append(new_tag)
result.update({category: new_list})
return result
###Output
_____no_output_____
###Markdown
Printing the tag to check
###Code
final_tags = seperate_genres(nlp)
for key,value in final_tags.items():
print(key)
print(value)
###Output
Genres
['action', 'adventure', 'comedy', 'drama', 'ecchi', 'fantasy', 'horror', 'mahou shoujo', 'mecha', 'music', 'mystery', 'psychological', 'romance', 'science fiction', 'slice of life', 'sport', 'supernatural', 'thriller']
Cast-Main Cast
['anti-hero', 'ensemble cast', 'female protagonist', 'male protagonist', 'primarily adult cast', 'primarily child cast', 'primarily female cast', 'primarily male cast']
Cast-Traits
['age regression', 'agender', 'alien', 'amnesia', 'angel', 'artificial intelligence', 'asexual', 'butler', 'centaur', 'chimera', 'chuunibyou', 'cosplay', 'crossdressing', 'cyborg', 'delinquent', 'demon', 'detective', 'dinosaur', 'dissociative identity', 'dragon', 'dullahan', 'elf', 'ghost', 'goblin', 'god', 'gyaru', 'hikikomori', 'idol', 'kemonomimi', 'kuudere', 'maid', 'mermaid', 'monster boy', 'monster girl', 'nekomimi', 'ninja', 'nudity', 'nun', 'office lady', 'oiran', 'ojou-sama', 'pirate', 'robot', 'samurai', 'shrine maiden', 'skeleton', 'succubus', 'tanned skin', 'teacher', 'tomboytransgender', 'tsundere', 'twin', 'vampire', 'viking', 'villainess', 'vtuber', 'werewolf', 'witch', 'yandere', 'zombie']
Demographic
['josei', 'kid', 'seinen', 'shoujo', 'shounen']
Setting-Scene
['bar', 'circus', 'college', 'dungeon', 'foreign', 'language barrier', 'outdoor', 'rural', 'school', 'school club', 'urbanwork']
Setting-Time
['achronological order', 'anachronism', 'dystopian', 'historical', 'time skip']
Setting-Universe
['afterlife', 'alternate universe', 'augment reality', 'post-apocalyptic', 'space', 'urban fantasy', 'virtual world']
Technical
['4-koma', 'achromatic', 'advertisement', 'anthology', 'cgi', 'episodic', 'flash', 'full cgi', 'full color', 'no dialogue', 'pov', 'puppetry', 'rotoscope', 'stop motion']
Theme-Action
['archery', 'battle royale', 'espionage', 'fugitive', 'gun', 'martial art', 'swordplay']
Theme-Arts
['act', 'calligraphy', 'classic literature', 'draw', 'fashion', 'food', 'makeup', 'photography', 'rakugo', 'write']
Theme-Arts-Music
['band', 'dancing', 'musical']
Theme-Comedy
['parody', 'satire', 'slapstick', 'surreal comedy']
Theme-Drama
['bully', 'come of age', 'conspiracy', 'rehabilitation', 'revenge', 'suicide', 'tragedy']
Theme-Fantasy
['body swap', 'cultivation', 'fairy tale', 'henshin', 'isekai', 'kaiju', 'magic', 'mythology', 'shapeshifte', 'steampunk', 'super power', 'superhero', 'wuxia', 'youkai']
Theme-Game
['e-sport', 'video game']
Theme-Game-Card & Board Game
['card battle', 'go', 'karuta', 'mahjong', 'poker', 'shogi']
Theme-Game-Sport
['airsoft', 'american football', 'athletic', 'badminton', 'baseball', 'basketball', 'boxing', 'cheerleading', 'cycling', 'fence', 'fishing', 'fitness', 'football', 'golf', 'ice skating', 'judo', 'lacrosse', 'parkour', 'rugby', 'scuba diving', 'skateboard', 'sumo', 'surf', 'swimming', 'table tennis', 'tennis', 'volleyball', 'wrestling']
Theme-Other
['adoption', 'animal', 'astronomy', 'autobiographical', 'biographical', 'body horror', 'cannibalism', 'chibi', 'cosmic horror', 'crime', 'crossover', 'death game', 'denpa', 'drug', 'economic', 'educational', 'environmental', 'ero guro', 'gamble', 'gender bending', 'gore', 'lgbtq+ theme', 'lose civilization', 'medicine', 'memory manipulation', 'meta', 'noir', 'otaku culture', 'pandemic', 'philosophy', 'politic', 'reincarnation', 'religion', 'slavery', 'software development', 'survival', 'terrorism', 'torture', 'war']
Theme-Other-Organisations
['assassin', 'cult', 'firefighter', 'gang', 'mafia', 'military', 'police', 'triad', 'yakuza']
Theme-Other-Vehicle
['aviation', 'car', 'moped', 'motorcycle', 'ship', 'tank', 'train']
Theme-Romance
['age gap', 'bisexual', "boy ' love", 'female harem', 'heterosexual', 'love triangle', 'male harem', "teen ' love", 'yuri']
Theme-Sci Fi
['cyberpunk', 'space opera', 'time manipulation', 'tokusatsu']
Theme-Sci Fi-Mecha
['real robot', 'super robot']
Theme-Slice Of Life
['agriculture', 'cute boy do cute thing', 'cute girl do cute thing', 'family life', 'iyashikei']
###Markdown
Modifying nlp tokenizer
###Code
# we add each tag to the tokenizer so that it processes
# the whole tag as one token
for tags in final_tags.values():
for tag in tags:
special_case = [{"ORTH": tag}]
nlp.tokenizer.add_special_case(tag, special_case)
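# Illustrative check (not part of the original notebook): after the special cases
# above are added, a multi-word tag such as "slice of life" should be kept as a
# single token by the tokenizer.
# doc = nlp("I want a slice of life anime.")
# print([token.text for token in doc])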
def extract_keywords(nlp, sequence):
""" Takes a Spacy core language model,
a text that we need to process
we use the model to process the sequence of text
that was pass in
Arguments:
nlp -- a spacy core language model
sequence -- the text that we want to process
Returns:
{dict} -- a dictionary with each keyword as the key and a set
of adjectives or adverbs used to quantify it.
(Note that the set contains words that we do not
need, so we'll process it afterward)
"""
result = {}
pos_tag = ["PROPN", "NOUN", "ADJ", "VERB"]
descriptive_tag = ["ADJ", "ADV"]
doc = nlp(sequence.lower())
custom_stop_list = [
'theme'
]
for token in doc:
# we ignore punctuation, stop words (common words such as "is", "are", "what"...),
# and word in our custom_stop_list
if token in nlp.Defaults.stop_words or token.pos_ == "PUNCT" or token.text in custom_stop_list:
continue
# the word might be a keyword so we process it
if token.pos_ in pos_tag:
# we keep track of a list of adjective or adverb describing the keyword we're
# looking at
children_list = set()
for child in token.children:
# we're looking at the other token associate with the keyword we're
# currently looking at
if (child.pos_ in descriptive_tag):
# if the other token is an adverb or an adjective we add it to the set
# we also combine it to form a new word
children_list.add(child.text)
new_text = child.lemma_ + " " + token.lemma_
result.update({new_text: set()})
# this is to take into account for phrase such as "lot of" and "bit of"
if token.dep_ == "pobj":
if token.head.dep_ == "prep":
children_list.add(token.head.head.text + " " + token.head.text)
# we process words connected by conjuction such as and
if token.dep_ == "conj":
if len(children_list) == 0:
# words that are connected by conjuction shouldn't have any
# adjective or adverb describing it. If it does that meant
# that it was an entirely different phrase and we shouldn't add it
children_list.update(result[token.head.lemma_])
result.update({token.lemma_: children_list})
# print(token.text + ", " + token.pos_, children_list)
for token in doc:
if token.dep_ == "compound":
if result.get(token.text) is not None:
updated_value = result.get(token.text)
if result.get(token.head.text) is not None:
updated_value.update(result.get(token.head.text))
result.update({token.text: updated_value})
# from spacy import displacy
# displacy.render(doc, style="dep")
return result
# we print out the keyword that was extracted from the test_case to see if
# it's what we expected
# for test in test_case:
# print(extract_keywords(nlp, test))
def extract_tags(nlp, text, special_tags : dict = None):
""" Takes a Spacy core language model,
a text that we need to process, and a dictionary of tags
we use the model to process the sequence of text
that was passed in and go through the tags to see which are similar
Arguments:
nlp -- a spacy core language model
sequence -- the text that we want to process
special_tags -- the list of tags that we're looking for
Returns:
{set} -- the set of matched tags (internally each tag is
paired with a search-rank filter value so that we only keep
results with a rank at that value or above)
"""
result_tag = {}
result_media_type = []
keywords = extract_keywords(nlp, text)
custom_stop_list = [
"anime",
"manga",
"movie",
"ova",
"ona"
]
quatifier_descriptions = {
nlp("a lot of"): 1,
nlp("lots of"): 1,
nlp("many") : 1,
nlp("some"): 25,
nlp("average"): 50,
nlp("little"): 75,
nlp("bit of"): 75,
nlp("no"): 0,
}
for keyword, quantifiers in keywords.items():
# if the keyword is in the list
# it's describing what type of media we want
# such as manga, anime or other
if keyword in custom_stop_list:
result_media_type.append(keyword)
continue
# we use nlp to process the keyword
nlp_keyword = nlp(keyword)
search_rank_filter = 1;
for quantifier in quantifiers:
nlp_quantifier = nlp(quantifier)
for description in quatifier_descriptions:
if nlp_quantifier.has_vector and nlp_quantifier.similarity(description) > 0.9:
search_rank_filter = quatifier_descriptions[description]
# we loop through all the tags and see which one are similar
for category, tags in special_tags.items():
for tag in tags:
# we process the tag to be able to compare the word vector
nlp_tag = nlp(tag)
# we have a boolean value to see if the word we're comparing
# have a word vector if yes we compare them
should_compare = nlp_keyword.has_vector and nlp_tag.has_vector
if should_compare and nlp_keyword.similarity(nlp_tag) > 0.8:
result_tag.update({tag: search_rank_filter})
print("tag: ", search_rank_filter, nlp_tag.text, " - ", "text: ", nlp_keyword.text)
return set(result_tag);
special_tags = seperate_genres(nlp);
for test in test_case:
print(test)
extract_tags(nlp, test, special_tags)
print(nlp("bit of").similarity(nlp("little")))
print(nlp("a lot of").similarity(nlp("lots of")))
print(nlp("average").similarity(nlp("high")))
print(nlp("fight").similarity(nlp("action")))
###Output
0.7395083689922443
0.8639847133169465
0.509374629883784
0.47812068532229546
|
data_preprocessing/5/Data_Preprocessing_Notebook.ipynb | ###Markdown
Data Preprocessing for Social Anxiety Detection : Participant 5 *** Participant Details__Gender:__ female __Ethnicity:__ asian __Age:__ 21 __Self-reported Liebowitz social anxiety score:__ 80 __Anxiety category:__ 2 *** Contents__1.Introduction __1.1. Nature of the dataset 1.2. Description of the ML experiments __2.Import packages ____3.Import data __3.1. Import HR data and resample 3.2. Import ST data 3.3. Import EDA data __4.Combine data ____5.Data labelling __5.1. Labelling for experiment (1) and (3) 5.2. Labelling for experiment (2) __6.Data visualisation and export__*** 1. Introduction This notebook preprocesses the physiological data needed for the supervised machine learning (ML) experiments that investigate whether subclinical social anxiety in young adults can be detected using physiological data obtained from wearable sensors. 1.1. Nature of the dataset The dataset consists of Heart Rate (HR) data, Skin Temperature (ST) data and Electrodermal Activity (EDA) data. This physiological data was collected using an E4 Empatica wearable device. Using the default sampling rates of the E4, EDA was measured in microSiemens (μS) at 4 Hz using stainless steel electrodes positioned on the inner side of the wrist. HR was measured in Beats Per Minute (BPM) at 1 Hz using data derived from a Photoplethysmography sensor. ST was measured in degrees Celsius (°C) at 4 Hz using an infrared thermopile. 1.2. Description of the ML experiments __Experiment (1)__ investigates whether models can be trained to classify between baseline and socially anxious states. The data is labelled '0' during the baseline period and '1' during the anxiety period (during anticipation and reactive anxiety). __Experiment (2)__ investigates whether models can be trained to differentiate between baseline, anticipation anxiety and reactive anxiety states. The data is labelled in three ways: '0' during the baseline period, '1' during the anticipation anxiety period and '2' during the reactive anxiety period. __Experiment (3)__ investigates whether models can be trained to classify between social anxiety experienced by individuals with differing social anxiety severity. The data was segregated based on scores reported using the self-reported version of the Liebowitz Social Anxiety Scale (LSAS-SR); the data is either labelled '0' for individuals in anxiety category 1 (LSAS-SR: 50-64) or '1' for individuals in anxiety category 2 (LSAS-SR: 65-80).*** 2.Import packages
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
###Output
_____no_output_____
###Markdown
3.Import and combine data 3.1. Import HR data and upsample HR is imported and upsampled to 4 Hz, similar to ST and EDA. The data is then cleaned using a moving average filter in order to remove noise and reduce the risk of overfitting.
###Code
hr = pd.read_csv("HR.csv")
hr.index = pd.date_range('2020-03-04', periods = len(hr), freq='1S')
#resampling HR to 4Hz
hr_resample = hr.resample('0.25S').ffill()
#Applying moving average filter
rolling = hr_resample.rolling(window=9)
hr_filtered = rolling.mean()
#Plotting the comparison
fig, (ax1, ax2) = plt.subplots(2, 1)
hr_resample[2600:2700].plot( ax=ax1, legend=False, color = 'indigo')
ax1.yaxis.set_label_text("HR (BPM)")
ax1.xaxis.set_label_text('Time(min)')
ax1.set_title("Resampled HR")
ax1.grid(which='both', alpha=2)
hr_filtered[2600:2700].plot( ax=ax2, legend=False, color = 'indigo')
ax2.yaxis.set_label_text("HR (BPM)")
ax2.xaxis.set_label_text('Time(min)')
ax2.set_title("Resampled HR After Filtering")
ax2.grid(which='both', alpha=2)
fig.set_size_inches(15, 5)
fig.subplots_adjust(hspace=0.7)
plt.show()
###Output
_____no_output_____
###Markdown
3.2. Import ST data The ST data is imported and then cleaned using a moving average filter in order to remove noise and reduce the risk of overfitting.
###Code
st = pd.read_csv("ST.csv")
st.index = pd.date_range('2020-03-04', periods = len(st), freq='0.25S')
#Applying moving average filter
rolling = st.rolling(window=15)
st_filtered = rolling.mean()
#Plotting the comparison
fig, (ax1, ax2) = plt.subplots(2, 1)
st[2600:2700].plot( ax=ax1, legend=False, color = 'indigo')
ax1.yaxis.set_label_text("ST (°C)")
ax1.xaxis.set_label_text('Time(min)')
ax1.set_title("Raw ST")
ax1.grid(which='both', alpha=2)
st_filtered[2600:2700].plot( ax=ax2, legend=False, color = 'indigo')
ax2.yaxis.set_label_text("ST (°C)")
ax2.xaxis.set_label_text('Time(min)')
ax2.set_title("ST After Filtering")
ax2.grid(which='both', alpha=2)
fig.set_size_inches(15, 5)
fig.subplots_adjust(hspace=0.7)
plt.show()
###Output
_____no_output_____
###Markdown
3.3. Import EDA data The EDA data is imported and then cleaned using a moving average filter in order to remove noise and reduce the risk of overfitting. The EDA data is also range corrected in order to remove inter-individual differences; more details about the range correction method can be found in the paper.
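As a generic sketch of the range-correction step (the cell below hard-codes this participant's observed minimum and maximum EDA values; the helper name here is hypothetical, not part of the notebook):

```python
# Hypothetical helper illustrating range correction: rescale EDA into [0, 1]
# using the participant's observed minimum and maximum EDA values.
def range_correct(eda, eda_min, eda_max):
    return (eda - eda_min) / (eda_max - eda_min)
```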
###Code
eda = pd.read_csv("EDA.csv")
eda.index = pd.date_range('2020-03-04', periods = len(eda), freq='0.25S')
#Applying moving average filter
rolling = eda.rolling(window=15)
eda_filtered = rolling.mean()
#Range corrected EDA: (value - min) / (max - min)
eda_corrected = (eda_filtered - 1.015)/(4.983-1.015)
#Plotting the comparison
fig, (ax1, ax2, ax3) = plt.subplots(3, 1)
eda[2600:2800].plot( ax=ax1, legend=False, color = 'indigo')
ax1.yaxis.set_label_text("EDA (μS)")
ax1.xaxis.set_label_text('Time(min)')
ax1.set_title("Raw EDA")
ax1.grid(which='both', alpha=2)
eda_filtered[2600:2800].plot( ax=ax2, legend=False, color = 'indigo')
ax2.yaxis.set_label_text("EDA (μS)")
ax2.xaxis.set_label_text('Time(min)')
ax2.set_title("EDA After Filtering")
ax2.grid(which='both', alpha=2)
eda_corrected[2600:2800].plot( ax=ax3, legend=False, color = 'indigo')
ax3.yaxis.set_label_text("EDA (μS)")
ax3.xaxis.set_label_text('Time(min)')
ax3.set_title("Range corrected EDA")
ax3.grid(which='both', alpha=2)
fig.set_size_inches(15, 6)
fig.subplots_adjust(hspace=1.3)
eda_filtered=eda_corrected
plt.show()
#eda[480:5846].min()
#eda[480:5846].max()
###Output
_____no_output_____
###Markdown
4.Combine data
###Code
df = pd.concat([hr_filtered, st_filtered, eda_filtered], ignore_index=True, axis = 1 )
df = df.T.reset_index(drop=True).T
display(df.describe())
###Output
_____no_output_____
###Markdown
5.Data labelling The data was labelled for three different experiments. The anxiety durations (in data cells, etc.) were calculated using a spreadsheet and the timestamps recorded during the experiments.
###Code
#insert column specifically for labels
df.insert(3,3,0)
display(df.describe())
###Output
_____no_output_____
###Markdown
5.1. Labelling for experiment (1) and (3) For experiment (1) the data was labelled '1' (allocated to the social anxiety class) from when the task was announced to when the task was finished. The first 2 minutes of the baseline period were also discarded to account for acclimatisation, and the data after the task was also discarded. For experiment (3) only the data in the anxious period (from task announcement to task end) was extracted and labelled. This individual falls into anxiety category 2 based on their LSAS-SR score; therefore their anxious data is labelled '1'. The data is then shuffled and a set number of samples is taken.
###Code
experiment_df = df
#duration (labels) of anxiety duration (both anticipation and reactive, labelled '1')
experiment_df[3][2647:5846] = 1
display(experiment_df[3].value_counts())
#removing the data after the task had ended
experiment_df = experiment_df.drop(experiment_df.index[5846:])
#experiment 1 - removing the first 2 mins of the baseline period to account for acclimisation
experiment1_df = experiment_df.drop(experiment_df.index[:480])
display(experiment1_df[3].value_counts())
experiment1_df.to_csv("experiment_1.csv")
#experiment 3 - removing baseline period
experiment3_df = experiment_df.drop(experiment_df.index[:2647])
display(experiment3_df[3].value_counts())
#shuffling and extracting a set number of samples
idx = np.random.permutation(experiment3_df.index)
shuffled = experiment3_df.reindex(idx, axis=0)
shuffled = shuffled.reset_index(drop=True)
shuffled = shuffled.drop(shuffled.index[1667:])
shuffled.to_csv("experiment_3.csv")
###Output
C:\Users\Ruksana\Anaconda3\lib\site-packages\ipykernel_launcher.py:4: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy
after removing the cwd from sys.path.
###Markdown
5.2. Labelling for experiment (2) For experiment (2) the data was labelled '1' during the anticipation anxiety stage (task announcement to task start) and labelled '2' during the reactive anxiety stage (task start to task end). The first 2 minutes of the baseline period were also discarded to account for acclimatisation, and the data after the task was also discarded.
###Code
experiment2_df = df
#duration (labels) of task prep (anticipation anxiety duration, labelled '1')
experiment2_df[3][2647:4766] = 1
#duration (labels) of task execution (reactive anxiety duration, labelled '2')
experiment2_df[3][4766:5846] = 2
display(experiment2_df[3].value_counts())
#removing the data after the task had ended
experiment2_df = experiment2_df.drop(experiment2_df.index[5846:])
#removing the first 2 mins of the baseline period to account for acclimisation
experiment2_df = experiment2_df.drop(experiment2_df.index[:528])
display(experiment2_df[3].value_counts())
experiment2_df.to_csv("experiment_2.csv")
###Output
C:\Users\Ruksana\Anaconda3\lib\site-packages\ipykernel_launcher.py:4: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy
after removing the cwd from sys.path.
C:\Users\Ruksana\Anaconda3\lib\site-packages\ipykernel_launcher.py:6: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy
###Markdown
6.Data visualisation The physiological data and the experiment (1) and (2) labels were plotted. Pearson correlation matrices were also computed for the datasets used in experiments (1) and (2).
###Code
fig, (ax1, ax2, ax3, ax4, ax5) = plt.subplots(5, 1)
ax1.set_title('Combined Physiological Data and Experiment Labels (1 & 2)', fontsize = 15)
experiment1_df[0].plot(ax=ax1, legend=False, color='indigo')
ax1.yaxis.set_label_text("HR (BPM)")
ax1.xaxis.set_label_text('Time(min)')
ax1.grid(which='both', alpha=2)
experiment1_df[1].plot(ax=ax2, legend=False, color='indigo')
ax2.yaxis.set_label_text("ST (°C)")
ax2.xaxis.set_label_text('Time(min)')
ax2.grid(which='both', alpha=2)
experiment1_df[2].plot(ax=ax3, legend=False, color='indigo')
ax3.yaxis.set_label_text("Range Corrected EDA (μS)")
ax3.xaxis.set_label_text('Time(min)')
ax3.grid(which='both', alpha=2)
experiment1_df[3].plot(ax=ax4, legend=False, color='indigo')
ax4.yaxis.set_label_text("Experiment (1) labels")
ax4.xaxis.set_label_text('Time(min)')
ax4.grid(which='both', alpha=2)
experiment2_df[3].plot(ax=ax5, legend=False, color='indigo')
ax5.yaxis.set_label_text("Experiment (2) labels")
ax5.xaxis.set_label_text('Time(min)')
ax5.grid(which='both', alpha=2)
fig.set_size_inches(15, 14)
fig.subplots_adjust(hspace=0.4)
plt.show()
#Correlation matrix with Experiment 1 (binary labels)
labeldata = ['HR', 'ST', 'EDA','Labels']
sns.heatmap(experiment1_df.corr(method = 'pearson'), vmin=0, vmax=1, annot=True, cmap="YlGnBu", yticklabels = labeldata, xticklabels =labeldata)
fig = plt.gcf()
#Correlation matrix with Experiment 2 (Mult-class labels)
sns.heatmap(experiment2_df.corr(method = 'pearson'), vmin=0, vmax=1, annot=True, cmap="YlGnBu", yticklabels = labeldata, xticklabels =labeldata)
fig = plt.gcf()
###Output
_____no_output_____ |
data_visualization/01-exercise-hello-seaborn.ipynb | ###Markdown
**This notebook is an exercise in the [Data Visualization](https://www.kaggle.com/learn/data-visualization) course. You can reference the tutorial at [this link](https://www.kaggle.com/alexisbcook/hello-seaborn).**--- In this exercise, you will write your first lines of code and learn how to use the coding environment for the course! Setup First, you'll learn how to run code, and we'll start with the code cell below. (Remember that a **code cell** in a notebook is just a gray box containing code that we'd like to run.)- Begin by clicking inside the code cell. - Click on the blue triangle (in the shape of a "Play button") that appears to the left of the code cell.- If your code was run successfully, you will see `Setup Complete` as output below the cell. The code cell below imports and configures the Python libraries that you need to complete the exercise. Click on the cell and run it.
###Code
import pandas as pd
pd.plotting.register_matplotlib_converters()
import matplotlib.pyplot as plt
%matplotlib inline
import seaborn as sns
# Set up code checking
import os
if not os.path.exists("../input/fifa.csv"):
os.symlink("../input/data-for-datavis/fifa.csv", "../input/fifa.csv")
from learntools.core import binder
binder.bind(globals())
from learntools.data_viz_to_coder.ex1 import *
print("Setup Complete")
###Output
_____no_output_____
###Markdown
The code you just ran sets up the system to give you feedback on your work. You'll learn more about the feedback system in the next step. Step 1: Explore the feedback systemEach exercise lets you test your new skills with a real-world dataset. Along the way, you'll receive feedback on your work. You'll see if your answer is right, get customized hints, and see the official solution (_if you'd like to take a look!_).To explore the feedback system, we'll start with a simple example of a coding problem. Follow the following steps in order:1. Run the code cell below without making any edits. It will show the following output: > Check: When you've updated the starter code, `check()` will tell you whether your code is correct. You need to update the code that creates variable `one` This means you need to change the code to set the variable `one` to something other than the blank provided below (`____`).2. Replace the underline with a `2`, so that the line of code appears as `one = 2`. Then, run the code cell. This should return the following output:> Incorrect: Incorrect value for `one`: `2` This means we still have the wrong answer to the question.3. Now, change the `2` to `1`, so that the line of code appears as `one = 1`. Then, run the code cell. The answer should be marked as Correct. You have now completed this problem!
###Code
# Fill in the line below
one = 1
# Check your answer
step_1.check()
###Output
_____no_output_____
###Markdown
In this exercise, you were responsible for filling in the line of code that sets the value of variable `one`. **Don't edit the code that checks your answer.** You'll need to run the lines of code like `step_1.check()` and `step_2.check()` just as they are provided.This problem was relatively straightforward, but for more difficult problems, you may like to receive a hint or view the official solution. Run the code cell below now to receive both for this problem.
###Code
step_1.hint()
step_1.solution()
###Output
_____no_output_____
###Markdown
Step 2: Load the dataYou are ready to get started with some data visualization! You'll begin by loading the dataset from the previous tutorial. The code you need is already provided in the cell below. Just run that cell. If it shows Correct result, you're ready to move on!
###Code
# Path of the file to read
fifa_filepath = "../input/fifa.csv"
# Read the file into a variable fifa_data
fifa_data = pd.read_csv(fifa_filepath, index_col="Date", parse_dates=True)
# Check your answer
step_2.check()
###Output
_____no_output_____
###Markdown
Next, recall the difference between comments and executable code:- **Comments** are preceded by a pound sign (``) and contain text that appear faded and italicized. They are completely ignored by the computer when the code is run.- **Executable code** is code that is run by the computer.In the code cell below, every line is a comment:```python Uncomment the line below to receive a hintstep_2.hint()step_2.solution()```If you run the code cell below without making any changes, it won't return any output. Try this now!
###Code
# Uncomment the line below to receive a hint
#step_2.hint()
# Uncomment the line below to see the solution
#step_2.solution()
###Output
_____no_output_____
###Markdown
Next, remove the pound sign before `step_2.hint()` so that the code cell above appears as follows:```python Uncomment the line below to receive a hintstep_2.hint()step_2.solution()```When we remove the pound sign before a line of code, we say we **uncomment** the line. This turns the comment into a line of executable code that is run by the computer. Run the code cell now, which should return the Hint as output.Finally, uncomment the line to see the solution, so the code cell appears as follows:```python Uncomment the line below to receive a hintstep_2.hint()step_2.solution()```Then, run the code cell. You should receive both a Hint and the Solution.If at any point you're having trouble with coming up with the correct answer to a problem, you are welcome to obtain either a hint or the solution before completing the cell. (So, you don't need to get a Correct result before running the code that gives you a Hint or the Solution.) Step 3: Plot the dataNow that the data is loaded into the notebook, you're ready to visualize it! Run the next code cell without changes to make a line chart. The code may not make sense yet - you'll learn all about it in the next tutorial!
###Code
# Set the width and height of the figure
plt.figure(figsize=(16,6))
# Line chart showing how FIFA rankings evolved over time
sns.lineplot(data=fifa_data)
# Check your answer
step_3.a.check()
###Output
_____no_output_____
###Markdown
Some questions won't require you to write any code. Instead, you'll interpret visualizations. As an example, consider the question: Considering only the years represented in the dataset, which countries spent at least 5 consecutive years in the #1 ranked spot? To receive a Hint, uncomment the line below, and run the code cell.
###Code
#step_3.b.hint()
###Output
_____no_output_____
###Markdown
Once you have an answer, check the Solution to get credit for completing the problem and to ensure your interpretation is right.
###Code
# Check your answer (Run this code cell to receive credit!)
step_3.b.solution()
###Output
_____no_output_____ |
ML_Retraining_Pipeline/Clean_Training_Data.ipynb | ###Markdown
Copyright (c) Microsoft Corporation. Licensed under the MIT license. Clean Training Data This notebook will clean the training dataset and load the cleaned data into a spark database for training the models.
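The cell below removes mis-categorised brands from each product category using long chains of inequality filters. As a sketch of a more compact, equivalent pattern (the dictionary entries are illustrative only, not the full exclusion lists used below, and `filtered_df` is assumed to be the null-filtered DataFrame defined in that cell):

```python
# Sketch only: express per-category brand exclusions with isin() and a dictionary,
# then union the per-category DataFrames. The category/brand lists are illustrative.
from functools import reduce

excluded_brands = {
    "construction": ["apple", "philips", "tefal"],
    "electronics": ["nike", "puma", "hyundai"],
}

parts = [
    filtered_df.filter(
        filtered_df.category_code.contains(category)
        & ~filtered_df.brand.isin(brands)
    )
    for category, brands in excluded_brands.items()
]
cleaned = reduce(lambda a, b: a.union(b), parts)
```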
###Code
paths = ['abfss://{FILE_SYSTEM_NAME}@{DATA_LAKE_NAME}.dfs.core.windows.net/synapse/workspaces/full_dataset/', 'abfss://{FILE_SYSTEM_NAME}@{DATA_LAKE_NAME}.dfs.core.windows.net/synapse/workspaces/clickstream/processed/']
full_dataset = spark.read.parquet(*paths)
# remove all null values from category and brand
filtered_df = full_dataset.filter((full_dataset.category_code != 'null') & (full_dataset.brand != 'null'))
#filter on construction and remove misplaced brands
construction_df = filtered_df.filter((filtered_df.category_code.contains('construction')) & (filtered_df.brand != 'apple') & (filtered_df.brand != 'philips') & (filtered_df.brand != 'oystercosmetics')& (filtered_df.brand != 'tefal') & (filtered_df.brand != 'hyundai') & (filtered_df.brand != 'polaris') & (filtered_df.brand != 'puma') & (filtered_df.brand != 'samsung') & (filtered_df.brand != 'maybellinenewyork') & (filtered_df.brand != 'lg') & (filtered_df.brand != 'sony') & (filtered_df.brand != 'nokia') & (filtered_df.brand != 'nike') & (filtered_df.brand != 'fila') & (filtered_df.brand != 'milanicosmetics') & (filtered_df.brand != 'shoesrepublic') &(filtered_df.brand != 'hp')&(filtered_df.brand != 'jbl'))
#filter on electronics and remove misplaced brands
electronic_df = filtered_df.filter((filtered_df.category_code.contains('electronics'))& (filtered_df.brand != 'houseofseasons') & (filtered_df.brand != 'jaguar') & (filtered_df.brand != 'shoesrepublic') & (filtered_df.brand != 'tefal') & (filtered_df.brand != 'nike') & (filtered_df.brand != 'hyundai') & (filtered_df.brand != 'puma'))
#filter on apparel and remove misplaced brands
apparel_df = filtered_df.filter((filtered_df.category_code.contains('apparel')) & (filtered_df.brand != 'toyota') & (filtered_df.brand != 'canon')& (filtered_df.brand != 'samsung') & (filtered_df.brand != 'hp')& (filtered_df.brand != 'nikon') & (filtered_df.brand != 'jbl') & (filtered_df.brand != 'apple') & (filtered_df.brand != 'x-digital') & (filtered_df.brand != 'tefal') & (filtered_df.brand != 'fujifilm') & (filtered_df.brand != 'toysmax') & (filtered_df.brand != 'houseofseasons') & (filtered_df.brand != 'toshiba') & (filtered_df.brand != 'playdoh') & (filtered_df.brand != 'jaguar') & (filtered_df.brand != 'microsoft') & (filtered_df.brand != 'tv-shop') & (filtered_df.brand != 'xp-pen') & (filtered_df.brand != 'philips') & (filtered_df.brand != 'logitech') & (filtered_df.brand != 'm-audio') & (filtered_df.brand != 'sony') & (filtered_df.brand != 'lg') & (filtered_df.brand != 'hyundai'))
#filtered on computers and removed misplaced brands
computer_df = filtered_df.filter((filtered_df.category_code.contains('computers')) & (filtered_df.brand != 'fila') & (filtered_df.brand != 'moosetoys') & (filtered_df.brand != 'tefal') & (filtered_df.brand != 'hotwheels') & (filtered_df.brand != 'taftoys') & (filtered_df.brand != 'barbi') & (filtered_df.brand != 'fitbit') & (filtered_df.brand != 'nike'))
#filtered on appliances and removed misplaced brands
appliance_df = filtered_df.filter((filtered_df.category_code.contains('appliances')) & (filtered_df.brand != 'fila')& (filtered_df.brand != 'shoesrepublic') & (filtered_df.brand != 'toshiba')& (filtered_df.brand != 'hp')& (filtered_df.brand != 'nokia')&(filtered_df.brand != 'hyundai')& (filtered_df.brand != 'moosetoys') & (filtered_df.brand != 'jaguar') & (filtered_df.brand != 'colorkid') & (filtered_df.brand != 'apple') & (filtered_df.brand != 'jbl') & (filtered_df.brand != 'toyota') & (filtered_df.brand != 'nike') & (filtered_df.brand != 'logitech'))
#filtered on auto and removed misplaced brands
auto_df = filtered_df.filter((filtered_df.category_code.contains('auto')) & (filtered_df.brand != 'philips')& (filtered_df.brand != 'sony') & (filtered_df.brand != 'toshiba') & (filtered_df.brand != 'fujifilm') & (filtered_df.brand != 'nikon') & (filtered_df.brand != 'canon') & (filtered_df.brand != 'samsung') & (filtered_df.brand != 'hp'))
#filtered on furniture and removed misplaced brands
furniture_df = filtered_df.filter((filtered_df.category_code.contains('furniture')) & (filtered_df.brand != 'philips')& (filtered_df.brand != 'lg')& (filtered_df.brand != 'samsung') & (filtered_df.brand != 'hyundai')& (filtered_df.brand != 'sony') & (filtered_df.brand != 'logitech') & (filtered_df.brand != 'microsoft') & (filtered_df.brand != 'toshiba') & (filtered_df.brand != 'fujifilm') & (filtered_df.brand != 'tefal') & (filtered_df.brand != 'apple') & (filtered_df.brand != 'nikon') & (filtered_df.brand != 'dell') & (filtered_df.brand != 'nike') & (filtered_df.brand != 'newsuntoys') & (filtered_df.brand != 'canon') & (filtered_df.brand != 'puma') & (filtered_df.brand != 'hp') )
#filtered on kids and removed misplaced brands
kids_df = filtered_df.filter((filtered_df.category_code.contains('kids')) & (filtered_df.brand != 'tefal')& (filtered_df.brand != 'puma') & (filtered_df.brand != 'hp') & (filtered_df.brand != 'apple') & (filtered_df.brand != 'nike') & (filtered_df.brand != 'canon') & (filtered_df.brand != 'lg') & (filtered_df.brand != 'sony') & (filtered_df.brand != 'samsung'))
#filtered on sports and removed misplaced brands
sports_df = filtered_df.filter((filtered_df.category_code.contains('sport')) & (filtered_df.brand != 'philips')& (filtered_df.brand != 'hp') & (filtered_df.brand != 'canon') & (filtered_df.brand != 'logitech') & (filtered_df.brand != 'microsoft') & (filtered_df.brand != 'apple') & (filtered_df.brand != 'jbl') & (filtered_df.brand != 'nikon') & (filtered_df.brand != 'mersedes-benz') & (filtered_df.brand != 'toyland') & (filtered_df.brand != 'lg') & (filtered_df.brand != 'samsung') & (filtered_df.brand != 'ikea') & (filtered_df.brand != 'logitech') & (filtered_df.brand != 'bmw') & (filtered_df.brand != 'jeep') & (filtered_df.brand != 'sony') & (filtered_df.brand != 'asus') & (filtered_df.brand != 'hyundai'))
#filtered on country_yard and removed misplaced brands
country_df = filtered_df.filter((filtered_df.category_code.contains('country_yard')) & (filtered_df.brand != 'nike')& (filtered_df.brand != 'samsung') & (filtered_df.brand != 'sony') & (filtered_df.brand != 'vans') & (filtered_df.brand != 'hyundai') & (filtered_df.brand != 'puma') & (filtered_df.brand != 'columbia') & (filtered_df.brand != 'adidas')& (filtered_df.brand != 'apple'))
#filtered on stationery and removed misplaced brands
stationery_df = filtered_df.filter((filtered_df.category_code.contains('stationery')) & (filtered_df.brand !='hyundai') & (filtered_df.brand !='puma') & (filtered_df.brand !='nike') & (filtered_df.brand !='jeep') & (filtered_df.brand !='jaguar') & (filtered_df.brand !='toyota') & (filtered_df.brand !='shoesrepublic') & (filtered_df.brand !='tefal') & (filtered_df.brand !='fila'))
#filtered on accessories and removed misplaced brands
accessories_df = filtered_df.filter((filtered_df.category_code == 'accessories.umbrella') |(filtered_df.category_code == 'accessories.wallet') |(filtered_df.category_code == 'accessories.bag') &(filtered_df.brand != 'hyundai'))
medicine_df = filtered_df.filter((filtered_df.category_code.contains('medicine')) & (filtered_df.brand != 'ikea'))
# combine all the separated DataFrames into one to load into a table.
df = medicine_df.union(accessories_df)
df = df.union(stationery_df)
df = df.union(country_df)
df = df.union(sports_df)
df = df.union(kids_df)
df = df.union(furniture_df)
df = df.union(auto_df)
df = df.union(appliance_df)
df = df.union(computer_df)
df = df.union(apparel_df)
df = df.union(electronic_df)
df = df.union(construction_df)
# load the cleaned data to a spark database
df.write.saveAsTable("retailaidb.cleaned_dataset")
###Output
_____no_output_____ |
Control_Ops.ipynb | ###Markdown
Control Ops Tutorial In this tutorial we show how to use control flow operators in Caffe2 and give some details about their underlying implementations. Conditional Execution Using NetBuilder Let's start with the conditional operator. We will demonstrate how to use it in two Caffe2 APIs used for building nets: `NetBuilder` and `brew`.
###Code
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import workspace
from caffe2.python.core import Plan, to_execution_step, Net
from caffe2.python.net_builder import ops, NetBuilder
###Output
WARNING:root:This caffe2 python run does not have GPU support. Will run in CPU only mode.
WARNING:root:Debug message: No module named caffe2_pybind11_state_gpu
###Markdown
In the first example, we define several blobs and then use the 'If' operator to set the value of one of them conditionally, depending on the values of other blobs. The pseudocode for the conditional examples we will implement is as follows: if (x > 0): y = 1 else: y = 0
###Code
with NetBuilder() as nb:
# Define our constants
ops.Const(0.0, blob_out="zero")
ops.Const(1.0, blob_out="one")
ops.Const(0.5, blob_out="x")
ops.Const(0.0, blob_out="y")
# Define our conditional sequence
with ops.IfNet(ops.GT(["x", "zero"])):
ops.Copy("one", "y")
with ops.Else():
ops.Copy("zero", "y")
###Output
_____no_output_____
###Markdown
Note the usage of `NetBuilder`'s `ops.IfNet` and `ops.Else` calls: `ops.IfNet` accepts a blob reference or blob name as an input; it expects the input blob to have a scalar value convertible to bool. Note that the optional `ops.Else` is at the same level as `ops.IfNet` and immediately follows the corresponding `ops.IfNet`. Let's execute the resulting net (execution step) and check the values of the blobs. Since x = 0.5, which is indeed greater than 0, we should expect y = 1 after execution.
###Code
# Initialize a Plan
plan = Plan('if_net_test')
# Add the NetBuilder definition above to the Plan
plan.AddStep(to_execution_step(nb))
# Initialize workspace for blobs
ws = workspace.C.Workspace()
# Run the Plan
ws.run(plan)
# Fetch some blobs and print
print('x = ', ws.blobs["x"].fetch())
print('y = ', ws.blobs["y"].fetch())
###Output
x = 0.5
y = 1.0
###Markdown
Before going further, it's important to understand the semantics of execution blocks ('then' and 'else' branches in the example above), i.e. the handling of reads and writes into global (defined outside of the block) and local (defined inside the block) blobs. `NetBuilder` uses the following set of rules:

 - In `NetBuilder`'s syntax, a blob's declaration and definition occur at the same time - we define an operator which writes its output into a blob with a given name.
 - `NetBuilder` keeps track of all operators seen before the current execution point in the same block and up the stack in parent blocks.
 - If an operator writes into a previously unseen blob, it creates a **local** blob that is visible only within the current block and the subsequent children blocks. Local blobs created in a given block are effectively deleted when we exit the block. Any write into a previously defined (in the same block or in the parent blocks) blob updates the originally created blob and does not result in the redefinition of a blob.
 - An operator's input blobs have to be defined earlier in the same block or in the stack of parent blocks.

As a result, in order to see the values computed by a block after its execution, the blobs of interest have to be defined outside of the block. This rule effectively forces visible blobs to always be correctly initialized.

To illustrate these block semantics and provide a more sophisticated example, let's consider the following net:
###Code
with NetBuilder() as nb:
# Define our constants
ops.Const(0.0, blob_out="zero")
ops.Const(1.0, blob_out="one")
ops.Const(2.0, blob_out="two")
ops.Const(1.5, blob_out="x")
ops.Const(0.0, blob_out="y")
# Define our conditional sequence
with ops.IfNet(ops.GT(["x", "zero"])):
ops.Copy("x", "local_blob") # create local_blob using Copy -- this is not visible outside of this block
with ops.IfNet(ops.LE(["local_blob", "one"])):
ops.Copy("one", "y")
with ops.Else():
ops.Copy("two", "y")
with ops.Else():
ops.Copy("zero", "y")
# Note that using local_blob would fail here because it is outside of the block in
# which it was created
###Output
_____no_output_____
###Markdown
When we execute this, we expect that y == 2.0, and that `local_blob` will not exist in the workspace.
###Code
# Initialize a Plan
plan = Plan('if_net_test_2')
# Add the NetBuilder definition above to the Plan
plan.AddStep(to_execution_step(nb))
# Initialize workspace for blobs
ws = workspace.C.Workspace()
# Run the Plan
ws.run(plan)
# Fetch some blobs and print
print('x = ', ws.blobs["x"].fetch())
print('y = ', ws.blobs["y"].fetch())
# Assert that the local_blob does not exist in the workspace
# It should have been destroyed because of its locality
assert "local_blob" not in ws.blobs
###Output
x = 1.5
y = 2.0
###Markdown
Conditional Execution Using Brew Module

Brew is another Caffe2 interface used to construct nets. Unlike `NetBuilder`, `brew` does not track the hierarchy of blocks and, as a result, we need to specify which blobs are considered local and which blobs are considered global when passing 'then' and 'else' models to an API call.

Let's start by importing the necessary items for the `brew` API.
###Code
from caffe2.python import brew
from caffe2.python.workspace import FeedBlob, RunNetOnce, FetchBlob
from caffe2.python.model_helper import ModelHelper
###Output
_____no_output_____
###Markdown
We will use Caffe2's `ModelHelper` class to define and represent our models, as well as contain the parameter information about the models. Note that a `ModelHelper` object has two underlying nets:

(1) param_init_net: Responsible for parameter initialization
(2) net: Contains the main network definition, i.e. the graph of operators that the data flows through

Note that `ModelHelper` is similar to `NetBuilder` in that we define the operator graph first, and actually run it later. With that said, let's define some models to act as conditional elements, and use the `brew` module to form the conditional statement that we want to run. We will construct the same statement used in the first example above.
###Code
# Initialize model, which will represent our main conditional model for this test
model = ModelHelper(name="test_if_model")
# Add variables and constants to our conditional model; notice how we add them to the param_init_net
model.param_init_net.ConstantFill([], ["zero"], shape=[1], value=0.0)
model.param_init_net.ConstantFill([], ["one"], shape=[1], value=1.0)
model.param_init_net.ConstantFill([], ["x"], shape=[1], value=0.5)
model.param_init_net.ConstantFill([], ["y"], shape=[1], value=0.0)
# Add Greater Than (GT) conditional operator to our model
# which checks if "x" > "zero", and outputs the result in the "cond" blob
model.param_init_net.GT(["x", "zero"], "cond")
# Initialize a then_model, and add an operator which we will set to be
# executed if the conditional model returns True
then_model = ModelHelper(name="then_test_model")
then_model.net.Copy("one", "y")
# Initialize an else_model, and add an operator which we will set to be
# executed if the conditional model returns False
else_model = ModelHelper(name="else_test_model")
else_model.net.Copy("zero", "y")
# Use the brew module's handy cond operator to facilitate the construction of the operator graph
brew.cond(
model=model, # main conditional model
cond_blob="cond", # blob with condition value
external_blobs=["x", "y", "zero", "one"], # data blobs used in execution of conditional
then_model=then_model, # pass then_model
else_model=else_model) # pass else_model
###Output
_____no_output_____
###Markdown
Before we run the model, let's use Caffe2's graph visualization tool `net_drawer` to check if the operator graph makes sense.
###Code
from caffe2.python import net_drawer
from IPython import display
graph = net_drawer.GetPydotGraph(model.net, rankdir="LR")
display.Image(graph.create_png(), width=800)
###Output
_____no_output_____
###Markdown
Now let's run the net! When using `ModelHelper`, we must first run the `param_init_net` to initialize parameters, then we execute the main `net`.
###Code
# Run param_init_net once
RunNetOnce(model.param_init_net)
# Run main net (once in this case)
RunNetOnce(model.net)
# Fetch and examine some blobs
print("x = ", FetchBlob("x"))
print("y = ", FetchBlob("y"))
###Output
x = [0.5]
y = [1.]
###Markdown
Loops Using NetBuilder

Another important control flow operator is 'While', which allows repeated execution of a fragment of a net. Let's consider `NetBuilder`'s version first. The pseudocode for this example is:

    i = 0
    y = 0
    while (i <= 7):
        y = i + y
        i += 1
###Code
with NetBuilder() as nb:
# Define our variables
ops.Const(0, blob_out="i")
ops.Const(0, blob_out="y")
# Define loop code and conditions
with ops.WhileNet():
with ops.Condition():
ops.Add(["i", ops.Const(1)], ["i"])
ops.LE(["i", ops.Const(7)])
ops.Add(["i", "y"], ["y"])
###Output
_____no_output_____
###Markdown
As with the 'If' operator, the standard block semantic rules apply. Note the usage of the `ops.Condition` clause, which must immediately follow `ops.WhileNet` and contains code that is executed before each iteration. The last operator in the condition clause is expected to have a single boolean output that determines whether another iteration is executed.

In the example above we increment the counter ("i") before each iteration and accumulate its values in the "y" blob, so the loop's body is executed 7 times.
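As a quick sanity check of the resulting blob values printed below: the condition increments `i` one last time to 8 before the test $8 \le 7$ fails, while the body accumulates

$$y = 1 + 2 + \dots + 7 = 28.$$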
###Code
# Initialize a Plan
plan = Plan('while_net_test')
# Add the NetBuilder definition above to the Plan
plan.AddStep(to_execution_step(nb))
# Initialize workspace for blobs
ws = workspace.C.Workspace()
# Run the Plan
ws.run(plan)
# Fetch blobs and print
print("i = ", ws.blobs["i"].fetch())
print("y = ", ws.blobs["y"].fetch())
###Output
i = 8
y = 28
###Markdown
Loops Using Brew Module

Now let's take a look at how to replicate the loop above using the `ModelHelper`+`brew` interface.
###Code
# Initialize model, which will represent our main conditional model for this test
model = ModelHelper(name="test_while_model")
# Add variables and constants to our model
model.param_init_net.ConstantFill([], ["i"], shape=[1], value=0)
model.param_init_net.ConstantFill([], ["one"], shape=[1], value=1)
model.param_init_net.ConstantFill([], ["seven"], shape=[1], value=7)
model.param_init_net.ConstantFill([], ["y"], shape=[1], value=0)
# Initialize a loop_model that represents the code to run inside of loop
loop_model = ModelHelper(name="loop_test_model")
loop_model.net.Add(["i", "y"], ["y"])
# Initialize cond_model that represents the conditional test that the loop
# abides by, as well as the incrementation step
cond_model = ModelHelper(name="cond_test_model")
cond_model.net.Add(["i", "one"], "i")
cond_model.net.LE(["i", "seven"], "cond")
# Use brew's loop operator to facilitate the creation of the loop's operator graph
brew.loop(
model=model, # main model that contains data
cond_blob="cond", # explicitly specifying condition blob
external_blobs=["cond", "i", "one", "seven", "y"], # data blobs used in execution of the loop
loop_model=loop_model, # pass loop_model
cond_model=cond_model # pass condition model (optional)
)
###Output
_____no_output_____
###Markdown
Once again, let's visualize the net using the `net_drawer`.
###Code
graph = net_drawer.GetPydotGraph(model.net, rankdir="LR")
display.Image(graph.create_png(), width=800)
###Output
_____no_output_____
###Markdown
Finally, we'll run the `param_init_net` and `net` and print our final blob values.
###Code
RunNetOnce(model.param_init_net)
RunNetOnce(model.net)
print("i = ", FetchBlob("i"))
print("y = ", FetchBlob("y"))
###Output
i = [8]
y = [28]
###Markdown
Backpropagation

Both 'If' and 'While' operators support backpropagation. To illustrate how backpropagation with control ops works, let's consider the following examples, in which we construct the operator graph using `NetBuilder` and calculate gradients using the `AddGradientOperators` function. The first example implements the following conditional statement:

    x = 1-D numpy float array
    y = 4
    z = 0
    if (x > 0):
        z = y^2
    else:
        z = y^3
###Code
import numpy as np
# Feed blob called x, which is simply a 1-D numpy array [0.5]
FeedBlob("x", np.array(0.5, dtype='float32'))
# _use_control_ops=True forces NetBuilder to output single net as a result
# x is external for NetBuilder, so we let nb know about it through initial_scope param
with NetBuilder(_use_control_ops=True, initial_scope=["x"]) as nb:
ops.Const(0.0, blob_out="zero")
ops.Const(1.0, blob_out="one")
ops.Const(4.0, blob_out="y")
ops.Const(0.0, blob_out="z")
with ops.IfNet(ops.GT(["x", "zero"])):
ops.Pow("y", "z", exponent=2.0)
with ops.Else():
ops.Pow("y", "z", exponent=3.0)
# we should get a single net as output
assert len(nb.get()) == 1, "Expected a single net produced"
net = nb.get()[0]
# add gradient operators for 'z' blob
grad_map = net.AddGradientOperators(["z"])
###Output
_____no_output_____
###Markdown
In this case

$$x = 0.5$$

$$z = y^2 = 4^2 = 16$$

We will fetch the blob `y_grad`, which was generated by the `AddGradientOperators` call above. This blob contains the gradient of blob z with respect to y. According to basic calculus:

$$y\_grad = \frac{\partial z}{\partial y} = \frac{\partial (y^2)}{\partial y} = 2y = 2(4) = 8$$
###Code
# Run the net
RunNetOnce(net)
# Fetch blobs and print
print("x = ", FetchBlob("x"))
print("y = ", FetchBlob("y"))
print("z = ", FetchBlob("z"))
print("y_grad = ", FetchBlob("y_grad"))
###Output
x = 0.5
y = 4.0
z = 16.0
y_grad = 8.0
###Markdown
Now, let's change the value of blob "x" to -0.5 and rerun the net.
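For reference, the 'else' branch that is now taken gives

$$z = y^3 = 4^3 = 64, \qquad y\_grad = \frac{\partial z}{\partial y} = 3y^2 = 3(4^2) = 48,$$

which is what the fetched values below should show.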
###Code
# To re-run net with different input, simply feed new blob
FeedBlob("x", np.array(-0.5, dtype='float32'))
RunNetOnce(net)
print("x = ", FetchBlob("x"))
print("y = ", FetchBlob("y"))
print("z = ", FetchBlob("z"))
print("y_grad = ", FetchBlob("y_grad"))
###Output
x = -0.5
y = 4.0
z = 64.0
y_grad = 48.0
###Markdown
The next and final example illustrates backpropagation on the following loop:

    x = 2
    y = 3
    z = 2
    i = 0
    while (i <= 2):
        x = x^2
        if (i < 2):
            y = y^2
        else:
            z = z^3
        i += 1
    s = x + y + z

Note that this code essentially computes the sum of x^4 (by squaring x twice), y^2, and z^3.
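In other words, the loop leaves $s = x^4 + y^2 + z^3$ in terms of the original inputs $x=2$, $y=3$, $z=2$, so the expected gradients with respect to those inputs are

$$\frac{\partial s}{\partial x} = 4x^3 = 32, \qquad \frac{\partial s}{\partial y} = 2y = 6, \qquad \frac{\partial s}{\partial z} = 3z^2 = 12,$$

which should match the `x_grad`, `y_grad` and `z_grad` values fetched below.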
###Code
with NetBuilder(_use_control_ops=True) as nb:
# Define variables and constants
ops.Copy(ops.Const(0), "i")
ops.Copy(ops.Const(1), "one")
ops.Copy(ops.Const(2), "two")
ops.Copy(ops.Const(2.0), "x")
ops.Copy(ops.Const(3.0), "y")
ops.Copy(ops.Const(2.0), "z")
# Define loop statement
# Computes x^4, y^2, z^3
with ops.WhileNet():
with ops.Condition():
ops.Add(["i", "one"], "i")
ops.LE(["i", "two"])
ops.Pow("x", "x", exponent=2.0)
with ops.IfNet(ops.LT(["i", "two"])):
ops.Pow("y", "y", exponent=2.0)
with ops.Else():
ops.Pow("z", "z", exponent=3.0)
# Sum s = x + y + z
ops.Add(["x", "y"], "x_plus_y")
ops.Add(["x_plus_y", "z"], "s")
assert len(nb.get()) == 1, "Expected a single net produced"
net = nb.get()[0]
# Add gradient operators to output blob 's'
grad_map = net.AddGradientOperators(["s"])
workspace.RunNetOnce(net)
print("x = ", FetchBlob("x"))
print("x_grad = ", FetchBlob("x_grad")) # derivative: 4x^3
print("y = ", FetchBlob("y"))
print("y_grad = ", FetchBlob("y_grad")) # derivative: 2y
print("z = ", FetchBlob("z"))
print("z_grad = ", FetchBlob("z_grad")) # derivative: 3z^2
###Output
x = 16.0
x_grad = 32.0
y = 9.0
y_grad = 6.0
z = 8.0
z_grad = 12.0
|
dataset/dataset/KNN/KNN-Predict Diabetes_final.ipynb | ###Markdown
KNN - Predict whether a person will have diabetes or not
###Code
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
from sklearn.metrics import accuracy_score
dataset = pd.read_csv('../Downloads/diabetes.csv')
len(dataset)
dataset.head()
# Replace zeroes
zero_not_accepted = ['Glucose', 'BloodPressure', 'SkinThickness', 'BMI', 'Insulin']
for column in zero_not_accepted:
dataset[column] = dataset[column].replace(0, np.NaN)
mean = int(dataset[column].mean(skipna=True))
dataset[column] = dataset[column].replace(np.NaN, mean)
# split dataset
X = dataset.iloc[:, 0:8]
y = dataset.iloc[:, 8]
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0, test_size=0.2)
print(len(X_train))
print(len(y_train))
print(len(X_test))
print(len(y_test))
#Feature scaling
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
# Define the model: Init K-NN
classifier = KNeighborsClassifier(n_neighbors=11, p=2,metric='euclidean')
# Fit Model
classifier.fit(X_train, y_train)
# Predict the test set results
y_pred = classifier.predict(X_test)
y_pred
# Evaluate Model
cm = confusion_matrix(y_test, y_pred)
print (cm)
print(f1_score(y_test, y_pred))
print(accuracy_score(y_test, y_pred))
###Output
0.8181818181818182
|
Nocoes_de_Probabilidade_e_Estatistica.ipynb | ###Markdown
**Title:** Noções de Probabilidade e Estatística - 7th Edition - 2015
**Authors:** Marcos Nascimento Magalhães and Antônio Carlos Pedroso de Lima
**Publisher:** Editora da Universidade de São Paulo

Introduction to Exploratory Data Analysis

In the first chapter the author briefly covers some applications of statistics and presents a few everyday examples where it is used. Broadly speaking, Statistics is divided into 3 major areas:
* Descriptive Statistics
* Probability
* Statistical Inference

**Descriptive Statistics** is, in a sense, a set of techniques intended to describe and summarize the data so that we can draw conclusions about characteristics of interest.

**Probability** can be described as the mathematical theory used to study the uncertainty arising from phenomena of a random nature.

**Statistical Inference** is the area that studies the techniques that make it possible to extrapolate the information and conclusions obtained from a subset of values.

The set of data that contains the characteristic of interest is called the **Population**. The population contains all the individuals of a given group. We can therefore exemplify a population as:
1. All the inhabitants of a given city/state/country...;
2. All the tables produced by a furniture factory;
3. All the blood in a person's body...

However, sometimes we cannot reach every individual of a population, and it becomes necessary to use a small portion of it to collect the data. This portion is called a **Sample**. Defining that sample portion is called sampling, and it tries to provide a subset of values that is as similar as possible to the population from which it originates.
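To make the population/sample idea concrete, here is a minimal illustrative sketch (not taken from the book); the population size, value range, sample size and random seed are arbitrary assumptions chosen only for this example:

```python
import numpy as np

# Hypothetical population: a characteristic of interest measured for all
# 10,000 individuals of a group (values are made up for illustration)
rng = np.random.default_rng(0)
population = rng.integers(low=0, high=100, size=10_000)

# Sampling: draw a sample of 200 individuals without replacement
sample = rng.choice(population, size=200, replace=False)

# The sample is meant to resemble the population it comes from,
# so its summary statistics should approximate the population's
print("population mean:", population.mean())
print("sample mean    :", sample.mean())
```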
###Code
from IPython.display import Image  # import needed to render the image below
Image(filename='images/populacao_amostra.jpg', width=300, height=150)
###Output
_____no_output_____ |
master/_downloads/be71c9935575a01822eb555cbfbbb1a1/plot_debiased_barycenter.ipynb | ###Markdown
Debiased Sinkhorn barycenter demo

This example illustrates the computation of the debiased Sinkhorn barycenter as proposed in [37]_.

.. [37] Janati, H., Cuturi, M., Gramfort, A. Proceedings of the 37th International Conference on Machine Learning, PMLR 119:4692-4701, 2020
###Code
# Author: Hicham Janati <[email protected]>
#
# License: MIT License
# sphinx_gallery_thumbnail_number = 3
import os
from pathlib import Path
import numpy as np
import matplotlib.pyplot as plt
import ot
from ot.bregman import (barycenter, barycenter_debiased,
convolutional_barycenter2d,
convolutional_barycenter2d_debiased)
###Output
_____no_output_____
###Markdown
Debiased barycenter of 1D Gaussians
###Code
n = 100 # nb bins
# bin positions
x = np.arange(n, dtype=np.float64)
# Gaussian distributions
a1 = ot.datasets.make_1D_gauss(n, m=20, s=5) # m= mean, s= std
a2 = ot.datasets.make_1D_gauss(n, m=60, s=8)
# creating matrix A containing all distributions
A = np.vstack((a1, a2)).T
n_distributions = A.shape[1]
# loss matrix + normalization
M = ot.utils.dist0(n)
M /= M.max()
alpha = 0.2 # 0<=alpha<=1
weights = np.array([1 - alpha, alpha])
epsilons = [5e-3, 1e-2, 5e-2]
bars = [barycenter(A, M, reg, weights) for reg in epsilons]
bars_debiased = [barycenter_debiased(A, M, reg, weights) for reg in epsilons]
labels = ["Sinkhorn barycenter", "Debiased barycenter"]
colors = ["indianred", "gold"]
f, axes = plt.subplots(1, len(epsilons), tight_layout=True, sharey=True,
figsize=(12, 4), num=1)
for ax, eps, bar, bar_debiased in zip(axes, epsilons, bars, bars_debiased):
ax.plot(A[:, 0], color="k", ls="--", label="Input data", alpha=0.3)
ax.plot(A[:, 1], color="k", ls="--", alpha=0.3)
for data, label, color in zip([bar, bar_debiased], labels, colors):
ax.plot(data, color=color, label=label, lw=2)
ax.set_title(r"$\varepsilon = %.3f$" % eps)
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Debiased barycenter of 2D images
###Code
this_file = os.path.realpath('__file__')
data_path = os.path.join(Path(this_file).parent.parent.parent, 'data')
f1 = 1 - plt.imread(os.path.join(data_path, 'heart.png'))[:, :, 2]
f2 = 1 - plt.imread(os.path.join(data_path, 'duck.png'))[:, :, 2]
A = np.asarray([f1, f2]) + 1e-2
A /= A.sum(axis=(1, 2))[:, None, None]
###Output
_____no_output_____
###Markdown
Display the input images
###Code
fig, axes = plt.subplots(1, 2, figsize=(7, 4), num=2)
for ax, img in zip(axes, A):
ax.imshow(img, cmap="Greys")
ax.axis("off")
fig.tight_layout()
plt.show()
###Output
_____no_output_____
###Markdown
Barycenter computation and visualization
###Code
bars_sinkhorn, bars_debiased = [], []
epsilons = [5e-3, 7e-3, 1e-2]
for eps in epsilons:
bar = convolutional_barycenter2d(A, eps)
bar_debiased, log = convolutional_barycenter2d_debiased(A, eps, log=True)
bars_sinkhorn.append(bar)
bars_debiased.append(bar_debiased)
titles = ["Sinkhorn", "Debiased"]
all_bars = [bars_sinkhorn, bars_debiased]
fig, axes = plt.subplots(2, 3, figsize=(8, 6), num=3)
for jj, (method, ax_row, bars) in enumerate(zip(titles, axes, all_bars)):
for ii, (ax, img, eps) in enumerate(zip(ax_row, bars, epsilons)):
ax.imshow(img, cmap="Greys")
if jj == 0:
ax.set_title(r"$\varepsilon = %.3f$" % eps, fontsize=13)
ax.set_xticks([])
ax.set_yticks([])
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
if ii == 0:
ax.set_ylabel(method, fontsize=15)
fig.tight_layout()
plt.show()
###Output
_____no_output_____ |
Sk_Regresion_Lineal_Simple.ipynb | ###Markdown
###Code
!python --version
import sklearn
sklearn.__version__
###Output
_____no_output_____
###Markdown
Simple Linear Regression: here our model only needs to learn two constants, $b_0$ = intercept and $b_1$ = slope.

1) Training our model $y = b_0 + b_1x$

2) Importing our file, which is stored in Google Drive
###Code
from google.colab import drive
drive.mount('/content/drive')
###Output
Mounted at /content/drive
###Markdown
3) Importing libraries into the project: numpy, pandas, matplotlib.
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
###Output
_____no_output_____
###Markdown
4) Loading our dataset, stored as an Excel file, from Drive.
###Code
data = pd.read_excel("/content/drive/MyDrive/Colab_Notebooks/DataSets/ReduccionSolidosDemandaOxigeno.xlsx")
###Output
_____no_output_____
###Markdown
5) Inspecting the contents of the `data` variable.
###Code
data  # Viewing the full contents of the data variable
###Output
_____no_output_____
###Markdown
6) Storing the 'Reduccion de solidos' (solids reduction) column of `data` in the variable `x`.
###Code
x = data[["Reduccion de solidos"]]
x #Visualizando los datos de X
###Output
_____no_output_____
###Markdown
7) Storing the 'Reduccion de la demanda de oxigeno' (oxygen demand reduction) column of `data` in the variable `y`.
###Code
y = data[["Reduccion de la demanda de oxigeno"]]
y #Visualizando los datos de Y
###Output
_____no_output_____
###Markdown
8) Creating the scatter plot of the variables in our dataset.
###Code
plt.scatter(x, y)  # Create the scatter plot from the x and y data
plt.xlabel("Reduccion de solidos")  # Name of the x variable (column name kept as in the dataset)
plt.ylabel("Reduccion de la demanda de oxigeno")  # Name of the y variable (column name kept as in the dataset)
plt.grid()  # Enable the grid
plt.show()  # Display the plot
###Output
_____no_output_____
###Markdown
9) Converting our DataFrame into a NumPy data matrix.
###Code
matriz = data.to_numpy()  # Create the data matrix with NumPy
matriz  # Inspect our data matrix
###Output
_____no_output_____
###Markdown
10) Computing the value of n and the sums from the data matrix.
###Code
n = len(matriz)  # Get the value of n, the number of data points in the matrix
sumatoria_x = np.sum(matriz[:,0])
sumatoria_y = np.sum(matriz[:,1])
sumatoria_producto = np.sum(matriz[:,0]*matriz[:,1])
sumatoria_cuadrado_x = np.sum(matriz[:,0]*matriz[:,0])
print("n:", n)
print("sumatoria x:", sumatoria_x)
print("sumatoria y:", sumatoria_y)
print("sumatoria xy:", sumatoria_producto)
print("sumatoria x^2:", sumatoria_cuadrado_x)
###Output
n: 33
sumatoria x: 1104
sumatoria y: 1124
sumatoria xy: 41355
sumatoria x^2: 41086
###Markdown
11) Substituting the values obtained from the matrix into the equation.
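For reference, the closed-form least-squares estimates that the next cell implements are

$$b_1 = \frac{n\sum x_i y_i - \sum x_i \sum y_i}{n\sum x_i^2 - \left(\sum x_i\right)^2}, \qquad b_0 = \frac{\sum y_i - b_1 \sum x_i}{n}$$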
###Code
b1 = (n*sumatoria_producto-sumatoria_x*sumatoria_y) / (n*sumatoria_cuadrado_x-sumatoria_x*sumatoria_x)
b0 = (sumatoria_y-b1*sumatoria_x)/n
print("b1:", b1)
print("b0:", b0)
###Output
b1: 0.9036432105793231
b0: 3.829633197588709
###Markdown
12) Creating the model in scikit-learn.
###Code
clf = LinearRegression()  # Create our regression model with scikit-learn
###Output
_____no_output_____
###Markdown
13) Training the created model.
###Code
clf.fit(x, y)  # Train our model with the x and y data
###Output
_____no_output_____
###Markdown
14) Retrieving the coefficient ($b_1$) learned by our model.
###Code
clf.coef_  # This gives us the value of b1
###Output
_____no_output_____
###Markdown
15) Getting the value of the intercept, $b_0$.
###Code
clf.intercept_  # This gives us the value of b0, which is the intercept
###Output
_____no_output_____
###Markdown
16) Making predictions for values from the dataset.
###Code
clf.predict([[7]])  # Make a prediction for the value 7, taken from the dataset, to compare against nearby values
clf.predict([[100]])  # Make a prediction for the value 100 to compare against nearby values
###Output
/usr/local/lib/python3.7/dist-packages/sklearn/base.py:451: UserWarning: X does not have valid feature names, but LinearRegression was fitted with feature names
"X does not have valid feature names, but"
###Markdown
17) Plotting the values so that the linear regression can be observed.
###Code
plt.plot(x, y)  # Plot the x and y values
plt.plot(x, clf.predict(x))  # Predict over the whole x matrix
plt.title("Regresion Lineal Simple")
plt.xlabel("Reduccion de solidos")  # Name of the x variable
plt.ylabel("Reduccion de la demanda de oxigeno")  # Name of the y variable
plt.legend(["y", "Predicciones"])  # y holds the desired values, the second series the predictions
plt.grid()  # Enable the grid
plt.show()  # Display the plot
###Output
_____no_output_____ |
DB_OpenData/Explore_With_py2neo_NetworkX.ipynb | ###Markdown
Explore DB-OpenData with py2neo and NetworkX

In this notebook we explore the carsharing data from Deutsche Bahn, which has been migrated to a Neo4J graph database. For this we use two libraries: [py2neo](http://py2neo.org/v3/index.html) and [NetworkX](https://networkx.github.io).
###Code
seperatingLine = "\n########################################################################################################\n"
###Output
_____no_output_____
###Markdown
Access the object informations over the py2neo-APIA single record in a cursor as result of a cypher-query execution contains the informations about a single (or a list of) node(s)/relationship(s), depending on which type the result objects of the query have.For example the following query:```MATCH (v:VEHICLE:MITSUBISHI) RETURN *```In this case ist the result data (Cursor) a set of node objects. Each row in this cursor has records. A record contains in the top level a key/value map with a key, that called with the same variable-name, such that used in the query for the object (here "v" as variable-name of a vehicle-object). The value of this item represents the node informations and is from Type Node:```('v': (ac742de:AUTO:MITSUBISHI:STROM:VEHICLE {fuelType:"Strom",kw:35,modelDetails:"ELEKTRO 35kW Automatik 4-Sitzer",modelName:"i-Miev",ownershipType:"Langzeitmiete",registrationPlate:"F-R 8009",vehicleID:148221,vin:"JMBLDHA3WBU000341"}))```By an access to the value of the top level map, we access to an object of type Node:```pythonsub = record["v"]``````(ac742de:AUTO:MITSUBISHI:STROM:VEHICLE {fuelType:"Strom",kw:35,modelDetails:"ELEKTRO 35kW Automatik 4-Sitzer",modelName:"i-Miev",ownershipType:"Langzeitmiete",registrationPlate:"F-R 8009",vehicleID:148221,vin:"JMBLDHA3WBU000341"})```Because of this object type we can also access to the labels of the node:```pythonrecord["v"].labels()``````SetView({'AUTO', 'MITSUBISHI', 'VEHICLE', 'STROM'})```With a conversion of the node to a dictionary it is possible to select the attributes of the node;```pythondict(record["v"])``````{'ownershipType': 'Langzeitmiete', 'modelName': 'i-Miev', 'modelDetails': 'ELEKTRO 35kW Automatik 4-Sitzer', 'fuelType': 'Strom', 'vin': 'JMBLDHA3WBU000341', 'registrationPlate': 'F-R 8009', 'vehicleID': 148221, 'kw': 35}```Alternatively we can access to the attributes with the following way:```pythonprint(record["v"]["modelName"])``````i-Miev```For the query `MATCH (v:VEHICLE:MITSUBISHI)-[r:WAS_BOOKED_IN]->(s:STATION) RETURN v, r, s` it will be returned three objects and one from type Relationship (variable r).By a relationship its possible to access following informations over the API:- Type of relationship `relationship.type()` `-> WAS_BOOKED_IN` - All nodes `relationship.nodes()` ``` ((a34e56f:AUTO:MITSUBISHI:STROM:VEHICLE {bordComputerType:"Invers BCSA 2006 GPRS",fuelType:"Strom",kw:35,modelDetails:"ELEKTRO 35kW NAVI Automatik 4-Sitzer",modelName:"i-Miev",ownershipType:"Langzeitmiete",registrationPlate:"F-R 8011",vehicleID:148261,vin:"JMBLDHA3WBU000344"}), (f55f604:INACTIVE:STATION:STATIONBASED {city:"Stuttgart",code:"STG",latitude:48.780357360839844,longtitude:9.186469078063965,name:"Parkgarage Staatsgalerie",poiAirport:"Nein",poiLongDistanceTrains:"Nein",poiSuburbanTrains:"Nein",poiUnderground:"Nein",rentalZoneID:401727,type:"stationbased"})) ``` - Start or end node `relationship.start_node()` ``` (a34e56f:AUTO:MITSUBISHI:STROM:VEHICLE {bordComputerType:"Invers BCSA 2006 GPRS",fuelType:"Strom",kw:35,modelDetails:"ELEKTRO 35kW NAVI Automatik 4-Sitzer",modelName:"i-Miev",ownershipType:"Langzeitmiete",registrationPlate:"F-R 8011",vehicleID:148261,vin:"JMBLDHA3WBU000344"}) ``` `relationship.end_node()` ```(f55f604:INACTIVE:STATION:STATIONBASED {city:"Stuttgart",code:"STG",latitude:48.780357360839844,longtitude:9.186469078063965,name:"Parkgarage Staatsgalerie",poiAirport:"Nein",poiLongDistanceTrains:"Nein",poiSuburbanTrains:"Nein",poiUnderground:"Nein",rentalZoneID:401727,type:"stationbased"}) ``` - Attributes of the 
relationship `relationship[times]` `28`
###Code
def printOutNodeInformations(node, singleAttributeName):
print('keys of the node')
print(node.keys())
print('labels of the node')
print(node.labels())
print('single attribute access')
print(node[singleAttributeName])
def printOutRelationshipInformations(relationship, singleAttributeName):
print('type of the relationship')
print(relationship.type())
print('single attribute access')
print(relationship[singleAttributeName])
print('all nodes of relationship')
print(relationship.nodes())
print('start node of relationship')
print(relationship.start_node())
print('end node of relationship')
print(relationship.end_node())
from py2neo import Graph, Path, Subgraph, Node, PropertyDict, Relationship, Walkable, walk
graph = Graph("http://neo4j:neo4jj@localhost:7474/db/data")
query = """
MATCH (v:VEHICLE:MITSUBISHI) RETURN *
"""
cursor = graph.run(query)
for record in cursor:
print('raw view of a record:')
print(record)
print('value in the records top level map:')
print(record["v"])
print('dictionary representation of the node attributes')
print(dict(record["v"]))
print('dictionary keys (node attribute names)')
print('%s Node informations (VEHICLE) %s' %(seperatingLine, seperatingLine))
node = record["v"]
printOutNodeInformations(node, "modelName")
query = """
MATCH (v:VEHICLE:MITSUBISHI)-[r:WAS_BOOKED_IN]->(s:STATION) RETURN v, r, s
"""
cursor = graph.run(query)
# print(cursor.data())
for record in cursor:
print('%s Node informations (VEHICLE) %s' %(seperatingLine, seperatingLine))
vehicle = record["v"]
printOutNodeInformations(vehicle, "modelName")
print('%s Node informations (STATION) %s' %(seperatingLine, seperatingLine))
station = record["s"]
printOutNodeInformations(station, "name")
print('%s Relationship informations (WAS_BOOKED_IN) %s' %(seperatingLine, seperatingLine))
station = record["r"]
printOutRelationshipInformations(station, "times")
###Output
########################################################################################################
Node informations (VEHICLE)
########################################################################################################
keys of the node
dict_keys(['ownershipType', 'modelName', 'modelDetails', 'fuelType', 'vin', 'registrationPlate', 'vehicleID', 'kw'])
labels of the node
SetView({'AUTO', 'MITSUBISHI', 'VEHICLE', 'STROM'})
single attribute access
i-Miev
########################################################################################################
Node informations (STATION)
########################################################################################################
keys of the node
dict_keys(['code', 'poiAirport', 'city', 'rentalZoneID', 'poiSuburbanTrains', 'latitude', 'name', 'longtitude', 'type', 'poiLongDistanceTrains', 'poiUnderground'])
labels of the node
SetView({'ACTIVE', 'STATION', 'STATIONBASED'})
single attribute access
Wilhelmstraße-ELEKTRO
########################################################################################################
Relationship informations (WAS_BOOKED_IN)
########################################################################################################
type of the relationship
WAS_BOOKED_IN
single attribute access
791
all nodes of relationship
((ac742de:AUTO:MITSUBISHI:STROM:VEHICLE {fuelType:"Strom",kw:35,modelDetails:"ELEKTRO 35kW Automatik 4-Sitzer",modelName:"i-Miev",ownershipType:"Langzeitmiete",registrationPlate:"F-R 8009",vehicleID:148221,vin:"JMBLDHA3WBU000341"}), (b227752:ACTIVE:STATION:STATIONBASED {city:"Ludwigsburg",code:"WIL-ELEKTRO",latitude:48.8958625793457,longtitude:9.191786766052246,name:"Wilhelmstraße-ELEKTRO",poiAirport:"Nein",poiLongDistanceTrains:"Nein",poiSuburbanTrains:"Nein",poiUnderground:"Nein",rentalZoneID:403352,type:"stationbased"}))
start node of relationship
(ac742de:AUTO:MITSUBISHI:STROM:VEHICLE {fuelType:"Strom",kw:35,modelDetails:"ELEKTRO 35kW Automatik 4-Sitzer",modelName:"i-Miev",ownershipType:"Langzeitmiete",registrationPlate:"F-R 8009",vehicleID:148221,vin:"JMBLDHA3WBU000341"})
end node of relationship
(b227752:ACTIVE:STATION:STATIONBASED {city:"Ludwigsburg",code:"WIL-ELEKTRO",latitude:48.8958625793457,longtitude:9.191786766052246,name:"Wilhelmstraße-ELEKTRO",poiAirport:"Nein",poiLongDistanceTrains:"Nein",poiSuburbanTrains:"Nein",poiUnderground:"Nein",rentalZoneID:403352,type:"stationbased"})
########################################################################################################
Node informations (VEHICLE)
########################################################################################################
keys of the node
dict_keys(['ownershipType', 'modelName', 'modelDetails', 'fuelType', 'vin', 'registrationPlate', 'bordComputerType', 'vehicleID', 'kw'])
labels of the node
SetView({'AUTO', 'MITSUBISHI', 'VEHICLE', 'STROM'})
single attribute access
i-Miev
########################################################################################################
Node informations (STATION)
########################################################################################################
keys of the node
dict_keys(['code', 'poiAirport', 'city', 'rentalZoneID', 'poiSuburbanTrains', 'latitude', 'name', 'longtitude', 'type', 'poiLongDistanceTrains', 'poiUnderground'])
labels of the node
SetView({'STATIONBASED', 'STATION', 'INACTIVE'})
single attribute access
Bahnhof-ELEKTRO
########################################################################################################
Relationship informations (WAS_BOOKED_IN)
########################################################################################################
type of the relationship
WAS_BOOKED_IN
single attribute access
245
all nodes of relationship
((a34e56f:AUTO:MITSUBISHI:STROM:VEHICLE {bordComputerType:"Invers BCSA 2006 GPRS",fuelType:"Strom",kw:35,modelDetails:"ELEKTRO 35kW NAVI Automatik 4-Sitzer",modelName:"i-Miev",ownershipType:"Langzeitmiete",registrationPlate:"F-R 8011",vehicleID:148261,vin:"JMBLDHA3WBU000344"}), (dfa6823:INACTIVE:STATION:STATIONBASED {city:"Ludwigsburg",code:"BF-Elektro",latitude:48.891685485839844,longtitude:9.183795928955078,name:"Bahnhof-ELEKTRO",poiAirport:"Nein",poiLongDistanceTrains:"Nein",poiSuburbanTrains:"Nein",poiUnderground:"Nein",rentalZoneID:404993,type:"stationbased"}))
start node of relationship
(a34e56f:AUTO:MITSUBISHI:STROM:VEHICLE {bordComputerType:"Invers BCSA 2006 GPRS",fuelType:"Strom",kw:35,modelDetails:"ELEKTRO 35kW NAVI Automatik 4-Sitzer",modelName:"i-Miev",ownershipType:"Langzeitmiete",registrationPlate:"F-R 8011",vehicleID:148261,vin:"JMBLDHA3WBU000344"})
end node of relationship
(dfa6823:INACTIVE:STATION:STATIONBASED {city:"Ludwigsburg",code:"BF-Elektro",latitude:48.891685485839844,longtitude:9.183795928955078,name:"Bahnhof-ELEKTRO",poiAirport:"Nein",poiLongDistanceTrains:"Nein",poiSuburbanTrains:"Nein",poiUnderground:"Nein",rentalZoneID:404993,type:"stationbased"})
########################################################################################################
Node informations (VEHICLE)
########################################################################################################
keys of the node
dict_keys(['ownershipType', 'modelName', 'modelDetails', 'fuelType', 'vin', 'registrationPlate', 'bordComputerType', 'vehicleID', 'kw'])
labels of the node
SetView({'AUTO', 'MITSUBISHI', 'VEHICLE', 'STROM'})
single attribute access
i-Miev
########################################################################################################
Node informations (STATION)
########################################################################################################
keys of the node
dict_keys(['code', 'poiAirport', 'city', 'rentalZoneID', 'poiSuburbanTrains', 'latitude', 'name', 'longtitude', 'type', 'poiLongDistanceTrains', 'poiUnderground'])
labels of the node
SetView({'STATIONBASED', 'STATION', 'INACTIVE'})
single attribute access
Parkgarage Staatsgalerie
########################################################################################################
Relationship informations (WAS_BOOKED_IN)
########################################################################################################
type of the relationship
WAS_BOOKED_IN
single attribute access
28
all nodes of relationship
((a34e56f:AUTO:MITSUBISHI:STROM:VEHICLE {bordComputerType:"Invers BCSA 2006 GPRS",fuelType:"Strom",kw:35,modelDetails:"ELEKTRO 35kW NAVI Automatik 4-Sitzer",modelName:"i-Miev",ownershipType:"Langzeitmiete",registrationPlate:"F-R 8011",vehicleID:148261,vin:"JMBLDHA3WBU000344"}), (f55f604:INACTIVE:STATION:STATIONBASED {city:"Stuttgart",code:"STG",latitude:48.780357360839844,longtitude:9.186469078063965,name:"Parkgarage Staatsgalerie",poiAirport:"Nein",poiLongDistanceTrains:"Nein",poiSuburbanTrains:"Nein",poiUnderground:"Nein",rentalZoneID:401727,type:"stationbased"}))
start node of relationship
(a34e56f:AUTO:MITSUBISHI:STROM:VEHICLE {bordComputerType:"Invers BCSA 2006 GPRS",fuelType:"Strom",kw:35,modelDetails:"ELEKTRO 35kW NAVI Automatik 4-Sitzer",modelName:"i-Miev",ownershipType:"Langzeitmiete",registrationPlate:"F-R 8011",vehicleID:148261,vin:"JMBLDHA3WBU000344"})
end node of relationship
(f55f604:INACTIVE:STATION:STATIONBASED {city:"Stuttgart",code:"STG",latitude:48.780357360839844,longtitude:9.186469078063965,name:"Parkgarage Staatsgalerie",poiAirport:"Nein",poiLongDistanceTrains:"Nein",poiSuburbanTrains:"Nein",poiUnderground:"Nein",rentalZoneID:401727,type:"stationbased"})
###Markdown
Draw informations as graph over NetworkXIn the library networkX is every graph a set of edges, where each one of them connects two nodes. Every edge or node can also have attributes. In the following we prepare the data in the dataframe - as result of a trivial query - for this view about the rental-zone/vehicle data:```pythondfn['VEHICLE_ID'] = dfn.apply({'v' : lambda x: x["vehicleID"]})dfn['RENTALZONE_ID'] = dfn.apply({'s' : lambda x: x["rentalZoneID"]})dfn['vModelName'] = dfn.apply({'v' : lambda x: x["modelName"]})dfn['sName'] = dfn.apply({'s' : lambda x: x["name"]})dfn['TIMES'] = dfn.apply({'r' : lambda x: x["times"]})```Alternatively we can also access to the needed informations in this way:```pythondfn["v"]["vehicleID"]dfn["s"]["rentalZoneID"]dfn["r"]["times"]```NetworkX provides an easy way to import the data in a dataframe as edges and nodes:```pythonG2=nx.from_pandas_dataframe(dfn, 'VEHICLE_ID', 'RENTALZONE_ID', ['TIMES'])```The disadvantage of this approach is, that we can't import node attributes over this interface. This will also don't work, if we try to import the nodes over a explicit function, ```pythondef addVRZNodesToGraph(row, graph): graph.add_node(row["RENTALZONE_ID"],code=str(row["s"]["code"])) graph.add_node(row["VEHICLE_ID"],vin=str(row["v"]["vin"])) return graph...```... and import on this basis the data to the graph:```pythonG2=nx.Graph()dfn.apply(addVRZNodesToGraph, axis=1, graph=G2)print (G2.nodes(data=True))G2=nx.from_pandas_dataframe(dfn, 'VEHICLE_ID', 'RENTALZONE_ID', ['TIMES'])print (G.edges(data=True))print (G.nodes(data=True))```If we look at the result of the above code, we find out, that the nodes were overridden within the import operation:``` Node informations in the graph before import [(403352, {'code': 'WIL-ELEKTRO'}), (148221, {'vin': 'JMBLDHA3WBU000341'}), (404993, {'code': 'BF-Elektro'}), (148261, {'vin': 'JMBLDHA3WBU000344'}), (401727, {'code': 'STG'})] Node informations in the graph after import [(148221, {}), (403352, {}), (148261, {}), (404993, {}), (401727, {})] Edge informations in the graph after import [(148221, 403352, {'TIMES': '791'}), (148261, 404993, {'TIMES': '245'}), (148261, 401727, {'TIMES': '28'})]```Because of this side effect we use only the way to add nodes and edges manually to the graph:```pythondef addVRZEdgesToGraph(row, graph, relationshipType): graph.add_edge(row["VEHICLE_ID"], row["RENTALZONE_ID"],{'type': relationshipType, 'times': row["TIMES"]}) return graph...G=nx.Graph()dfn.apply(addVRZNodesToGraph, axis=1, graph=G)dfn.apply(addVRZEdgesToGraph, axis=1, graph=G, relationshipType='WAS_BOOKED_IN')...print('%s Node informations in the graph after import %s' %(seperatingLine, seperatingLine)) print (G.nodes(data=True))print('%s Edge informations in the graph after import %s' %(seperatingLine, seperatingLine)) print (G.edges(data=True))```The output of the code above is:``` Node informations in the graph after manual import [(403352, {'code': 'WIL-ELEKTRO'}), (148221, {'vin': 'JMBLDHA3WBU000341'}), (404993, {'code': 'BF-Elektro'}), (148261, {'vin': 'JMBLDHA3WBU000344'}), (401727, {'code': 'STG'})] Edge informations in the graph after manual import [(403352, 148221, {'type': 'WAS_BOOKED_IN', 'times': '791'}), (404993, 148261, {'type': 'WAS_BOOKED_IN', 'times': '245'}), (148261, 401727, {'type': 'WAS_BOOKED_IN', 'times': '28'})]```In our case we'll draw also labels for the nodes and edges. NetworkX provides in the functionality a way to pass the labels of nodes and edges as parameter. 
For the node labels most them exist as dict and for edges as simple list.Therefore we prepare the labels on the basis of the existing data as follows.We've two different types of nodes: Vehicle and rental zone. For this reason we must collect different informations as values for keys (IDs) from both entities. The id-namespace of both entities aren't overlapping in our case. We bring the IDs of both data in the first step together: ```pythondfnnl = dfn.drop(['r', 's', 'v', 'TIMES', 'vModelName', 'sName'], axis=1).copy(True)dfnnl = dfnnl.reset_index()dfnnl = dfnnl.drop("index", axis=1)dfnnl = dfnnl.stack()dfnnl = dfnnl.reset_index()dfnnl = dfnnl.rename_axis({"level_0": "levelName", "level_1": "columnName", 0: "ID"}, axis="columns")dfnnl["columnName"] = dfnnl["columnName"].astype(str)```In the second step we append a label column to the data frame with the specific label-information for each entity (vehicle or rental zone):```pythondef produceLabelInformation(row, orgData): label = " " if str(row["columnName"]) == 'VEHICLE_ID': label = orgData.loc[(orgData['VEHICLE_ID'] == row["ID"])] .drop_duplicates(subset=["vModelName"],keep="first")["vModelName"].values[0] else: label = orgData.loc[(orgData['RENTALZONE_ID'] == row["ID"])] .drop_duplicates(subset=["sName"],keep="first")["sName"].values[0] return label...dfnnl["LABEL"] = dfnnl.apply(produceLabelInformation, axis=1, orgData=dfn)print('%s IDs from vehicles and rental zones with label informations %s' %(seperatingLine, seperatingLine)) print(dfnnl)```The code above produces following output:``` IDs from vehicles and rental zones with label informations levelName columnName ID LABEL0 0 VEHICLE_ID 148221 i-Miev1 0 RENTALZONE_ID 403352 Wilhelmstraße-ELEKTRO2 1 VEHICLE_ID 148261 i-Miev3 1 RENTALZONE_ID 404993 Bahnhof-ELEKTRO5 2 RENTALZONE_ID 401727 Parkgarage Staatsgalerie```Because of the uniformity in the relationships between the nodes, we need only a constant label for each edge. 
Therefore it's enough to produce a list containing string members for a single label:```pythondef prepareEdgeLabelsForGraph(dfn): dfnel = dfn.apply({'r' : lambda x: 'WAS_BOOKED_IN'}) print('%s Edge labels %s' %(seperatingLine, seperatingLine)) dfnel = dfnel.reset_index() dfnel = dfnel.drop("index", axis=1) dfnel = dfnel.rename_axis({"r": "edgeLabel"}, axis="columns") print(dfnel) return dfnel ...dfnel = prepareEdgeLabelsForGraph(dfn)```The code above produces following output:``` Edge labels edgeLabel0 WAS_BOOKED_IN1 WAS_BOOKED_IN2 WAS_BOOKED_IN```We can now pass all informations together (graph and labels) to a simple function, that draws a graph as image:```pythonimport matplotlib.pyplot as pltimport networkx as nximport pandas as pd%matplotlib inlinedef draw_graph(graph, layout, edgeLabels, nodeLabels, name): edge_labels = dict(zip(graph.edges(), edgeLabels)) G = graph graph_pos = layout plt.figure(3,figsize=(30,30)) draw nodes, edges and labels nx.draw_networkx_nodes(G, graph_pos, node_size=15000, node_color='blue', alpha=0.3) we can now added edge thickness and edge color nx.draw_networkx_edges(G, graph_pos, width=5, alpha=0.3, edge_color='green') nx.draw_networkx_labels(G, graph_pos, nodeLabels, font_size=16, font_family='sans-serif') nx.draw_networkx_edge_labels(G, graph_pos, font_size=16, edge_labels=edge_labels) plt.savefig("graph_" + name + ".png", dpi=100, facecolor='w', edgecolor='w', orientation='portrait', papertype=None, format=None, transparent=False, bbox_inches=None, pad_inches=0.1) plt.show()...draw_graph(G, nx.spring_layout(G, 2, 1), edgeLabels, nodeLabels, "spring")```
###Code
import matplotlib.pyplot as plt
import networkx as nx
import pandas as pd
%matplotlib inline
def draw_graph(graph, layout, edgeLabels, nodeLabels, name):
edge_labels = dict(zip(graph.edges(), edgeLabels))
G = graph
graph_pos = layout
plt.figure(3,figsize=(30,30))
# draw nodes, edges and labels
nx.draw_networkx_nodes(G, graph_pos, node_size=15000, node_color='blue', alpha=0.3)
# we can now added edge thickness and edge color
nx.draw_networkx_edges(G, graph_pos, width=5, alpha=0.3, edge_color='green')
nx.draw_networkx_labels(G, graph_pos, nodeLabels, font_size=16, font_family='sans-serif')
nx.draw_networkx_edge_labels(G, graph_pos, font_size=16, edge_labels=edge_labels)
plt.savefig("graph_" + name + ".png",
dpi=100,
facecolor='w',
edgecolor='w',
orientation='portrait',
papertype=None,
format=None,
transparent=False,
bbox_inches=None,
pad_inches=0.1)
plt.show()
def addVRZNodesToGraph(row, graph):
graph.add_node(row["RENTALZONE_ID"],code=str(row["s"]["code"]))
graph.add_node(row["VEHICLE_ID"],vin=str(row["v"]["vin"]))
return graph
def addVRZEdgesToGraph(row, graph, relationshipType):
graph.add_edge(row["VEHICLE_ID"], row["RENTALZONE_ID"],{'type': relationshipType, 'times': row["TIMES"]})
return graph
def produceLabelInformation(row, orgData):
label = " "
if str(row["columnName"]) == 'VEHICLE_ID':
label = orgData.loc[(orgData['VEHICLE_ID'] == row["ID"])]\
.drop_duplicates(subset=["vModelName"],keep="first")["vModelName"].values[0]
else:
label = orgData.loc[(orgData['RENTALZONE_ID'] == row["ID"])]\
.drop_duplicates(subset=["sName"],keep="first")["sName"].values[0]
return label
def prepareDataForGraph(df):
dfn = df.reset_index()
dfn = dfn.drop("index", axis=1)
print('%s Original dataframe without index %s' %(seperatingLine, seperatingLine))
print(dfn.head())
dfn['VEHICLE_ID'] = dfn.apply({'v' : lambda x: x["vehicleID"]})
dfn['RENTALZONE_ID'] = dfn.apply({'s' : lambda x: x["rentalZoneID"]})
dfn['vModelName'] = dfn.apply({'v' : lambda x: x["modelName"]})
dfn['sName'] = dfn.apply({'s' : lambda x: x["name"]})
dfn['TIMES'] = dfn.apply({'r' : lambda x: x["times"]})
print('%s Extended dataframe %s' %(seperatingLine, seperatingLine))
print(dfn)
return dfn
def prepareNodeLabelsForGraph(dfn):
dfnnl = dfn.drop(['r', 's', 'v', 'TIMES', 'vModelName', 'sName'], axis=1).copy(True)
dfnnl = dfnnl.reset_index()
dfnnl = dfnnl.drop("index", axis=1)
dfnnl = dfnnl.stack()
dfnnl = dfnnl.reset_index()
dfnnl = dfnnl.rename_axis({"level_0": "levelName", "level_1": "columnName", 0: "ID"}, axis="columns")
dfnnl["columnName"] = dfnnl["columnName"].astype(str)
dfnnl = dfnnl.drop_duplicates(subset=["ID"],keep="first")
# dfnnl.append(dfn.get(['RENTALZONE_ID']).copy(True), ignore_index=True)
print('%s Stacked ids from vehicles and rental zones %s' %(seperatingLine, seperatingLine))
print(dfnnl)
dfnnl["LABEL"] = dfnnl.apply(produceLabelInformation, axis=1, orgData=dfn)
print('%s IDs from vehicles and rental zones with label informations %s' %(seperatingLine, seperatingLine))
print(dfnnl)
return dfnnl
def prepareEdgeLabelsForGraph(dfn):
dfnel = dfn.apply({'r' : lambda x: 'WAS_BOOKED_IN'})
print('%s Edge labels %s' %(seperatingLine, seperatingLine))
dfnel = dfnel.reset_index()
dfnel = dfnel.drop("index", axis=1)
dfnel = dfnel.rename_axis({"r": "edgeLabel"}, axis="columns")
print(dfnel)
return dfnel
#
# Not recommend way to hold data to a graph
#
def importDataToGraph(dfn):
G2=nx.Graph()
dfn.apply(addVRZNodesToGraph, axis=1, graph=G2)
print('%s Node informations in the graph before import %s' %(seperatingLine, seperatingLine))
print (G2.nodes(data=True))
G2=nx.from_pandas_dataframe(dfn, 'VEHICLE_ID', 'RENTALZONE_ID', ['TIMES'])
print('%s Node informations in the graph after import %s' %(seperatingLine, seperatingLine))
print (G2.nodes(data=True))
print('%s Edge informations in the graph after import %s' %(seperatingLine, seperatingLine))
print (G2.edges(data=True))
return G2
#
# Recommend way to hold data to a graph
#
def addDataToGraph(dfn):
G=nx.Graph()
dfn.apply(addVRZNodesToGraph, axis=1, graph=G)
dfn.apply(addVRZEdgesToGraph, axis=1, graph=G, relationshipType='WAS_BOOKED_IN')
print('%s Node informations in the graph after manual import %s' %(seperatingLine, seperatingLine))
print (G.nodes(data=True))
print('%s Edge informations in the graph after manual import %s' %(seperatingLine, seperatingLine))
print (G.edges(data=True))
return G
data = graph.data(query)
df = pd.DataFrame(data)
dfn = prepareDataForGraph(df)
dfnnl = prepareNodeLabelsForGraph(dfn)
dfnel = prepareEdgeLabelsForGraph(dfn)
#
# Recommend way to hold data to a graph
#
G = addDataToGraph(dfn)
#
# Not recommend way to hold data to a graph
#
G2 = importDataToGraph(dfn)
nodeLabels = dict(zip(dfnnl["ID"], dfnnl["LABEL"]))
edgeLabels = dfnel["edgeLabel"].astype(str)
print(nodeLabels)
print(edgeLabels)
draw_graph(G, nx.spring_layout(G, 2, 1), edgeLabels, nodeLabels, "spring")
###Output
{148221: 'i-Miev', 403352: 'Wilhelmstraße-ELEKTRO', 148261: 'i-Miev', 404993: 'Bahnhof-ELEKTRO', 401727: 'Parkgarage Staatsgalerie'}
0 WAS_BOOKED_IN
1 WAS_BOOKED_IN
2 WAS_BOOKED_IN
Name: edgeLabel, dtype: object
|
playbook/tactics/defense-evasion/T1070.001.ipynb | ###Markdown
T1070.001 - Indicator Removal on Host: Clear Windows Event LogsAdversaries may clear Windows Event Logs to hide the activity of an intrusion. Windows Event Logs are a record of a computer's alerts and notifications. There are three system-defined sources of events: System, Application, and Security, with five event types: Error, Warning, Information, Success Audit, and Failure Audit.The event logs can be cleared with the following utility commands:* wevtutil cl system* wevtutil cl application* wevtutil cl securityThese logs may also be cleared through other mechanisms, such as the event viewer GUI or [PowerShell](https://attack.mitre.org/techniques/T1059/001). Atomic Tests
###Code
#Import the Module before running the tests.
# Checkout Jupyter Notebook at https://github.com/cyb3rbuff/TheAtomicPlaybook to run PS scripts.
Import-Module /Users/0x6c/AtomicRedTeam/atomics/invoke-atomicredteam/Invoke-AtomicRedTeam.psd1 - Force
###Output
_____no_output_____
###Markdown
Atomic Test 1 - Clear LogsUpon execution this test will clear Windows Event Logs. Open the System.evtx logs at C:\Windows\System32\winevt\Logs and verify that it is now empty.**Supported Platforms:** windowsElevation Required (e.g. root or admin) Attack Commands: Run with `command_prompt````command_promptwevtutil cl System```
###Code
Invoke-AtomicTest T1070.001 -TestNumbers 1
###Output
_____no_output_____
###Markdown
Atomic Test 2 - Delete System Logs Using Clear-EventLogClear event logs using built-in PowerShell commands.Upon successful execution, you should see the list of deleted event logsUpon execution, open the Security.evtx logs at C:\Windows\System32\winevt\Logs and verify that it is now empty or has very few logs in it.**Supported Platforms:** windowsElevation Required (e.g. root or admin) Attack Commands: Run with `powershell````powershell$logs = Get-EventLog -List | ForEach-Object {$_.Log}$logs | ForEach-Object {Clear-EventLog -LogName $_ }Get-EventLog -list```
###Code
Invoke-AtomicTest T1070.001 -TestNumbers 2
###Output
_____no_output_____ |
use-cases/healthcare/breast_cancer/autopilot_xgboost_breast_cancer.ipynb | ###Markdown
Breast Cancer prediction with Amazon SageMaker Autopilot
------

Contents

1. [Introduction](Introduction)
1. [Prerequisites](Prerequisites)
1. [Downloading the dataset](Downloading)
1. [Upload the dataset to Amazon S3](Uploading)
1. [Setting up the SageMaker Autopilot Job](Settingup)
1. [Launching the SageMaker Autopilot Job](Launching)
1. [Tracking SageMaker Autopilot Job Progress](Tracking)
1. [Results](Results)
1. [Cleanup](Cleanup)

Introduction

Amazon SageMaker Autopilot is an automated machine learning (commonly referred to as AutoML) solution for tabular datasets. You can use SageMaker Autopilot in different ways: on autopilot (hence the name) or with human guidance, without code through SageMaker Studio, or using the AWS SDKs. This notebook, as a first glimpse, will use the AWS SDKs to simply create and deploy a machine learning model.

This notebook demonstrates how you can use Autopilot on the UCI breast cancer diagnostic dataset (introduced below) to get the most accurate ML pipeline through exploring a number of potential options, or "candidates". Each candidate generated by Autopilot consists of two steps. The first step performs automated feature engineering on the dataset and the second step trains and tunes an algorithm to produce a model. When you deploy this model, it follows similar steps: feature engineering followed by inference, to decide whether a breast mass is benign or malignant. The notebook contains instructions on how to train the model as well as how to deploy the model to perform breast cancer malignancy predictions.

Prerequisites

Before you start the tasks in this tutorial, you need the following:
- The Amazon Simple Storage Service (Amazon S3) bucket and prefix that you want to use for training and model data. This should be within the same Region as Amazon SageMaker training. The code below will create, or if it exists, use, the default bucket.
- The IAM role to give Autopilot access to your data. See the Amazon SageMaker documentation for more information on IAM roles: https://docs.aws.amazon.com/sagemaker/latest/dg/security-iam.html
###Code
import sagemaker
# Define IAM role
import boto3
import re
from sagemaker import get_execution_role
region = boto3.Session().region_name
# Define IAM role
role = get_execution_role()
session = sagemaker.Session()
#bucket = '' # <uncomment and change to your own bucket if you don't want to use the default bucket>
bucket = session.default_bucket()
print(sagemaker.Session().default_bucket())
prefix = 'sagemaker/bc/autopilot' # modify to your own path if desired
sm = boto3.Session().client(service_name='sagemaker',region_name=region)
# Import Libraries
import numpy as np # For matrix operations and numerical processing
import pandas as pd # For munging tabular data
import matplotlib.pyplot as plt # For charts and visualizations
from IPython.display import Image # For displaying images in the notebook
from IPython.display import display # For displaying outputs in the notebook
import time # For labeling SageMaker models, endpoints, etc.
from time import gmtime, strftime # For labeling SageMaker models, endpoints, etc.
import sys # For writing outputs to notebook
import math # For ceiling function
import json # For parsing hosting outputs
import os # For manipulating filepath names
import zipfile # Amazon SageMaker's Python SDK provides many helper functions
###Output
_____no_output_____
###Markdown
---

Data Wrangling

For this illustration, we will continue using UCI's breast cancer diagnostic data set, available at https://archive.ics.uci.edu/ml/datasets/Breast+Cancer+Wisconsin+%28Diagnostic%29. The data set is also available on Kaggle at https://www.kaggle.com/uciml/breast-cancer-wisconsin-data. The purpose here is to use this data set to build a predictive model of whether a breast mass image indicates a benign or malignant tumor.

Downloading the dataset

Before you run Autopilot on the dataset, first perform a check of the dataset to make sure that it has no obvious errors. The Autopilot process can take a long time, and it's generally good practice to inspect the dataset before you start a job. This particular dataset is small, so you can inspect it in the notebook instance itself. If you have a larger dataset that will not fit in notebook instance memory, inspect the dataset offline using a big data analytics tool like Apache Spark. [Deequ](https://github.com/awslabs/deequ) is a library built on top of Apache Spark that can be helpful for performing checks on large datasets. Autopilot is capable of handling datasets up to 5 GB.
###Code
!wget https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/wdbc.data
###Output
--2021-10-13 06:52:32-- https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/wdbc.data
Resolving archive.ics.uci.edu (archive.ics.uci.edu)... 128.195.10.252
Connecting to archive.ics.uci.edu (archive.ics.uci.edu)|128.195.10.252|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 124103 (121K) [application/x-httpd-php]
Saving to: ‘wdbc.data’
wdbc.data 100%[===================>] 121.19K 273KB/s in 0.4s
2021-10-13 06:52:34 (273 KB/s) - ‘wdbc.data’ saved [124103/124103]
###Markdown
Read the data into a Pandas data frame and take a look.
###Code
col_names = ["id","diagnosis","radius_mean","texture_mean","perimeter_mean","area_mean","smoothness_mean",
"compactness_mean","concavity_mean","concave points_mean","symmetry_mean","fractal_dimension_mean",
"radius_se","texture_se","perimeter_se","area_se","smoothness_se","compactness_se","concavity_se",
"concave points_se","symmetry_se","fractal_dimension_se","radius_worst","texture_worst",
"perimeter_worst","area_worst","smoothness_worst","compactness_worst","concavity_worst",
"concave points_worst","symmetry_worst","fractal_dimension_worst"]
breastcancer = pd.read_csv('./wdbc.data', header=None, names=col_names)
pd.set_option('display.max_columns', 50) # Make sure we can see all of the columns
pd.set_option('display.max_rows', 10) # Keep the output on one page
breastcancer
###Output
_____no_output_____
###Markdown
Note that there are 30 features to help predict the target column 'diagnosis'.Amazon SageMaker Autopilot takes care of preprocessing your data for you. You do not need to perform conventional data preprocessing techniques such as handling missing values, converting categorical features to numeric features, scaling data, and handling more complicated data types.Moreover, splitting the dataset into training and validation splits is not necessary. Autopilot takes care of this for you. You may, however, want to split out a test set. That's next, although you use it for batch inference at the end instead of testing the model. We will drop the Id Column
###Code
breastcancer = breastcancer.drop(['id'], axis=1)
breastcancer
breastcancer.diagnosis = pd.Categorical(breastcancer.diagnosis).codes
breastcancer.head()
###Output
_____no_output_____
###Markdown
Reserve some data for calling batch inference on the modelDivide the data into training and testing splits. The training split is used by SageMaker Autopilot. The testing split is reserved to perform inference using the suggested model.
###Code
train_data = breastcancer.sample(frac=0.9,random_state=200)
test_data = breastcancer.drop(train_data.index)
test_data_no_target = test_data.drop(columns=['diagnosis'])  # drop the target from the held-out test split (not from the full dataset)
###Output
_____no_output_____
###Markdown
Upload the dataset to Amazon S3Copy the file to Amazon Simple Storage Service (Amazon S3) in a .csv format for Amazon SageMaker training to use.
###Code
train_file = 'train_data.csv';
train_data.to_csv(train_file, index=False, header=True)
# train_data_s3_path = session.upload_data(path=train_file, key_prefix=prefix + "/train")
# print('Train data uploaded to: ' + train_data_s3_path)
autopilot_train_s3_uri = session.upload_data(bucket=bucket, key_prefix=prefix, path=train_file)
autopilot_train_s3_uri
test_file = 'test_data.csv';
test_data_no_target.to_csv(test_file, index=False, header=False)
test_data_s3_path = session.upload_data(path=test_file, key_prefix=prefix + "/test")
print('Test data uploaded to: ' + test_data_s3_path)
from IPython.core.display import display, HTML
display(HTML('<b>Review <a target="blank" href="https://s3.console.aws.amazon.com/s3/buckets/{}/sagemaker/bc/autopilot/">uploaded files</a> in S3 bucket</b>'.format(bucket, prefix, {})))
###Output
_____no_output_____
###Markdown
Setting up the SageMaker Autopilot JobAfter uploading the dataset to Amazon S3, you can invoke Autopilot to find the best ML pipeline to train a model on this dataset. The required inputs for invoking an Autopilot job are:* The Amazon S3 location for the input dataset and for all output artifacts* The name of the column of the dataset you want to predict (`diagnosis` in this case) * An IAM roleCurrently Autopilot supports only tabular datasets in CSV format. Either all files should have a header row, or the first file of the dataset, when sorted in alphabetical/lexical order, is expected to have a header row.
###Code
input_data_config = [{
'DataSource': {
'S3DataSource': {
'S3DataType': 'S3Prefix',
'S3Uri': 's3://{}/{}/train'.format(bucket,prefix)
}
},
'TargetAttributeName': 'diagnosis'
}
]
output_data_config = {
'S3OutputPath': 's3://{}/{}/output'.format(bucket,prefix)
}
autoMLJobConfig={
'CompletionCriteria': {
'MaxCandidates': 5
}
}
autoMLJobObjective = {
"MetricName": "Accuracy"
}
from IPython.core.display import display, HTML
display(HTML('<b>Review <a target="blank" href="https://s3.console.aws.amazon.com/s3/buckets/{}/sagemaker/bc/autopilot/">No output bucket created yet</a> in S3 bucket</b>'.format(bucket, prefix, {})))
###Output
_____no_output_____
###Markdown
You can also specify the type of problem you want to solve with your dataset (`Regression, MulticlassClassification, BinaryClassification`); a sketch of setting it explicitly is shown below. In case you are not sure, SageMaker Autopilot will infer the problem type based on statistics of the target column (the column you want to predict). You have the option to limit the running time of a SageMaker Autopilot job by providing either the maximum number of pipeline evaluations or candidates (one pipeline evaluation is called a `Candidate` because it generates a candidate model), or the total time allocated for the overall Autopilot job. Under default settings, this job takes about four hours to run; this varies between runs because of the nature of the exploratory process Autopilot uses to find optimal training parameters. Launching the SageMaker Autopilot JobYou can now launch the Autopilot job using the SageMaker Python SDK's `AutoML` class, which calls the `create_auto_ml_job` API under the hood. https://docs.aws.amazon.com/cli/latest/reference/sagemaker/create-auto-ml-job.html
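If you wanted to pin the problem type and objective metric explicitly rather than letting Autopilot infer them, a minimal sketch using the same SDK class as the launch cell below might look like this (it reuses `role`, `bucket`, `prefix`, and `session` defined earlier; the `problem_type` and `job_objective` argument names are taken from the SageMaker Python SDK's `AutoML` class and should be verified against your installed SDK version):
```Python
# Sketch only (not executed in this notebook): setting the problem type and objective
# explicitly instead of letting Autopilot infer them from the target column.
automl_explicit = sagemaker.automl.automl.AutoML(
    role=role,
    target_attribute_name='diagnosis',
    output_path='s3://{}/{}/output'.format(bucket, prefix),
    problem_type='BinaryClassification',   # or 'Regression' / 'MulticlassClassification'
    job_objective={'MetricName': 'F1'},    # the SDK may require this whenever problem_type is set
    max_candidates=5,
    sagemaker_session=session,
)
```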
###Code
from time import gmtime, strftime, sleep
timestamp_suffix = strftime('%d-%H-%M-%S', gmtime())
print(autopilot_train_s3_uri)
model_output_s3_uri = 's3://{}/{}/output'.format(bucket, prefix)
print(model_output_s3_uri)
print(input_data_config)
auto_ml_job_name = 'automl-xgboost-bc' + timestamp_suffix
print('AutoMLJobName: ' + auto_ml_job_name)
max_candidates = 2
automl = sagemaker.automl.automl.AutoML(
target_attribute_name='diagnosis',
base_job_name=auto_ml_job_name,
output_path=model_output_s3_uri,
max_candidates=max_candidates,
sagemaker_session=session,
role=role,
max_runtime_per_training_job_in_seconds=600,
total_job_runtime_in_seconds=3000
)
print(max_candidates)
automl.fit(
### BEGIN SOLUTION - DO NOT delete this comment for grading purposes
inputs=autopilot_train_s3_uri, # Replace None
### END SOLUTION - DO NOT delete this comment for grading purposes
job_name=auto_ml_job_name,
wait=False,
logs=False
)
from IPython.core.display import display, HTML
display(HTML('<b>Review <a target="blank" href="https://s3.console.aws.amazon.com/s3/buckets/{}/sagemaker/bc/autopilot/">output folder has been created</a> in S3 bucket</b>'.format(bucket, prefix, {})))
###Output
_____no_output_____
###Markdown
Tracking SageMaker Autopilot job progressA SageMaker Autopilot job consists of the following high-level steps: * Analyzing Data, where the dataset is analyzed and Autopilot comes up with a list of ML pipelines that should be tried out on the dataset. The dataset is also split into train and validation sets.* Feature Engineering, where Autopilot performs feature transformations on individual features of the dataset as well as at an aggregate level.* Model Tuning, where the top-performing pipeline is selected along with the optimal hyperparameters for the training algorithm (the last stage of the pipeline).
###Code
# This step takes about 33 minutes
print ('JobStatus - Secondary Status')
print('------------------------------')
describe_response = sm.describe_auto_ml_job(AutoMLJobName=auto_ml_job_name)
print (describe_response['AutoMLJobStatus'] + " - " + describe_response['AutoMLJobSecondaryStatus'])
job_run_status = describe_response['AutoMLJobStatus']
while job_run_status not in ('Failed', 'Completed', 'Stopped'):
describe_response = sm.describe_auto_ml_job(AutoMLJobName=auto_ml_job_name)
job_run_status = describe_response['AutoMLJobStatus']
print (describe_response['AutoMLJobStatus'] + " - " + describe_response['AutoMLJobSecondaryStatus'])
sleep(30)
###Output
JobStatus - Secondary Status
------------------------------
Completed - Completed
###Markdown
SageMaker processing jobsAutopilot creates the required SageMaker processing jobs during the run:* The first processing job (data splitter) checks data sanity, performs stratified shuffling, and splits the data into training and validation sets. * The second processing job (candidate generator) first streams through the data to compute statistics for the dataset. It then uses these statistics to identify the problem type and the possible type of each predictor column: numeric, categorical, natural language, etc.
###Code
from IPython.core.display import display, HTML
display(HTML('<b>Review <a target="blank" href="https://console.aws.amazon.com/sagemaker/home?region={}#/processing-jobs/">processing jobs</a></b>'.format(region)))
###Output
_____no_output_____
###Markdown
Review the Output in S3Once data analysis is complete, SageMaker AutoPilot generates two notebooks: * Data exploration* Candidate definitionNotebooks are included in the AutoML job artifacts generated during the run. Before checking the existence of the notebooks, you can check if the artifacts have been generated.```data-processor-models/ "models" learned to transform raw data into features documentation/ explainability and other documentation about your modelpreprocessed-data/ data for train and validationsagemaker-automl-candidates/ candidate models which autopilot comparestransformed-data/ candidate-specific data for train and validationtuning/ candidate-specific tuning resultsvalidations/ validation results```
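As a quick sanity check before opening the console, the sketch below lists a few of the objects Autopilot has written so far under the job's output prefix (it assumes `bucket`, `prefix`, and `auto_ml_job_name` from earlier cells, and the default output layout shown above):
```Python
# Sketch: list the first few artifacts written for this Autopilot job.
s3_client = boto3.client('s3')
job_output_prefix = '{}/output/{}/'.format(prefix, auto_ml_job_name)
response = s3_client.list_objects_v2(Bucket=bucket, Prefix=job_output_prefix, MaxKeys=20)
for obj in response.get('Contents', []):
    print(obj['Key'])
```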
###Code
display(
HTML(
'<b>Review all <a target="blank" href="https://s3.console.aws.amazon.com/s3/buckets/{}?region={}&prefix=sagemaker/bc/autopilot/">output in S3</a></b>'.format(
bucket, region, auto_ml_job_name
)
)
)
###Output
_____no_output_____
###Markdown
Model training and tuningWhen you launched the Autopilot job, you requested that 2 model candidates be generated and compared. Therefore, you should see two (2) SageMaker training jobs below, and you can review the corresponding hyperparameter tuning jobs in the SageMaker console. ResultsNow use the describe_auto_ml_job API to look up the best candidate selected by the SageMaker Autopilot job.
###Code
best_candidate = sm.describe_auto_ml_job(AutoMLJobName=auto_ml_job_name)["BestCandidate"]
best_candidate_name = best_candidate["CandidateName"]
print(best_candidate)
print("\n")
print("CandidateName: " + best_candidate_name)
print(
"FinalAutoMLJobObjectiveMetricName: "
+ best_candidate["FinalAutoMLJobObjectiveMetric"]["MetricName"]
)
print(
"FinalAutoMLJobObjectiveMetricValue: "
+ str(best_candidate["FinalAutoMLJobObjectiveMetric"]["Value"])
)
###Output
{'CandidateName': 'automl-xgboost-bc13-06-53-18T1rM-002-2c8903b3', 'FinalAutoMLJobObjectiveMetric': {'MetricName': 'validation:f1_binary', 'Value': 0.9598100185394287}, 'ObjectiveStatus': 'Succeeded', 'CandidateSteps': [{'CandidateStepType': 'AWS::SageMaker::ProcessingJob', 'CandidateStepArn': 'arn:aws:sagemaker:ap-southeast-2:745084241526:processing-job/automl-xgboost-bc13-06-53-18-db-1-2cf2c8486b1142f2ad1ecd482d9ce', 'CandidateStepName': 'automl-xgboost-bc13-06-53-18-db-1-2cf2c8486b1142f2ad1ecd482d9ce'}, {'CandidateStepType': 'AWS::SageMaker::TrainingJob', 'CandidateStepArn': 'arn:aws:sagemaker:ap-southeast-2:745084241526:training-job/automl-xgboost-bc13-06-53-18-dpp0-1-f507133ee41b4068a5fba1f24f5', 'CandidateStepName': 'automl-xgboost-bc13-06-53-18-dpp0-1-f507133ee41b4068a5fba1f24f5'}, {'CandidateStepType': 'AWS::SageMaker::TransformJob', 'CandidateStepArn': 'arn:aws:sagemaker:ap-southeast-2:745084241526:transform-job/automl-xgboost-bc13-06-53-18-dpp0-csv-1-7fea98f662f140459e796e3', 'CandidateStepName': 'automl-xgboost-bc13-06-53-18-dpp0-csv-1-7fea98f662f140459e796e3'}, {'CandidateStepType': 'AWS::SageMaker::TrainingJob', 'CandidateStepArn': 'arn:aws:sagemaker:ap-southeast-2:745084241526:training-job/automl-xgboost-bc13-06-53-18t1rm-002-2c8903b3', 'CandidateStepName': 'automl-xgboost-bc13-06-53-18T1rM-002-2c8903b3'}], 'CandidateStatus': 'Completed', 'InferenceContainers': [{'Image': '783357654285.dkr.ecr.ap-southeast-2.amazonaws.com/sagemaker-sklearn-automl:2.2.1-1-cpu-py3', 'ModelDataUrl': 's3://sagemaker-ap-southeast-2-745084241526/sagemaker/bc/autopilot/output/automl-xgboost-bc13-06-53-18/data-processor-models/automl-xgboost-bc13-06-53-18-dpp0-1-f507133ee41b4068a5fba1f24f5/output/model.tar.gz', 'Environment': {'AUTOML_TRANSFORM_MODE': 'feature-transform', 'SAGEMAKER_DEFAULT_INVOCATIONS_ACCEPT': 'application/x-recordio-protobuf', 'SAGEMAKER_PROGRAM': 'sagemaker_serve', 'SAGEMAKER_SUBMIT_DIRECTORY': '/opt/ml/model/code'}}, {'Image': '783357654285.dkr.ecr.ap-southeast-2.amazonaws.com/sagemaker-xgboost:1.2-2-cpu-py3', 'ModelDataUrl': 's3://sagemaker-ap-southeast-2-745084241526/sagemaker/bc/autopilot/output/automl-xgboost-bc13-06-53-18/tuning/automl-xgb-dpp0-xgb/automl-xgboost-bc13-06-53-18T1rM-002-2c8903b3/output/model.tar.gz', 'Environment': {'MAX_CONTENT_LENGTH': '20971520', 'SAGEMAKER_DEFAULT_INVOCATIONS_ACCEPT': 'text/csv', 'SAGEMAKER_INFERENCE_OUTPUT': 'predicted_label', 'SAGEMAKER_INFERENCE_SUPPORTED': 'predicted_label,probability,probabilities'}}, {'Image': '783357654285.dkr.ecr.ap-southeast-2.amazonaws.com/sagemaker-sklearn-automl:2.2.1-1-cpu-py3', 'ModelDataUrl': 's3://sagemaker-ap-southeast-2-745084241526/sagemaker/bc/autopilot/output/automl-xgboost-bc13-06-53-18/data-processor-models/automl-xgboost-bc13-06-53-18-dpp0-1-f507133ee41b4068a5fba1f24f5/output/model.tar.gz', 'Environment': {'AUTOML_TRANSFORM_MODE': 'inverse-label-transform', 'SAGEMAKER_DEFAULT_INVOCATIONS_ACCEPT': 'text/csv', 'SAGEMAKER_INFERENCE_INPUT': 'predicted_label', 'SAGEMAKER_INFERENCE_OUTPUT': 'predicted_label', 'SAGEMAKER_INFERENCE_SUPPORTED': 'predicted_label,probability,labels,probabilities', 'SAGEMAKER_PROGRAM': 'sagemaker_serve', 'SAGEMAKER_SUBMIT_DIRECTORY': '/opt/ml/model/code'}}], 'CreationTime': datetime.datetime(2021, 10, 13, 7, 12, 28, tzinfo=tzlocal()), 'EndTime': datetime.datetime(2021, 10, 13, 7, 14, 6, tzinfo=tzlocal()), 'LastModifiedTime': datetime.datetime(2021, 10, 13, 7, 15, 4, 976000, tzinfo=tzlocal()), 'CandidateProperties': {'CandidateArtifactLocations': {'Explainability': 
's3://sagemaker-ap-southeast-2-745084241526/sagemaker/bc/autopilot/output/automl-xgboost-bc13-06-53-18/documentation/explainability/output'}}}
CandidateName: automl-xgboost-bc13-06-53-18T1rM-002-2c8903b3
FinalAutoMLJobObjectiveMetricName: validation:f1_binary
FinalAutoMLJobObjectiveMetricValue: 0.9598100185394287
###Markdown
Deploy The ModelNow that you have successfully completed the SageMaker Autopilot job on the dataset, you can create a model from any of the candidates by using [Inference Pipelines](https://docs.aws.amazon.com/sagemaker/latest/dg/inference-pipelines.html). Here, we deploy the best candidate.
###Code
timestamp_suffix = strftime("%d-%H-%M-%S", gmtime())
model_name = best_candidate_name + timestamp_suffix + "-model"
#Create the model
model_arn = sm.create_model(
Containers=best_candidate["InferenceContainers"], ModelName=model_name, ExecutionRoleArn=role
)
#Configure the model
epc_name = best_candidate_name + timestamp_suffix + "-epc"
ep_config = sm.create_endpoint_config(
EndpointConfigName=epc_name,
ProductionVariants=[
{
"InstanceType": "ml.m5.2xlarge",
"InitialInstanceCount": 1,
"ModelName": model_name,
"VariantName": "main",
}
],
)
#Deploy the model
ep_name = best_candidate_name + timestamp_suffix + "-ep"
create_endpoint_response = sm.create_endpoint(EndpointName=ep_name, EndpointConfigName=epc_name)
sm.get_waiter("endpoint_in_service").wait(EndpointName=ep_name)
###Output
_____no_output_____
###Markdown
EvaluateNow that we have a hosted endpoint running, we can make real-time predictions from our model by calling the predict method. But first, we'll need to set up serializers and deserializers for passing our test data to and from the model behind the endpoint in CSV format.
###Code
from sagemaker.predictor import Predictor
from sagemaker.serializers import CSVSerializer
from sagemaker.deserializers import CSVDeserializer
predictor = Predictor(
endpoint_name=ep_name,
sagemaker_session=session,
serializer=CSVSerializer(),
deserializer=CSVDeserializer(),
)
# Remove the target column from the test data and reset the index for the ground truth data
test_data_inference = test_data.drop("diagnosis", axis=1)
actual=test_data.iloc[:,0]
actual=actual.reset_index(drop=True)
# Obtain predictions from SageMaker endpoint
prediction = predictor.predict(test_data_inference.to_csv(sep=",", header=False, index=False))
# Load prediction in pandas and compare to ground truth
prediction_df = pd.DataFrame(prediction)
#
pd.crosstab(index=actual, columns=prediction_df[0], rownames=['actual'], colnames=['predictions'])
###Output
_____no_output_____
###Markdown
Data Exploration NotebookSagemaker Autopilot also auto-generates a Data Exploration notebook, which can be downloaded from the following Amazon S3 location:
###Code
sm.describe_auto_ml_job(AutoMLJobName=auto_ml_job_name)['AutoMLJobArtifacts']['DataExplorationNotebookLocation']
###Output
_____no_output_____
###Markdown
Candidate Generation Notebook Sagemaker AutoPilot also auto-generates a Candidate Definitions notebook. This notebook can be used to interactively step through the various steps taken by the Sagemaker Autopilot to arrive at the best candidate. This notebook can also be used to override various runtime parameters like parallelism, hardware used, algorithms explored, feature extraction scripts and more. The notebook can be downloaded from the following Amazon S3 location:
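If you want to work with the notebook locally, a sketch using the SageMaker SDK's `S3Downloader` helper (assuming `sm`, `auto_ml_job_name`, and `session` from earlier cells) might look like:
```Python
# Sketch: copy the auto-generated Candidate Definition notebook to the local filesystem.
from sagemaker.s3 import S3Downloader

candidate_nb_uri = sm.describe_auto_ml_job(AutoMLJobName=auto_ml_job_name)[
    'AutoMLJobArtifacts']['CandidateDefinitionNotebookLocation']
S3Downloader.download(candidate_nb_uri, local_path='.', sagemaker_session=session)
```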
###Code
sm.describe_auto_ml_job(AutoMLJobName=auto_ml_job_name)['AutoMLJobArtifacts']['CandidateDefinitionNotebookLocation']
###Output
_____no_output_____
###Markdown
CleanupThe Autopilot job creates many underlying artifacts such as dataset splits, preprocessing scripts, and preprocessed data. This code, when uncommented, deletes them. This operation deletes all the generated models and the auto-generated notebooks as well.
###Code
#s3 = boto3.resource('s3')
#bucket = s3.Bucket(bucket)
#job_outputs_prefix = '{}/output/{}'.format(prefix,auto_ml_job_name)
#bucket.objects.filter(Prefix=job_outputs_prefix).delete()
###Output
_____no_output_____ |
lectures/notebooks/Lecture 03 - Linear methods.ipynb | ###Markdown
IntroductionThis notebook demonstrates some basic data handling using the Pandas package and the application of linear methods to identify relationships in materials data. We will be creating a rudimentary prediction model for the bulk modulus of an element from various basic elemental properties. For the purposes of this exercise, we will assume that a linear relationship does indeed exist.
###Code
import pandas as pd
import seaborn as sns
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
params = {'legend.fontsize': 20,
'figure.figsize': (12, 8),
'axes.labelsize': 20,
'axes.titlesize': 24,
'xtick.labelsize':16,
'ytick.labelsize': 16}
mpl.rcParams.update(params)
%matplotlib inline
###Output
_____no_output_____
###Markdown
Load in the elemental dataset using pandas' read_csv method. This data was obtained from the Materials Project for the ground state structure of each element only. The columns are:- K: Bulk modulus in GPa- MP: Melting point in K- BP: Boiling point in K- Z: Atomic number- X: Pauling electronegativity- r: Atomic radius in angstroms
###Code
data = pd.read_csv("element_data.csv", index_col=0)
print(data)
###Output
K MP BP Z X r
Element
Ac 29.0 1323.00 3573.0 89 1.10 1.95
Ag 88.0 1234.93 2435.0 47 1.93 1.60
Al 83.0 933.47 2792.0 13 1.61 1.25
As 40.0 1090.00 887.0 33 2.18 1.15
Au 137.0 1337.33 3129.0 79 2.54 1.35
... ... ... ... .. ... ...
W 304.0 3695.00 5828.0 74 2.36 1.35
Y 41.0 1799.00 3609.0 39 1.22 1.80
Yb 15.0 1097.00 1469.0 70 1.10 1.75
Zn 67.0 692.68 1180.0 30 1.65 1.35
Zr 94.0 2128.00 4682.0 40 1.33 1.55
[83 rows x 6 columns]
###Markdown
Since a strict linear relationship may not exist between K and the other variables, we will create a few additional features based on simple transformations of some of the inputs, namely the electronegativity and the atomic radius.
###Code
data["X^2"] = data["X"] ** 2
data["sqrt(X)"] = data["X"] ** 0.5
data["r^2"] = data["r"] ** 2
data["sqrt(r)"] = data["r"] ** 0.5
print(data)
###Output
K MP BP Z X r X^2 sqrt(X) r^2 \
Element
Ac 29.0 1323.00 3573.0 89 1.10 1.95 1.2100 1.048809 3.8025
Ag 88.0 1234.93 2435.0 47 1.93 1.60 3.7249 1.389244 2.5600
Al 83.0 933.47 2792.0 13 1.61 1.25 2.5921 1.268858 1.5625
As 40.0 1090.00 887.0 33 2.18 1.15 4.7524 1.476482 1.3225
Au 137.0 1337.33 3129.0 79 2.54 1.35 6.4516 1.593738 1.8225
... ... ... ... .. ... ... ... ... ...
W 304.0 3695.00 5828.0 74 2.36 1.35 5.5696 1.536229 1.8225
Y 41.0 1799.00 3609.0 39 1.22 1.80 1.4884 1.104536 3.2400
Yb 15.0 1097.00 1469.0 70 1.10 1.75 1.2100 1.048809 3.0625
Zn 67.0 692.68 1180.0 30 1.65 1.35 2.7225 1.284523 1.8225
Zr 94.0 2128.00 4682.0 40 1.33 1.55 1.7689 1.153256 2.4025
sqrt(r)
Element
Ac 1.396424
Ag 1.264911
Al 1.118034
As 1.072381
Au 1.161895
... ...
W 1.161895
Y 1.341641
Yb 1.322876
Zn 1.161895
Zr 1.244990
[83 rows x 10 columns]
###Markdown
For ease of interpretation, let's define our X and y.
###Code
features = [c for c in data.columns if c != "K"]
x = data[features]
y = data["K"]
###Output
_____no_output_____
###Markdown
We will now perform a standard multiple linear regression using scikit-learn.
###Code
from sklearn import linear_model
from sklearn.metrics import mean_squared_error
reg = linear_model.LinearRegression()
reg.fit(x, y)
r2 = reg.score(x, y)
equation = ["%.2e %s" % (v, f) for v, f in zip(reg.coef_, features)]
print("K = %.1f + %s" % (reg.intercept_, " + ".join(equation)))
f, ax = plt.subplots(figsize=(12, 8))
yhat = reg.predict(data[features])
sns.scatterplot(x=y, y=yhat)
plt.ylabel(r"$K_{predicted}$ (GPa)")
plt.xlabel(r"$K$ (GPa)")
plt.annotate(r"$R^2$ = %.3f, MSE = %.1f" % (r2, mean_squared_error(y, yhat)), (200, 0), fontsize=18);
###Output
K = -243.8 + 4.25e-02 MP + 2.76e-02 BP + -2.48e-01 Z + 3.92e+02 X + -3.55e+02 r + -4.96e+01 X^2 + -3.85e+02 sqrt(X) + 5.40e+01 r^2 + 5.00e+02 sqrt(r)
###Markdown
Now, it may seem that this model performs very well. But in actuality, we have used the entire dataset to perform the regression. A proper fit should be conducted using cross-validation. Here, we will use a five-fold cross-validation to assess the performance of this highly overspecified model.
###Code
from sklearn.model_selection import cross_val_predict, KFold
from sklearn.metrics import r2_score
kfold = KFold(n_splits=5, shuffle=True, random_state=42)
mlr = linear_model.LinearRegression()
yhat_mlr = cross_val_predict(mlr, x, y, cv=kfold)
r2_mlr = r2_score(y, yhat_mlr)
mse_mlr = mean_squared_error(y, yhat_mlr)
label_mlr = "MLR: $R^2$ = %.3f, MSE = %.1f" % (r2_mlr, mse_mlr)
f, ax = plt.subplots(figsize=(8, 8))
plt.plot(y, yhat_mlr, 'o', label=label_mlr)
plt.ylabel(r"$K_{predicted}$ (GPa)")
plt.xlabel(r"$K$ (GPa)")
plt.legend()
plt.xlim([0, 410])
plt.ylim([0, 410])
plt.plot([0, 410], [0, 410], 'k--');
###Output
_____no_output_____
###Markdown
Correlations between featuresHere, we will look at correlations between features. First, we do a pair plot between features.
###Code
grid = sns.pairplot(data[features])
###Output
_____no_output_____
###Markdown
From the plot, it is clear that MP and BP are correlated with each other. And X is inversely related to r in some way. Obviously, X and $X^2$ are correlated. Another way to plot this is using a correlation plot.
###Code
f, ax = plt.subplots(figsize=(8, 6))
sns.heatmap(x.corr(), cmap="coolwarm", vmin=-1, vmax=1, ax=ax);
###Output
_____no_output_____
###Markdown
Subset selection Sometimes, the input variables may not all be directly related to the target of interest. Hence, a feature selection step is necessary. There are many different methods for selecting features. Here we will go over a simple implementation in scikit-learn.
###Code
from sklearn.feature_selection import SelectKBest, f_regression
def identify_columns(x_new, nrows=10):
columns = x.columns
xvalues = x.values
dist = np.linalg.norm(xvalues[:nrows, :, None] - x_new[:nrows, None, :], axis=0)
return columns[np.argmin(dist, axis=0)].values
sel = SelectKBest(f_regression, k=3)
x_new = sel.fit_transform(x, y)
print(f"Selected features {identify_columns(x_new)}")
s = ', '.join(['%s: %.3e' % (i, j) for i, j in zip(x.columns, sel.pvalues_)])
print("The p values for the variables are " + s)
###Output
The p values for the variables are MP: 5.017e-17, BP: 3.285e-14, Z: 1.382e-01, X: 1.760e-01, r: 9.118e-02, X^2: 6.985e-01, sqrt(X): 6.706e-02, r^2: 1.346e-02, sqrt(r): 2.524e-01
###Markdown
Apparently, the most significant variables are MP and BP, followed by r^2 (p values < 0.05). Let's redo the regression using only these variables.
###Code
mlr_best = linear_model.LinearRegression()
yhat_mlr_best = cross_val_predict(mlr_best, x_new, y, cv=kfold)
r2_mlr_best = r2_score(y, yhat_mlr_best)
mse_mlr_best = mean_squared_error(y, yhat_mlr_best)
label_mlr_best = "MLR: $R^2$ = %.3f, MSE = %.1f" % (r2_mlr_best, mse_mlr_best)
f, ax = plt.subplots(figsize=(8, 8))
plt.plot(y, yhat_mlr, 'o', label=label_mlr)
plt.plot(y, yhat_mlr_best, 'o', label=label_mlr_best)
l = plt.ylabel(r"$K_{predicted}$ (GPa)")
l = plt.xlabel(r"$K$ (GPa)")
plt.legend()
plt.xlim([0, 410])
plt.ylim([0, 410])
plt.plot([0, 410], [0, 410], 'k--');
###Output
_____no_output_____
###Markdown
We can see that the best subset model has substantially reduced MSE and improved R2. ShrinkageHere, we will use shrinkage methods to shrink the feature coefficients. It is a best practice to first center the inputs and scale to unit variance prior to performing shrinkage. We will use scikit-learn's StandardScaler which performs this scaling.
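Concretely, for each feature $j$ the scaler applies $$z_{j} = \frac{x_{j} - \mu_{j}}{\sigma_{j}},$$ where $\mu_{j}$ and $\sigma_{j}$ are the sample mean and standard deviation of that feature (notation introduced here only to make the rescaling step later on explicit).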
###Code
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(x)
means_ = scaler.mean_
stds_ = scaler.scale_
z = scaler.transform(x)
###Output
_____no_output_____
###Markdown
Ridge regressionUnlike the simple MLR example, we will do our ridge regression properly, using k-fold cross-validation to identify the best shrinkage factor (denoted as the argument alpha in scikit-learn's implementation). We will use the MSE as the criterion by which to determine the best alpha.
###Code
from sklearn.model_selection import cross_validate, KFold
cv_results = []
coeffs = []
alphas = np.logspace(-2, 2, 71)
kfold = KFold(n_splits=10, shuffle=True, random_state=42)
for alpha in alphas:
ridge = linear_model.Ridge(alpha=alpha, max_iter=10000)
ridge.fit(z, y)
scores = cross_validate(ridge, z, y, cv=kfold, scoring="neg_mean_squared_error")
cv_results.append([alpha, -np.mean(scores["test_score"])] + list(ridge.coef_))
cv_results = pd.DataFrame(cv_results, columns=["alpha", "score"] + features)
f, ax = plt.subplots(figsize=(12, 8))
plt.plot(cv_results["alpha"], cv_results["score"], '-x')
plt.xlim([1e-2, 10**1.8])
plt.ylim((4000, 4800))
plt.xscale(r'log')
plt.xlabel(r'$\alpha$')
plt.ylabel(r'MSE')
plt.title(r'Ridge regression')
best_alpha = cv_results["alpha"][cv_results["score"].idxmin()]
plt.annotate(r"Best $\alpha$ = %.3f" % best_alpha, (best_alpha, cv_results["score"].min()), fontsize=16);
###Output
_____no_output_____
###Markdown
Here, we will take a look at the effect of alpha on the coefficients. Note that these are for the scaled coefficients, i.e., the coefficients that map the scaled inputs to the output, and not the unscaled inputs.
###Code
f, ax = plt.subplots(figsize=(12, 8))
for f in features:
plt.plot(cv_results["alpha"], cv_results[f], '-x', label=f)
plt.xscale('log')
plt.xlabel(r'$\alpha$')
plt.ylabel('Coefficient')
plt.title(r'Scaled coefficients change with $\alpha$')
plt.legend()
plt.xlim([1e-2, 10**1.8])
plt.ylim([-100, 100]);
###Output
_____no_output_____
###Markdown
Using the best alpha, we will now regenerate the final relationship. Note that we have to rescale the intercept and coefficients back to the unnormalized inputs (divide each coefficient by the standard deviation of its input, and shift the intercept by the input means).
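To see why, write the scaled inputs as $z_j = (x_j - \mu_j)/\sigma_j$. Then $$\hat{y} = \beta_0 + \sum_j \beta_j z_j = \Big(\beta_0 - \sum_j \frac{\beta_j \mu_j}{\sigma_j}\Big) + \sum_j \frac{\beta_j}{\sigma_j} x_j,$$ so the coefficients on the original inputs are $\beta_j/\sigma_j$ and the intercept is shifted by $-\sum_j \beta_j \mu_j/\sigma_j$, which is exactly what `reg.coef_ / stds_` and `reg.intercept_ - means_.dot(real_coef)` compute below.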
###Code
reg = linear_model.Ridge(alpha=best_alpha, max_iter=10000)
reg.fit(z, y)
real_coef = reg.coef_ / stds_ # convert back to unnormalized inputs
real_interp = reg.intercept_ - means_.dot(real_coef) # convert back to unnormalized inputs
equation = ["%.2e %s" % (v, f) for v, f in zip(real_coef, features)]
print("K = %.1f + %s" % (real_interp, " + ".join(equation)))
###Output
K = -196.3 + 4.44e-02 MP + 2.43e-02 BP + -4.78e-02 Z + 1.98e+01 X + -2.00e+00 r + -4.27e+00 X^2 + 9.56e+01 sqrt(X) + -6.84e+00 r^2 + 3.33e+01 sqrt(r)
###Markdown
Here, we will redo our MLR with cross validation and compare with the ridge regression.
###Code
from sklearn.model_selection import cross_val_predict
from sklearn.metrics import r2_score
ridge = linear_model.Ridge(alpha=best_alpha, max_iter=10000)
yhat_ridge = cross_val_predict(ridge, z, y, cv=kfold)
r2_ridge = r2_score(y, yhat_ridge)
mse_ridge = mean_squared_error(y, yhat_ridge)
label_ridge = "Ridge: $R^2$ = %.3f, MSE = %.1f" % (r2_ridge, mse_ridge)
f, ax = plt.subplots(figsize=(8, 8))
plt.plot(y, yhat_mlr, 'o', label=label_mlr)
plt.plot(y, yhat_ridge, 'o', label=label_ridge)
l = plt.ylabel("$K_{predicted}$ (GPa)")
l = plt.xlabel("$K$ (GPa)")
plt.legend()
plt.xlim([0, 410])
plt.ylim([0, 410])
plt.plot([0, 410], [0, 410], 'k--');
###Output
_____no_output_____
###Markdown
LASSOHere, we will perform a LASSO regression using the same process as the ridge regression.
###Code
alphas = np.logspace(-2, 1.2, 20)
cv_results = []
coeffs = []
for alpha in alphas:
lasso = linear_model.Lasso(alpha=alpha, max_iter=100000)
lasso.fit(z, y)
scores = cross_validate(lasso, z, y, cv=kfold, scoring='neg_mean_squared_error')
cv_results.append([alpha, -np.mean(scores["test_score"])] + list(lasso.coef_))
cv_results = pd.DataFrame(cv_results, columns=["alpha", "score"] + features)
f, ax = plt.subplots(figsize=(12, 8))
plt.plot(cv_results["alpha"], cv_results["score"], '-x')
plt.xlim([1e-2, 10**1.2])
plt.ylim((3500, 7000))
plt.xscale('log')
plt.xlabel(r'$\alpha$')
plt.ylabel('MSE')
plt.title('LASSO')
best_alpha = cv_results["alpha"][cv_results["score"].idxmin()]
plt.annotate(r"Best $\alpha$ = %.3f" % best_alpha, (best_alpha, cv_results["score"].min()), fontsize=16);
###Output
_____no_output_____
###Markdown
At a certain shrinkage factor, several of the coefficients have been shrunk to zero.
###Code
f, ax = plt.subplots(figsize=(12, 8))
for f in features:
plt.plot(cv_results["alpha"], cv_results[f], '-x', label=f)
plt.xscale('log')
plt.xlabel(r'$\alpha$')
plt.ylabel('Coefficient')
plt.title(r'Scaled coefficients change with $\alpha$')
plt.legend()
plt.xlim([1e-2, 10**1.2]);
###Output
_____no_output_____
###Markdown
We will now retrieve the final equation, ignoring the coefficients that are zero. We note that the atomic number Z no longer appears in the equation. This is somewhat in line with intuition since we do not expect atomic number to have a significant relationship with the bulk modulus. As you may recall, the electronegativity and atomic radius are inversely correlated with each other. So we would expect only one of these parameters to be needed to describe the bulk modulus. Furthermore, it seems that the bulk modulus should be related to sqrt(X) and not X.
###Code
reg = linear_model.Lasso(alpha=best_alpha, max_iter=10000)
reg.fit(z, y)
real_coef = reg.coef_ / stds_ # convert back to unnormalized inputs
real_interp = reg.intercept_ - means_.dot(real_coef) # convert back to unnormalized inputs
equation = ["%.2e %s" % (v, f) for v, f in zip(real_coef, features) if abs(v) > 1e-4]
print("K = %.1f + %s" % (real_interp, " + ".join(equation)))
lasso = linear_model.Lasso(alpha=best_alpha, max_iter=10000)
yhat_lasso = cross_val_predict(lasso, z, y, cv=kfold)
r2_lasso = r2_score(y, yhat_lasso)
mse_lasso = mean_squared_error(y, yhat_lasso)
label_lasso = "Lasso: $R^2$ = %.3f, MSE = %.1f" % (r2_lasso, mse_lasso)
f, ax = plt.subplots(figsize=(8, 8))
plt.plot(y, yhat_mlr, 'o', label=label_mlr)
plt.plot(y, yhat_lasso, 'o', label=label_lasso)
l = plt.ylabel("$K_{predicted}$ (GPa)")
l = plt.xlabel("$K$ (GPa)")
plt.legend()
plt.xlim([0, 410])
plt.ylim([0, 410])
plt.plot([0, 410], [0, 410], 'k--');
###Output
_____no_output_____
###Markdown
Partial Least SquaresHere, we will do a 2-component PLS regression.
###Code
from sklearn.cross_decomposition import PLSRegression
cv_results = []
coeffs = []
pls = PLSRegression(n_components=2)
pls.fit(x, y)
yhat_pls = cross_val_predict(pls, x, y, cv=kfold)
r2_pls = r2_score(y, yhat_pls)
mse_pls = mean_squared_error(y, yhat_pls)
label_pls = "PLS3: $R^2$ = %.3f, MSE = %.1f" % (r2_pls, mse_pls)
f, ax = plt.subplots(figsize=(8, 8))
plt.plot(y, yhat_mlr, 'o', label=label_mlr)
plt.plot(y, yhat_pls, 'o', label=label_pls)
l = plt.ylabel("$K_{predicted}$ (GPa)")
l = plt.xlabel("$K$ (GPa)")
plt.legend()
plt.xlim([0, 410])
plt.ylim([0, 410])
plt.plot([0, 410], [0, 410], 'k--');
###Output
_____no_output_____ |
week1/Basic Python Operations for Working with Text (Completed Class Copy).ipynb | ###Markdown
Table of Contents0.0.1 Installing Required Libraries0.0.1.1 Getting Familiar With Jupyter Notebooks1 Week 1: Basic Python Operations for Working with Text2 The Scale of Data in the 21st Century2.1 Overview2.1.0.1 Text Analytics2.1.0.2 Data Engineering2.1.0.3 Statistics / Machine Learning2.2 Loading Text into Memory2.2.0.1 Opening Files2.2.1 An Aside: List Comprehension2.2.2 Visualizing Summary Metrics Using Matplotlib2.2.3 First Method: Create a Dictionary to Store Word Count2.2.4 Using Python's Built-In Counter2.2.5 In-Class Question2.3 Zipf's Law2.3.1 General Definition2.3.2 Approximation in NLP3 Regular Expressions3.0.1 Match the first time a capital letter appears in the tweet3.0.2 Match all capital letters that appears in the tweet3.0.3 Match all words that are at least 3 characters long3.0.4 Word Boundaries3.0.5 Removing Stopwords Using Regex3.0.5.1 Exercises4 Homework 1 (Due Monday March 23rd, 2020 at 11:59pm PST)4.1 Next Week (March 24th)4.1.1 Check for Understanding Installing Required Libraries
###Code
!pip3 install matplotlib
!pip3 install pandas
###Output
_____no_output_____
###Markdown
Getting Familiar With Jupyter Notebooks Jupyter keyboard shortcuts:- Press `Esc` to go into **Command Mode**. Your cell should turn from green highlights to blue highlights.- In **Command Mode**, press `M` to go into `Markdown` mode. This turns your cell into Markdown text so you can type text.- Press `Y` to go into `Code` mode. This then allows you to begin typing Python code.- Press `A` to insert a cell above your current cell.- Press `B` to insert a cell below your current cell.- Press `D` twice to delete your current cell.- Press `Shift` + `Enter` to save your cell. Week 1: Basic Python Operations for Working with Text The Scale of Data in the 21st Century ASCII table converting numbers to characters.(Wikipedia) OverviewBy the end of this week, you should be able to perform the following operations: Text Analytics- **load a text file into memory** using Python's built-in streaming libraries- **visualize word count and line length distributions** as histograms using Matplotlib Data Engineering- **read strings from a text input/output stream** using `readline()` and `readlines()`- **use both native Python dictionaries and `collections.Counter` objects** to produce word counts for a text corpus- perform basic search/replace operations using **regular expressions**- encode/decode text from bytes to support internationalization and digital-native characters (such as **emojis**). Statistics / Machine Learning- **create a word transition matrix using Numpy arrays**, which can be used for probabilistic inference and text generation (we will cover Week 2) Loading Text into MemoryThere are a variety of ways to hold data within memory. For text analytics and natural language processing purposes, we'll be most concerned with the following:- **list**- **set**- **dictionary**- **tuple**- **Numpy array**Imagine that we would like to find the most commonly used words in ***A Tale of Two Cities***, by the famed English novelist Charles Dickens, stored in a text file called **`tale-of-two-cities.txt`**, in the same directory as this Jupyter notebook. Later on, we'll use 3rd-party libraries to automate much of the processing, but for now, we'll explore Python's built-in functions for text processing. Opening Files The **`open()`** function takes *two* parameters; **filename**, and **mode**. In our case, `mode` is set to `r` for **read**, since we plan to read the file's contents, as opposed to `w` (write), or `a` (append).
###Code
# Open Tale of Two Cities
text_file = open("tale-of-two-cities.txt", "r")
print(text_file)
###Output
<_io.TextIOWrapper name='tale-of-two-cities.txt' mode='r' encoding='UTF-8'>
###Markdown
Typically, a text character is **1 byte** in size. One byte is equal to **8 bits**. This means conceptually, the size of a string should be $N$ bytes, where $N$ is the number of characters. However, you'll see that in Python, the size of a string is larger:
###Code
import sys
EMPTY_STRING = ""
ONE_CHAR_STRING = "a"
TWO_CHAR_STRING = "ab"
print(f"The size of EMPTY_STRING is {sys.getsizeof(EMPTY_STRING)} bytes.")
print(f"The size of ONE_CHAR_STRING is {sys.getsizeof(ONE_CHAR_STRING)} bytes.")
print(f"The size of TWO_CHAR_STRING is {sys.getsizeof(TWO_CHAR_STRING)} bytes.")
###Output
The size of EMPTY_STRING is 49 bytes.
The size of ONE_CHAR_STRING is 50 bytes.
The size of TWO_CHAR_STRING is 51 bytes.
###Markdown
The **`open()`** function returns a **`TextIOWrapper`** object from Python's `io` module, which handles common input/output streaming operations. A **stream** is a potentially infinite sequence of elements (in our case, characters) arriving over time. You'll use streams to model data that is **unbounded** (it's undetermined the volume, the length, and frequency of the data). A stream has a pointer to its current position within the sequence. This object has an extremely helpful **`readline()`** method that reads from a text file until encountering an **`EOF`** marker or a new line symbol.
###Code
text_file.readline()
###Output
_____no_output_____
###Markdown
You can pass in a parameter to **`readline()`** to control how many bytes of input stream data you'll receive. For instance, **`readline(2)`** returns at most 2 bytes of text input data. You might use this, for instance, if your Python application is reading not from a flat text file, but from a socket, which supplies a continuous stream of data with fixed length (ie., the messages all have the same number of characters).**In-Class Question**: *Assume you just opened the text file with **`open()`**. What output is returned when **`text_file.readline(5)`** is called the **second** time?*- **A)** The entire first line of the novel- **B)** The first 5 characters of the second line- **C)** The entire second line of the novel- **D)** The first 5 characters of the first line- **E)** The 6th-10th characters of the first line
###Code
text_file.seek(0) #reset the stream position to the start of the text file
for i in range(2): # repeat the below line twice
print(f"Iteration {i + 1}: {text_file.readline(5)}")
###Output
Iteration 1: IT
Iteration 2: WAS t
###Markdown
Each time that you call **`readline()`**, a position marker within **`TextIOWrapper`** is moved forward:We typically will use **`readlines()`** instead to read text files line by line. This returns a Python **list**:
###Code
text_file.seek(0) # reset the stream position to the start of the file
lines = text_file.readlines() # read all the lines and return a list of strings
###Output
_____no_output_____
###Markdown
We see that there are **12870** lines of text in the novel.
###Code
print(f"There are {len(lines)} lines in the novel.")
total_num_chars = 0
for line in lines: # iterate through each line
total_num_chars += len(line) # add the number of characters in a line to the total count of characters
avg_chars = round(total_num_chars / len(lines),1) # divide total character count by number of lines to get average
print(f"On average, each line has {avg_chars} characters.")
import matplotlib.pyplot as plt # we are importing the pyplot module from matplotlib, and naming it as plt
###Output
_____no_output_____
###Markdown
An Aside: List ComprehensionSometimes, we need to iterate through a list and perform some sort of operation (sum all the elements, or remove a certain character). The traditional way to do this is using a for loop:```Pythonlengths = [] declare an empty listfor line in lines: iterate through each line lengths.append(len(line)) add the length of each line to the list```A slightly less verbose way, called **list comprehension**, to write this is```Pythonlengths = [len(line) for line in lines]```List comprehension is **typically slightly faster**, since it avoids the additional `append()` call for each iteration of the for loop. See this example from StackOverflow:```Pythondef slower(): using traditional iteration result = [] for elem in some_iterable: result.append(elem) return result``````Pythondef faster(): using list comprehension return [elem for elem in some_iterable]```Within the Python REPL **(read-eval-print-loop)**:```Python>>> some_iterable = range(1000)>>> import timeit>>> timeit.timeit('f()', 'from __main__ import slower as f', number=10000)1.4456570148468018>>> timeit.timeit('f()', 'from __main__ import faster as f', number=10000)0.49323201179504395``` Visualizing Summary Metrics Using Matplotlib
###Code
NUM_BINS = 30 # increase this number to make the visualization more granular
plt.rcParams["figure.figsize"] = (15,6)
plt.hist([len(line) for line in lines], bins=NUM_BINS)
plt.title("Distribution of Line Lengths in Tale of Two Cities") # give the plot a title
plt.xlabel("Number of Characters in Line") # label the X axis
plt.ylabel("Count of Lines") # label the Y axis
plt.show()
###Output
_____no_output_____
###Markdown
What if we now want to visualize how many times each word appears in the entire novel (for now, we won't worry about **stemming / lemmatization** and other preprocessing steps)? First Method: Create a Dictionary to Store Word CountDictionaries in Python have **keys** and **values**. The keys must be unique (no duplicate keys). They can be accessed via the **`keys()`** and **`values()`** methods of a dictionary object.
###Code
words = [] # create a list of all words
word_count = {} # create a dictionary to store word counts
for line in lines: # for each line in the novel
for word in line.split(" "): # for each word in the line
words.append(word) # add the word to the list of words
if word not in word_count.keys(): # if the word has not been seen before, add it to the dictionary with initial count of 1
word_count[word] = 1
else:
word_count[word] += 1 # if the word has been seen before, increment its count by 1
print(f"There's an estimated {len(words)} words in the novel.")
print(f"There's {len(word_count.keys())} unique words in the novel.")
###Output
_____no_output_____
###Markdown
Let's use Python **`sets`** to check that our dictionary's keys are unique. Remember that a set is a collection of **unique elements**, so calling **`set(words)`** will return only the unique words in our text file.
###Code
assert len(word_count.keys()) == len(set(words)), "This error message will be printed if the assertion to the left is not true."
###Output
_____no_output_____
###Markdown
Using Python's Built-In CounterSince the task of building a count using a dictionary is a common operation, Python provides a built-in object called `Counter` that we can use:
###Code
from collections import Counter
def count_words(lines, delimiter=" "):
words = Counter() # instantiate a Counter object called words
for line in lines:
for word in line.split(delimiter):
words[word] += 1 # increment count for word
return words
###Output
_____no_output_____
###Markdown
A core principle of software engineering and programming is **DRY**: Don't Repeat Yourself. Since we are likely going to be making many histograms throughout this course, it's best that we create a reusable function.
###Code
def make_histogram(values, title=None,xlabel=None,ylabel=None, bins=30, x_size=15, y_size=6):
plt.rcParams["figure.figsize"] = (x_size,y_size)
plt.hist(values, bins=bins)
if title:
plt.title(title) # give the plot a title
if xlabel:
plt.xlabel(xlabel) # label the X axis
if ylabel:
plt.ylabel(ylabel) # label the Y axis
plt.show()
make_histogram(word_count.values(),
title="Distribution of Word Count",
xlabel="Number of Times Word Appears",
ylabel="Number of Unique Words")
###Output
_____no_output_____
###Markdown
In-Class Question- Why does this distribution look the way it does? - What additional steps could be taken to make the results more meaningful?
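One common next step (a sketch, assuming the `lines` list read earlier is still in memory): lowercase the text and strip punctuation before counting, so that 'The', 'the', and 'the,' are not treated as different words.
```Python
# Sketch: recompute word counts after basic normalization.
# Assumes `lines` (read earlier with readlines()) is still defined.
import string
from collections import Counter

cleaned_count = Counter()
for line in lines:
    for word in line.lower().split():          # split() also collapses repeated whitespace
        word = word.strip(string.punctuation)  # remove leading/trailing punctuation
        if word:                               # skip empty tokens
            cleaned_count[word] += 1

print(cleaned_count.most_common(10))
```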
###Code
import pandas as pd # output the results to a dataframe
word_count_df = pd.DataFrame(columns=["word", "frequency"]) # create a dataframe with two columns, word and frequency
word_count_df["word"] = list(word_count.keys())
word_count_df["frequency"] = list(word_count.values())
word_count_df.to_csv("dickens_word_count.csv") # saves the word counts to a CSV file in the current working directory
###Output
_____no_output_____
###Markdown
Zipf's Law General DefinitionZipf's Law states that for a vocabulary of `N` words, the `k`th most frequent word will appear with a normalized frequency equal to$$f(k; s, N) = \frac{1/k^{s}}{\sum_{n=1}^{N} 1/n^{s}}$$The parameter $s$ is an exponent that defines the behavior of the distribution. Traditionally, in natural language, $s = 1$. (Stefan Evert, http://zipfr.r-forge.r-project.org/materials/LREC2018/tutorial_lrec2018.handout.pdf) Approximation in NLPIf $t_1$ is the most common word in a collection of text, and $t_2$ is the next most common word, then the frequency of the $i$th most common word is proportional to $\frac{1}{i}$. The approximation we'll use specifically for natural languages, with $\alpha = 1$, is$$f(t_i) = \frac{0.1}{i^\alpha}$$to represent the frequency of a word in a body of text.In human language, there are **a few high-frequency words and many low-frequency words**. What does this mean in terms of machine learning / data modelling?* In many cases, the high frequency words do not carry much value in terms of predictive power or signal. These are frequently **stopwords** that must be removed / otherwise feature-engineered.
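As a quick empirical check (a sketch, assuming the `word_count` dictionary built earlier and matplotlib's `plt` are still in memory), you can plot the observed rank-frequency curve against the $0.1/i$ approximation on log-log axes:
```Python
# Sketch: compare the empirical rank-frequency curve with the 0.1 / rank approximation.
# Assumes `word_count` (built earlier) and `plt` are available in this notebook session.
total = sum(word_count.values())
freqs = sorted(word_count.values(), reverse=True)
ranks = range(1, len(freqs) + 1)

plt.loglog(ranks, [f / total for f in freqs], label="observed")
plt.loglog(ranks, [0.1 / r for r in ranks], "--", label="0.1 / rank")
plt.xlabel("rank $i$")
plt.ylabel("normalized frequency")
plt.legend()
plt.show()
```
Regular Expressions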
###Code
# get the top stopwords
word_count_df.sort_values(by=["frequency"], ascending=False).head(5)
import re
SAMPLE_TWEET = '''
#wolfram Alpha SUCKS! Even for researchers the information provided is less than you can get from
#google or #wikipedia, totally useless! Avoid Wolfram at all costs, #ScrewWolframProducts"
'''
# create a dataframe version of Dickens' novel
dickens_text_df = pd.DataFrame( open("tale-of-two-cities.txt", "r"), columns=["line"])
dickens_text_df["line"] = dickens_text_df["line"].str.replace("\n", "")
###Output
_____no_output_____
###Markdown
Match the first time a capital letter appears in the tweet
###Code
match = re.search("[A-Z]", SAMPLE_TWEET)
match.group()
###Output
_____no_output_____
###Markdown
Match all capital letters that appears in the tweet
###Code
# re
re.findall("[A-Z]", SAMPLE_TWEET)
# pandas
dickens_text_df["results"] = dickens_text_df["line"].str.extract(r'([A-Z])')
###Output
_____no_output_____
###Markdown
Match all words that are at least 3 characters long
###Code
# re
re.findall("[a-zA-Z]{3,}", SAMPLE_TWEET)[:5] # show only the first 5
# pandas
dickens_text_df["results"] = dickens_text_df["line"].str.extract(r'([a-zA-Z]{3,})')
dickens_text_df["results"] = dickens_text_df["line"].str.findall(r'([a-zA-Z]{3,})')
dickens_text_df.head(5)
###Output
_____no_output_____
###Markdown
Word BoundariesConsider the sentence:*A thorough examination of the movie shows Thor was a thorn in the side of the villains, both then and now. thor.*What happens if you try to parse out all `Thor` references? What happens if you want to remove `A` or `a`, or `the` to clean up the text?
###Code
text = "A thorough examination of the movie shows Thor was a thorn in the side of the villains, both then and now. thor."
# re
text = "A thorough examination of the movie shows Thor was a thorn in the side of the villains, both then and now. thor."
text = re.sub(r'(a|A)', '', text)
text
text = "A thorough examination of the movie shows Thor was a thorn in the side of the villains, both then and now. thor."
re.findall(r'\b(thor|Thor)\b', text) # notice the use of the r string prefix!
# pandas
dickens_text_df["results"] = dickens_text_df["line"].str.findall(r'\bthe\b', case=False)
dickens_text_df.head(5)
###Output
_____no_output_____
###Markdown
Removing Stopwords Using Regex
###Code
# re
text = re.sub(r'\bthe\b', '', text, flags=re.IGNORECASE)  # word boundaries avoid clobbering words like "then" and "thorough"
text
# pandas
dickens_text_df["results"] = dickens_text_df["line"].str.replace(r'\bthe\b', '', case=False)
dickens_text_df.head()
###Output
_____no_output_____
###Markdown
Table of Contents0.0.1 Installing Required Libraries0.0.1.1 Getting Familiar With Jupyter Notebooks1 Week 1: Basic Python Operations for Working with Text2 The Scale of Data in the 21st Century2.1 Overview2.1.0.1 Text Analytics2.1.0.2 Data Engineering2.1.0.3 Statistics / Machine Learning2.2 Loading Text into Memory2.2.0.1 Opening Files2.2.1 An Aside: List Comprehension2.2.2 Visualizing Summary Metrics Using Matplotlib2.2.3 First Method: Create a Dictionary to Store Word Count2.2.4 Using Python's Built-In Counter2.2.5 In-Class Question2.3 Zipf's Law2.3.1 General Definition2.3.2 Approximation in NLP3 Regular Expressions3.0.1 Match the first time a capital letter appears in the tweet3.0.2 Match all capital letters that appears in the tweet3.0.3 Match all words that are at least 3 characters long3.0.4 Word Boundaries3.0.5 Removing Stopwords Using Regex3.0.5.1 Exercises4 Homework 1 (Due Monday March 23rd, 2020 at 11:59pm PST)4.1 Next Week (March 24th)4.1.1 Check for Understanding Installing Required Libraries
###Code
!pip3 install matplotlib
!pip3 install pandas
###Output
Requirement already satisfied: matplotlib in /Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages
Requirement already satisfied: numpy>=1.7.1 in /Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages (from matplotlib)
Requirement already satisfied: kiwisolver>=1.0.1 in /Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages (from matplotlib)
Requirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages (from matplotlib)
Requirement already satisfied: python-dateutil>=2.1 in /Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages (from matplotlib)
Requirement already satisfied: six>=1.10 in /Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages (from matplotlib)
Requirement already satisfied: pytz in /Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages (from matplotlib)
Requirement already satisfied: cycler>=0.10 in /Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages (from matplotlib)
Requirement already satisfied: setuptools in /Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages (from kiwisolver>=1.0.1->matplotlib)
[33mYou are using pip version 9.0.3, however version 20.0.2 is available.
You should consider upgrading via the 'pip install --upgrade pip' command.[0m
Requirement already satisfied: pandas in /Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages
Requirement already satisfied: pytz>=2017.2 in /Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages (from pandas)
Requirement already satisfied: numpy>=1.13.3 in /Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages (from pandas)
Requirement already satisfied: python-dateutil>=2.6.1 in /Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages (from pandas)
Requirement already satisfied: six>=1.5 in /Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages (from python-dateutil>=2.6.1->pandas)
[33mYou are using pip version 9.0.3, however version 20.0.2 is available.
You should consider upgrading via the 'pip install --upgrade pip' command.[0m
###Markdown
Getting Familiar With Jupyter Notebooks Jupyter keyboard shortcuts:- Press `Esc` to go into **Command Mode**. Your cell should turn from green highlights to blue highlights.- In **Command Mode**, press `M` to go into `Markdown` mode. This turns your cell into Markdown text so you can type text.- Press `Y` to go into `Code` mode. This then allows you to begin typing Python code.- Press `A` to insert a cell above your current cell.- Press `B` to insert a cell below your current cell.- Press `D` twice to delete your current cell.- Press `Shift` + `Enter` to save your cell. Week 1: Basic Python Operations for Working with Text The Scale of Data in the 21st Century ASCII table converting numbers to characters.(Wikipedia) OverviewBy the end of this week, you should be able to perform the following operations: Text Analytics- **load a text file into memory** using Python's built-in streaming libraries- **visualize word count and line length distributions** as histograms using Matplotlib Data Engineering- **read strings from a text input/output stream** using `readline()` and `readlines()`- **use both native Python dictionaries and `collections.Counter` objects** to produce word counts for a text corpus- perform basic search/replace operations using **regular expressions**- encode/decode text from bytes to support internationalization and digital-native characters (such as **emojis**). Statistics / Machine Learning- **create a word transition matrix using Numpy arrays**, which can be used for probabilistic inference and text generation (we will cover Week 2) Loading Text into MemoryThere are a variety of ways to hold data within memory. For text analytics and natural language processing purposes, we'll be most concerned with the following:- **list**- **set**- **dictionary**- **tuple**- **Numpy array**Imagine that we would like to find the most commonly used words in ***A Tale of Two Cities***, by the famed English novelist Charles Dickens, stored in a text file called **`tale-of-two-cities.txt`**, in the same directory as this Jupyter notebook. Later on, we'll use 3rd-party libraries to automate much of the processing, but for now, we'll explore Python's built-in functions for text processing. Opening Files The **`open()`** function takes *two* parameters; **filename**, and **mode**. In our case, `mode` is set to `r` for **read**, since we plan to read the file's contents, as opposed to `w` (write), or `a` (append).
###Code
# Open Tale of Two Cities
text_file = open("tale-of-two-cities.txt", "r")
print(text_file)
###Output
<_io.TextIOWrapper name='tale-of-two-cities.txt' mode='r' encoding='UTF-8'>
###Markdown
Typically, a text character is **1 byte** in size. One byte is equal to **8 bits**. This means conceptually, the size of a string should be $N$ bytes, where $N$ is the number of characters. However, you'll see that in Python, the size of a string is larger:
###Code
import sys
EMPTY_STRING = ""
ONE_CHAR_STRING = "a"
TWO_CHAR_STRING = "ab"
print(f"The size of EMPTY_STRING is {sys.getsizeof(EMPTY_STRING)} bytes.")
print(f"The size of ONE_CHAR_STRING is {sys.getsizeof(ONE_CHAR_STRING)} bytes.")
print(f"The size of TWO_CHAR_STRING is {sys.getsizeof(TWO_CHAR_STRING)} bytes.")
###Output
The size of EMPTY_STRING is 49 bytes.
The size of ONE_CHAR_STRING is 50 bytes.
The size of TWO_CHAR_STRING is 51 bytes.
###Markdown
The **`open()`** function returns a **`TextIOWrapper`** object from Python's `io` module, which handles common input/output streaming operations. A **stream** is a potentially infinite sequence of elements (in our case, characters) arriving over time. You'll use streams to model data that is **unbounded** (it's undetermined the volume, the length, and frequency of the data). A stream has a pointer to its current position within the sequence. This object has an extremely helpful **`readline()`** method that reads from a text file until encountering an **`EOF`** marker or a new line symbol.
###Code
text_file.readline()
###Output
_____no_output_____
###Markdown
You can pass in a parameter to **`readline()`** to control how many bytes of input stream data you'll receive. For instance, **`readline(2)`** returns at most 2 bytes of text input data. You might use this, for instance, if your Python application is reading not from a flat text file, but from a socket, which supplies a continuous stream of data with fixed length (ie., the messages all have the same number of characters).**In-Class Question**: *Assume you just opened the text file with **`open()`**. What output is returned when **`text_file.readline(5)`** is called the **second** time?*- **A)** The entire first line of the novel- **B)** The first 5 characters of the second line- **C)** The entire second line of the novel- **D)** The first 5 characters of the first line- **E)** The 6th-10th characters of the first line
###Code
text_file.seek(0) #reset the stream position to the start of the text file
for i in range(2): # repeat the below line twice
print(f"Iteration {i + 1}: {text_file.readline(5)}")
###Output
Iteration 1: IT
Iteration 2: WAS t
###Markdown
Each time that you call **`readline()`**, a position marker within **`TextIOWrapper`** is moved forward:We typically will use **`readlines()`** instead to read text files line by line. This returns a Python **list**:
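You can watch that position marker move with `tell()` and rewind it with `seek()`; a short sketch on a separate handle (`f` is a throwaway name):
```Python
# Sketch: the stream position advances with each read and can be rewound
f = open("tale-of-two-cities.txt", "r")
print(f.tell())    # 0 -- at the very start of the file
f.readline()       # consume one line
print(f.tell())    # a positive number -- the marker moved past that line
f.seek(0)          # rewind to the beginning
print(f.tell())    # 0 again
f.close()
```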
###Code
text_file.seek(0) # reset the stream position to the start of the file
lines = text_file.readlines() # read all the lines and return a list of strings
###Output
_____no_output_____
###Markdown
We see that there are **12870** lines of text in the novel.
###Code
print(f"There are {len(lines)} lines in the novel.")
total_num_chars = 0
for line in lines: # iterate through each line
total_num_chars += len(line) # add the number of characters in a line to the total count of characters
avg_chars = round(total_num_chars / len(lines),1) # divide total character count by number of lines to get average
print(f"On average, each line has {avg_chars} characters.")
import matplotlib.pyplot as plt # we are importing the pyplot module from matplotlib, and naming it as plt
###Output
_____no_output_____
###Markdown
An Aside: List ComprehensionSometimes, we need to iterate through a list and perform some sort of operation (sum all the elements, or remove a certain character). The traditional way to do this is using a for loop:```Pythonlengths = [] declare an empty listfor line in lines: iterate through each line lengths.append(len(line)) add the length of each line to the list```A slightly less verbose way, called **list comprehension**, to write this is```Pythonlengths = [len(line) for line in lines]```List comprehension is **typically slightly faster**, since it avoids the additional `append()` call for each iteration of the for loop. See this example from StackOverflow:```Pythondef slower(): using traditional iteration result = [] for elem in some_iterable: result.append(elem) return result``````Pythondef faster(): using list comprehension return [elem for elem in some_iterable]```Within the Python REPL **(read-eval-print-loop)**:```Python>>> some_iterable = range(1000)>>> import timeit>>> timeit.timeit('f()', 'from __main__ import slower as f', number=10000)1.4456570148468018>>> timeit.timeit('f()', 'from __main__ import faster as f', number=10000)0.49323201179504395``` Visualizing Summary Metrics Using Matplotlib
###Code
NUM_BINS = 30 # increase this number to make the visualization more granular
plt.rcParams["figure.figsize"] = (15,6)
plt.hist([len(line) for line in lines], bins=NUM_BINS)
plt.title("Distribution of Line Lengths in Tale of Two Cities") # give the plot a title
plt.xlabel("Number of Characters in Line") # label the X axis
plt.ylabel("Count of Lines") # label the Y axis
plt.show()
###Output
_____no_output_____
###Markdown
What if now we want to visualize how many times each word appears in the entire novel (for now, we won't worry about **stemming / lemmatization** and other preprocessing steps)? First Method: Create a Dictionary to Store Word CountDictionaries in Python have **keys** and **values**. The keys must be unique (no duplicate keys). They can be accessed via the **`keys()`** and **`values()`** methods of a dictionary object.
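A slightly more compact variant of the counting loop in the next cell uses `dict.get()` with a default of `0`, which removes the explicit membership test; a sketch of the same idea (the name `word_count_alt` is only for illustration):
```Python
# Sketch: counting words with dict.get() instead of an if/else membership check
word_count_alt = {}
for line in lines:
    for word in line.split(" "):
        word_count_alt[word] = word_count_alt.get(word, 0) + 1
```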
###Code
words = [] # create a list of all words
word_count = {} # create a dictionary to store word counts
for line in lines: # for each line in the novel
for word in line.split(" "): # for each word in the line
words.append(word) # add the word to the list of words
if word not in word_count.keys(): # if the word has not been seen before, add it to the dictionary with initial count of 1
word_count[word] = 1
else:
word_count[word] += 1 # if the word has been seen before, increment its count by 1
print(f"There's an estimated {len(words)} words in the novel.")
print(f"There's {len(word_count.keys())} unique words in the novel.")
###Output
There's an estimated 143345 words in the novel.
There's 21683 unique words in the novel.
###Markdown
Let's use Python **`sets`** to check that our dictionary's keys are unique. Remember that a set is a collection of **unique elements**, so calling **`set(words)`** will return only the unique words in our text file.
###Code
assert len(word_count.keys()) == len(set(words)), "This error message will be printed if the assertion to the left is not true."
###Output
_____no_output_____
###Markdown
Using Python's Built-In CounterSince the task of building a count using a dictionary is a common operation, Python provides a built-in object called `Counter` that we can use:
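Besides incrementing like a dictionary, a `Counter` can rank its entries directly via `most_common()`; a usage sketch, assuming the `count_words()` helper defined in the next cell has been run:
```Python
# Sketch: the ten most frequent tokens, as (word, count) pairs
counts = count_words(lines)
print(counts.most_common(10))
```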
###Code
from collections import Counter
def count_words(lines, delimiter=" "):
words = Counter() # instantiate a Counter object called words
for line in lines:
for word in line.split(delimiter):
words[word] += 1 # increment count for word
return words
###Output
_____no_output_____
###Markdown
A core principle of software engineering and programming is **DRY**: Don't Repeat Yourself. Since we are likely going to be making many histograms throughout this course, it's best that we create a reusable function.
###Code
def make_histogram(values, title=None,xlabel=None,ylabel=None, bins=30, x_size=15, y_size=6):
plt.rcParams["figure.figsize"] = (x_size,y_size)
plt.hist(values, bins=bins)
if title:
plt.title(title) # give the plot a title
if xlabel:
plt.xlabel(xlabel) # label the X axis
if ylabel:
plt.ylabel(ylabel) # label the Y axis
plt.show()
make_histogram(word_count.values(),
title="Distribution of Word Count",
xlabel="Number of Times Word Appears",
ylabel="Number of Unique Words")
###Output
_____no_output_____
###Markdown
In-Class Question- Why does this distribution look the way it does? - What additional steps could be taken to make the results more meaningful?
###Code
import pandas as pd # output the results to a dataframe
word_count_df = pd.DataFrame(columns=["word", "frequency"]) # create a dataframe with two columns, word and frequency
word_count_df["word"] = list(word_count.keys())
word_count_df["frequency"] = list(word_count.values())
word_count_df.to_csv("dickens_word_count.csv") # saves to an outputs folder - if you don't have one, Python will throw an error
###Output
_____no_output_____
###Markdown
Zipf's Law General DefinitionZipf's Law states that for `N` words, the `k`th most frequent word will appear with a normalized frequency equal to $$f(k; s, N) = \frac{1/k^{s}}{\sum_{n=1}^{N} 1/n^{s}}$$ The parameter $s$ is an exponent that defines the behavior of the distribution. Traditionally, in natural language, $s = 1$. Stefan Evert, http://zipfr.r-forge.r-project.org/materials/LREC2018/tutorial_lrec2018.handout.pdf Approximation in NLPIf $t_1$ is the most common word in a collection of text, and $t_2$ is the next most common word, then the frequency of the $i$th most common word is proportional to $\frac{1}{i}$. The approximation we'll use specifically for natural languages is$$f(t_i) = \frac{0.1}{i^\alpha}$$with $\alpha = 1$, to represent the frequency of a word in a body of text.In human language, there are **a few high-frequency words and many low-frequency words**. What does this mean in terms of machine learning / data modelling?* In many cases, the high frequency words do not carry much value in terms of predictive power or signal. These are frequently **stopwords** that must be removed / otherwise feature-engineered. Regular Expressions
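To get a feel for how well the $f(t_i) = 0.1/i$ approximation fits the Dickens counts, you can compare the observed relative frequencies of the top-ranked words against the rule; a rough sketch, assuming the `word_count` dictionary built earlier in this notebook (the names `total` and `top` are only for illustration):
```Python
# Sketch: observed relative frequency vs. the 0.1 / rank approximation
total = sum(word_count.values())
top = sorted(word_count.items(), key=lambda kv: kv[1], reverse=True)[:10]
for rank, (word, count) in enumerate(top, start=1):
    observed = count / total
    predicted = 0.1 / rank
    print(f"{rank:>2} {word!r:<12} observed={observed:.4f} predicted={predicted:.4f}")
```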
###Code
# get the top stopwords
word_count_df.sort_values(by=["frequency"], ascending=False).head(5)
import re
SAMPLE_TWEET = '''
#wolfram Alpha SUCKS! Even for researchers the information provided is less than you can get from
#google or #wikipedia, totally useless! Avoid Wolfram at all costs, #ScrewWolframProducts"
'''
# create a dataframe version of Dickens' novel
dickens_text_df = pd.DataFrame( open("tale-of-two-cities.txt", "r"), columns=["line"])
dickens_text_df["line"] = dickens_text_df["line"].str.replace("\n", "")
###Output
_____no_output_____
###Markdown
Match the first time a capital letter appears in the tweet
###Code
match = re.search("[A-Z]", SAMPLE_TWEET)
match.group()
###Output
_____no_output_____
###Markdown
Match all capital letters that appears in the tweet
###Code
# re
re.findall("[A-Z]", SAMPLE_TWEET)
# pandas
dickens_text_df["results"] = dickens_text_df["line"].str.extract(r'([A-Z])')
###Output
_____no_output_____
###Markdown
Match all words that are at least 3 characters long
###Code
# re
re.findall("[a-zA-Z]{3,}", SAMPLE_TWEET)[:5] # show only the first 5
# pandas
dickens_text_df["results"] = dickens_text_df["line"].str.extract(r'([a-zA-Z]{3,})')
dickens_text_df["results"] = dickens_text_df["line"].str.findall(r'([a-zA-Z]{3,})')
dickens_text_df.head(5)
###Output
_____no_output_____
###Markdown
Word BoundariesConsider the sentence:*A thorough examination of the movie shows Thor was a thorn in the side of the villains, both then and now. thor.*What happens if you try to parse out all `Thor` references? What happens if you want to remove `A` or `a`, or `the` to clean up the text?
###Code
text = "A thorough examination of the movie shows Thor was a thorn in the side of the villains, both then and now. thor."
# re
text = "A thorough examination of the movie shows Thor was a thorn in the side of the villains, both then and now. thor."
text = re.sub(r'(a|A)', '', text)
text
text = "A thorough examination of the movie shows Thor was a thorn in the side of the villains, both then and now. thor."
re.findall(r'\b(thor|Thor)\b', text) # notice the use of the r string prefix!
# pandas
dickens_text_df["results"] = dickens_text_df["line"].str.findall(r'\bthe\b', case=False)
dickens_text_df.head(5)
###Output
_____no_output_____
###Markdown
Removing Stopwords Using Regex
###Code
# re
text = re.sub('(the|The)', '', text, flags=re.IGNORECASE)
text
# pandas
dickens_text_df["results"] = dickens_text_df["line"].str.replace(r'\bthe\b', '', case=False)
dickens_text_df.head()
###Output
_____no_output_____
###Markdown
Table of Contents0.0.1 Installing Required Libraries0.0.1.1 Getting Familiar With Jupyter Notebooks1 Week 1: Basic Python Operations for Working with Text2 The Scale of Data in the 21st Century2.1 Overview2.1.0.1 Text Analytics2.1.0.2 Data Engineering2.1.0.3 Statistics / Machine Learning2.2 Loading Text into Memory2.2.0.1 Opening Files2.2.1 An Aside: List Comprehension2.2.2 Visualizing Summary Metrics Using Matplotlib2.2.3 First Method: Create a Dictionary to Store Word Count2.2.4 Using Python's Built-In Counter2.2.5 In-Class Question2.3 Zipf's Law2.3.1 General Definition2.3.2 Approximation in NLP3 Regular Expressions3.0.1 Match the first time a capital letter appears in the tweet3.0.2 Match all capital letters that appears in the tweet3.0.3 Match all words that are at least 3 characters long3.0.4 Word Boundaries3.0.5 Removing Stopwords Using Regex3.0.5.1 Exercises4 Homework 1 (Due Monday March 23rd, 2020 at 11:59pm PST)4.1 Next Week (March 24th)4.1.1 Check for Understanding Installing Required Libraries
###Code
!pip3 install matplotlib
!pip3 install pandas
###Output
_____no_output_____
###Markdown
Getting Familiar With Jupyter Notebooks Jupyter keyboard shortcuts:- Press `Esc` to go into **Command Mode**. Your cell should turn from green highlights to blue highlights.- In **Command Mode**, press `M` to go into `Markdown` mode. This turns your cell into Markdown text so you can type text.- Press `Y` to go into `Code` mode. This then allows you to begin typing Python code.- Press `A` to insert a cell above your current cell.- Press `B` to insert a cell below your current cell.- Press `D` twice to delete your current cell.- Press `Shift` + `Enter` to save your cell. Week 1: Basic Python Operations for Working with Text The Scale of Data in the 21st Century ASCII table converting numbers to characters.(Wikipedia) OverviewBy the end of this week, you should be able to perform the following operations: Text Analytics- **load a text file into memory** using Python's built-in streaming libraries- **visualize word count and line length distributions** as histograms using Matplotlib Data Engineering- **read strings from a text input/output stream** using `readline()` and `readlines()`- **use both native Python dictionaries and `collections.Counter` objects** to produce word counts for a text corpus- perform basic search/replace operations using **regular expressions**- encode/decode text from bytes to support internationalization and digital-native characters (such as **emojis**). Statistics / Machine Learning- **create a word transition matrix using Numpy arrays**, which can be used for probabilistic inference and text generation (we will cover Week 2) Loading Text into MemoryThere are a variety of ways to hold data within memory. For text analytics and natural language processing purposes, we'll be most concerned with the following:- **list**- **set**- **dictionary**- **tuple**- **Numpy array**Imagine that we would like to find the most commonly used words in ***A Tale of Two Cities***, by the famed English novelist Charles Dickens, stored in a text file called **`tale-of-two-cities.txt`**, in the same directory as this Jupyter notebook. Later on, we'll use 3rd-party libraries to automate much of the processing, but for now, we'll explore Python's built-in functions for text processing. Opening Files The **`open()`** function takes *two* parameters; **filename**, and **mode**. In our case, `mode` is set to `r` for **read**, since we plan to read the file's contents, as opposed to `w` (write), or `a` (append).
###Code
# Open Tale of Two Cities
text_file = open("tale-of-two-cities.txt", "r", encoding='utf8')
print(text_file)
###Output
<_io.TextIOWrapper name='tale-of-two-cities.txt' mode='r' encoding='utf8'>
###Markdown
Typically, a text character is **1 byte** in size. One byte is equal to **8 bits**. This means conceptually, the size of a string should be $N$ bytes, where $N$ is the number of characters. However, you'll see that in Python, the size of a string is larger:
###Code
import sys
EMPTY_STRING = ""
ONE_CHAR_STRING = "a"
TWO_CHAR_STRING = "ab"
print(f"The size of EMPTY_STRING is {sys.getsizeof(EMPTY_STRING)} bytes.")
print(f"The size of ONE_CHAR_STRING is {sys.getsizeof(ONE_CHAR_STRING)} bytes.")
print(f"The size of TWO_CHAR_STRING is {sys.getsizeof(TWO_CHAR_STRING)} bytes.")
###Output
The size of EMPTY_STRING is 49 bytes.
The size of ONE_CHAR_STRING is 50 bytes.
The size of TWO_CHAR_STRING is 51 bytes.
###Markdown
The **`open()`** function returns a **`TextIOWrapper`** object from Python's `io` module, which handles common input/output streaming operations. A **stream** is a potentially infinite sequence of elements (in our case, characters) arriving over time. You'll use streams to model data that is **unbounded** (the volume, length, and arrival rate of the data are not known in advance). A stream keeps a pointer to its current position within the sequence. This object has an extremely helpful **`readline()`** method that reads from the text file until it encounters an **`EOF`** marker or a newline symbol.
###Code
text_file.readline()
###Output
_____no_output_____
###Markdown
You can pass in a parameter to **`readline()`** to control how many bytes of input stream data you'll receive. For instance, **`readline(2)`** returns at most 2 bytes of text input data. You might use this, for instance, if your Python application is reading not from a flat text file, but from a socket, which supplies a continuous stream of data with fixed length (ie., the messages all have the same number of characters).**In-Class Question**: *Assume you just opened the text file with **`open()`**. What output is returned when **`text_file.readline(5)`** is called the **second** time?*- **A)** The entire first line of the novel- **B)** The first 5 characters of the second line- **C)** The entire second line of the novel- **D)** The first 5 characters of the first line- **E)** The 6th-10th characters of the first line
###Code
text_file.seek(0) #reset the stream position to the start of the text file
for i in range(2): # repeat the below line twice
print(f"Iteration {i + 1}: {text_file.readline(5)}")
###Output
Iteration 1: IT
Iteration 2: WAS t
###Markdown
Each time that you call **`readline()`**, a position marker within **`TextIOWrapper`** is moved forward:We typically will use **`readlines()`** instead to read text files line by line. This returns a Python **list**:
###Code
text_file.seek(0) # reset the stream position to the start of the file
lines = text_file.readlines() # read all the lines and return a list of strings
###Output
_____no_output_____
###Markdown
We see that there are **12870** lines of text in the novel.
###Code
print(f"There are {len(lines)} lines in the novel.")
total_num_chars = 0
for line in lines: # iterate through each line
total_num_chars += len(line) # add the number of characters in a line to the total count of characters
avg_chars = round(total_num_chars / len(lines),1) # divide total character count by number of lines to get average
print(f"On average, each line has {avg_chars} characters.")
import matplotlib.pyplot as plt # we are importing the pyplot module from matplotlib, and naming it as plt
###Output
_____no_output_____
###Markdown
An Aside: List ComprehensionSometimes, we need to iterate through a list and perform some sort of operation (sum all the elements, or remove a certain character). The traditional way to do this is using a for loop:```Pythonlengths = [] declare an empty listfor line in lines: iterate through each line lengths.append(len(line)) add the length of each line to the list```A slightly less verbose way, called **list comprehension**, to write this is```Pythonlengths = [len(line) for line in lines]```List comprehension is **typically slightly faster**, since it avoids the additional `append()` call for each iteration of the for loop. See this example from StackOverflow:```Pythondef slower(): using traditional iteration result = [] for elem in some_iterable: result.append(elem) return result``````Pythondef faster(): using list comprehension return [elem for elem in some_iterable]```Within the Python REPL **(read-eval-print-loop)**:```Python>>> some_iterable = range(1000)>>> import timeit>>> timeit.timeit('f()', 'from __main__ import slower as f', number=10000)1.4456570148468018>>> timeit.timeit('f()', 'from __main__ import faster as f', number=10000)0.49323201179504395``` Visualizing Summary Metrics Using Matplotlib
###Code
NUM_BINS = 30 # increase this number to make the visualization more granular
plt.rcParams["figure.figsize"] = (15,6)
plt.hist([len(line) for line in lines], bins=NUM_BINS)
plt.title("Distribution of Line Lengths in Tale of Two Cities") # give the plot a title
plt.xlabel("Number of Characters in Line") # label the X axis
plt.ylabel("Count of Lines") # label the Y axis
plt.show()
###Output
_____no_output_____
###Markdown
What if now we want to visualize how many times each word appears in the entire novel (for now, we won't worry about **stemming / lemmatization** and other preprocessing steps)? First Method: Create a Dictionary to Store Word CountDictionaries in Python have **keys** and **values**. The keys must be unique (no duplicate keys). They can be accessed via the **`keys()`** and **`values()`** methods of a dictionary object.
###Code
words = [] # create a list of all words
word_count = {} # create a dictionary to store word counts
for line in lines: # for each line in the novel
for word in line.split(" "): # for each word in the line
words.append(word) # add the word to the list of words
if word not in word_count.keys(): # if the word has not been seen before, add it to the dictionary with initial count of 1
word_count[word] = 1
else:
word_count[word] += 1 # if the word has been seen before, increment its count by 1
print(f"There's an estimated {len(words)} words in the novel.")
print(f"There's {len(word_count.keys())} unique words in the novel.")
###Output
There's an estimated 143345 words in the novel.
There's 21683 unique words in the novel.
###Markdown
Let's use Python **`sets`** to check that our dictionary's keys are unique. Remember that a set is a collection of **unique elements**, so calling **`set(words)`** will return only the unique words in our text file.
###Code
assert len(word_count.keys()) == len(set(words)), "This error message will be printed if the assertion to the left is not true."
###Output
_____no_output_____
###Markdown
Using Python's Built-In CounterSince the task of building a count using a dictionary is a common operation, Python provides a built-in object called `Counter` that we can use:
###Code
from collections import Counter
def count_words(lines, delimiter=" "):
words = Counter() # instantiate a Counter object called words
for line in lines:
for word in line.split(delimiter):
words[word] += 1 # increment count for word
return words
###Output
_____no_output_____
###Markdown
A core principle of software engineering and programming is **DRY**: Don't Repeat Yourself. Since we are likely going to be making many histograms throughout this course, it's best that we create a reusable function.
###Code
def make_histogram(values, title=None,xlabel=None,ylabel=None, bins=30, x_size=15, y_size=6):
plt.rcParams["figure.figsize"] = (x_size,y_size)
plt.hist(values, bins=bins)
if title:
plt.title(title) # give the plot a title
if xlabel:
plt.xlabel(xlabel) # label the X axis
if ylabel:
plt.ylabel(ylabel) # label the Y axis
plt.show()
make_histogram(word_count.values(),
title="Distribution of Word Count",
xlabel="Number of Times Word Appears",
ylabel="Number of Unique Words")
###Output
_____no_output_____
###Markdown
In-Class Question- Why does this distribution look the way it does? - What additional steps could be taken to make the results more meaningful?
###Code
import pandas as pd # output the results to a dataframe
word_count_df = pd.DataFrame(columns=["word", "frequency"]) # create a dataframe with two columns, word and frequency
word_count_df["word"] = list(word_count.keys())
word_count_df["frequency"] = list(word_count.values())
word_count_df.to_csv("dickens_word_count.csv") # saves to an outputs folder - if you don't have one, Python will throw an error
###Output
_____no_output_____
###Markdown
Zipf's Law General DefinitionZipf's Law states that for `N` words, the `k`th most frequent word will appear with a normalized frequency equal to $$f(k; s, N) = \frac{1/k^{s}}{\sum_{n=1}^{N} 1/n^{s}}$$ The parameter $s$ is an exponent that defines the behavior of the distribution. Traditionally, in natural language, $s = 1$. Stefan Evert, http://zipfr.r-forge.r-project.org/materials/LREC2018/tutorial_lrec2018.handout.pdf Approximation in NLPIf $t_1$ is the most common word in a collection of text, and $t_2$ is the next most common word, then the frequency of the $i$th most common word is proportional to $\frac{1}{i}$. The approximation we'll use specifically for natural languages is$$f(t_i) = \frac{0.1}{i^\alpha}$$with $\alpha = 1$, to represent the frequency of a word in a body of text.In human language, there are **a few high-frequency words and many low-frequency words**. What does this mean in terms of machine learning / data modelling?* In many cases, the high frequency words do not carry much value in terms of predictive power or signal. These are frequently **stopwords** that must be removed / otherwise feature-engineered. Regular Expressions
###Code
# get the top stopwords
word_count_df.sort_values(by=["frequency"], ascending=False).head(5)
import re
SAMPLE_TWEET = '''
#wolfram Alpha SUCKS! Even for researchers the information provided is less than you can get from
#google or #wikipedia, totally useless! Avoid Wolfram at all costs, #ScrewWolframProducts"
'''
# create a dataframe version of Dickens' novel
dickens_text_df = pd.DataFrame( open("tale-of-two-cities.txt", "r", encoding='utf8').readlines(), columns=["line"])
dickens_text_df["line"] = dickens_text_df["line"].str.replace("\n", "")
###Output
_____no_output_____
###Markdown
Match the first time a capital letter appears in the tweet
###Code
match = re.search("[A-Z]", SAMPLE_TWEET)
match.group()
###Output
_____no_output_____
###Markdown
Match all capital letters that appears in the tweet
###Code
# re
re.findall("[A-Z]", SAMPLE_TWEET)
# pandas
dickens_text_df["results"] = dickens_text_df["line"].str.extract(r'([A-Z])')
###Output
_____no_output_____
###Markdown
Match all words that are at least 3 characters long
###Code
# re
re.findall("[a-zA-Z]{3,}", SAMPLE_TWEET)[:5] # show only the first 5
# pandas
dickens_text_df["results"] = dickens_text_df["line"].str.extract(r'([a-zA-Z]{3,})')
dickens_text_df["results"] = dickens_text_df["line"].str.findall(r'([a-zA-Z]{3,})')
dickens_text_df.head(5)
###Output
_____no_output_____
###Markdown
Word BoundariesConsider the sentence:*A thorough examination of the movie shows Thor was a thorn in the side of the villains, both then and now. thor.*What happens if you try to parse out all `Thor` references? What happens if you want to remove `A` or `a`, or `the` to clean up the text?
###Code
text = "A thorough examination of the movie shows Thor was a thorn in the side of the villains, both then and now. thor."
# re
text = "A thorough examination of the movie shows Thor was a thorn in the side of the villains, both then and now. thor."
text = re.sub(r'(a|A)', '', text)
text
text = "A thorough examination of the movie shows Thor was a thorn in the side of the villains, both then and now. thor."
re.findall(r'\b(thor|Thor)\b', text) # notice the use of the r string prefix!
# pandas
dickens_text_df["results"] = dickens_text_df["line"].str.findall(r'\bthe\b', flags=re.IGNORECASE)
dickens_text_df.head(5)
###Output
_____no_output_____
###Markdown
Removing Stopwords Using Regex
###Code
# re
text = re.sub('(the|The)', '', text, flags=re.IGNORECASE)
text
# pandas
dickens_text_df["results"] = dickens_text_df["line"].str.replace(r'\bthe\b', '', case=False)
dickens_text_df.head()
###Output
_____no_output_____
###Markdown
Exercises1. One of the main characters in A Tale of Two Cities is `Sydney Carton`. How many times is the word `Carton` used?2. How many times does the word `the` appear in the novel?3. How would you find and replace the stopword `the` in `Tale of Two Cities` using regex?4. What percentage of lines in Dickens' text contain adverbs? For now, you can classify an adverb as a word that ends in `ly`.5. How many times does Charles Dickens use the pattern `WORD, WORD, and WORD` in this novel (for example `red, bluff, and free`)? Homework 1 (Due Monday March 23rd, 2020 at 11:59pm PST)Every day late is -10%.You are a business analyst working for a major US toy retailer:* A manager in the marketing department wants to find out the most frequently used words in positive reviews (five stars) and negative reviews (one star) in order to determine what occasion the toys are purchased for (Christmas, birthdays, and anniversaries). He would like your opinion on **which gift occasions (Christmas, birthdays, or anniversaries) tend to have the most positive reviews** to focus marketing budget on those days.* One of your product managers suspects that **toys purchased for male recipients (husbands, sons, etc.)** tend to be much more likely to be reviewed poorly. She would like to see some data points confirming or rejecting her hypothesis. * Use **regular expressions to parse out all references to recipients and gift occasions**, and account for the possibility that people may spell words "son" / "children" / "Christmas" as both singular and plural, upper or lower-cased.* Explain what some of the pitfalls/limitations are of using only a word count analysis to make these inferences. What additional research/steps would you need to do to verify your conclusions?Perform the same word count analysis using the reviews received from Amazon to answer your marketing manager's question. They are stored in two files, (`poor_amazon_toy_reviews.txt`) and (`good-amazon-toy-reviews.txt`). **Provide a few sentences with your findings and business recommendations.** Make any assumptions you'd like to - this is a fictitious company after all. I just want you to get into the habit of "finishing" your analysis: to avoid delivering technical numbers to a non-technical manager.**Submit everything as a new notebook and Slack direct message the HW to me (Yu Chen) as an attachment.**`NOTE`: Name the notebook `lastname_firstname_HW1.ipynb`. Next Week (March 24th)* `scikit-learn`, `nltk`, and `scipy` libraries for NLP (make sure to install each of these libraries)* encoding schemes* Bayes Rule, Naive Bayes, probability theory for text classification* Similarity/distance measures* N-Grams* Tokenization, lemmatization, stemming* Basic word vectors: Count, TF-IDF, One-Hot encoding* Dimensionality Reduction Check for Understanding1. Which of the encodings below will be able to encode this text: `사업`2. **True or False**: the word `dog` will have the same binary representation regardless of whether it is `ASCII`, `latin1`, or `utf8`.3. According to the Zipf Law approximation, approximately what frequency (expressed as a percent) would the 3rd most popular word in a generic piece of text appear with?4. **True or False**: what is considered a stopword changes depending on the business context and dataset you are working with. If true, provide an example. If false, explain why it is false.
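As a starting point for the exercises above, word-boundary patterns cover the counting questions, and a small grouped pattern captures the `WORD, WORD, and WORD` construction; a rough sketch (the variable names below, such as `novel`, are just for illustration and are not used elsewhere in this notebook):
```Python
# Sketch: starting points for the exercises, treating the whole novel as one string
import re

novel = open("tale-of-two-cities.txt", "r", encoding="utf8").read()
novel_lines = novel.splitlines()

carton_count = len(re.findall(r"\bCarton\b", novel))                    # exercise 1
the_count = len(re.findall(r"\bthe\b", novel, flags=re.IGNORECASE))     # exercise 2
adverb_share = sum(bool(re.search(r"\b[A-Za-z]+ly\b", ln))
                   for ln in novel_lines) / len(novel_lines)            # share of lines with an "-ly" word
triples = re.findall(r"\b\w+, \w+, and \w+\b", novel)                   # pattern WORD, WORD, and WORD

print(carton_count, the_count, adverb_share, len(triples))
```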
###Code
good_reviews = open('good_amazon_toy_reviews.txt', "r", encoding='utf8').read()
poor_reviews = open('poor_amazon_toy_reviews.txt', "r", encoding='utf8').read()
good_reviews_df = pd.read_csv('good_amazon_toy_reviews.txt', sep="\n", header=None, names=["line"])
poor_reviews_df = pd.read_csv('poor_amazon_toy_reviews.txt', sep="\n", header=None, names=["line"])
word_list = re.findall(r'\b[A-Za-z]+\b', good_reviews+poor_reviews) # [A-Za-z] rather than [A-z], which also matches the characters between 'Z' and 'a'
word_list = [word.lower() for word in word_list]
word_dict = Counter(word_list)
# sorted(word_dict.items(), key=lambda x: x[1], reverse=True)
from fuzzywuzzy import fuzz
occasion_re = {}
christmas_word = [word for word in word_dict.keys() if fuzz.ratio(r'christmas',word) >= 80] # fuzzy-match vocabulary entries that look like "christmas" (similarity ratio >= 80)
christmas_word
occasion_re['christmas'] = r'\b(xmas|' + '|'.join([word for word in christmas_word if word not in ['christ','christians','charisma']]) + r')\b'
birthday_word = [word for word in word_dict.keys() if fuzz.ratio(r'birthday',word) >= 80]
birthday_word
occasion_re['birthday'] = r'\b(' + '|'.join([word for word in birthday_word if word not in ['birthed']]) + r')\b'
anniversary_word = [word for word in word_dict.keys() if fuzz.ratio(r'anniversary',word) >= 80]
anniversary_word
occasion_re['anniversary']= r'\b(' + '|'.join([word for word in anniversary_word if word not in ['adversary']]) + r')\b'
occasion_re['valentine'] = r'\b(' + '|'.join([word for word in word_dict.keys() if fuzz.ratio(r'valentine',word) >= 80]) + r')\b'
occasion_re['thanksgiving'] = r'\b(' + '|'.join([word for word in word_dict.keys() if fuzz.ratio(r'thanksgiving',word) >= 90]) + r')\b'
occasion_re['halloween'] = r'\b(' + '|'.join([word for word in word_dict.keys() if fuzz.ratio(r'halloween',word) >= 85]) + r')\b'
occasion_re['easter'] = r'\b(' + '|'.join([word for word in word_dict.keys() if fuzz.ratio(r'easter',word) >= 95]) + r')\b'
occasion_re
occasion_df = pd.DataFrame(columns=['good_word','good_review','poor_word','poor_review']
, index=['christmas','birthday','anniversary','valentine','thanksgiving','halloween','easter','total'])
occasion_df = occasion_df.fillna(0)
for line in good_reviews_df['line']:
for occasion in occasion_df.index:
if occasion == 'total':
occasion_df.loc[occasion,'good_word'] += len(re.findall(r'\b[A-Za-z]+\b',line)) # count all words in the review ([A-Za-z], not [A-z])
occasion_df.loc[occasion,'good_review'] += 1
else:
if re.findall(occasion_re[occasion],line,flags=re.IGNORECASE):
occasion_df.loc[occasion,'good_word'] += len(re.findall(occasion_re[occasion],line,flags=re.IGNORECASE))
occasion_df.loc[occasion,'good_review'] += 1
for line in poor_reviews_df['line']:
for occasion in occasion_df.index:
if occasion == 'total':
occasion_df.loc[occasion,'poor_word'] += len(re.findall(r'\b[A-Za-z]+\b',line)) # count all words in the review ([A-Za-z], not [A-z])
occasion_df.loc[occasion,'poor_review'] += 1
else:
if re.findall(occasion_re[occasion],line,flags=re.IGNORECASE):
occasion_df.loc[occasion,'poor_word'] += len(re.findall(occasion_re[occasion],line,flags=re.IGNORECASE))
occasion_df.loc[occasion,'poor_review'] += 1
occasion_df
male_word = ['son','sons']
male_word.extend([word for word in word_dict.keys() if fuzz.ratio(r'husband',word) >= 85])
male_word.extend(['father','fathers'])
male_word.extend([word for word in word_dict.keys() if fuzz.ratio(r'dad',word) >= 85])
male_word.extend([word for word in word_dict.keys() if fuzz.ratio(r'granda',word) >= 85])
male_word.remove('dead')
male_word.remove('grand')
male_word.remove('grandma')
male_word.remove('grandman')
male_word.remove('grandmas')
male_word_re = r'\b(' + '|'.join([word for word in male_word]) + r')\b'
male_word_re
male_df = pd.DataFrame(columns=['good_review','poor_review'],index=['male','total'])
male_df = male_df.fillna(0)
for line in good_reviews_df['line']:
for i in male_df.index:
if i == 'total':
male_df.loc[i,'good_review'] += 1
else:
if re.findall(male_word_re,line,flags=re.IGNORECASE):
male_df.loc[i,'good_review'] += 1
for line in poor_reviews_df['line']:
for i in male_df.index:
if i == 'total':
male_df.loc[i,'poor_review'] += 1
else:
if re.findall(male_word_re,line,flags=re.IGNORECASE):
male_df.loc[i,'poor_review'] += 1
male_df
def make_sorted_dict(word_list):
word_dict = {k:round(v/len(word_list),4) for k,v in Counter(word_list).items()}
return dict(sorted(word_dict.items(), key=lambda x: x[1], reverse=True))
son_word = re.findall(r'\bsons?\b', good_reviews+poor_reviews, flags=re.IGNORECASE)
make_sorted_dict(son_word)
child_word = re.findall(r'\bchild(?:ren)?\b', good_reviews+poor_reviews, flags=re.IGNORECASE)
make_sorted_dict(child_word)
christmas_word = Counter(re.findall(r'\bchristmas(?:es)?\b', good_reviews+poor_reviews, flags=re.IGNORECASE))
make_sorted_dict(christmas_word)
###Output
_____no_output_____
###Markdown
Table of Contents0.0.1 Installing Required Libraries0.0.1.1 Getting Familiar With Jupyter Notebooks1 Week 1: Basic Python Operations for Working with Text2 The Scale of Data in the 21st Century2.1 Overview2.1.0.1 Text Analytics2.1.0.2 Data Engineering2.1.0.3 Statistics / Machine Learning2.2 Loading Text into Memory2.2.0.1 Opening Files2.2.1 An Aside: List Comprehension2.2.2 Visualizing Summary Metrics Using Matplotlib2.2.3 First Method: Create a Dictionary to Store Word Count2.2.4 Using Python's Built-In Counter2.2.5 In-Class Question2.3 Zipf's Law2.3.1 General Definition2.3.2 Approximation in NLP3 Regular Expressions3.0.1 Match the first time a capital letter appears in the tweet3.0.2 Match all capital letters that appears in the tweet3.0.3 Match all words that are at least 3 characters long3.0.4 Word Boundaries3.0.5 Removing Stopwords Using Regex3.0.5.1 Exercises4 Homework 1 (Due Monday March 23rd, 2020 at 11:59pm PST)4.1 Next Week (March 24th)4.1.1 Check for Understanding Installing Required Libraries
###Code
!pip3 install matplotlib
!pip3 install pandas
###Output
_____no_output_____
###Markdown
Getting Familiar With Jupyter Notebooks Jupyter keyboard shortcuts:- Press `Esc` to go into **Command Mode**. Your cell should turn from green highlights to blue highlights.- In **Command Mode**, press `M` to go into `Markdown` mode. This turns your cell into Markdown text so you can type text.- Press `Y` to go into `Code` mode. This then allows you to begin typing Python code.- Press `A` to insert a cell above your current cell.- Press `B` to insert a cell below your current cell.- Press `D` twice to delete your current cell.- Press `Shift` + `Enter` to save your cell. Week 1: Basic Python Operations for Working with Text The Scale of Data in the 21st Century ASCII table converting numbers to characters.(Wikipedia) OverviewBy the end of this week, you should be able to perform the following operations: Text Analytics- **load a text file into memory** using Python's built-in streaming libraries- **visualize word count and line length distributions** as histograms using Matplotlib Data Engineering- **read strings from a text input/output stream** using `readline()` and `readlines()`- **use both native Python dictionaries and `collections.Counter` objects** to produce word counts for a text corpus- perform basic search/replace operations using **regular expressions**- encode/decode text from bytes to support internationalization and digital-native characters (such as **emojis**). Statistics / Machine Learning- **create a word transition matrix using Numpy arrays**, which can be used for probabilistic inference and text generation (we will cover Week 2) Loading Text into MemoryThere are a variety of ways to hold data within memory. For text analytics and natural language processing purposes, we'll be most concerned with the following:- **list**- **set**- **dictionary**- **tuple**- **Numpy array**Imagine that we would like to find the most commonly used words in ***A Tale of Two Cities***, by the famed English novelist Charles Dickens, stored in a text file called **`tale-of-two-cities.txt`**, in the same directory as this Jupyter notebook. Later on, we'll use 3rd-party libraries to automate much of the processing, but for now, we'll explore Python's built-in functions for text processing. Opening Files The **`open()`** function takes *two* parameters; **filename**, and **mode**. In our case, `mode` is set to `r` for **read**, since we plan to read the file's contents, as opposed to `w` (write), or `a` (append).
###Code
# Open Tale of Two Cities
text_file = open("tale-of-two-cities.txt", "r")
print(text_file)
###Output
<_io.TextIOWrapper name='tale-of-two-cities.txt' mode='r' encoding='UTF-8'>
###Markdown
Typically, a text character is **1 byte** in size. One byte is equal to **8 bits**. This means conceptually, the size of a string should be $N$ bytes, where $N$ is the number of characters. However, you'll see that in Python, the size of a string is larger:
###Code
import sys
EMPTY_STRING = ""
ONE_CHAR_STRING = "a"
TWO_CHAR_STRING = "ab"
print(f"The size of EMPTY_STRING is {sys.getsizeof(EMPTY_STRING)} bytes.")
print(f"The size of ONE_CHAR_STRING is {sys.getsizeof(ONE_CHAR_STRING)} bytes.")
print(f"The size of TWO_CHAR_STRING is {sys.getsizeof(TWO_CHAR_STRING)} bytes.")
###Output
The size of EMPTY_STRING is 49 bytes.
The size of ONE_CHAR_STRING is 50 bytes.
The size of TWO_CHAR_STRING is 51 bytes.
###Markdown
The **`open()`** function returns a **`TextIOWrapper`** object from Python's `io` module, which handles common input/output streaming operations. A **stream** is a potentially infinite sequence of elements (in our case, characters) arriving over time. You'll use streams to model data that is **unbounded** (the volume, length, and arrival rate of the data are not known in advance). A stream keeps a pointer to its current position within the sequence. This object has an extremely helpful **`readline()`** method that reads from the text file until it encounters an **`EOF`** marker or a newline symbol.
###Code
text_file.readline()
###Output
_____no_output_____
###Markdown
You can pass in a parameter to **`readline()`** to control how many bytes of input stream data you'll receive. For instance, **`readline(2)`** returns at most 2 bytes of text input data. You might use this, for instance, if your Python application is reading not from a flat text file, but from a socket, which supplies a continuous stream of data with fixed length (ie., the messages all have the same number of characters).**In-Class Question**: *Assume you just opened the text file with **`open()`**. What output is returned when **`text_file.readline(5)`** is called the **second** time?*- **A)** The entire first line of the novel- **B)** The first 5 characters of the second line- **C)** The entire second line of the novel- **D)** The first 5 characters of the first line- **E)** The 6th-10th characters of the first line
###Code
text_file.seek(0) #reset the stream position to the start of the text file
for i in range(2): # repeat the below line twice
print(f"Iteration {i + 1}: {text_file.readline(5)}")
###Output
Iteration 1: IT
Iteration 2: WAS t
###Markdown
Each time that you call **`readline()`**, a position marker within **`TextIOWrapper`** is moved forward:We typically will use **`readlines()`** instead to read text files line by line. This returns a Python **list**:
###Code
text_file.seek(0) # reset the stream position to the start of the file
lines = text_file.readlines() # read all the lines and return a list of strings
###Output
_____no_output_____
###Markdown
We see that there are **12870** lines of text in the novel.
###Code
print(f"There are {len(lines)} lines in the novel.")
total_num_chars = 0
for line in lines: # iterate through each line
total_num_chars += len(line) # add the number of characters in a line to the total count of characters
avg_chars = round(total_num_chars / len(lines),1) # divide total character count by number of lines to get average
print(f"On average, each line has {avg_chars} characters.")
import matplotlib.pyplot as plt # we are importing the pyplot module from matplotlib, and naming it as plt
###Output
_____no_output_____
###Markdown
An Aside: List ComprehensionSometimes, we need to iterate through a list and perform some sort of operation (sum all the elements, or remove a certain character). The traditional way to do this is using a for loop:```Pythonlengths = [] declare an empty listfor line in lines: iterate through each line lengths.append(len(line)) add the length of each line to the list```A slightly less verbose way, called **list comprehension**, to write this is```Pythonlengths = [len(line) for line in lines]```List comprehension is **typically slightly faster**, since it avoids the additional `append()` call for each iteration of the for loop. See this example from StackOverflow:```Pythondef slower(): using traditional iteration result = [] for elem in some_iterable: result.append(elem) return result``````Pythondef faster(): using list comprehension return [elem for elem in some_iterable]```Within the Python REPL **(read-eval-print-loop)**:```Python>>> some_iterable = range(1000)>>> import timeit>>> timeit.timeit('f()', 'from __main__ import slower as f', number=10000)1.4456570148468018>>> timeit.timeit('f()', 'from __main__ import faster as f', number=10000)0.49323201179504395``` Visualizing Summary Metrics Using Matplotlib
###Code
NUM_BINS = 30 # increase this number to make the visualization more granular
plt.rcParams["figure.figsize"] = (15,6)
plt.hist([len(line) for line in lines], bins=NUM_BINS)
plt.title("Distribution of Line Lengths in Tale of Two Cities") # give the plot a title
plt.xlabel("Number of Characters in Line") # label the X axis
plt.ylabel("Count of Lines") # label the Y axis
plt.show()
###Output
_____no_output_____
###Markdown
What if now we want to visualize how many times each word appears in the entire novel (for now, we won't worry about **stemming / lemmatization** and other preprocessing steps)? First Method: Create a Dictionary to Store Word CountDictionaries in Python have **keys** and **values**. The keys must be unique (no duplicate keys). They can be accessed via the **`keys()`** and **`values()`** methods of a dictionary object.
###Code
words = [] # create a list of all words
word_count = {} # create a dictionary to store word counts
for line in lines: # for each line in the novel
for word in line.split(" "): # for each word in the line
words.append(word) # add the word to the list of words
if word not in word_count.keys(): # if the word has not been seen before, add it to the dictionary with initial count of 1
word_count[word] = 1
else:
word_count[word] += 1 # if the word has been seen before, increment its count by 1
print(f"There's an estimated {len(words)} words in the novel.")
print(f"There's {len(word_count.keys())} unique words in the novel.")
###Output
_____no_output_____
###Markdown
Let's use Python **`sets`** to check that our dictionary's keys are unique. Remember that a set is a collection of **unique elements**, so calling **`set(words)`** will return only the unique words in our text file.
###Code
assert len(word_count.keys()) == len(set(words)), "This error message will be printed if the assertion to the left is not true."
###Output
_____no_output_____
###Markdown
Using Python's Built-In CounterSince the task of building a count using a dictionary is a common operation, Python provides a built-in object called `Counter` that we can use:
###Code
from collections import Counter
def count_words(lines, delimiter=" "):
words = Counter() # instantiate a Counter object called words
for line in lines:
for word in line.split(delimiter):
words[word] += 1 # increment count for word
return words
###Output
_____no_output_____
###Markdown
A core principle of software engineering and programming is **DRY**: Don't Repeat Yourself. Since we are likely going to be making many histograms throughout this course, it's best that we create a reusable function.
###Code
def make_histogram(values, title=None,xlabel=None,ylabel=None, bins=30, x_size=15, y_size=6):
plt.rcParams["figure.figsize"] = (x_size,y_size)
plt.hist(values, bins=bins)
if title:
plt.title(title) # give the plot a title
if xlabel:
plt.xlabel(xlabel) # label the X axis
if ylabel:
plt.ylabel(ylabel) # label the Y axis
plt.show()
make_histogram(word_count.values(),
title="Distribution of Word Count",
xlabel="Number of Times Word Appears",
ylabel="Number of Unique Words")
###Output
_____no_output_____
###Markdown
In-Class Question- Why does this distribution look the way it does? - What additional steps could be taken to make the results more meaningful?
###Code
import pandas as pd # output the results to a dataframe
word_count_df = pd.DataFrame(columns=["word", "frequency"]) # create a dataframe with two columns, word and frequency
word_count_df["word"] = list(word_count.keys())
word_count_df["frequency"] = list(word_count.values())
word_count_df.to_csv("dickens_word_count.csv") # saves to an outputs folder - if you don't have one, Python will throw an error
###Output
_____no_output_____
###Markdown
Zipf's Law General DefinitionZipf's Law states that for `N` words, the `k`th most frequent word will appear with a normalized frequency equal to $$f(k; s, N) = \frac{1/k^{s}}{\sum_{n=1}^{N} 1/n^{s}}$$ The parameter $s$ is an exponent that defines the behavior of the distribution. Traditionally, in natural language, $s = 1$. Stefan Evert, http://zipfr.r-forge.r-project.org/materials/LREC2018/tutorial_lrec2018.handout.pdf Approximation in NLPIf $t_1$ is the most common word in a collection of text, and $t_2$ is the next most common word, then the frequency of the $i$th most common word is proportional to $\frac{1}{i}$. The approximation we'll use specifically for natural languages is$$f(t_i) = \frac{0.1}{i^\alpha}$$with $\alpha = 1$, to represent the frequency of a word in a body of text.In human language, there are **a few high-frequency words and many low-frequency words**. What does this mean in terms of machine learning / data modelling?* In many cases, the high frequency words do not carry much value in terms of predictive power or signal. These are frequently **stopwords** that must be removed / otherwise feature-engineered. Regular Expressions
###Code
# get the top stopwords
word_count_df.sort_values(by=["frequency"], ascending=False).head(5)
import re
SAMPLE_TWEET = '''
#wolfram Alpha SUCKS! Even for researchers the information provided is less than you can get from
#google or #wikipedia, totally useless! Avoid Wolfram at all costs, #ScrewWolframProducts"
'''
# create a dataframe version of Dickens' novel
dickens_text_df = pd.DataFrame( open("tale-of-two-cities.txt", "r"), columns=["line"])
dickens_text_df["line"] = dickens_text_df["line"].str.replace("\n", "")
###Output
_____no_output_____
###Markdown
Match the first time a capital letter appears in the tweet
###Code
match = re.search("[A-Z]", SAMPLE_TWEET)
match.group()
###Output
_____no_output_____
###Markdown
Match all capital letters that appears in the tweet
###Code
# re
re.findall("[A-Z]", SAMPLE_TWEET)
# pandas
dickens_text_df["results"] = dickens_text_df["line"].str.extract(r'([A-Z])')
###Output
_____no_output_____
###Markdown
Match all words that are at least 3 characters long
###Code
# re
re.findall("[a-zA-Z]{3,}", SAMPLE_TWEET)[:5] # show only the first 5
# pandas
dickens_text_df["results"] = dickens_text_df["line"].str.extract(r'([a-zA-Z]{3,})')
dickens_text_df["results"] = dickens_text_df["line"].str.findall(r'([a-zA-Z]{3,})')
dickens_text_df.head(5)
###Output
_____no_output_____
###Markdown
Word BoundariesConsider the sentence:*A thorough examination of the movie shows Thor was a thorn in the side of the villains, both then and now. thor.*What happens if you try to parse out all `Thor` references? What happens if you want to remove `A` or `a`, or `the` to clean up the text?
###Code
text = "A thorough examination of the movie shows Thor was a thorn in the side of the villains, both then and now. thor."
# re
text = "A thorough examination of the movie shows Thor was a thorn in the side of the villains, both then and now. thor."
text = re.sub(r'(a|A)', '', text)
text
text = "A thorough examination of the movie shows Thor was a thorn in the side of the villains, both then and now. thor."
re.findall(r'\b(thor|Thor)\b', text) # notice the use of the r string prefix!
# pandas
dickens_text_df["results"] = dickens_text_df["line"].str.findall(r'\bthe\b', case=False)
dickens_text_df.head(5)
###Output
_____no_output_____
###Markdown
Removing Stopwords Using Regex
###Code
# re
text = re.sub('(the|The)', '', text, flags=re.IGNORECASE)
text
# pandas
dickens_text_df["results"] = dickens_text_df["line"].str.replace(r'\bthe\b', '', case=False)
dickens_text_df.head()
###Output
_____no_output_____ |
issa/assignment.ipynb | ###Markdown
Cluster Assignments
###Code
def cluster_assignment(cluster_res, data_name='user_id'):
"""
Converts the dictionary containing user_id and user_cluster assignment
to a pandas DataFrame.
Parameters
----------
cluster_res : dictionary
Result from clustering function with keys being the
user_id and values their cluster membership
data_name : string
Column name of the user or item
Returns
-------
result : pandas DataFrame
Two columns representing the user/item and their
corresponding cluster assignments
"""
import pandas as pd
if data_name == 'user_id':
cluster_name = 'ucluster'
else:
cluster_name = 'icluster'
c_assignment = pd.DataFrame(list(cluster_res.items()),
columns=[data_name, cluster_name])
c_assignment.set_index(data_name, inplace=True)
return c_assignment
uc_assignment = cluster_assignment(y_u, data_name='user_id')
ic_assignment = cluster_assignment(y_i, data_name='item_id')
ic_assignment
###Output
_____no_output_____
###Markdown
Unit Test
###Code
import unittest
import pandas as pd
from pandas._testing import assert_frame_equal
class Test_cluster_assign(unittest.TestCase):
def test_cluster_assignment(self):
dict_cluster_i = {0: 2, 1: 1, 2: 1, 3: 2, 4: 1, 5: 1, 6: 2, 7: 1, 8: 3, 9: 3}
dict_cluster_u = {0: 1, 1: 1, 2: 1, 3: 2, 4: 3, 5: 2, 6: 2, 7: 3, 8: 1, 9: 2}
df_ex_u = pd.DataFrame(list(dict_cluster_u.items()), columns=['user_id', 'ucluster'])
df_ex_u.set_index('user_id', inplace=True)
df_ex_i = pd.DataFrame(list(dict_cluster_i.items()), columns=['item_id', 'icluster'])
df_ex_i.set_index('item_id', inplace=True)
df_assignment_u = cluster_assignment(dict_cluster_u, data_name='user_id')
df_assignment_i = cluster_assignment(dict_cluster_i, data_name='item_id')
assert_frame_equal(df_ex_u, df_assignment_u)
assert_frame_equal(df_ex_i, df_assignment_i)
unittest.main(argv=[''], verbosity=2, exit=False)
###Output
test_cluster_assignment (__main__.Test_cluster_assign) ... ok
----------------------------------------------------------------------
Ran 1 test in 0.010s
OK
|
day5/task2.ipynb | ###Markdown
Assignment 2: Improving the discretized solutionIn this exercise you will make several improvements to the cake eating code in lecture 7, part 2, to make the solution even more accurate. Task 1. Base solutionCopy the version of the cake eating code (with discretized choices) from the lecture slides and modify it if needed to ensure:- There are two separate grids for state $ W_t $ and choice $ c_t $. - The grids for states and choices are initialized at the time the object is created, and do not change when the Bellman equation is solved. Debug the code and produce the convergence plots as in the lecture.
###Code
# write your code here
# come up with a test of your own
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from scipy import interpolate
class cake_discretized():
def __init__(self,beta=.9, Wbar=10, ngrid_state=50, ngrid_choice=100):
self.beta = beta # Discount factor
self.Wbar = Wbar # Upper bound on cake size
self.ngrid_state = ngrid_state # Number of grid points for the size of cake
self.ngrid_choice = ngrid_choice # Number of grid points for how much of cake to consume
self.epsilon = np.finfo(float).eps # smallest positive float number
self.grid_state = np.linspace(self.epsilon,Wbar,ngrid_state) # grid for state space
self.grid_choice = np.linspace(self.epsilon,Wbar,ngrid_choice) # grid for decision space
self.interpolation = 'linear' # interpolation type for Bellman equation
self.choice_bound = False # impose the state bound on the trial values of choice or not
def bellman(self,V0):
#Bellman operator, V0 is one-dim vector of values on grid
matW = np.repeat(np.reshape(self.grid_state,(1,-1)),self.ngrid_choice,0) # matrix with state space repeated in rows
c = np.repeat(np.reshape(self.grid_choice,(-1,1)),self.ngrid_state,1) # decisions grid repeated by columns
if self.choice_bound: c *= np.reshape(self.grid_state,(1,-1)) /self.Wbar # normalize max choice to current wealth
matWpr = matW-c # size of cake in the next period
matWpr[matWpr==0] = self.epsilon # add small quantity to avoid log(0)
mask = matWpr>0 # mask off infeasible choices
# interpolation kind
if self.interpolation=='linear':
interfunc = interpolate.interp1d(self.grid_state,V0,kind='slinear',fill_value="extrapolate")
elif self.interpolation=='quadratic':
interfunc = interpolate.interp1d(self.grid_state,V0,kind='quadratic',fill_value="extrapolate")
elif self.interpolation=='cubic':
interfunc = interpolate.interp1d(self.grid_state,V0,kind='cubic',fill_value="extrapolate")
elif self.interpolation=='polynomial':
p = np.polynomial.polynomial.polyfit(self.grid_state,V0,self.ngrid_state-1)
interfunc = lambda x: np.polynomial.polynomial.polyval(x,p)
else:
print('Unknown interpolation type')
return None
# interpolate the next period value function at the next period cake sizes
matV1 = interfunc(matWpr)
preV1 = np.full((self.ngrid_choice,self.ngrid_state),-np.inf) # init V with -inf
preV1[mask] = np.log(c[mask]) + self.beta*matV1[mask] # maximand of the Bellman equation
V1 = np.amax(preV1,0,keepdims=False) # maximum in every column
c1 = c[np.argmax(preV1,axis=0),range(self.ngrid_state)] # choose the max attaining levels of c
return V1, c1
def solve(self, maxiter=1000, tol=1e-4, callback=None, interpolation='linear', choice_bound = False):
'''Solves the model using successive approximations'''
self.interpolation = interpolation # update solver settings
self.choice_bound = choice_bound
V0=np.log(self.grid_state) # on first iteration assume consuming everything
for iter in range(maxiter):
V1,c1=self.bellman(V0)
if callback: callback(iter,self.grid_state,V1,c1) # callback for making plots
if np.all(abs(V1-V0) < tol):
break
V0=V1
else: # executed only if the loop ran through maxiter iterations without breaking (no convergence)
print('No convergence: maximum number of iterations achieved!')
return V1,c1
m = cake_discretized(beta=0.92,Wbar=10,ngrid_state=50,ngrid_choice=50)
V,c = m.solve()
###Output
_____no_output_____
###Markdown
Task 2. Accuracy measureModify the function that compares the numerical solution to the analytical one so that it outputs a measure of accuracy equal to the average of squared deviations over a fixed dense grid. Also provide an argument to disable the plot for Task 5.
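Written out, with a dense evaluation grid $x_1,\dots,x_M$, the analytic value function $V^{*}$ and the interpolated numerical solution $\hat{V}$, the measure implemented in the cell below is the mean squared deviation
$$\text{accuracy} = \frac{1}{M}\sum_{m=1}^{M}\left(V^{*}(x_m)-\hat{V}(x_m)\right)^{2},$$
so smaller values indicate a more accurate solution.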
###Code
# write your code here
def accuracy(model,V=None,policy=None,title='',npoints=1000,plot=True):
'''Check the cake eating numerical solution against the analytic solution'''
# analytic solution
aV = lambda w: np.log(w)/(1 - model.beta) + np.log(1 - model.beta)/(1 - model.beta) + model.beta* np.log(model.beta)/((1 - model.beta)**2)
aP = lambda w: (1 - model.beta) * w
if 'cake_ongrid' in str(type(model)):
grid = model.grid
else:
grid = model.grid_state
# solve if needed
if V is None or policy is None:
V,policy = model.solve()
# accuracy measure
xd = np.linspace(grid[0],grid[-1],npoints) # dense grid for accuracy measure
ac = ((aV(xd)-np.interp(xd,grid,V))**2).mean()
# make plots
if plot:
fig1, (ax1,ax2) = plt.subplots(1,2,figsize=(14,8))
ax1.grid(b=True, which='both', color='0.65', linestyle='-')
ax2.grid(b=True, which='both', color='0.65', linestyle='-')
ax1.set_title('Value functions')
ax2.set_title('Policy functions')
ax1.set_xlabel('Cake size, W')
ax2.set_xlabel('Cake size, W')
ax1.set_ylabel('Value function')
ax2.set_ylabel('Policy function')
ax1.plot(grid[1:],V[1:],linewidth=1.5,label='Numerical')
ax1.plot(grid[1:],aV(grid[1:]),linewidth=1.5,label='Analytical')
ax2.plot(grid,policy,linewidth=1.5,label='Numerical')
ax2.plot(grid,aP(grid),linewidth=1.5,label='Analytical')
ax1.legend()
ax2.legend()
fig1.suptitle(title)
plt.show()
return ac
ac0=accuracy(m,V=V,policy=c)
print('Accuracy of the basic solution is',ac0)
###Output
_____no_output_____
###Markdown
Task 3. Bounding the choice grid. Modify the Bellman equation code to make the choice discretization grid dependent on the point of the state space where it is applied, namely make the grid with the same number of points going from $ 0 $ to $ \vec{W}_j $ when solving at the point $ \vec{W}_j $, instead of from $ 0 $ to $ \bar{W} $. Repeat the accuracy check in Task 2 with the new specification.
###Code
# write your code here
V,c = m.solve(choice_bound=True)
ac1=accuracy(m,V=V,policy=c)
print('Accuracy of the method with bound on choices is',ac1,'instead of',ac0)
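# Quick illustration of what choice_bound=True does (an assumption based on the bellman() code
# above, where the trial choices are scaled by grid_state/Wbar): at a state point W_j the
# consumption grid keeps the same number of points but its upper end becomes W_j instead of Wbar.
Wj = m.grid_state[10]                       # an arbitrary state point
bounded_grid = m.grid_choice * Wj / m.Wbar  # the grid effectively used at W_j
print('upper end of the bounded choice grid:', bounded_grid[-1], ' state point:', Wj)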
###Output
_____no_output_____
###Markdown
Task 4. Improving the interpolation method. We could utilize more advanced interpolation schemes for the value function itself. Replace linear interpolation of the value function with quadratic and cubic splines, and with approximating polynomials. Compare the accuracy of the new versions to the original solution and to the solution with the improvement from Task 3. What is the most accurate solution algorithm?
###Code
# write your code here
for knd in 'linear','quadratic','cubic','polynomial':
V,c = m.solve(choice_bound=True,interpolation=knd)
ac=accuracy(m,V=V,policy=c)
print('Accuracy with '+knd+' interpolation is',ac)
###Output
_____no_output_____
###Markdown
Task 5. Convergence to the true solution. Make a plot of the accuracy measure as a function of the number of grid points (assuming the number of grid points on the choice grid is 2 times that of the states) for each of the four interpolation schemes, with and without the bounding of the choices. On separate axes plot the same curve under the assumption that the number of grid points on the choice grid is 10 times larger than the state grid, for each of the 4 interpolation schemes, with and without the bounding of the choices. What is the best way to improve the accuracy of the solution?
###Code
# write your code here
fig1, (ax1,ax2) = plt.subplots(1,2,figsize=(14,8))
fig1.suptitle('Same grids for states and choices')
ax1.set_title('Basic solver')
ax2.set_title('With bounding of the choices')
ax1.set_xlabel('Number of grid points')
ax2.set_xlabel('Number of grid points')
ax1.set_ylabel('Accuracy')
ax2.set_ylabel('Accuracy')
grids = np.arange(50,551,100,dtype='int')
for knd in 'linear','quadratic','cubic','polynomial':
line = np.empty(grids.size)
for i in range(grids.size):
K = grids[i]
m = cake_discretized(beta=0.92,Wbar=10,ngrid_state=K,ngrid_choice=2*K)
V,c = m.solve(choice_bound=False,interpolation=knd)
line[i]=accuracy(m,V=V,policy=c,plot=False)
print('.',end='')
ax1.plot(grids,line,label=knd+' interpolation')
print('|',end='')
for i in range(grids.size):
K = grids[i]
m = cake_discretized(beta=0.92,Wbar=10,ngrid_state=K,ngrid_choice=2*K)
V,c = m.solve(choice_bound=True,interpolation=knd)
line[i]=accuracy(m,V=V,policy=c,plot=False)
print('.',end='')
ax2.plot(grids,line,label=knd+' interpolation')
print('|',end='')
ax1.legend()
ax2.legend()
plt.show()
###Output
_____no_output_____ |
data-wrangling/DS_Data_Munging.ipynb | ###Markdown
Data Munging: Relational Data. The simplest type of data we have seen might consist of a single table with some columns and some rows. This sort of data is easy to analyze, and we generally want to reduce our data to a single table before we start running machine learning algorithms. Yet real-world data doesn't necessarily fit into this paradigm: most of it is messy and complicated, doesn't fit well into a tabular format, and we will have to do some work to reduce this complexity. Additionally, in many cases we can reduce our memory cost by not keeping data in a single table, but instead in a set of data structures with defined relations between them. Here we will explore a bit of data and see how combining different data sets can help us generate useful features. First we need some data. We will make use of some data from Wikipedia, using the pandas `read_html` function to scrape a particular webpage. We will study the top 10 companies in the Fortune Global 500, which conveniently have [their own Wikipedia page](https://en.wikipedia.org/w/index.php?title=Fortune_Global_500&oldid=855890446). We will download the data in tabular form, but work with it as a list of dictionaries; this will let us get used to working with unstructured data.
###Code
import pandas as pd
import json
df = pd.read_html('https://en.wikipedia.org/w/index.php?title=Fortune_Global_500&oldid=855890446', header=0)[0]
fortune_500 = json.loads(df.to_json(orient="records"))
df
###Output
_____no_output_____
###Markdown
Let's look at the data.
###Code
fortune_500
###Output
_____no_output_____
###Markdown
This is a great start to our analysis; however, there really isn't that much information here, so we will need to bring in additional data sources to get any further understanding of these companies. The first question we might want to ask is how many employees it takes to generate that revenue, in other words, what is the revenue per employee? Luckily, we can use Wikipedia to get that data as well; we have scraped it manually (all from Wikipedia) and created the following dictionary.
###Code
other_data = [
{"name": "Walmart",
"employees": 2300000,
"year founded": 1962
},
{"name": "State Grid Corporation of China",
"employees": 927839,
"year founded": 2002},
{"name": "China Petrochemical Corporation",
"employees":358571,
"year founded": 1998
},
{"name": "China National Petroleum Corporation",
"employees": 1636532,
"year founded": 1988},
{"name": "Toyota Motor Corporation",
"employees": 364445,
"year founded": 1937},
{"name": "Volkswagen AG",
"employees": 642292,
"year founded": 1937},
{"name": "Royal Dutch Shell",
"employees": 92000,
"year founded": 1907},
{"name": "Berkshire Hathaway Inc.",
"employees":377000,
"year founded": 1839},
{"name": "Apple Inc.",
"employees": 123000,
"year founded": 1976},
{"name": "Exxon Mobile Corporation",
"employees": 69600,
"year founded": 1999},
{"name": "BP plc",
"employees": 74000,
"year founded": 1908}
]
###Output
_____no_output_____
###Markdown
Some companies have a slightly different name than in our original set, so we will keep a dictionary of mappings between the two. Notice we only include a mapping in the dictionary if there is a difference.
###Code
mapping = {
'Apple': 'Apple Inc.',
'BP': 'BP plc',
'Berkshire Hathaway': 'Berkshire Hathaway Inc.',
'China National Petroleum': 'China National Petroleum Corporation',
'Exxon Mobil': 'Exxon Mobile Corporation',
'Sinopec Group': 'China Petrochemical Corporation',
'State Grid': 'State Grid Corporation of China',
'Toyota Motor': 'Toyota Motor Corporation',
'Volkswagen': 'Volkswagen AG'
}
###Output
_____no_output_____
###Markdown
This data is one to one, meaning an element in one source aligns with exactly one element in the other source, so we should be able to put these together. However, the data isn't in a great form to be joined at the moment, for two reasons: (1) the names will not all align (we need to use our mapping), and (2) the `list` structure is not optimized for looking up elements. While for 10 elements the second reason won't really matter, for larger data sets such performance considerations are extremely important. We can turn this list of dictionaries into a dictionary of dictionaries, so we can quickly access each element of the data.
###Code
dict_data = {k["name"] : k for k in other_data}
dict_data
###Output
_____no_output_____
###Markdown
**Question:** If we had many entries in `other_data`, we could display a small piece by printing `other_data[:5]`. With dataframes we might use `df.head()`. Can you think of a way to print out a small piece of a dictionary? (One simple option is sketched in the code cell below.) Now we can compute the revenue per employee: we need to map the "Company" value in our original data to the "name" field of this other data, and we also need to use the mapping to ensure the records line up. In general we don't want to mutate our original data, so let's make a new list of dictionaries with this new feature (revenue per employee). In the course of doing this, we will need to handle converting values like `$500 Billion` to a number. Let's create a function to do this.
###Code
def convert_revenue(x):
return float(x.lstrip('$').rstrip('billion')) * 1e9
assert convert_revenue('$500 billion') == 500e9
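# Answering the question above: one simple way (a sketch, relying on the dict's insertion order)
# to look at a small piece of a dictionary is to slice its items iterator.
from itertools import islice
dict(islice(dict_data.items(), 3))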
###Output
_____no_output_____
###Markdown
Now we should be able to create a few functions to compute this revenue per employee and create a data list.
###Code
def rev_per_emp(company):
name = company[u'Company']
n_employees = dict_data[mapping.get(name, name)].get('employees')
company['rev per emp'] = convert_revenue(company[u'Revenue in USD'])/n_employees
return company
def compute_copy(d, func):
return func({k:v for k,v in d.items()})
data = list(map(lambda x : compute_copy(x, rev_per_emp), fortune_500))
###Output
_____no_output_____
###Markdown
Let's take a look at our new data, and also at the old data to ensure we didn't mutate anything.
###Code
data[:2]
fortune_500[:2]
###Output
_____no_output_____
###Markdown
Now we can sort these values. We first select the elements we care about and then sort that list.
###Code
rev_per_emp = sorted([(i[u'Company'], i['rev per emp']) for i in data],
key=lambda x : x[1],
reverse=True)
rev_per_emp
###Output
_____no_output_____
###Markdown
This results in a much different order. What does this tell us about the companies? Now let's pull in some other data (this is data science, more data is always better!). We can see that these companies are in a few different industries; let's find out which ones.
###Code
from collections import Counter
Counter(i[u'Industry'] for i in data)
###Output
_____no_output_____
###Markdown
One thing we might want to know is what sort of market share they have in the specific industry to which they belong. Let's look at the two industries that cover 6 of the top 10, `Automobiles` and `Petroleum`. We can select only those elements of our data to work with.
###Code
sub_data = [i for i in data if i[u'Industry'] in [u'Automobiles', u'Petroleum']]
sub_data
###Output
_____no_output_____
###Markdown
It might be the case that each particular category has a different relevant metric for market share. For example, for a car company we could look at total revenue or at cars produced. So for the automobile industry we will look at the percentage of total cars produced. We can again get this data from Wikipedia.
###Code
df_list = pd.read_html("https://en.wikipedia.org/w/index.php?title=Automotive_industry&oldid=875776152", header=0)
car_totals = json.loads(df_list[0].to_json(orient="records"))
car_by_man = json.loads(df_list[2].to_json(orient='records'))
car_totals[:2]
car_by_man[:2]
###Output
_____no_output_____
###Markdown
Now let's get only the groups we care about and divide by the total production, which we will take from the latest year.
###Code
total_prod = sorted((i[u"Year"], i[u'Production']) for i in car_totals)[-1][1]
total_prod
###Output
_____no_output_____
###Markdown
Now we can find the market share for each of the car companies and keep the results in a market share dictionary. We will again need to handle some slight name differences.
###Code
car_by_man_dict = {i[u'Group']:i[u'Vehicles'] for i in car_by_man}
market_share = {}
for name, orig_name in zip(['Toyota', 'Volkswagen Group'], ['Toyota', 'Volkswagen']):
market_share[orig_name] = car_by_man_dict[name]/ float(total_prod)
market_share
###Output
_____no_output_____
###Markdown
Now we can do the same for the Petroleum industry, but in this case let's compute the market share by revenue. On Wikipedia we can find a list of oil companies by revenue. Although it's not a complete list, it has enough companies that we don't expect those left off to contribute greatly to our analysis.
###Code
rev = pd.read_html("https://en.wikipedia.org/w/index.php?title=List_of_largest_oil_and_gas_companies_by_revenue&oldid=871711850", header=1)[0]
rev = rev.iloc[:, 1:3]
rev.columns = ['Company', 'Revenue']
rev = rev[~(rev['Company'] == 'Company name')]
oil_data = json.loads(rev.to_json(orient="records"))
oil_data[:2]
###Output
_____no_output_____
###Markdown
Now we can compute the totals and market share. Since the data here might be slightly different (perhaps older) than our original data, we will compute the market share of each company within this data set, then pull out the numbers we care about.
###Code
total = sum([float(i[u'Revenue'].rstrip('*')) for i in oil_data])
shares = {i[u'Company']:float(i[u'Revenue'].rstrip('*'))/total for i in oil_data}
print(total)
###Output
_____no_output_____
###Markdown
Now we can pull out the companies we care about in the petroleum industry.
###Code
petro_companies = [i[u'Company'] for i in data if i['Industry'] == u'Petroleum']
petro_companies
###Output
_____no_output_____
###Markdown
Let's check if these are all in our shares dictionary.
###Code
[(i, i in shares) for i in petro_companies]
###Output
_____no_output_____
###Markdown
Some of these companies are there directly, and looking through our dictionary, we can see the others are there under slightly different names.
###Code
shares.keys()
###Output
_____no_output_____
###Markdown
So let's make a fuzzy match. This will be a pretty simple one that tries to match words in a name and takes the candidate with the maximum number of matching words.
###Code
def fuzzy_match(word, s):
words = set(word.split(' '))
overlaps = [(k, len(v.intersection(words))) for k, v in s.items()]
return max(overlaps, key=lambda x : x[1])[0]
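# An alternative sketch (not used below): the standard library's difflib can do a similar fuzzy
# lookup based on character-level similarity rather than on shared words.
import difflib
def fuzzy_match_difflib(word, candidates, cutoff=0.4):
    matches = difflib.get_close_matches(word, candidates, n=1, cutoff=cutoff)
    return matches[0] if matches else None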
split_names = {i: set(i.split(' ')) for i in shares.keys()}
for i in petro_companies:
match = fuzzy_match(i, split_names)
print("matched {} to {}".format(i, match))
market_share[i] = shares[match]
market_share
###Output
_____no_output_____
###Markdown
By industry. We have some nice examples of data munging; now let's see an example of keeping data in a relational fashion. Let's say we want to add another feature which is the growth of each industry. If we were to store that value on every company record, we would be saving a bunch of redundant information; we would be much better off extracting it into its own industry-level table so we are not replicating it by industry (a small sketch of this relational layout is included at the end of the next code cell). With Pandas. Now we can also perform these same computations with Pandas; let's see how this compares.
###Code
df = pd.read_html('https://en.wikipedia.org/w/index.php?title=Fortune_Global_500&oldid=855890446', header=0)[0]
df
df['rev'] = df['Revenue in USD'].apply(convert_revenue)
df['employees'] = df['Company'].apply(lambda x : dict_data[mapping.get(x, x)].get('employees'))
df['rev_per_employee'] = df['rev'] / df['employees'].astype(float)
df.sort_values(by='rev_per_employee', ascending=False)
df_list = pd.read_html("https://en.wikipedia.org/w/index.php?title=Automotive_industry&oldid=875776152", header=0)
df_totals = df_list[0]
df_by_man = df_list[2]
total_prod = df_totals.sort_values(by='Year').iloc[-1]['Production']
total_prod
df_by_man['share'] = df_by_man['Vehicles'].astype(float) / total_prod
market_share = df_by_man.set_index('Group')['share'][['Toyota', 'Volkswagen Group']]
market_share
rev = pd.read_html("https://en.wikipedia.org/w/index.php?title=List_of_largest_oil_and_gas_companies_by_revenue&oldid=871711850", header=1)[0]
rev = rev.iloc[:, 1:3]
rev.columns = ['Company', 'Revenue']
rev = rev[~(rev['Company'] == 'Company name')]
rev
rev['rev_clean'] = rev['Revenue'].apply(lambda x : float(x.rstrip('*')))
total = rev['rev_clean'].sum()
total
rev['share'] = rev['rev_clean'] / total
rev
rev = rev[rev['Company'].isin(['Exxon Mobil', 'Sinopec', 'China National Petroleum Corporation', 'Royal Dutch Shell'])].copy()
rev
# do fuzzy search
split_names = {i: set(i.split(' ')) for i in df['Company']}
def fuzzy(word):
return fuzzy_match(word, split_names)
rev['name'] = rev['Company'].apply(fuzzy)
rev
ms2 = df.merge(rev[['share', 'name']], left_on='Company', right_on='name')
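# Sketch of the "by industry" idea above: keep industry-level attributes in their own small table
# and merge them in on demand, instead of repeating them on every company row. The growth numbers
# and industry names here are invented placeholders, purely for illustration.
industry_growth = pd.DataFrame({
    'Industry': ['Automobiles', 'Petroleum', 'Retail'],   # example industries
    'annual_growth': [0.03, 0.02, 0.04],                  # hypothetical values
})
df_with_growth = df.merge(industry_growth, on='Industry', how='left')
df_with_growth[['Company', 'Industry', 'annual_growth']].head()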
###Output
_____no_output_____
###Markdown
Now we want to put these together and get only the company and the market share.
###Code
ms = market_share.reset_index()[['Group','share']]
ms.columns = ['Company', 'share']
pd.concat([ms, ms2[['Company', 'share']]])
###Output
_____no_output_____ |
Movie_Notebook.ipynb | ###Markdown
data cleaning. The main data cleaning task is related to missing values. Typical reasons for missing data include someone forgetting to fill in a field, errors while transferring data from a legacy database, a programming error, or simply a user choosing not to fill a field because of how they interpret it. These sources are mostly simple random mistakes. In our dataset there are also some values filled with "unknown". We know that Pandas will recognise "NA" as a missing value, but what about the others? If multiple users are manually entering data, this is a common problem: maybe I like to use "n/a" but you like to use "unknown". We will also drop columns that are not used, like US DVD Sales, MPAA Rating, etc. We will then turn the "Worldwide Gross" column into a numeric type so pandas can make a histogram out of it. We will also remove all NaN rows and re-index the dataframe. This is done specifically for computing the p-value through the stats.pearsonr method, which does not support NaNs and infinities.
###Code
import matplotlib.pyplot as plt
import numpy.ma as ma
import pandas as pd
import numpy as np
import math
from scipy import stats
%matplotlib inline
missing_values = ["n/a", "na", "--","unknown","Unknown","0"]
movie_data = pd.read_csv("movies.csv",na_values = missing_values)
to_drop =['US DVD Sales',
'MPAA Rating',
'Running Time (min)',
'Distributor',
'Source',
'Creative Type']
new_names = {'Worldwide Gross': 'w_gross',
'Production Budget':'p_budget',
'Major Genre': 'm_genre',
'IMDB Votes':'imdb_votes',
'IMDB Rating':'imdb_rating',
'Rotten Tomatoes Rating': 'tomatoes',
'US Gross': 'us_gross',
'Release Date': 'r_date'}
#drop unnecessary columns
movie_data.drop(to_drop, inplace=True, axis=1)
#remove all NaN rows and re-index the dataframe
#this is done specifically for computing the p-value through the stats.pearsonr method
#that does not support NaNs and infinities.
movie_data = movie_data.dropna()
movie_data = movie_data.reset_index(drop=True)
#rename columns, some commands cant handle variables with spaces
movie_data.rename(columns=new_names, inplace=True)
#transform strings to numeric
movie_data["w_gross"] = pd.to_numeric(movie_data["w_gross"])
###Output
_____no_output_____
###Markdown
plotting the histograms. From the first plots we can see that Worldwide Gross and the number of IMDB Votes behave similarly: both follow a heavily right-skewed, roughly geometric-looking distribution, which already hints at a correlation between the two.
###Code
movie_data.plot.hist(y='w_gross')
movie_data.plot.hist(y='imdb_votes')
movie_data.plot.hist(y='tomatoes')
###Output
_____no_output_____
###Markdown
The "Rotten tomatoes rating" somehow resembles a uniform distribution with a peak and at the value 90 and a bottom at value 10. The "IMDB rating" follows a left-skewed distribution. The mean is to the left of the peak. This is the main definition behind “skewness”, which is technically a measure of the distribution of values around the mean.
###Code
movie_data.plot.hist(y='imdb_rating')
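# A quick numeric check of the claim above (a sketch): pandas' Series.skew() returns the sample
# skewness; a negative value is consistent with a left-skewed (left-tailed) distribution.
print('IMDB rating skewness:', movie_data['imdb_rating'].skew())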
###Output
_____no_output_____
###Markdown
counting the number of movies for every major genre. It's clear that the most popular genres are drama and comedy.
###Code
# Count unique values in column 'm_genre' of the dataframe
genres_count_values = movie_data['m_genre'].value_counts()
# Value table
print(genres_count_values)
# Make a pandas dataframe out of the pandas series
genres_count_dataframe = pd.DataFrame(genres_count_values)
# Plot the graph
genres_count_dataframe.plot.bar()
###Output
Drama 392
Comedy 310
Action 230
Adventure 134
Thriller/Suspense 127
Horror 71
Romantic Comedy 68
Musical 21
Western 20
Black Comedy 19
Documentary 7
Concert/Performance 1
Name: m_genre, dtype: int64
###Markdown
log-log plot. We will define some bins of exponentially increasing size. One option is numpy.logspace (or numpy.geomspace), which returns a sequence of numbers evenly spaced on a logarithmic scale; min() tells us where the values start and max() where they end. In the code below we simply build the bin edges by repeatedly doubling the minimum value until we pass the maximum (a geomspace-based alternative is sketched inside the cell).
###Code
min_value_imdb_votes = movie_data['imdb_votes'].min()
max_value_imdb_votes = movie_data['imdb_votes'].max()
print('minimum value of the imdb_votes:'+str(min_value_imdb_votes))
print('maximum value of the imdb_votes:'+str(max_value_imdb_votes))
#generate sequence of exponentially increasing numbers
product = min_value_imdb_votes
exponential_bins = [product]
while product < max_value_imdb_votes:
exponential_bins.append(product)
product = product*2
exponential_bins = list(exponential_bins)
print('Exponential increasing bins to include the data range:',exponential_bins)
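# The same kind of edges could also be generated directly (a sketch, assuming a reasonably recent
# NumPy): np.geomspace returns numbers spaced evenly on a log scale between two endpoints.
geomspace_bins = np.geomspace(min_value_imdb_votes, max_value_imdb_votes, num=15)
print('np.geomspace alternative:', geomspace_bins)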
plt.plot(exponential_bins)
plt.show()
#empty list to hold the mean values
mean_values = []
#length of the list that holds the bin ranges
bin_ranges_list_length = len(exponential_bins)
#find the mean values of the bins
for i,obj in enumerate(exponential_bins):
if i < (bin_ranges_list_length - 1):
current = exponential_bins[i]
next_ = exponential_bins[i + 1]
mean_value_of_the_bin = ((current+next_)/2)
mean_values.append(int(mean_value_of_the_bin))
exponential_bins = list(exponential_bins)
mean_values = list(mean_values)
print('Mean values:',mean_values)
#exponential bins as bucket limits
plt.hist(movie_data['imdb_votes'], bins=exponential_bins)
plt.show()
#in a log-log scale
plt.hist(movie_data['imdb_votes'], bins=exponential_bins)
plt.yscale('log')
plt.xscale('log')
plt.xticks(mean_values,[np.log(i) for i in mean_values], rotation='vertical')
plt.show()
###Output
minimum value of the imdb_votes:33.0
maximum value of the imdb_votes:519541.0
Exponential increasing bins to include the data range: [33.0, 33.0, 66.0, 132.0, 264.0, 528.0, 1056.0, 2112.0, 4224.0, 8448.0, 16896.0, 33792.0, 67584.0, 135168.0, 270336.0]
###Markdown
doing the same for the w_gross column
###Code
min_value_w_gross = movie_data['w_gross'].min()
max_value_w_gross = movie_data['w_gross'].max()
print('minimum value of the w_gross:'+str(min_value_w_gross))
print('maximum value of the w_gross:'+str(max_value_w_gross))
#generate sequence of exponentially increasing numbers
product = min_value_w_gross
exponential_bins = [product]
while product < max_value_w_gross:
exponential_bins.append(product)
product = product*3
exponential_bins = list(exponential_bins)
print('Exponential increasing bins to include the data range:',exponential_bins)
plt.plot(exponential_bins)
plt.show()
#empty list to hold the mean values
mean_values = []
#length of the list that holds the bin ranges
bin_ranges_list_length = len(exponential_bins)
#find the mean values of the bins
for i,obj in enumerate(exponential_bins):
if i < (bin_ranges_list_length - 1):
current = exponential_bins[i]
next_ = exponential_bins[i + 1]
mean_value_of_the_bin = ((current+next_)/2)
mean_values.append(int(mean_value_of_the_bin))
mean_values = list(mean_values)
print('Mean values:',mean_values)
#mean values as input for bucket limits
plt.hist(movie_data['w_gross'], bins=mean_values)
plt.show()
#in a log-log scale
plt.hist(movie_data['w_gross'], bins=mean_values)
plt.yscale('log')
plt.xscale('log')
plt.xticks(mean_values,[np.log(i) for i in mean_values], rotation='vertical')
plt.show()
###Output
minimum value of the w_gross:20987.0
maximum value of the w_gross:2767891499.0
Exponential increasing bins to include the data range: [20987.0, 20987.0, 62961.0, 188883.0, 566649.0, 1699947.0, 5099841.0, 15299523.0, 45898569.0, 137695707.0, 413087121.0, 1239261363.0]
###Markdown
scatter plot of w_gross and imdb_votes
###Code
fig = plt.figure()
ax = plt.gca()
ax.plot(movie_data['w_gross'] ,movie_data['imdb_votes'] ,'o', c='red', alpha=0.05, markeredgecolor='none')
ax.set_yscale('log')
ax.set_xscale('log')
ax.set_xlabel('gross')
ax.set_ylabel('imdb votes')
###Output
_____no_output_____
###Markdown
We can see that there is a direct correlation between those two columns: as the worldwide gross gets higher, the IMDB votes also get higher. attributes correlation
###Code
fig, ax = plt.subplots(3, 2)
#increase figure size
fig = plt.gcf()
fig.set_size_inches(18.5, 10.5)
ax[0, 0].plot(movie_data['w_gross'] ,movie_data['tomatoes'] ,'o', c='red', alpha=0.05, markeredgecolor='none')
ax[0, 0].set_xlabel('Worldwide Gross')
ax[0, 0].set_ylabel('Rotten Tomatoes Rating')
ax[0, 0].set_xscale('log')
ax[0, 0].set_yscale('log')
ax[1, 0].plot(movie_data['w_gross'] ,movie_data['imdb_rating'] ,'o', c='red', alpha=0.05, markeredgecolor='none')
ax[1, 0].set_xlabel('Worldwide Gross')
ax[1, 0].set_ylabel('IMDB Rating')
ax[1, 0].set_xscale('log')
ax[1, 0].set_yscale('log')
ax[2, 0].plot(movie_data['w_gross'] ,movie_data['imdb_votes'] ,'o', c='red', alpha=0.05, markeredgecolor='none')
ax[2, 0].set_xlabel('Worldwide Gross')
ax[2, 0].set_ylabel('IMDB Votes')
ax[2, 0].set_xscale('log')
ax[2, 0].set_yscale('log')
ax[0, 1].plot(movie_data['tomatoes'] ,movie_data['imdb_votes'] ,'o', c='red', alpha=0.05, markeredgecolor='none')
ax[0, 1].set_xlabel('Rotten Tomatoes Rating')
ax[0, 1].set_ylabel('IMDB Votes')
ax[0, 1].set_xscale('log')
ax[0, 1].set_yscale('log')
ax[1, 1].plot(movie_data['tomatoes'] ,movie_data['imdb_rating'] ,'o', c='red', alpha=0.05, markeredgecolor='none')
ax[1, 1].set_xlabel('Rotten Tomatoes Rating')
ax[1, 1].set_ylabel('IMDB Rating')
ax[0, 1].set_xscale('log')
ax[0, 1].set_yscale('log')
ax[2, 1].plot(movie_data['imdb_votes'] ,movie_data['imdb_rating'] ,'o', c='red', alpha=0.05, markeredgecolor='none')
ax[2, 1].set_xlabel('IMDB Votes')
ax[2, 1].set_ylabel('IMDB Rating')
ax[2, 1].set_xscale('log')
ax[2, 1].set_yscale('log')
plt.show()
###Output
_____no_output_____
###Markdown
1) As the worldwide gross gets close to 10^8, the Rotten Tomatoes and IMDB ratings improve. 2) There is a clear correlation between the worldwide gross and the number of IMDB votes. 3) There is a clear correlation between the IMDB votes and the IMDB rating: as one gets higher, so does the other. 4) The same applies between the Rotten Tomatoes rating and the IMDB rating. 5) When the number of IMDB votes gets higher, so does the Rotten Tomatoes rating. Pearson correlation. For example, w_gross and us_gross have a correlation of 0.939742; lower values mean less correlation.
###Code
print(movie_data.corr(method='pearson'))
###Output
_____no_output_____
###Markdown
Spearman correlation
###Code
print(movie_data.corr(method='spearman'))
numeric_columns = ['us_gross','w_gross','p_budget','tomatoes','imdb_rating','imdb_votes']
corr = []
p_values = pd.DataFrame() # Matrix of p-values
for x in movie_data.columns:
for y in movie_data.columns:
#return pearson correlation coefficient and p-value for testing non-correlation
if(x in numeric_columns and y in numeric_columns):
corr = stats.pearsonr(movie_data[x], movie_data[y])
p_values.loc[x,y] = corr[1]
print(p_values)
###Output
_____no_output_____
###Markdown
We know from a previous calculation that the least popular genres are the following: Musical 21 Western 20 Black Comedy 19 Documentary 7 Concert/Performance 1 Bar plots with ErrorsAt first we will choose the desired confidence interval.The most commonly used confidence levels are 90 percent, 95 percent and 99 percent. Then we will calculate the margin error Za/2 * σ/√(n). Where Za/2 the the confidence coefficient, a the confidence level, σ the standard deviation and n the sample size. To find the critical value, or Za/2 with a confidence level of 95% we will convert the percentage to a decimal, .95, and divide it by 2 to get .475. Then, we will check out the z table to find the corresponding value that goes with .475. We can see that the closest value is 1.96, at the intersection of row 1.9 and the column of .06The final interval of the error is x̅ ± Za/2 * σ/√(n) where x̅ is the mean value
###Code
unique_genres = movie_data['m_genre'].unique()
not_popular_genres = ['Musical','Western','Black Comedy','Documentary','Concert/Performance']
popular_genres = list(filter(lambda x: x not in not_popular_genres, unique_genres))
#find the mean, count and std of the values
mean_values_dataframe = pd.DataFrame(movie_data.groupby('m_genre', as_index=False)['w_gross'].agg([np.mean, 'count', np.std]))
#reset the index from aggregating the mean, count and standard deviation
mean_values_dataframe = mean_values_dataframe.reset_index()
#drop unnecessary rows according to unpopular genres
mean_values_dataframe = mean_values_dataframe[~mean_values_dataframe['m_genre'].isin(not_popular_genres)]
print(mean_values_dataframe)
labels = popular_genres
x_pos = np.arange(len(labels))
mean_values = []
std_values = []
lower_error_list = []
upper_error_list = []
#for 95% confidence
confidence_value = 1.96
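# The 1.96 above can also be obtained directly from the standard normal distribution
# (a sketch using scipy.stats, already imported at the top): for a 95% two-sided interval
# we need the quantile at 1 - 0.05/2 = 0.975.
print('critical value from scipy:', stats.norm.ppf(0.975))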
for x in popular_genres:
row = mean_values_dataframe.loc[mean_values_dataframe['m_genre'] == x]
mean = row.iloc[0]['mean']
std = row.iloc[0]['std']
sample_size = row.iloc[0]['count']
mean_values.append(mean)
std_values.append(std)
# matplotlib's yerr expects error *sizes* (distances from the bar top), not absolute bounds
margin = confidence_value*(std/math.sqrt(sample_size))
lower_error_list.append(margin)
upper_error_list.append(margin)
error_bars = [lower_error_list,upper_error_list]
print("\nError bars values")
print("------------------------------")
print("0 is the lower, 1 is the upper")
print(pd.DataFrame(error_bars))
fig, ax = plt.subplots()
#yerr takes a 2xN sequence with the lower and upper error sizes for each bar
ax.bar(x_pos, mean_values,
yerr=error_bars,
align='center',
alpha=0.5,
ecolor='black',
capsize=10)
ax.set_ylabel('Mean value')
ax.set_xticks(x_pos)
ax.set_xticklabels(labels,rotation='vertical')
ax.yaxis.grid(True)
plt.show()
###Output
_____no_output_____
###Markdown
T-testing. The t score is a ratio between the difference between two groups and the difference within the groups. The larger the t score, the more difference there is between groups; the smaller the t score, the more similarity there is between groups. When running a t-test, the bigger the t-value, the more likely it is that the results are repeatable. Every t-value has a p-value to go with it. A p-value is the probability that the results from the sample data occurred by chance. P-values range from 0% to 100% and are usually written as a decimal; for example, a p-value of 5% is 0.05. Low p-values are good: they indicate that the data did not occur by chance. For example, a p-value of .01 means there is only a 1% probability that the results of the experiment happened by chance. In most cases, a p-value of 0.05 (5%) is accepted as the threshold for the data to be considered valid.
###Code
for x,obj in enumerate(popular_genres):
first_dataframe = movie_data[movie_data["m_genre"].str.contains(popular_genres[x])]
first_array = first_dataframe['w_gross']
for y,objj in enumerate(popular_genres):
second_dataframe = movie_data[movie_data["m_genre"].str.contains(popular_genres[y])]
second_array = second_dataframe['w_gross']
t2, p2 = stats.ttest_ind(first_array.to_numpy(),second_array.to_numpy(),equal_var=False)
print("--------------Comparing pairs ---------------")
print(popular_genres[x]," t = " + str(t2))
print(popular_genres[y]," p = " + str(p2))
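# A compact sketch applying the 0.05 threshold discussed above: collect only the genre pairs whose
# difference in mean worldwide gross comes out significant. Unlike the loop above, this uses exact
# genre equality instead of str.contains, which is an assumption about the intended matching.
significant_pairs = []
for x in popular_genres:
    for y in popular_genres:
        if x >= y:  # skip self-comparisons and mirrored duplicates
            continue
        a = movie_data.loc[movie_data['m_genre'] == x, 'w_gross']
        b = movie_data.loc[movie_data['m_genre'] == y, 'w_gross']
        t, p = stats.ttest_ind(a, b, equal_var=False)
        if p < 0.05:
            significant_pairs.append((x, y, round(p, 4)))
print(significant_pairs)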
###Output
_____no_output_____
###Markdown
They stopped making good movies! (The ratings data say that's true, but I think we're biased by nostalgia and the idea that 'old things are good'.)
###Code
movie_data['r_date'] = pd.to_datetime(movie_data['r_date'])
#groupby year and take the mean values
a1 = pd.DataFrame(movie_data.groupby(movie_data['r_date'].dt.strftime('%Y'))['imdb_rating'].mean())
a2 = pd.DataFrame(movie_data.groupby(movie_data['r_date'].dt.strftime('%Y'))['tomatoes'].mean())
y_averages = (a1.join(a2)).reset_index()
print(y_averages)
###Output
_____no_output_____
###Markdown
It looks like movies in the future are going to be better! We can see that the values after 2010 are from the future, so we're not going to need them. Also, the years before 1970 have sparse data in every decade. We will keep the years from 1970 to 2010.
###Code
y_averages = y_averages.astype({"r_date": int})
y_averages = y_averages.drop(y_averages[(y_averages.r_date < 1970) | (y_averages.r_date > 2010)].index)
y_averages = y_averages.reset_index(drop=True)
y_averages = y_averages.set_index('r_date')
print(y_averages)
###Output
_____no_output_____
###Markdown
The cleaned series has no missing years in between, so we can compute decade averages by averaging every 10 rows into one.
###Code
y_averages_10_yr = y_averages.set_index(np.arange(len(y_averages)) // 10).mean(level=0)
print(y_averages_10_yr)
print('\ntomatoes graph')
y_averages_10_yr['tomatoes'].plot.line()
plt.show()
print('imdb graph')
y_averages_10_yr['imdb_rating'].plot.line()
###Output
_____no_output_____
###Markdown
As expected, we have 4 decades from 1970 to 2010. According to IMDB, the average rating has fallen by about 0.72 over the 4 decades. That's not a big difference, but according to the Rotten Tomatoes index the rating has fallen by about 30 units, or roughly 3 units on the IMDB scale. For IMDB, the numerical rating is based on votes from users on a 1-10 star scale. The ratings are then normalized using a Bayesian filtering formula that strips out "outlier" overly negative or positive ratings, since IMDB has to deal with "spam" votes through its open registration system. Rotten Tomatoes also offers user voting, but its primary metric, the "Tomatometer", is a measure of the number of "FRESH" reviews as a percentage of overall reviews. The reviews counted in the Tomatometer come from a discrete list of selected critics/publications that remains uniform across the whole site (usually professional, but always prescreened based on specific criteria, including the requirement to have reviewed at least 100 films over the recent two years). So for the saying that "they don't make such good movies anymore": the users' votes suggest they don't really agree, but the much more "professional" critics of the Tomatometer do seem to confirm it, at least in our dataset. Make your own question about the data. Which of the top directors made the most profit? And as the number of movies they make grows, does the profit rise? (A quick check of this is added at the end of the cell below.) By "top" we mean the directors that made more than 10 movies.
###Code
directors = movie_data['Director'].unique()
director_counts = pd.DataFrame(movie_data['Director'].value_counts())
#find the mean, count and std of the values
director_values = pd.DataFrame(movie_data.groupby('Director')['w_gross'].agg([np.mean, 'count', np.sum]))
#reset the index from aggregating the mean, count and standard deviation
director_values = director_values.reset_index()
money_per_movie = []
#iterate over rows
for index, row in director_values.iterrows():
money_per_movie.append(row['sum']/row['count'])
#add column to the dataframe
director_values['money_per_movie'] = money_per_movie
#drop all directors that made less than ten movies
director_values = director_values.astype({"count": int})
director_values = director_values.drop(director_values[director_values['count'] <= 10].index)
director_values = director_values.reset_index(drop=True)
#sort them by the money they made
director_values = director_values.sort_values(by=['money_per_movie'], ascending=False)
print(director_values)
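# The second question above (does making more movies go hand in hand with a higher profit per movie?)
# can be checked with a quick correlation between the number of movies and the money per movie.
print(director_values[['count', 'money_per_movie']].corr())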
###Output
Director mean count sum money_per_movie
8 Steven Spielberg 3.382371e+08 17 5.750030e+09 3.382371e+08
6 Robert Zemeckis 3.199067e+08 11 3.518973e+09 3.199067e+08
5 Ridley Scott 1.843963e+08 12 2.212755e+09 1.843963e+08
4 Richard Donner 1.281672e+08 11 1.409839e+09 1.281672e+08
2 Joel Schumacher 1.181810e+08 12 1.418172e+09 1.181810e+08
3 Martin Scorsese 1.001857e+08 15 1.502785e+09 1.001857e+08
1 Clint Eastwood 9.065373e+07 11 9.971910e+08 9.065373e+07
0 Brian De Palma 4.127582e+07 11 4.540340e+08 4.127582e+07
9 Woody Allen 3.027158e+07 14 4.238022e+08 3.027158e+07
7 Spike Lee 2.898311e+07 15 4.347467e+08 2.898311e+07
|
01.getting-started/10.register-model-create-image-deploy-service/10.register-model-create-image-deploy-service.ipynb | ###Markdown
Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License. 10. Register Model, Create Image and Deploy Service. This example shows how to deploy a web service in step-by-step fashion: 1. Register model 2. Query versions of models and select one to deploy 3. Create Docker image 4. Query versions of images 5. Deploy the image as web service. **IMPORTANT**: * This notebook requires you to first complete the "01.SDK-101-Train-and-Deploy-to-ACI.ipynb" notebook. The 101 notebook taught you how to deploy a web service directly from a model in one step. This notebook shows a more advanced approach that gives you more control over model versions and Docker image versions. Prerequisites. Make sure you go through the [00. Installation and Configuration](00.configuration.ipynb) notebook first if you haven't.
###Code
# Check core SDK version number
import azureml.core
print("SDK version:", azureml.core.VERSION)
###Output
_____no_output_____
###Markdown
Initialize Workspace. Initialize a workspace object from persisted configuration.
###Code
from azureml.core import Workspace
ws = Workspace.from_config()
print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep = '\n')
###Output
_____no_output_____
###Markdown
Register Model. You can add tags and descriptions to your models. Note you need to have a `sklearn_regression_model.pkl` file in the current directory; this file is generated by the 01 notebook. The call below registers that file as a model with the same name `sklearn_regression_model.pkl` in the workspace. Using tags, you can track useful information such as the name and version of the machine learning library used to train the model. Note that tags must be alphanumeric.
###Code
from azureml.core.model import Model
import sklearn
library_version = "sklearn"+sklearn.__version__.replace(".","x")
model = Model.register(model_path = "sklearn_regression_model.pkl",
model_name = "sklearn_regression_model.pkl",
tags = {'area': "diabetes", 'type': "regression", 'version': library_version},
description = "Ridge regression model to predict diabetes",
workspace = ws)
###Output
_____no_output_____
###Markdown
You can explore the registered models within your workspace and query by tag. Models are versioned. If you call the register_model command many times with the same model name, you will get multiple versions of the model with increasing version numbers.
###Code
regression_models = Model.list(workspace=ws, tags=['area'])
for m in regression_models:
print("Name:", m.name,"\tVersion:", m.version, "\tDescription:", m.description, m.tags)
###Output
_____no_output_____
###Markdown
You can pick a specific model to deploy
###Code
print(model.name, model.description, model.version, sep = '\t')
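# A sketch (assuming the standard azureml-core Model constructor): a specific registered version
# can also be retrieved explicitly by name and version number instead of taking the latest one.
from azureml.core.model import Model
specific_model = Model(ws, name="sklearn_regression_model.pkl", version=model.version)
print(specific_model.name, specific_model.version)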
###Output
_____no_output_____
###Markdown
Create Docker Image. Show `score.py`. Note that the `sklearn_regression_model.pkl` in the `get_model_path` call is referring to the model of that name registered in the workspace. It is NOT referencing the local file.
###Code
%%writefile score.py
import pickle
import json
import numpy
from sklearn.externals import joblib
from sklearn.linear_model import Ridge
from azureml.core.model import Model
def init():
global model
# note here "sklearn_regression_model.pkl" is the name of the model registered under
# this is a different behavior than before when the code is run locally, even though the code is the same.
model_path = Model.get_model_path('sklearn_regression_model.pkl')
# deserialize the model file back into a sklearn model
model = joblib.load(model_path)
# note you can pass in multiple rows for scoring
def run(raw_data):
try:
data = json.loads(raw_data)['data']
data = numpy.array(data)
result = model.predict(data)
except Exception as e:
result = str(e)
return json.dumps({"result": result.tolist()})
from azureml.core.conda_dependencies import CondaDependencies
myenv = CondaDependencies.create(conda_packages=['numpy','scikit-learn'])
with open("myenv.yml","w") as f:
f.write(myenv.serialize_to_string())
###Output
_____no_output_____
###Markdown
Note that the following command can take a few minutes. You can add tags and descriptions to images. Also, an image can contain multiple models.
###Code
from azureml.core.image import Image, ContainerImage
image_config = ContainerImage.image_configuration(runtime= "python",
execution_script="score.py",
conda_file="myenv.yml",
tags = {'area': "diabetes", 'type': "regression"},
description = "Image with ridge regression model")
image = Image.create(name = "myimage1",
# this is the model object
models = [model],
image_config = image_config,
workspace = ws)
image.wait_for_creation(show_output = True)
###Output
_____no_output_____
###Markdown
List images by tag and find out the detailed build log for debugging.
###Code
for i in Image.list(workspace = ws,tags = ["area"]):
print('{}(v.{} [{}]) stored at {} with build log {}'.format(i.name, i.version, i.creation_state, i.image_location, i.image_build_log_uri))
###Output
_____no_output_____
###Markdown
Deploy image as web service on Azure Container Instance. Note that the service creation can take a few minutes.
###Code
from azureml.core.webservice import AciWebservice
aciconfig = AciWebservice.deploy_configuration(cpu_cores = 1,
memory_gb = 1,
tags = {'area': "diabetes", 'type': "regression"},
description = 'Predict diabetes using regression model')
from azureml.core.webservice import Webservice
aci_service_name = 'my-aci-service-2'
print(aci_service_name)
aci_service = Webservice.deploy_from_image(deployment_config = aciconfig,
image = image,
name = aci_service_name,
workspace = ws)
aci_service.wait_for_deployment(True)
print(aci_service.state)
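# Once deployed, the HTTP scoring endpoint can be inspected (a sketch; scoring_uri is the standard
# attribute exposed by the Webservice object).
print(aci_service.scoring_uri)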
###Output
_____no_output_____
###Markdown
Test web service Call the web service with some dummy input data to get a prediction.
###Code
import json
test_sample = json.dumps({'data': [
[1,2,3,4,5,6,7,8,9,10],
[10,9,8,7,6,5,4,3,2,1]
]})
test_sample = bytes(test_sample,encoding = 'utf8')
prediction = aci_service.run(input_data = test_sample)
print(prediction)
###Output
_____no_output_____
###Markdown
Delete ACI to clean up
###Code
aci_service.delete()
###Output
_____no_output_____
###Markdown
Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License. 10. Register Model, Create Image and Deploy Service. This example shows how to deploy a web service in step-by-step fashion: 1. Register model 2. Query versions of models and select one to deploy 3. Create Docker image 4. Query versions of images 5. Deploy the image as web service. **IMPORTANT**: * This notebook requires you to first complete the "01.SDK-101-Train-and-Deploy-to-ACI.ipynb" notebook. The 101 notebook taught you how to deploy a web service directly from a model in one step. This notebook shows a more advanced approach that gives you more control over model versions and Docker image versions. Prerequisites. Make sure you go through the [00. Installation and Configuration](00.configuration.ipynb) notebook first if you haven't.
###Code
# Check core SDK version number
import azureml.core
print("SDK version:", azureml.core.VERSION)
###Output
_____no_output_____
###Markdown
Initialize Workspace. Initialize a workspace object from persisted configuration.
###Code
from azureml.core import Workspace
ws = Workspace.from_config()
print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep = '\n')
###Output
_____no_output_____
###Markdown
Register Model. You can add tags and descriptions to your models. Note you need to have a `sklearn_regression_model.pkl` file in the current directory; this file is generated by the 01 notebook. The call below registers that file as a model with the same name `sklearn_regression_model.pkl` in the workspace. Using tags, you can track useful information such as the name and version of the machine learning library used to train the model. Note that tags must be alphanumeric.
###Code
from azureml.core.model import Model
import sklearn
library_version = "sklearn"+sklearn.__version__.replace(".","x")
model = Model.register(model_path = "sklearn_regression_model.pkl",
model_name = "sklearn_regression_model.pkl",
tags = {'area': "diabetes", 'type': "regression", 'version': library_version},
description = "Ridge regression model to predict diabetes",
workspace = ws)
###Output
_____no_output_____
###Markdown
You can explore the registered models within your workspace and query by tag. Models are versioned. If you call the register_model command many times with the same model name, you will get multiple versions of the model with increasing version numbers.
###Code
regression_models = Model.list(workspace=ws, tags=['area'])
for m in regression_models:
print("Name:", m.name,"\tVersion:", m.version, "\tDescription:", m.description, m.tags)
###Output
_____no_output_____
###Markdown
You can pick a specific model to deploy
###Code
print(model.name, model.description, model.version, sep = '\t')
###Output
_____no_output_____
###Markdown
Create Docker Image. Show `score.py`. Note that the `sklearn_regression_model.pkl` in the `get_model_path` call is referring to the model of that name registered in the workspace. It is NOT referencing the local file.
###Code
%%writefile score.py
import pickle
import json
import numpy
from sklearn.externals import joblib
from sklearn.linear_model import Ridge
from azureml.core.model import Model
def init():
global model
# note here "sklearn_regression_model.pkl" is the name of the model registered under
# this is a different behavior than before when the code is run locally, even though the code is the same.
model_path = Model.get_model_path('sklearn_regression_model.pkl')
# deserialize the model file back into a sklearn model
model = joblib.load(model_path)
# note you can pass in multiple rows for scoring
def run(raw_data):
try:
data = json.loads(raw_data)['data']
data = numpy.array(data)
result = model.predict(data)
# you can return any datatype as long as it is JSON-serializable
return result.tolist()
except Exception as e:
error = str(e)
return error
from azureml.core.conda_dependencies import CondaDependencies
myenv = CondaDependencies.create(conda_packages=['numpy','scikit-learn'])
with open("myenv.yml","w") as f:
f.write(myenv.serialize_to_string())
###Output
_____no_output_____
###Markdown
Note that the following command can take a few minutes. You can add tags and descriptions to images. Also, an image can contain multiple models.
###Code
from azureml.core.image import Image, ContainerImage
image_config = ContainerImage.image_configuration(runtime= "python",
execution_script="score.py",
conda_file="myenv.yml",
tags = {'area': "diabetes", 'type': "regression"},
description = "Image with ridge regression model")
image = Image.create(name = "myimage1",
# this is the model object
models = [model],
image_config = image_config,
workspace = ws)
image.wait_for_creation(show_output = True)
###Output
_____no_output_____
###Markdown
List images by tag and find out the detailed build log for debugging.
###Code
for i in Image.list(workspace = ws,tags = ["area"]):
print('{}(v.{} [{}]) stored at {} with build log {}'.format(i.name, i.version, i.creation_state, i.image_location, i.image_build_log_uri))
###Output
_____no_output_____
###Markdown
Deploy image as web service on Azure Container Instance. Note that the service creation can take a few minutes.
###Code
from azureml.core.webservice import AciWebservice
aciconfig = AciWebservice.deploy_configuration(cpu_cores = 1,
memory_gb = 1,
tags = {'area': "diabetes", 'type': "regression"},
description = 'Predict diabetes using regression model')
from azureml.core.webservice import Webservice
aci_service_name = 'my-aci-service-2'
print(aci_service_name)
aci_service = Webservice.deploy_from_image(deployment_config = aciconfig,
image = image,
name = aci_service_name,
workspace = ws)
aci_service.wait_for_deployment(True)
print(aci_service.state)
###Output
_____no_output_____
###Markdown
Test web service Call the web service with some dummy input data to get a prediction.
###Code
import json
test_sample = json.dumps({'data': [
[1,2,3,4,5,6,7,8,9,10],
[10,9,8,7,6,5,4,3,2,1]
]})
test_sample = bytes(test_sample,encoding = 'utf8')
prediction = aci_service.run(input_data=test_sample)
print(prediction)
###Output
_____no_output_____
###Markdown
Delete ACI to clean up
###Code
aci_service.delete()
###Output
_____no_output_____
###Markdown
Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License. 10. Register Model, Create Image and Deploy Service. This example shows how to deploy a web service in step-by-step fashion: 1. Register model 2. Query versions of models and select one to deploy 3. Create Docker image 4. Query versions of images 5. Deploy the image as web service. **IMPORTANT**: * This notebook requires you to first complete the "01.SDK-101-Train-and-Deploy-to-ACI.ipynb" notebook. The 101 notebook taught you how to deploy a web service directly from a model in one step. This notebook shows a more advanced approach that gives you more control over model versions and Docker image versions. Prerequisites. Make sure you go through the [00. Installation and Configuration](00.configuration.ipynb) notebook first if you haven't.
###Code
# Check core SDK version number
import azureml.core
print("SDK version:", azureml.core.VERSION)
###Output
_____no_output_____
###Markdown
Initialize Workspace. Initialize a workspace object from persisted configuration.
###Code
from azureml.core import Workspace
ws = Workspace.from_config()
print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep = '\n')
###Output
_____no_output_____
###Markdown
Register Model. You can add tags and descriptions to your models. Note you need to have a `sklearn_regression_model.pkl` file in the current directory; this file is generated by the 01 notebook. The call below registers that file as a model with the same name `sklearn_regression_model.pkl` in the workspace. Using tags, you can track useful information such as the name and version of the machine learning library used to train the model. Note that tags must be alphanumeric.
###Code
from azureml.core.model import Model
import sklearn
library_version = "sklearn"+sklearn.__version__.replace(".","x")
model = Model.register(model_path = "sklearn_regression_model.pkl",
model_name = "sklearn_regression_model.pkl",
tags = {'area': "diabetes", 'type': "regression", 'version': library_version},
description = "Ridge regression model to predict diabetes",
workspace = ws)
###Output
_____no_output_____
###Markdown
You can explore the registered models within your workspace and query by tag. Models are versioned. If you call the register_model command many times with the same model name, you will get multiple versions of the model with increasing version numbers.
###Code
regression_models = ws.models(tags=['area'])
for name, m in regression_models.items():
print("Name:", name,"\tVersion:", m.version, "\tDescription:", m.description, m.tags)
###Output
_____no_output_____
###Markdown
You can pick a specific model to deploy
###Code
print(model.name, model.description, model.version, sep = '\t')
###Output
_____no_output_____
###Markdown
Create Docker Image. Show `score.py`. Note that the `sklearn_regression_model.pkl` in the `get_model_path` call is referring to the model of that name registered in the workspace. It is NOT referencing the local file.
###Code
%%writefile score.py
import pickle
import json
import numpy
from sklearn.externals import joblib
from sklearn.linear_model import Ridge
from azureml.core.model import Model
def init():
global model
# note here "sklearn_regression_model.pkl" is the name of the model registered under
# this is a different behavior than before when the code is run locally, even though the code is the same.
model_path = Model.get_model_path('sklearn_regression_model.pkl')
# deserialize the model file back into a sklearn model
model = joblib.load(model_path)
# note you can pass in multiple rows for scoring
def run(raw_data):
try:
data = json.loads(raw_data)['data']
data = numpy.array(data)
result = model.predict(data)
except Exception as e:
result = str(e)
return json.dumps({"result": result.tolist()})
from azureml.core.conda_dependencies import CondaDependencies
myenv = CondaDependencies.create(conda_packages=['numpy','scikit-learn'])
with open("myenv.yml","w") as f:
f.write(myenv.serialize_to_string())
###Output
_____no_output_____
###Markdown
Note that the following command can take a few minutes. You can add tags and descriptions to images. Also, an image can contain multiple models.
###Code
from azureml.core.image import Image, ContainerImage
image_config = ContainerImage.image_configuration(runtime= "python",
execution_script="score.py",
conda_file="myenv.yml",
tags = {'area': "diabetes", 'type': "regression"},
description = "Image with ridge regression model")
image = Image.create(name = "myimage1",
# this is the model object
models = [model],
image_config = image_config,
workspace = ws)
image.wait_for_creation(show_output = True)
###Output
_____no_output_____
###Markdown
List images by tag and find out the detailed build log for debugging.
###Code
for i in Image.list(workspace = ws,tags = ["area"]):
print('{}(v.{} [{}]) stored at {} with build log {}'.format(i.name, i.version, i.creation_state, i.image_location, i.image_build_log_uri))
###Output
_____no_output_____
###Markdown
Deploy image as web service on Azure Container Instance. Note that the service creation can take a few minutes.
###Code
from azureml.core.webservice import AciWebservice
aciconfig = AciWebservice.deploy_configuration(cpu_cores = 1,
memory_gb = 1,
tags = {'area': "diabetes", 'type': "regression"},
description = 'Predict diabetes using regression model')
from azureml.core.webservice import Webservice
aci_service_name = 'my-aci-service-2'
print(aci_service_name)
aci_service = Webservice.deploy_from_image(deployment_config = aciconfig,
image = image,
name = aci_service_name,
workspace = ws)
aci_service.wait_for_deployment(True)
print(aci_service.state)
###Output
_____no_output_____
###Markdown
Test web service Call the web service with some dummy input data to get a prediction.
###Code
import json
test_sample = json.dumps({'data': [
[1,2,3,4,5,6,7,8,9,10],
[10,9,8,7,6,5,4,3,2,1]
]})
test_sample = bytes(test_sample,encoding = 'utf8')
prediction = aci_service.run(input_data = test_sample)
print(prediction)
###Output
_____no_output_____
###Markdown
Delete ACI to clean up
###Code
aci_service.delete()
###Output
_____no_output_____
###Markdown
Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License. 10. Register Model, Create Image and Deploy Service. This example shows how to deploy a web service in step-by-step fashion: 1. Register model 2. Query versions of models and select one to deploy 3. Create Docker image 4. Query versions of images 5. Deploy the image as web service. **IMPORTANT**: * This notebook requires you to first complete the "01.SDK-101-Train-and-Deploy-to-ACI.ipynb" notebook. The 101 notebook taught you how to deploy a web service directly from a model in one step. This notebook shows a more advanced approach that gives you more control over model versions and Docker image versions. Prerequisites. Make sure you go through the [00. Installation and Configuration](00.configuration.ipynb) notebook first if you haven't.
###Code
# Check core SDK version number
import azureml.core
print("SDK version:", azureml.core.VERSION)
###Output
_____no_output_____
###Markdown
Initialize Workspace. Initialize a workspace object from persisted configuration.
###Code
from azureml.core import Workspace
ws = Workspace.from_config()
print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep = '\n')
###Output
_____no_output_____
###Markdown
Register Model. You can add tags and descriptions to your models. Note that you need to have a `sklearn_regression_model.pkl` file in the current directory. This file is generated by the 01 notebook. The call below registers that file as a model with the same name `sklearn_regression_model.pkl` in the workspace. Using tags, you can track useful information such as the name and version of the machine learning library used to train the model. Note that tags must be alphanumeric.
###Code
from azureml.core.model import Model
import sklearn
library_version = "sklearn"+sklearn.__version__.replace(".","x")
model = Model.register(model_path = "sklearn_regression_model.pkl",
model_name = "sklearn_regression_model.pkl",
tags = {'area': "diabetes", 'type': "regression", 'version': library_version},
description = "Ridge regression model to predict diabetes",
workspace = ws)
###Output
_____no_output_____
###Markdown
You can explore the registered models within your workspace and query by tag. Models are versioned. If you call the register_model command many times with the same model name, you will get multiple versions of the model with increasing version numbers.
###Code
regression_models = Model.list(workspace=ws, tags=['area'])
for m in regression_models:
print("Name:", m.name,"\tVersion:", m.version, "\tDescription:", m.description, m.tags)
###Output
_____no_output_____
###Markdown
You can pick a specific model to deploy
###Code
print(model.name, model.description, model.version, sep = '\t')
###Output
_____no_output_____
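###Markdown
A minimal sketch of pinning an exact model version instead of taking the latest one. This assumes the `azureml.core.model.Model` constructor accepts `name` and `version` arguments (check your SDK version), and the version number used here is purely illustrative.
###Code
# illustrative only: fetch one specific registered version by name and version number
from azureml.core.model import Model
pinned_model = Model(workspace=ws, name="sklearn_regression_model.pkl", version=1)
print(pinned_model.name, pinned_model.version)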
###Markdown
Create Docker Image. Show `score.py`. Note that the `sklearn_regression_model.pkl` in the `get_model_path` call refers to the model named `sklearn_regression_model.pkl` registered under the workspace. It is NOT referencing the local file.
###Code
%%writefile score.py
import pickle
import json
import numpy
from sklearn.externals import joblib
from sklearn.linear_model import Ridge
from azureml.core.model import Model
def init():
global model
# note here "sklearn_regression_model.pkl" is the name of the model registered under
# this is a different behavior than before when the code is run locally, even though the code is the same.
model_path = Model.get_model_path('sklearn_regression_model.pkl')
# deserialize the model file back into a sklearn model
model = joblib.load(model_path)
# note you can pass in multiple rows for scoring
def run(raw_data):
try:
data = json.loads(raw_data)['data']
data = numpy.array(data)
result = model.predict(data)
# you can return any datatype as long as it is JSON-serializable
return result.tolist()
except Exception as e:
error = str(e)
return error
from azureml.core.conda_dependencies import CondaDependencies
myenv = CondaDependencies.create(conda_packages=['numpy','scikit-learn'])
with open("myenv.yml","w") as f:
f.write(myenv.serialize_to_string())
###Output
_____no_output_____
###Markdown
Note that the following command can take a few minutes. You can add tags and descriptions to images. Also, an image can contain multiple models.
###Code
from azureml.core.image import Image, ContainerImage
image_config = ContainerImage.image_configuration(runtime= "python",
execution_script="score.py",
conda_file="myenv.yml",
tags = {'area': "diabetes", 'type': "regression"},
description = "Image with ridge regression model")
image = Image.create(name = "myimage1",
# this is the model object
models = [model],
image_config = image_config,
workspace = ws)
image.wait_for_creation(show_output = True)
###Output
_____no_output_____
###Markdown
List images by tag and find out the detailed build log for debugging.
###Code
for i in Image.list(workspace = ws,tags = ["area"]):
print('{}(v.{} [{}]) stored at {} with build log {}'.format(i.name, i.version, i.creation_state, i.image_location, i.image_build_log_uri))
###Output
_____no_output_____
###Markdown
Deploy image as web service on Azure Container Instance. Note that the service creation can take a few minutes.
###Code
from azureml.core.webservice import AciWebservice
aciconfig = AciWebservice.deploy_configuration(cpu_cores = 1,
memory_gb = 1,
tags = {'area': "diabetes", 'type': "regression"},
description = 'Predict diabetes using regression model')
from azureml.core.webservice import Webservice
aci_service_name = 'my-aci-service-2'
print(aci_service_name)
aci_service = Webservice.deploy_from_image(deployment_config = aciconfig,
image = image,
name = aci_service_name,
workspace = ws)
aci_service.wait_for_deployment(True)
print(aci_service.state)
###Output
_____no_output_____
###Markdown
Test web service. Call the web service with some dummy input data to get a prediction.
###Code
import json
test_sample = json.dumps({'data': [
[1,2,3,4,5,6,7,8,9,10],
[10,9,8,7,6,5,4,3,2,1]
]})
test_sample = bytes(test_sample,encoding = 'utf8')
prediction = aci_service.run(input_data=test_sample)
print(prediction)
###Output
_____no_output_____
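###Markdown
As an alternative to `Webservice.run`, a minimal sketch of calling the service over plain HTTP. It assumes the deployed service exposes a `scoring_uri` attribute and accepts JSON POSTs; the `requests` package is an extra dependency not used elsewhere in this notebook.
###Code
# illustrative only: hit the scoring endpoint directly with an HTTP POST
import requests
headers = {'Content-Type': 'application/json'}
resp = requests.post(aci_service.scoring_uri, data=test_sample, headers=headers)
print(resp.status_code, resp.text)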
###Markdown
Delete ACI to clean up
###Code
aci_service.delete()
###Output
_____no_output_____
###Markdown
Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License. 10. Register Model, Create Image and Deploy Service. This example shows how to deploy a web service in step-by-step fashion: 1. Register model 2. Query versions of models and select one to deploy 3. Create Docker image 4. Query versions of images 5. Deploy the image as web service **IMPORTANT**: * This notebook requires you to first complete "01.SDK-101-Train-and-Deploy-to-ACI.ipynb" Notebook. The 101 Notebook taught you how to deploy a web service directly from a model in one step. This Notebook shows a more advanced approach that gives you more control over model versions and Docker image versions. Prerequisites. Make sure you go through the [00. Installation and Configuration](00.configuration.ipynb) Notebook first if you haven't.
###Code
# Check core SDK version number
import azureml.core
print("SDK version:", azureml.core.VERSION)
###Output
_____no_output_____
###Markdown
Initialize Workspace. Initialize a workspace object from persisted configuration.
###Code
from azureml.core import Workspace
ws = Workspace.from_config()
print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep = '\n')
###Output
_____no_output_____
###Markdown
Register Model. You can add tags and descriptions to your models. Note that you need to have a `sklearn_regression_model.pkl` file in the current directory. This file is generated by the 01 notebook. The call below registers that file as a model with the same name `sklearn_regression_model.pkl` in the workspace. Using tags, you can track useful information such as the name and version of the machine learning library used to train the model. Note that tags must be alphanumeric.
###Code
from azureml.core.model import Model
import sklearn
library_version = "sklearn"+sklearn.__version__.replace(".","x")
model = Model.register(model_path = "sklearn_regression_model.pkl",
model_name = "sklearn_regression_model.pkl",
tags = {'area': "diabetes", 'type': "regression", 'version': library_version},
description = "Ridge regression model to predict diabetes",
workspace = ws)
###Output
_____no_output_____
###Markdown
You can explore the registered models within your workspace and query by tag. Models are versioned. If you call the register_model command many times with the same model name, you will get multiple versions of the model with increasing version numbers.
###Code
regression_models = Model.list(workspace=ws, tags=['area'])
for m in regression_models:
print("Name:", m.name,"\tVersion:", m.version, "\tDescription:", m.description, m.tags)
###Output
_____no_output_____
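###Markdown
A minimal sketch, using plain Python over the `Model` objects listed above: pick the highest registered version of a given model name programmatically instead of relying on the most recent `register` call.
###Code
# illustrative only: select the latest version of the named model from the query results
named = [m for m in regression_models if m.name == "sklearn_regression_model.pkl"]
if named:
    latest = max(named, key=lambda m: m.version)
    print("latest:", latest.name, latest.version)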
###Markdown
You can pick a specific model to deploy
###Code
print(model.name, model.description, model.version, sep = '\t')
###Output
_____no_output_____
###Markdown
Create Docker Image. Show `score.py`. Note that the `sklearn_regression_model.pkl` in the `get_model_path` call refers to the model named `sklearn_regression_model.pkl` registered under the workspace. It is NOT referencing the local file.
###Code
%%writefile score.py
import pickle
import json
import numpy
from sklearn.externals import joblib
from sklearn.linear_model import Ridge
from azureml.core.model import Model
def init():
global model
# note here "sklearn_regression_model.pkl" is the name of the model registered under
# this is a different behavior than before when the code is run locally, even though the code is the same.
model_path = Model.get_model_path('sklearn_regression_model.pkl')
# deserialize the model file back into a sklearn model
model = joblib.load(model_path)
# note you can pass in multiple rows for scoring
def run(raw_data):
    try:
        data = json.loads(raw_data)['data']
        data = numpy.array(data)
        result = model.predict(data)
        # return a JSON-serializable payload on success
        return json.dumps({"result": result.tolist()})
    except Exception as e:
        # on failure, return the error message instead of calling .tolist() on a string
        return json.dumps({"error": str(e)})
from azureml.core.conda_dependencies import CondaDependencies
myenv = CondaDependencies.create(conda_packages=['numpy','scikit-learn'])
with open("myenv.yml","w") as f:
f.write(myenv.serialize_to_string())
###Output
_____no_output_____
###Markdown
Note that the following command can take a few minutes. You can add tags and descriptions to images. Also, an image can contain multiple models.
###Code
from azureml.core.image import Image, ContainerImage
image_config = ContainerImage.image_configuration(runtime= "python",
execution_script="score.py",
conda_file="myenv.yml",
tags = {'area': "diabetes", 'type': "regression"},
description = "Image with ridge regression model")
image = Image.create(name = "myimage1",
# this is the model object
models = [model],
image_config = image_config,
workspace = ws)
image.wait_for_creation(show_output = True)
###Output
_____no_output_____
###Markdown
List images by tag and find out the detailed build log for debugging.
###Code
for i in Image.list(workspace = ws,tags = ["area"]):
print('{}(v.{} [{}]) stored at {} with build log {}'.format(i.name, i.version, i.creation_state, i.image_location, i.image_build_log_uri))
###Output
_____no_output_____
###Markdown
Deploy image as web service on Azure Container Instance. Note that the service creation can take a few minutes.
###Code
from azureml.core.webservice import AciWebservice
aciconfig = AciWebservice.deploy_configuration(cpu_cores = 1,
memory_gb = 1,
tags = {'area': "diabetes", 'type': "regression"},
description = 'Predict diabetes using regression model')
from azureml.core.webservice import Webservice
aci_service_name = 'my-aci-service-2'
print(aci_service_name)
aci_service = Webservice.deploy_from_image(deployment_config = aciconfig,
image = image,
name = aci_service_name,
workspace = ws)
aci_service.wait_for_deployment(True)
print(aci_service.state)
###Output
_____no_output_____
###Markdown
Test web service. Call the web service with some dummy input data to get a prediction.
###Code
import json
test_sample = json.dumps({'data': [
[1,2,3,4,5,6,7,8,9,10],
[10,9,8,7,6,5,4,3,2,1]
]})
test_sample = bytes(test_sample,encoding = 'utf8')
prediction = aci_service.run(input_data = test_sample)
print(prediction)
###Output
_____no_output_____
###Markdown
Delete ACI to clean up
###Code
aci_service.delete()
###Output
_____no_output_____
###Markdown
Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License. 10. Register Model, Create Image and Deploy Service. This example shows how to deploy a web service in step-by-step fashion: 1. Register model 2. Query versions of models and select one to deploy 3. Create Docker image 4. Query versions of images 5. Deploy the image as web service **IMPORTANT**: * This notebook requires you to first complete "01.SDK-101-Train-and-Deploy-to-ACI.ipynb" Notebook. The 101 Notebook taught you how to deploy a web service directly from a model in one step. This Notebook shows a more advanced approach that gives you more control over model versions and Docker image versions. Prerequisites. Make sure you go through the [00. Installation and Configuration](00.configuration.ipynb) Notebook first if you haven't.
###Code
# Check core SDK version number
import azureml.core
print("SDK version:", azureml.core.VERSION)
###Output
_____no_output_____
###Markdown
Initialize Workspace. Initialize a workspace object from persisted configuration.
###Code
from azureml.core import Workspace
ws = Workspace.from_config()
print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep = '\n')
###Output
_____no_output_____
###Markdown
Register Model. You can add tags and descriptions to your models. Note that you need to have a `sklearn_regression_model.pkl` file in the current directory. This file is generated by the 01 notebook. The call below registers that file as a model with the same name `sklearn_regression_model.pkl` in the workspace. Using tags, you can track useful information such as the name and version of the machine learning library used to train the model. Note that tags must be alphanumeric.
###Code
from azureml.core.model import Model
import sklearn
library_version = "sklearn"+sklearn.__version__.replace(".","x")
model = Model.register(model_path = "sklearn_regression_model.pkl",
model_name = "sklearn_regression_model.pkl",
tags = {'area': "diabetes", 'type': "regression", 'version': library_version},
description = "Ridge regression model to predict diabetes",
workspace = ws)
###Output
_____no_output_____
###Markdown
You can explore the registered models within your workspace and query by tag. Models are versioned. If you call the register_model command many times with the same model name, you will get multiple versions of the model with increasing version numbers.
###Code
regression_models = ws.models(tags=['area'])
for name, m in regression_models.items():
print("Name:", name,"\tVersion:", m.version, "\tDescription:", m.description, m.tags)
###Output
_____no_output_____
###Markdown
You can pick a specific model to deploy
###Code
print(model.name, model.description, model.version, sep = '\t')
###Output
_____no_output_____
###Markdown
Create Docker Image. Show `score.py`. Note that the `sklearn_regression_model.pkl` in the `get_model_path` call refers to the model named `sklearn_regression_model.pkl` registered under the workspace. It is NOT referencing the local file.
###Code
%%writefile score.py
import pickle
import json
import numpy
from sklearn.externals import joblib
from sklearn.linear_model import Ridge
from azureml.core.model import Model
def init():
global model
# note here "sklearn_regression_model.pkl" is the name of the model registered under
# this is a different behavior than before when the code is run locally, even though the code is the same.
model_path = Model.get_model_path('sklearn_regression_model.pkl')
# deserialize the model file back into a sklearn model
model = joblib.load(model_path)
# note you can pass in multiple rows for scoring
def run(raw_data):
    try:
        data = json.loads(raw_data)['data']
        data = numpy.array(data)
        result = model.predict(data)
        # return a JSON-serializable payload on success
        return json.dumps({"result": result.tolist()})
    except Exception as e:
        # on failure, return the error message instead of calling .tolist() on a string
        return json.dumps({"error": str(e)})
from azureml.core.conda_dependencies import CondaDependencies
myenv = CondaDependencies.create(conda_packages=['numpy','scikit-learn'])
with open("myenv.yml","w") as f:
f.write(myenv.serialize_to_string())
###Output
_____no_output_____
###Markdown
Note that the following command can take a few minutes. You can add tags and descriptions to images. Also, an image can contain multiple models.
###Code
from azureml.core.image import Image, ContainerImage
image_config = ContainerImage.image_configuration(runtime= "python",
execution_script="score.py",
conda_file="myenv.yml",
tags = {'area': "diabetes", 'type': "regression"},
description = "Image with ridge regression model")
image = Image.create(name = "myimage1",
# this is the model object
models = [model],
image_config = image_config,
workspace = ws)
image.wait_for_creation(show_output = True)
###Output
_____no_output_____
###Markdown
List images by tag and find out the detailed build log for debugging.
###Code
for i in Image.list(workspace = ws,tags = ["area"]):
print('{}(v.{} [{}]) stored at {} with build log {}'.format(i.name, i.version, i.creation_state, i.image_location, i.image_build_log_uri))
###Output
_____no_output_____
###Markdown
Deploy image as web service on Azure Container Instance. Note that the service creation can take a few minutes.
###Code
from azureml.core.webservice import AciWebservice
aciconfig = AciWebservice.deploy_configuration(cpu_cores = 1,
memory_gb = 1,
tags = {'area': "diabetes", 'type': "regression"},
description = 'Predict diabetes using regression model')
from azureml.core.webservice import Webservice
aci_service_name = 'my-aci-service-2'
print(aci_service_name)
aci_service = Webservice.deploy_from_image(deployment_config = aciconfig,
image = image,
name = aci_service_name,
workspace = ws)
aci_service.wait_for_deployment(True)
print(aci_service.state)
###Output
_____no_output_____
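###Markdown
A minimal debugging sketch, assuming the `Webservice` object exposes a `get_logs()` method (check your SDK version): pull the container logs when the deployment does not reach a healthy state.
###Code
# illustrative only: print container logs if the service is not healthy
if aci_service.state != "Healthy":
    print(aci_service.get_logs())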
###Markdown
Test web service. Call the web service with some dummy input data to get a prediction.
###Code
import json
test_sample = json.dumps({'data': [
[1,2,3,4,5,6,7,8,9,10],
[10,9,8,7,6,5,4,3,2,1]
]})
test_sample = bytes(test_sample,encoding = 'utf8')
prediction = aci_service.run(input_data = test_sample)
print(prediction)
###Output
_____no_output_____
###Markdown
Delete ACI to clean up
###Code
aci_service.delete()
###Output
_____no_output_____
###Markdown
Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License. 10. Register Model, Create Image and Deploy Service. This example shows how to deploy a web service in step-by-step fashion: 1. Register model 2. Query versions of models and select one to deploy 3. Create Docker image 4. Query versions of images 5. Deploy the image as web service **IMPORTANT**: * This notebook requires you to first complete "01.SDK-101-Train-and-Deploy-to-ACI.ipynb" Notebook. The 101 Notebook taught you how to deploy a web service directly from a model in one step. This Notebook shows a more advanced approach that gives you more control over model versions and Docker image versions. Prerequisites. Make sure you go through the [00. Installation and Configuration](00.configuration.ipynb) Notebook first if you haven't.
###Code
# Check core SDK version number
import azureml.core
print("SDK version:", azureml.core.VERSION)
###Output
_____no_output_____
###Markdown
Initialize Workspace. Initialize a workspace object from persisted configuration.
###Code
from azureml.core import Workspace
ws = Workspace.from_config()
print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep = '\n')
###Output
_____no_output_____
###Markdown
Register Model. You can add tags and descriptions to your models. Note that you need to have a `sklearn_regression_model.pkl` file in the current directory. This file is generated by the 01 notebook. The call below registers that file as a model with the same name `sklearn_regression_model.pkl` in the workspace. Using tags, you can track useful information such as the name and version of the machine learning library used to train the model. Note that tags must be alphanumeric.
###Code
from azureml.core.model import Model
import sklearn
library_version = "sklearn"+sklearn.__version__.replace(".","x")
model = Model.register(model_path = "sklearn_regression_model.pkl",
model_name = "sklearn_regression_model.pkl",
tags = {'area': "diabetes", 'type': "regression", 'version': library_version},
description = "Ridge regression model to predict diabetes",
workspace = ws)
###Output
_____no_output_____
###Markdown
You can explore the registered models within your workspace and query by tag. Models are versioned. If you call the register_model command many times with the same model name, you will get multiple versions of the model with increasing version numbers.
###Code
regression_models = Model.list(workspace=ws, tags=['area'])
for m in regression_models:
print("Name:", m.name,"\tVersion:", m.version, "\tDescription:", m.description, m.tags)
###Output
_____no_output_____
###Markdown
You can pick a specific model to deploy
###Code
print(model.name, model.description, model.version, sep = '\t')
###Output
_____no_output_____
###Markdown
Create Docker Image. Show `score.py`. Note that the `sklearn_regression_model.pkl` in the `get_model_path` call refers to the model named `sklearn_regression_model.pkl` registered under the workspace. It is NOT referencing the local file.
###Code
%%writefile score.py
import pickle
import json
import numpy
from sklearn.externals import joblib
from sklearn.linear_model import Ridge
from azureml.core.model import Model
def init():
global model
# note here "sklearn_regression_model.pkl" is the name of the model registered under
# this is a different behavior than before when the code is run locally, even though the code is the same.
model_path = Model.get_model_path('sklearn_regression_model.pkl')
# deserialize the model file back into a sklearn model
model = joblib.load(model_path)
# note you can pass in multiple rows for scoring
def run(raw_data):
    try:
        data = json.loads(raw_data)['data']
        data = numpy.array(data)
        result = model.predict(data)
        # return a JSON-serializable payload on success
        return json.dumps({"result": result.tolist()})
    except Exception as e:
        # on failure, return the error message instead of calling .tolist() on a string
        return json.dumps({"error": str(e)})
from azureml.core.conda_dependencies import CondaDependencies
myenv = CondaDependencies.create(conda_packages=['numpy','scikit-learn'])
myenv.add_pip_package("pynacl==1.2.1")
with open("myenv.yml","w") as f:
f.write(myenv.serialize_to_string())
###Output
_____no_output_____
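###Markdown
A quick sanity check, illustrative and using only calls already present in this notebook: print the serialized conda specification to confirm that the extra pip dependency made it into `myenv.yml`.
###Code
# illustrative only: inspect the environment specification that was just written out
print(myenv.serialize_to_string())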
###Markdown
Note that the following command can take a few minutes. You can add tags and descriptions to images. Also, an image can contain multiple models.
###Code
from azureml.core.image import Image, ContainerImage
image_config = ContainerImage.image_configuration(runtime= "python",
execution_script="score.py",
conda_file="myenv.yml",
tags = {'area': "diabetes", 'type': "regression"},
description = "Image with ridge regression model")
image = Image.create(name = "myimage1",
# this is the model object
models = [model],
image_config = image_config,
workspace = ws)
image.wait_for_creation(show_output = True)
###Output
_____no_output_____
###Markdown
List images by tag and find out the detailed build log for debugging.
###Code
for i in Image.list(workspace = ws,tags = ["area"]):
print('{}(v.{} [{}]) stored at {} with build log {}'.format(i.name, i.version, i.creation_state, i.image_location, i.image_build_log_uri))
###Output
_____no_output_____
###Markdown
Deploy image as web service on Azure Container Instance. Note that the service creation can take a few minutes.
###Code
from azureml.core.webservice import AciWebservice
aciconfig = AciWebservice.deploy_configuration(cpu_cores = 1,
memory_gb = 1,
tags = {'area': "diabetes", 'type': "regression"},
description = 'Predict diabetes using regression model')
from azureml.core.webservice import Webservice
aci_service_name = 'my-aci-service-2'
print(aci_service_name)
aci_service = Webservice.deploy_from_image(deployment_config = aciconfig,
image = image,
name = aci_service_name,
workspace = ws)
aci_service.wait_for_deployment(True)
print(aci_service.state)
###Output
_____no_output_____
###Markdown
Test web service. Call the web service with some dummy input data to get a prediction.
###Code
import json
test_sample = json.dumps({'data': [
[1,2,3,4,5,6,7,8,9,10],
[10,9,8,7,6,5,4,3,2,1]
]})
test_sample = bytes(test_sample,encoding = 'utf8')
prediction = aci_service.run(input_data = test_sample)
print(prediction)
###Output
_____no_output_____
###Markdown
Delete ACI to clean up
###Code
aci_service.delete()
###Output
_____no_output_____ |
statistics/pvalues_analysis-from-CWoLa.ipynb | ###Markdown
Calculation of p-values. This notebook is for generating figures 8, 13, 14 of arXiv:1805.02664. Import and initialize some functions
###Code
from scipy.optimize import minimize
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
from scipy.stats import poisson, norm, kstest
import numdifftools
from numpy.linalg import inv
import matplotlib.gridspec as gridspec
sigaeloss_bb = np.load('../data_strings/sigae_2prong_loss_bb1.npy')
sigae3ploss_bb = np.load('../data_strings/sigae_3prong_loss_bb1.npy')
#sigaeloss_bkg = np.load('../data/sigaeloss_bkg.npy')
mass_bb = np.load('../data/mass_bb1.npy')
#mass_bkg = np.load('../data/mass_bkg.npy')
bkgaeloss_bb = np.load('../data_strings/bkgaeloss_bb1.npy')
#bkgaeloss_bkg = np.load('../data/bkgaeloss_bkg.npy')
#sigaeloss_bb = np.load('../data_strings/sigae_2prong_loss_bb1.npy')
#sigae3ploss_bb = np.load('../data_strings/sigae_3prong_loss_bb1.npy')
bkgae0loss_bb = np.load('../data_strings/bkgae_rndbkg_loss_bb1.npy')
bkgae1loss_bb = np.load('../data_strings/bkgae_purebkg_loss_bb1.npy')
import pandas as pd
f_bb = pd.read_hdf('/data/t3home000/spark/QUASAR/preprocessing/conventional_tau_BB1_rnd.h5')
dt = f_bb.values
dt.shape
f_bb.columns
correct = (dt[:,3]>0) &(dt[:,19]>0)
dt = dt[correct]
mass_bb = mass_bb[correct]
mass_bb.shape
dt.shape
sigaeloss_bb.shape
for i in range(13,19):
dt[:,i] = dt[:,i]/dt[:,3]
for i in range(29,35):
dt[:,i] = dt[:,i]/(dt[:,19])
correct = (dt[:,29]>=0) &(dt[:,29]<=1)&(dt[:,30]>=0) &(dt[:,30]<=1)&(dt[:,31]>=0) &(dt[:,31]<=1)&(dt[:,32]>=0) &(dt[:,32]<=1)&(dt[:,33]>=0) &(dt[:,33]<=1)&(dt[:,34]>=-0.01) &(dt[:,34]<=1)
dt = dt[correct]
mass_bb = mass_bb[correct]
for i in range(13,19):
dt[:,i] = dt[:,i]/dt[:,3]
for i in range(29,35):
dt[:,i] = dt[:,i]/(dt[:,19])
bins = np.linspace(-2,2,101)
plt.hist(dt[:,23],bins,alpha=0.2,color='r');
#plt.hist(dt[bkg_idx,14],bins,alpha=0.2,color='b');
cuts = {'bb1':[10, 10, 1.3, 1.3],'bb2':[0.4,0.4, .7], 'bb3':[3,3,1.8]}
#'bb1':[8, 8, 2.1]
#'bb1':[1000000, 10000, 2.1]
#'bb2':[0.4,0.4, .7]
#'bb3':[3,3,1.76]
############### Black BOX 1 Default CUT ##############
#sigae_wp = 10000
#bkgae_wp = 2.1
sigae_wp, sigae3p_wp, bkgae0_wp, bkgae1_wp = cuts['bb1']
print(sigae_wp, sigae3p_wp, bkgae0_wp, bkgae1_wp)
print(sigaeloss_bb)
bkgae0loss_bb.shape
index_bb = np.where((bkgae0loss_bb>bkgae0_wp)&(bkgae1loss_bb>bkgae1_wp)&(sigaeloss_bb<sigae_wp)&(sigae3ploss_bb<sigae3p_wp)&(dt[:,14]>0.85)&(dt[:,18]>0.9))[0]
#index_bkg = np.where((bkgaeloss_bkg>bkgae_wp)&(sigaeloss_bkg<sigae_wp))[0]
#index_bb = np.where((dt[:,18]>0.9))[0]
#print(len(index_bb),len(index_bkg))
print(len(index_bb))
index_bb.shape
#### Without Scalefactor
#PLOT FOR BLACK BOX 1 with REALNVP
plt.style.use('ggplot')
bins = np.linspace(3000,6900,27)
#print(bins)
#bkg_hist = plt.hist(mass_bkg[index_bkg],bins=bins,alpha=0.3,color='r',label='background');
obs_hist = plt.hist(mass_bb[index_bb],bins=bins,alpha=0.3,color='b',label='Blackbox1')
plt.xlabel(r'$m_{JJ}$ [GeV]')
plt.ylabel('Number of events')
plt.legend(loc='upper right')
plt.title('$m_{JJ}$ without SF')
plt.axvline(x=3823)
plt.show()
index_bb = np.where((bkgaeloss_bb>bkgae_wp)&(sigaeloss_bb<sigae_wp)&(sigae3ploss_bb<sigae3p_wp)&(dt[:,14]>0.85)&(dt[:,18]>0.9)&(dt[:,0]>3700)&(dt[:,0]<3900))[0]
index_bb.shape
#### Without Scalefactor
#PLOT FOR BLACK BOX 1 with REALNVP
plt.style.use('ggplot')
bins = np.linspace(0,2000,27)
#print(bins)
#bkg_hist = plt.hist(mass_bkg[index_bkg],bins=bins,alpha=0.3,color='r',label='background');
obs_hist = plt.hist(dt[index_bb,3],bins=bins,alpha=0.3,color='b',label='Blackbox2')
plt.xlabel(r'$m_{JJ}$ [GeV]')
plt.ylabel('Number of events')
plt.legend(loc='upper right')
plt.title('$m_{JJ}$ without SF')
plt.axvline(x=732)
plt.show()
obs_hist
#datasets_nosig = bkg_hist[0]
datasets_sig = obs_hist[0]
datasets_sig = [102., 91., 94., 72., 66., 56., 51., 42., 36., 27., 30.,
24., 19., 29., 11., 11., 14., 8., 6., 6., 4., 6.,
9., 3., 3., 2., 2., 0., 0., 1., 1., 0.]
bins = [3050, 3200, 3350, 3500, 3650, 3800, 3950, 4100, 4250, 4400, 4550,
4700, 4850, 5000, 5150, 5300, 5450, 5600, 5750, 5900, 6050, 6200,
6350, 6500, 6650, 6800, 6950, 7100, 7250, 7400, 7550, 7700, 7850]
import ROOT as r
xlow, xhigh = 2800,7000
BINS = (xhigh-xlow)/100
BINS = int(BINS)
roothist_obs = r.TH1F('data_obs','data_obs',BINS,xlow,xhigh)
for i in range(0,BINS):
roothist_obs.SetBinContent(i+1,obs_hist[0][i])
f = r.TFile.Open("blackbox1.root",'recreate')
roothist_obs.Write()
f.Close()
f = r.TFile.Open("blackbox1.root",'read')
f.ls()
f.Close()
c11 = r.TCanvas("myCanvasName","The Canvas Title",800,600)
roothist_obs.Draw()
c11.Draw()
#filenames_nosig = ["../data/finalscan_nosignal/0_005_" + str(i) + "_bincounts.dat" for i in range(3,12)]
#filenames_sig = ["../data/finalscan_signal/sig_bin" + str(i) + "_bincounts.dat" for i in range(3,12)]
#datasets_nosig = np.array([np.loadtxt(filename) for filename in filenames_nosig])
#datasets_sig = np.array([np.loadtxt(filename) for filename in filenames_sig])
def get_p_value(ydata,binvals,mask=[],verbose=0,plotfile=None,yerr=None,return_teststat = False,plotsys=True,myax=None):
ydata = np.array(ydata)
#Assume poisson is gaussian with N+1 variance
if not yerr:
yerr = np.sqrt(ydata+1)
else:
yerr=np.array(yerr)
def fit_func(x,p1,p2,p3):
#see the ATLAS diboson resonance search: https://arxiv.org/pdf/1708.04445.pdf.
xi = 0.
y = x/13000.
return p1*(1.-y)**(p2-xi*p3)*y**-p3
xdata = np.array([0.5*(binvals[i]+binvals[i+1]) for i in range(0,len(binvals)-1)])
xwidths = np.array([-binvals[i]+binvals[i+1] for i in range(0,len(binvals)-1)])
#Assuming inputs are bin counts, this is needed to get densities. Important for variable-width bins
ydata = np.array(ydata) * 100 / xwidths
yerr = np.array(yerr)*100/ np.array(xwidths)
#Least square fit, masking out the signal region
popt, pcov = curve_fit(fit_func, np.delete(xdata,mask), np.delete(ydata,mask),sigma=np.delete(yerr,mask),maxfev=10000)
if verbose:
print('fit params: ', popt)
ydata_fit = np.array([fit_func(x,popt[0],popt[1],popt[2]) for x in xdata])
#Check that the function is a good fit to the sideband
residuals = np.delete((ydata - ydata_fit)/yerr,mask)
if verbose > 0:
print("Goodness: ",kstest(residuals, norm(loc=0,scale=1).cdf))
print(residuals)
print(((ydata - ydata_fit)/yerr)[mask])
print('\n')
#The following code is used to get the bin errors by propagating the errors on the fit params
def fit_func_array(parr):
#see the ATLAS diboson resonance search: https://arxiv.org/pdf/1708.04445.pdf.
p1, p2, p3 = parr
xi = 0.
return np.array([p1*(1.-(x/13000.))**(p2-xi*p3)*(x/13000.)**-p3 for x in xdata])
jac=numdifftools.core.Jacobian(fit_func_array)
x_cov=np.dot(np.dot(jac(popt),pcov),jac(popt).T)
#For plot, take systematic error band as the diagonal of the covariance matrix
y_unc=np.sqrt([row[i] for i, row in enumerate(x_cov)])
if (plotfile != None) & (plotfile != 'ax'):
if plotsys:
plt.fill_between(xdata,ydata_fit+y_unc,ydata_fit-y_unc,facecolor='gray',edgecolor=None,alpha=0.4)
yerr2 = np.array(yerr)
yerr2[yerr>=ydata] = yerr2[yerr>=ydata]*0.8
plt.errorbar(xdata, ydata,[yerr2,yerr],None, 'bo', label='data',markersize=4)
plt.plot(xdata, ydata_fit, 'r--', label='data')
plt.yscale('log', nonposy='clip')
if plotfile == 'ax':
if plotsys:
myax.fill_between(xdata,ydata_fit+y_unc,ydata_fit-y_unc,facecolor='gray',edgecolor=None,alpha=0.4)
yerr2 = np.array(yerr)
yerr2[yerr>=ydata] = yerr2[yerr>=ydata]*0.8
myax.errorbar(xdata, ydata,[yerr2,yerr],None, 'bo', label='data',markersize=4)
myax.plot(xdata, ydata_fit, 'r--', label='data')
myax.set_yscale('log', nonposy='clip')
if plotfile == 'show':
plt.show()
elif plotfile:
plt.savefig(plotfile)
#Now, let's compute some statistics.
# Will use asymptotic formulae for p0 from Cowan et al arXiv:1007.1727
# and systematics procedure from https://cds.cern.ch/record/2242860/files/NOTE2017_001.pdf
#First get systematics in the signal region
#This function returns array of signal predictions in the signal region
def signal_fit_func_array(parr):
#see the ATLAS diboson resonance search: https://arxiv.org/pdf/1708.04445.pdf.
p1, p2, p3 = parr
xi = 0.
return np.array([np.sum([p1*(1.-(x/13000.))**(p2-xi*p3)*(x/13000.)**-p3*xwidths[mask[i]]/100 for i, x in enumerate(xdata[mask])])])
#Get covariance matrix of prediction uncertainties in the signal region
jac=numdifftools.core.Jacobian(signal_fit_func_array)
x_signal_cov=np.dot(np.dot(jac(popt),pcov),jac(popt).T)
#Inverse signal region covariance matrix:
inv_x_signal_cov = inv(x_signal_cov)
#Get observed and predicted event counts in the signal region
obs = np.array([np.sum(np.array(ydata)[mask]*np.array(xwidths)[mask]/100)])
expected = np.array([np.sum([fit_func(xdata[targetbin],popt[0],popt[1],popt[2])*xwidths[targetbin]/100 for targetbin in mask])])
#Negative numerator of log likelihood ratio, for signal rate mu = 0
def min_log_numerator(expected_nuis_arr):
#expected_nuis_arr is the array of systematic background uncertainty nuisance parameters
#These are event rate densities
expected_nuis_arr = np.array(expected_nuis_arr)
to_return = 0
#Poisson terms
for i, expected_nuis in enumerate(expected_nuis_arr):
            #Poisson lambda. Have to rescale nuisance contribution by bin width
my_lambda = expected[i]+expected_nuis_arr[i]
#Prevent negative predicted rates
if my_lambda < 10**-10:
my_lambda = 10**-10
#Poisson term. Ignore the factorial piece which will cancel in likelihood ratio
to_return = to_return + (obs[i]*np.log(my_lambda) - my_lambda)
#Gaussian nuisance term
nuisance_term = -0.5*np.dot(np.dot(expected_nuis_arr,inv_x_signal_cov),expected_nuis_arr)
to_return = to_return + nuisance_term
return -to_return
def jac_min_log_numerator(expected_nuis_arr):
#expected_nuis_arr is the array of systematic background uncertainty nuisance parameters
#These are event rate densities
expected_nuis_arr = np.array(expected_nuis_arr)
to_return = np.array([0.])
#Poisson terms
        #Poisson lambda. Have to rescale nuisance contribution by bin width
my_lambda = expected+expected_nuis_arr
dmy_lambda = np.array([1.])
#Prevent negative predicted rates
my_lambda[my_lambda < 10**-10] = np.ones(len(my_lambda[my_lambda < 10**-10])) * 10**-10
dmy_lambda[my_lambda < 10**-10] = 0
#Poisson term. Ignore the factorial piece which will cancel in likelihood ratio
to_return = to_return + (obs*dmy_lambda/my_lambda - dmy_lambda)
#Gaussian nuisance term
nuisance_term = -np.dot(inv_x_signal_cov,expected_nuis_arr)
to_return = to_return + nuisance_term
return -to_return
#Initialization of nuisance params
expected_nuis_array_init = [0.02]
    #shift log likelihood to help minimization algo
def rescaled_min_log_numerator(expected_nuis_arr):
return min_log_numerator(expected_nuis_arr) - min_log_numerator(expected_nuis_array_init)
#Perform minimization over nuisance parameters. Set bounds for bg nuisance at around 8 sigma.
bnds=[[-8*y_unc[mask[0]],8*y_unc[mask[0]]]]
minimize_log_numerator = minimize(rescaled_min_log_numerator,
expected_nuis_array_init,
jac=jac_min_log_numerator,
bounds=bnds)
if verbose:
print("numerator: ", minimize_log_numerator.items(),'\n')
#Now get likelihood ratio denominator
def min_log_denom(nuis_arr):
#nuis_arr contains the bg systematics and also the signal rate
expected_nuis_arr = np.array(nuis_arr)[:1]
#print(expected_nuis_arr)
mu = nuis_arr[1]
#Signal prediction
pred = [mu]
to_return = 0
#Poisson terms
for i, expected_nuis in enumerate(expected_nuis_arr):
#Poisson lambda
my_lambda = expected[i]+expected_nuis_arr[i] + pred[i]
#Prevent prediction from going negative
if my_lambda < 10**-10:
my_lambda = 10**-10
#Poisson term. Ignore the factorial piece which will cancel in likelihood ratio
to_return = to_return + (obs[i]*np.log(my_lambda) - my_lambda)
#Gaussian nuisance term
nuisance_term = -0.5*np.dot(np.dot(expected_nuis_arr,inv_x_signal_cov),expected_nuis_arr)
to_return = to_return + nuisance_term
return -to_return
def jac_min_log_denom(nuis_arr):
#expected_nuis_arr is the array of systematic background uncertainty nuisance parameters
#These are event rate densities
expected_nuis_arr = np.array(nuis_arr)[:1]
mu = nuis_arr[1]
pred = [mu]
to_return_first = np.array([0.])
#Poisson terms
        #Poisson lambda. Have to rescale nuisance contribution by bin width
my_lambda = expected+expected_nuis_arr+pred
dmy_lambda = np.array([1.])
#Prevent prediction from going negative
my_lambda[my_lambda < 10**-10] = np.ones(len(my_lambda[my_lambda < 10**-10])) * 10**-10
dmy_lambda[my_lambda < 10**-10] = 0
#Poisson term. Ignore the factorial piece which will cancel in likelihood ratio
to_return_first = to_return_first + (obs*dmy_lambda/my_lambda - dmy_lambda)
#Gaussian nuisance term
nuisance_term = -np.dot(inv_x_signal_cov,expected_nuis_arr)
to_return_first = to_return_first + nuisance_term
to_return_last = np.array([0.])
dpred = np.array([[1.]])
my_lambda = expected+expected_nuis_arr+pred
dmy_lambda = dpred
to_return_last = np.dot((obs/my_lambda),dmy_lambda.T) - np.sum(dmy_lambda,axis=1)
return -np.append(to_return_first, to_return_last)
    #initialization for minimization
nuis_array_init = [0.01,1.]
#Shift log likelihood for helping minimization algo.
def rescaled_min_log_denom(nuis_arr):
return min_log_denom(nuis_arr) - min_log_denom(nuis_array_init)
bnds = ((None,None),(None,None))
minimize_log_denominator = minimize(rescaled_min_log_denom,nuis_array_init,
jac=jac_min_log_denom,
bounds=bnds)
if verbose:
print("Denominator: ", minimize_log_denominator.items(),'\n')
if minimize_log_denominator.x[-1] < 0:
Zval = 0
neglognum = 0
neglogden = 0
else:
neglognum = min_log_numerator(minimize_log_numerator.x)
neglogden = min_log_denom(minimize_log_denominator.x)
Zval = np.sqrt(2*(neglognum - neglogden))
p0 = 1-norm.cdf(Zval)
if verbose:
print("z = ", Zval)
print("p0 = ", p0)
#plt.title(str(p0))
# if plotfile == 'show':
# plt.show()
# elif plotfile:
# plt.savefig(plotfile)
if return_teststat:
return p0, 2*(neglognum - neglogden)
else:
return p0
def add_mjjplot(ydata,binvals,mask=[],verbose=0,plotfile=None,yerr=None,plotsys=True,myax=None,plotfit=True):
ydata = np.array(ydata)
#Assume poisson is gaussian with N+1 variance
if not yerr:
yerr = np.sqrt(ydata+1)
else:
yerr=np.array(yerr)
def fit_func(x,p1,p2,p3):
#see the ATLAS diboson resonance search: https://arxiv.org/pdf/1708.04445.pdf.
xi = 0.
y = x/13000.
return p1*(1.-y)**(p2-xi*p3)*y**-p3
xdata = np.array([0.5*(binvals[i]+binvals[i+1]) for i in range(0,len(binvals)-1)])
xwidths = np.array([-binvals[i]+binvals[i+1] for i in range(0,len(binvals)-1)])
#Assuming inputs are bin counts, this is needed to get densities. Important for variable-width bins
ydata = np.array(ydata) * 100 / xwidths
yerr = np.array(yerr)*100/ np.array(xwidths)
#Least square fit, masking out the signal region
popt, pcov = curve_fit(fit_func, np.delete(xdata,mask),
np.delete(ydata,mask),
sigma=np.delete(yerr,mask),maxfev=10000)
if verbose:
print('fit params: ', popt)
ydata_fit = np.array([fit_func(x,popt[0],popt[1],popt[2]) for x in xdata])
#Check that the function is a good fit to the sideband
residuals = np.delete((ydata - ydata_fit)/yerr,mask)
if verbose > 0:
print("Goodness: ",kstest(residuals, norm(loc=0,scale=1).cdf))
print(residuals)
print(((ydata - ydata_fit)/yerr)[mask])
print('\n')
#The following code is used to get the bin errors by propagating the errors on the fit params
def fit_func_array(parr):
#see the ATLAS diboson resonance search: https://arxiv.org/pdf/1708.04445.pdf.
p1, p2, p3 = parr
xi = 0.
return np.array([p1*(1.-(x/13000.))**(p2-xi*p3)*(x/13000.)**-p3 for x in xdata])
jac=numdifftools.core.Jacobian(fit_func_array)
x_cov=np.dot(np.dot(jac(popt),pcov),jac(popt).T)
#For plot, take systematic error band as the diagonal of the covariance matrix
y_unc=np.sqrt([row[i] for i, row in enumerate(x_cov)])
if (plotfile != None) & (plotfile != 'ax'):
if plotfit:
if plotsys:
plt.fill_between(xdata,ydata_fit+y_unc,ydata_fit-y_unc,color='gray',alpha=0.4)
plt.plot(xdata, ydata_fit, 'r--', label='data')
plt.errorbar(xdata, ydata,yerr,None, 'bo', label='data',markersize=4)
plt.yscale('log', nonposy='clip')
if plotfile == 'ax':
if plotfit:
if plotsys:
myax.fill_between(xdata,ydata_fit+y_unc,ydata_fit-y_unc,color='gray',alpha=0.4)
myax.plot(xdata, ydata_fit, 'r--', label='data')
myax.errorbar(xdata, ydata,yerr,None, 'bo', label='data',markersize=4)
myax.set_yscale('log', nonposy='clip')
if plotfile == 'show':
plt.show()
elif plotfile:
plt.savefig(plotfile)
###Output
_____no_output_____
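###Markdown
For reference, a short restatement of what `get_p_value` implements, read off its code and comments (notation is ours). The sideband is fit with the smooth dijet parameterization of the ATLAS diboson search,
$$ f(x) = p_1\,(1-y)^{\,p_2-\xi p_3}\,y^{-p_3},\qquad y = x/13000\,\mathrm{GeV},\quad \xi = 0, $$
and the local significance in the masked signal region follows the asymptotic formula of Cowan et al. (arXiv:1007.1727),
$$ q_0 = 2\left[\ln L(\hat\mu,\hat\theta) - \ln L(0,\hat{\hat\theta})\right],\qquad Z_0 = \sqrt{q_0},\qquad p_0 = 1-\Phi(Z_0), $$
where the background nuisance parameters are constrained by a Gaussian term built from the fit covariance, and $Z_0$ is set to zero when the fitted signal rate comes out negative.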
###Markdown
Define the binning
###Code
binvals = bins
#binvals = [#1900.,2001.,2107.,2219.,2337.,2461.,2592.,2730.,
# 2875.,3028.,3189.,3358.,3536.,3724.,3922.,4131.,4350.,4500,4700,4900,5100,5300,5500,5700,5900,6100]
bincenters = [0.5*(binvals[i] + binvals[i+1]) for i in range(len(binvals)-1)]
len(bincenters)
masks = [[bin_i-1,bin_i, bin_i+1] for bin_i in range(1,len(binvals)-2)]
print(masks)
###Output
[[0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 6], [5, 6, 7], [6, 7, 8], [7, 8, 9], [8, 9, 10], [9, 10, 11], [10, 11, 12], [11, 12, 13], [12, 13, 14], [13, 14, 15], [14, 15, 16], [15, 16, 17], [16, 17, 18], [17, 18, 19], [18, 19, 20], [19, 20, 21], [20, 21, 22], [21, 22, 23], [22, 23, 24], [23, 24, 25], [24, 25, 26], [25, 26, 27], [26, 27, 28], [27, 28, 29], [28, 29, 30], [29, 30, 31]]
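###Markdown
A tiny illustrative check (not in the original) of the sliding-window convention above: each entry of `masks` marks three adjacent bins as the signal region, and every remaining bin serves as sideband inside `get_p_value`.
###Code
# illustrative only: show the signal-region / sideband split for one window
example_mask = masks[5]
sideband_bins = [j for j in range(len(bincenters)) if j not in example_mask]
print("signal-region bins:", example_mask)
print("sideband bins:", sideband_bins)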
###Markdown
Calculate and plot p-values for mass scan (Figure 8, right)
###Code
pvalues_sig = [get_p_value(datasets_sig[0:],binvals,mask=mask,verbose=0,plotfile=None) for i, mask in enumerate(masks)]
pvalues_nosig = [get_p_value(datasets_nosig[0:],binvals,mask=mask,verbose=0,plotfile=None) for i, mask in enumerate(masks)]
plt.plot(bincenters[:-2],pvalues_nosig)
plt.semilogy()
f.text(0.5,-0.01,r'$m_{JJ} \, / \, \mathrm{GeV}$',ha='center',fontsize=fontsize)
plt.ylabel(r'$p_0$',fontsize=fontsize)
plt.title(r'No signal')
#plt.title(r'With signal')
# plt.xlabel(r'$m_{JJ}$')
#plt.ylim([0.5*10**(-13),1])
#plt.tight_layout()
#plt.savefig('pvalue_plots.pdf', bbox_inches='tight')
plt.show()
fontsize=22
smfontsize=16
plt.plot(bincenters[:-2],pvalues_sig)
plt.semilogy()
#plt.text(0.5,-0.01,r'$m_{JJ} \, / \, \mathrm{GeV}$',ha='center',fontsize=fontsize)
plt.ylabel(r'$p_0$',fontsize=fontsize)
#plt.title(r'No signal')
plt.title(r'With signal')
#plt.axvline(x=3823,color='b')
# plt.xlabel(r'$m_{JJ}$')
#plt.ylim([0.5*10**(-13),1])
#plt.tight_layout()
#plt.savefig('pvalue_plots.pdf', bbox_inches='tight')
dashes = [5,5]
color='0.5'
linewidth=1.2
for sigma in range(2,6):
plt.axhline(1-norm.cdf(sigma),dashes=dashes,color=color,linewidth=linewidth)
if sigma > 2:
sigmastring = r'$' + str(sigma) + '\sigma$'
plt.text(2500,(1-norm.cdf(sigma))*1.1,sigmastring,va='bottom',ha='center',fontsize=smfontsize)
plt.show()
fontsize=22
smfontsize=16
#binvals = [#1900.,
# 2001.,2107.,2219.,2337.,2461.,2592.,2730.,2875.,3028.,3189.,3358.,3536.,3724.,3922.,4131.,4350.]
#bincenters = [0.5*(binvals[i] + binvals[i+1]) for i in range(len(binvals)-1)]
bincenters = bincenters[3:18]
masks = [[bin_i-1,bin_i, bin_i+1] for bin_i in range(3,18)]
plt.close('all')
f, axarr = plt.subplots(1,2, sharex=True, sharey=True,figsize=(5*1.4,5))
# plt.figure(figsize=(5,5))
linestyles = [{'dashes':[5,5]},{'linestyle':'-'},{'linestyle':'-'},{'linestyle':'-'},{'linestyle':'-'}]
choices = [0,1,4,6]
colors = ['black']
colors.extend([plt.cm.magma(i) for i in np.linspace(0.1,0.8,len(choices)-1)])
# colors = ['black']
# colors.extend([plt.cm.viridis(i) for i in np.linspace(0.1,0.95,len(choices)-1)])
effs = [r"100\%",r"10\%",r"1\%",r"0.2\%"]
dashes = [5,5]
color='0.5'
linewidth=1.2
for ax in axarr:
for sigma in range(1,8):
ax.axhline(1-norm.cdf(sigma),dashes=dashes,color=color,linewidth=linewidth)
if sigma > 2:
sigmastring = r'$' + str(sigma) + '\sigma$'
axarr[0].text(2500,(1-norm.cdf(sigma))*1.1,sigmastring,va='bottom',ha='center',fontsize=smfontsize)
for eff_i, effchoice in enumerate(choices):
pvalues_nosig = [get_p_value(ydata[1:],binvals,mask=masks[i],verbose=0,plotfile=None)
for i, ydata in enumerate(datasets_nosig)]
pvalues_sig = [get_p_value(ydata[1:],binvals,mask=masks[i],verbose=0,plotfile=None)
for i, ydata in enumerate(datasets_sig)]
# plt.subplot(1,2,1)
axarr[0].plot(bincenters,pvalues_nosig,
color=colors[eff_i],
**linestyles[eff_i])
#plt.subplot(1,2,2)
axarr[1].plot(bincenters,pvalues_sig,
color=colors[eff_i],
**linestyles[eff_i])
if eff_i > 0:
axarr[1].annotate(effs[eff_i],fontsize=smfontsize,
xy=(bincenters[4],pvalues_sig[4]), xycoords='data',
xytext=(bincenters[4]+200, pvalues_sig[4]/10), textcoords='data',
arrowprops=dict(arrowstyle="-|>", #linestyle="dashed",
color="0.0",
#patchB=el,
shrinkB=5,
connectionstyle="arc3,rad=0.3",
),
)
# axarr[1].annotate("Test",
# xy=(3000, 10**-7), xycoords='data',
# xytext=(3200, 10**-8), textcoords='data',
# arrowprops=dict(arrowstyle="->", #linestyle="dashed",
# color="0.0",
# #patchB=el,
# shrinkB=5,
# connectionstyle="arc3,rad=0.3",
# ),
# )
f.subplots_adjust(wspace=0)
#plt.setp([a.get_yticklabels() for a in f.axes[1:]], visible=False)
plt.semilogy()
f.text(0.5,-0.01,r'$m_{JJ} \, / \, \mathrm{GeV}$',ha='center',fontsize=fontsize)
axarr[0].set_ylabel(r'$p_0$',fontsize=fontsize)
axarr[0].set_title(r'No signal')
axarr[1].set_title(r'With signal')
# plt.xlabel(r'$m_{JJ}$')
plt.ylim([0.5*10**(-13),1])
#plt.tight_layout()
#plt.savefig('pvalue_plots.pdf', bbox_inches='tight')
plt.show()
fontsize=22
smfontsize=16
#binvals = [#1900.,
# 2001.,2107.,2219.,2337.,2461.,2592.,2730.,2875.,3028.,3189.,3358.,3536.,3724.,3922.,4131.,4350.]
#bincenters = [0.5*(binvals[i] + binvals[i+1]) for i in range(len(binvals)-1)]
bincenters = bincenters[3:18]
masks = [[bin_i-1,bin_i, bin_i+1] for bin_i in range(3,18)]
plt.close('all')
f, axarr = plt.subplots(1,2, sharex=True, sharey=True,figsize=(5*1.4,5))
# plt.figure(figsize=(5,5))
linestyles = [{'dashes':[5,5]},{'linestyle':'-'},{'linestyle':'-'},{'linestyle':'-'},{'linestyle':'-'}]
choices = [0,1,4,6]
colors = ['black']
colors.extend([plt.cm.magma(i) for i in np.linspace(0.1,0.8,len(choices)-1)])
# colors = ['black']
# colors.extend([plt.cm.viridis(i) for i in np.linspace(0.1,0.95,len(choices)-1)])
effs = [r"100\%",r"10\%",r"1\%",r"0.2\%"]
dashes = [5,5]
color='0.5'
linewidth=1.2
for ax in axarr:
for sigma in range(1,8):
ax.axhline(1-norm.cdf(sigma),dashes=dashes,color=color,linewidth=linewidth)
if sigma > 2:
sigmastring = r'$' + str(sigma) + '\sigma$'
axarr[0].text(2500,(1-norm.cdf(sigma))*1.1,sigmastring,va='bottom',ha='center',fontsize=smfontsize)
for eff_i, effchoice in enumerate(choices):
pvalues_nosig = [get_p_value(ydata[1:],binvals,mask=masks[i],verbose=0,plotfile=None)
for i, ydata in enumerate(datasets_nosig[:,effchoice])]
pvalues_sig = [get_p_value(ydata[1:],binvals,mask=masks[i],verbose=0,plotfile=None)
for i, ydata in enumerate(datasets_sig[:,effchoice])]
# plt.subplot(1,2,1)
axarr[0].plot(bincenters,pvalues_nosig,
color=colors[eff_i],
**linestyles[eff_i])
#plt.subplot(1,2,2)
axarr[1].plot(bincenters,pvalues_sig,
color=colors[eff_i],
**linestyles[eff_i])
if eff_i > 0:
axarr[1].annotate(effs[eff_i],fontsize=smfontsize,
xy=(bincenters[4],pvalues_sig[4]), xycoords='data',
xytext=(bincenters[4]+200, pvalues_sig[4]/10), textcoords='data',
arrowprops=dict(arrowstyle="-|>", #linestyle="dashed",
color="0.0",
#patchB=el,
shrinkB=5,
connectionstyle="arc3,rad=0.3",
),
)
# axarr[1].annotate("Test",
# xy=(3000, 10**-7), xycoords='data',
# xytext=(3200, 10**-8), textcoords='data',
# arrowprops=dict(arrowstyle="->", #linestyle="dashed",
# color="0.0",
# #patchB=el,
# shrinkB=5,
# connectionstyle="arc3,rad=0.3",
# ),
# )
f.subplots_adjust(wspace=0)
#plt.setp([a.get_yticklabels() for a in f.axes[1:]], visible=False)
plt.semilogy()
f.text(0.5,-0.01,r'$m_{JJ} \, / \, \mathrm{GeV}$',ha='center',fontsize=fontsize)
axarr[0].set_ylabel(r'$p_0$',fontsize=fontsize)
axarr[0].set_title(r'No signal')
axarr[1].set_title(r'With signal')
# plt.xlabel(r'$m_{JJ}$')
plt.ylim([0.5*10**(-13),1])
#plt.tight_layout()
#plt.savefig('pvalue_plots.pdf', bbox_inches='tight')
plt.show()
###Output
_____no_output_____
###Markdown
Figure 8
###Code
import matplotlib.gridspec as gridspec
fontsize=20
smfontsize=16
binvals = [#1900.,
2001.,2107.,2219.,2337.,2461.,2592.,2730.,2875.,3028.,3189.,3358.,3536.,3724.,3922.,4131.,4350.]
bincenters = [0.5*(binvals[i] + binvals[i+1]) for i in range(len(binvals)-1)]
bincenters = bincenters[3:12]
masks = [[bin_i-1,bin_i, bin_i+1] for bin_i in range(3,12)]
plt.close('all')
fig = plt.figure(figsize=(16,5))
outer = gridspec.GridSpec(1,2,wspace=0.3,width_ratios=[5,5])
plt_i = 0
inner = gridspec.GridSpecFromSubplotSpec(1, 1,
subplot_spec=outer[0])
ax = plt.Subplot(fig, inner[0])
ax.fill_between(mybinboundaries[:-1],sighist*100/mybinwidths,step='post',
color='palegoldenrod',alpha=1.)
for line_i in [4,6,9,11]:
ax.axvline(binvals[line_i],color='0.4',dashes=[5,3])
signallabel_y=0.15*10**6
ax.annotate("",xy=(binvals[6],signallabel_y), xytext = (binvals[9],signallabel_y),
arrowprops=dict(arrowstyle='<->'))
ax.text(0.5*(binvals[6] + binvals[9]),signallabel_y,r"Signal"'\n'r"region",va='center',ha='center',fontsize=smfontsize,
bbox=dict(facecolor='white',edgecolor='none', alpha=1.0))
sidebandlabel_y=0.8*10**-1
ax.annotate("",xy=(binvals[4],sidebandlabel_y), xytext = (binvals[6],sidebandlabel_y),
arrowprops=dict(arrowstyle='<->'))
ax.annotate("",xy=(binvals[9],sidebandlabel_y), xytext = (binvals[11],sidebandlabel_y),
arrowprops=dict(arrowstyle='<->'))
ax.annotate(r"Sideband", xytext=(2040,sidebandlabel_y*4/3), xy = (0.5*(binvals[9] + binvals[11]),sidebandlabel_y),
fontsize = smfontsize,
color='white',
arrowprops=dict(arrowstyle="-|>", #linestyle="dashed",
color="0.0",
#patchB=el,
shrinkB=5,
connectionstyle="arc3,rad=-0.2",
),)
ax.annotate(r"Sideband", xytext=(2040,sidebandlabel_y*4/3), xy = (0.5*(binvals[4] + binvals[6]),sidebandlabel_y),
fontsize = smfontsize,
arrowprops=dict(arrowstyle="-|>", #linestyle="dashed",
color="0.0",
#patchB=el,
shrinkB=5,
connectionstyle="arc3,rad=-0.3",
),)
chosen_set = [0,1,4,6,9]
[get_p_value(datasets_sig[4,i,1:],binvals,mask=[6,7,8],verbose=0,plotfile='ax',myax=ax)
for i in chosen_set]
ax.set_ylabel(r'Events / 100 GeV',fontsize=fontsize)
ax.set_xlabel(r'$m_{JJ} \, / \, \mathrm{GeV}$',fontsize=fontsize)
ax.set_ylim([2*10**-2,10**6])
ax.set_xlim([2001,4350])
fig.add_subplot(ax)
inner = gridspec.GridSpecFromSubplotSpec(1, 2, subplot_spec=outer[1], wspace=0)
axarr = [plt.Subplot(fig, inner[0]),plt.Subplot(fig, inner[1])]
dashes = [5,5]
color='0.5'
linewidth=1.2
linestyles = [{'dashes':[5,5]},{'linestyle':'-'},{'linestyle':'-'},{'linestyle':'-'},{'linestyle':'-'}]
choices = [0,1,4,6]
colors = ['black']
colors.extend([plt.cm.magma(i) for i in np.linspace(0.1,0.8,len(choices)-1)])
# colors = ['black']
# colors.extend([plt.cm.viridis(i) for i in np.linspace(0.1,0.95,len(choices)-1)])
effs = [r"100\%",r"10\%",r"1\%",r"0.2\%"]
for ax in axarr:
ax.set_yscale('log')
ax.set_ylim([2.*10**-14,1])
for sigma in range(1,8):
ax.axhline(1-norm.cdf(sigma),dashes=dashes,color=color,linewidth=linewidth)
if sigma > 2:
sigmastring = r'$' + str(sigma) + '\sigma$'
axarr[0].text(2500,(1-norm.cdf(sigma))*1.1,sigmastring,va='bottom',ha='center',fontsize=smfontsize)
for eff_i, effchoice in enumerate(choices):
pvalues_nosig = [get_p_value(ydata[1:],binvals,mask=masks[i],verbose=0,plotfile=None)
for i, ydata in enumerate(datasets_nosig[:,effchoice])]
pvalues_sig = [get_p_value(ydata[1:],binvals,mask=masks[i],verbose=0,plotfile=None)
for i, ydata in enumerate(datasets_sig[:,effchoice])]
axarr[0].plot(bincenters,pvalues_nosig,
color=colors[eff_i],
**linestyles[eff_i])
axarr[1].plot(bincenters,pvalues_sig,
color=colors[eff_i],
**linestyles[eff_i])
if eff_i > 0:
axarr[1].annotate(effs[eff_i],fontsize=smfontsize,
xy=(bincenters[4],pvalues_sig[4]), xycoords='data',
xytext=(bincenters[4]+200, pvalues_sig[4]/10), textcoords='data',
arrowprops=dict(arrowstyle="-|>", #linestyle="dashed",
color="0.0",
#patchB=el,
shrinkB=5,
connectionstyle="arc3,rad=0.3",
),
)
# axarr[1].annotate("Test",
# xy=(3000, 10**-7), xycoords='data',
# xytext=(3200, 10**-8), textcoords='data',
# arrowprops=dict(arrowstyle="->", #linestyle="dashed",
# color="0.0",
# #patchB=el,
# shrinkB=5,
# connectionstyle="arc3,rad=0.3",
# ),
# )
# f.subplots_adjust(wspace=0)
# plt.setp([a.get_yticklabels() for a in f.axes[1:]], visible=False)
# plt.semilogy()
#f.text(0.5,-0.01,r'$m_{JJ} \, / \, \mathrm{GeV}$',ha='center',fontsize=fontsize)
#axarr.text(0.5,-0.01,r'$m_{JJ} \, / \, \mathrm{GeV}$',ha='center',fontsize=fontsize)
axarr[0].set_ylabel(r'$p_0$',fontsize=fontsize)
axarr[0].set_title(r'No signal',fontsize=fontsize)
axarr[1].set_title(r'With signal',fontsize=fontsize)
axarr[0].text(3700,2.75*10**-15,r'$m_{JJ} \, / \, \mathrm{GeV} $',va='top',ha='center',fontsize=fontsize)
axarr[1].set_yticklabels([])
fig.add_subplot(axarr[0])
fig.add_subplot(axarr[1])
#plt.savefig('pvalplots.pdf', bbox_inches='tight')
plt.show()
###Output
_____no_output_____
###Markdown
Figure 14
###Code
spacing=0.05
f, axarr = plt.subplots(3,3,figsize=(5*1.4*2.5,5*2.5),sharex=True,sharey=True)
for bin_i in range(3,12):
# plt.close('all')
row = int((bin_i-3)/3)
col = (bin_i-3)%3
ax = axarr[row,col]
ax.fill_between(mybinboundaries[:-1],sighist*100/mybinwidths,step='post',
color='palegoldenrod',alpha=1.)
if row == 2:
ax.set_xlabel(r'$m_{JJ} \, / \, \mathrm{GeV}$',fontsize=fontsize)
if col == 0:
ax.set_ylabel(r'Events / 100 GeV',fontsize=fontsize)
for line_i in [bin_i-3,bin_i-1,bin_i+2,bin_i+4]:
ax.axvline(binvals[line_i],color='0.4',dashes=[5,3])
chosen_set = [0,1,4,6,9]
def plotfit(i):
if i == 9:
return False
else:
return True
[add_mjjplot(datasets_sig[bin_i-3,i,1:],binvals,mask=[bin_i-1,bin_i,bin_i+1],verbose=0,plotfile='ax',myax=ax,
plotsys = plotfit(i))
for i in chosen_set]
ax.set_ylim([2*10**-1,2*10**5])
f.subplots_adjust(hspace=spacing)
f.subplots_adjust(wspace=spacing)
for axrow in axarr:
for ax in axrow:
ax.label_outer()
#plt.savefig('mJJarr_sig.pdf', bbox_inches='tight')
plt.show()
###Output
_____no_output_____
###Markdown
Figure 13
###Code
spacing=0.05
f, axarr = plt.subplots(3,3,figsize=(5*1.4*2.5,5*2.5),sharex=True,sharey=True)
for bin_i in range(3,12):
# plt.close('all')
row = int((bin_i-3)/3)
col = (bin_i-3)%3
ax = axarr[row,col]
if row == 2:
ax.set_xlabel(r'$m_{JJ} \, / \, \mathrm{GeV}$',fontsize=fontsize)
if col == 0:
ax.set_ylabel(r'Events / 100 GeV',fontsize=fontsize)
for line_i in [bin_i-3,bin_i-1,bin_i+2,bin_i+4]:
ax.axvline(binvals[line_i],color='0.4',dashes=[5,3])
chosen_set = [0,1,4,6,9]
def plotfit(i):
if i == 9:
return False
else:
return True
[add_mjjplot(datasets_nosig[bin_i-3,i,1:],binvals,mask=[bin_i-1,bin_i,bin_i+1],verbose=0,plotfile='ax',myax=ax,
plotsys = plotfit(i))
for i in chosen_set]
ax.set_ylim([2*10**-1,2*10**5])
f.subplots_adjust(hspace=spacing)
f.subplots_adjust(wspace=spacing)
for axrow in axarr:
for ax in axrow:
ax.label_outer()
#plt.savefig('mJJarr_nosig.pdf', bbox_inches='tight')
plt.show()
###Output
_____no_output_____
###Markdown
Calculation of p-values. This notebook is for generating figures 8, 13, 14 of arXiv:1805.02664. Import and initialize some functions
###Code
from scipy.optimize import minimize
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
from scipy.stats import poisson, norm, kstest
import numdifftools
from numpy.linalg import inv
import matplotlib.gridspec as gridspec
sigaeloss_bb = np.load('../data/sigaeloss_bb1.npy')
sigaeloss_bkg = np.load('../data/sigaeloss_bkg.npy')
mass_bb = np.load('../data/mass_bb1.npy')
mass_bkg = np.load('../data/mass_bkg.npy')
bkgaeloss_bb = np.load('../data/bkgaeloss_bb1.npy')
bkgaeloss_bkg = np.load('../data/bkgaeloss_bkg.npy')
############### DEFAULT CUT ##############
sigae_wp = .64
bkgae_wp = 2
index_bb = np.where((bkgaeloss_bb>bkgae_wp)&(sigaeloss_bb<sigae_wp))[0]
index_bkg = np.where((bkgaeloss_bkg>bkgae_wp)&(sigaeloss_bkg<sigae_wp))[0]
print(len(index_bb),len(index_bkg))
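# Illustrative addition (not in the original): fraction of events passing the
# autoencoder working points, using the arrays the selection above ran over.
eff_bb = len(index_bb) / len(sigaeloss_bb)
eff_bkg = len(index_bkg) / len(sigaeloss_bkg)
print("selection efficiency  black box: %.4f   background: %.4f" % (eff_bb, eff_bkg))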
#### Without Scalefactor
#PLOT FOR BLACK BOX 1 with REALNVP
plt.style.use('ggplot')
bins = np.linspace(2800,7000,30)
bkg_hist = plt.hist(mass_bkg[index_bkg],bins=bins,alpha=0.3,color='r',label='background');
obs_hist = plt.hist(mass_bb[index_bb],bins=bins,alpha=0.3,color='b',label='Blackbox1');
plt.xlabel(r'$m_{JJ}$ [GeV]')
plt.ylabel('Number of events')
plt.legend(loc='upper right')
plt.title('$m_{JJ}$ without SF')
plt.show()
datasets_nosig = bkg_hist[0]
datasets_sig = obs_hist[0]
#filenames_nosig = ["../data/finalscan_nosignal/0_005_" + str(i) + "_bincounts.dat" for i in range(3,12)]
#filenames_sig = ["../data/finalscan_signal/sig_bin" + str(i) + "_bincounts.dat" for i in range(3,12)]
#datasets_nosig = np.array([np.loadtxt(filename) for filename in filenames_nosig])
#datasets_sig = np.array([np.loadtxt(filename) for filename in filenames_sig])
def get_p_value(ydata,binvals,mask=[],verbose=0,plotfile=None,yerr=None,return_teststat = False,plotsys=True,myax=None):
ydata = np.array(ydata)
#Assume poisson is gaussian with N+1 variance
if not yerr:
yerr = np.sqrt(ydata+1)
else:
yerr=np.array(yerr)
def fit_func(x,p1,p2,p3):
#see the ATLAS diboson resonance search: https://arxiv.org/pdf/1708.04445.pdf.
xi = 0.
y = x/13000.
return p1*(1.-y)**(p2-xi*p3)*y**-p3
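# Note: this is the standard 3-parameter dijet fit function from the ATLAS search cited
# above, with the dijet mass scaled by the 13 TeV collision energy (y = mJJ / 13000 GeV)
# and the xi cross-term switched off (xi = 0).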
xdata = np.array([0.5*(binvals[i]+binvals[i+1]) for i in range(0,len(binvals)-1)])
xwidths = np.array([-binvals[i]+binvals[i+1] for i in range(0,len(binvals)-1)])
#Assuming inputs are bin counts, this is needed to get densities. Important for variable-width bins
ydata = np.array(ydata) * 100 / xwidths
yerr = np.array(yerr)*100/ np.array(xwidths)
#Least square fit, masking out the signal region
popt, pcov = curve_fit(fit_func, np.delete(xdata,mask), np.delete(ydata,mask),sigma=np.delete(yerr,mask),maxfev=10000)
if verbose:
print('fit params: ', popt)
ydata_fit = np.array([fit_func(x,popt[0],popt[1],popt[2]) for x in xdata])
#Check that the function is a good fit to the sideband
residuals = np.delete((ydata - ydata_fit)/yerr,mask)
if verbose > 0:
print("Goodness: ",kstest(residuals, norm(loc=0,scale=1).cdf))
print(residuals)
print(((ydata - ydata_fit)/yerr)[mask])
print('\n')
#The following code is used to get the bin errors by propagating the errors on the fit params
def fit_func_array(parr):
#see the ATLAS diboson resonance search: https://arxiv.org/pdf/1708.04445.pdf.
p1, p2, p3 = parr
xi = 0.
return np.array([p1*(1.-(x/13000.))**(p2-xi*p3)*(x/13000.)**-p3 for x in xdata])
jac=numdifftools.core.Jacobian(fit_func_array)
x_cov=np.dot(np.dot(jac(popt),pcov),jac(popt).T)
#For plot, take systematic error band as the diagonal of the covariance matrix
y_unc=np.sqrt([row[i] for i, row in enumerate(x_cov)])
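# (equivalently np.sqrt(np.diag(x_cov)): the diagonal entries are the variances of the
#  fitted prediction in each bin)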
if (plotfile != None) & (plotfile != 'ax'):
if plotsys:
plt.fill_between(xdata,ydata_fit+y_unc,ydata_fit-y_unc,facecolor='gray',edgecolor=None,alpha=0.4)
yerr2 = np.array(yerr)
yerr2[yerr>=ydata] = yerr2[yerr>=ydata]*0.8
plt.errorbar(xdata, ydata,[yerr2,yerr],None, 'bo', label='data',markersize=4)
plt.plot(xdata, ydata_fit, 'r--', label='data')
plt.yscale('log', nonposy='clip')
if plotfile == 'ax':
if plotsys:
myax.fill_between(xdata,ydata_fit+y_unc,ydata_fit-y_unc,facecolor='gray',edgecolor=None,alpha=0.4)
yerr2 = np.array(yerr)
yerr2[yerr>=ydata] = yerr2[yerr>=ydata]*0.8
myax.errorbar(xdata, ydata,[yerr2,yerr],None, 'bo', label='data',markersize=4)
myax.plot(xdata, ydata_fit, 'r--', label='data')
myax.set_yscale('log', nonposy='clip')
if plotfile == 'show':
plt.show()
elif plotfile:
plt.savefig(plotfile)
#Now, let's compute some statistics.
# Will use asymptotic formulae for p0 from Cowan et al arXiv:1007.1727
# and systematics procedure from https://cds.cern.ch/record/2242860/files/NOTE2017_001.pdf
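# Concretely, the code below builds the profile-likelihood-ratio test statistic
#   q0 = 2 * [(-ln L(mu=0)) - (-ln L(mu=mu_hat))] = 2 * (neglognum - neglogden),
# with the background nuisance parameters profiled in both terms, and converts it to a
# significance Z = sqrt(q0) and a p-value p0 = 1 - Phi(Z) at the end of this function.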
#First get systematics in the signal region
#This function returns array of signal predictions in the signal region
def signal_fit_func_array(parr):
#see the ATLAS diboson resonance search: https://arxiv.org/pdf/1708.04445.pdf.
p1, p2, p3 = parr
xi = 0.
return np.array([np.sum([p1*(1.-(x/13000.))**(p2-xi*p3)*(x/13000.)**-p3*xwidths[mask[i]]/100 for i, x in enumerate(xdata[mask])])])
#Get covariance matrix of prediction uncertainties in the signal region
jac=numdifftools.core.Jacobian(signal_fit_func_array)
x_signal_cov=np.dot(np.dot(jac(popt),pcov),jac(popt).T)
#Inverse signal region covariance matrix:
inv_x_signal_cov = inv(x_signal_cov)
#Get observed and predicted event counts in the signal region
obs = np.array([np.sum(np.array(ydata)[mask]*np.array(xwidths)[mask]/100)])
expected = np.array([np.sum([fit_func(xdata[targetbin],popt[0],popt[1],popt[2])*xwidths[targetbin]/100 for targetbin in mask])])
#Negative numerator of log likelihood ratio, for signal rate mu = 0
def min_log_numerator(expected_nuis_arr):
#expected_nuis_arr is the array of systematic background uncertainty nuisance parameters
#These are event rate densities
expected_nuis_arr = np.array(expected_nuis_arr)
to_return = 0
#Poisson terms
for i, expected_nuis in enumerate(expected_nuis_arr):
#Poisson lambda. Have to rescale nuisance contribution by bin width
my_lambda = expected[i]+expected_nuis_arr[i]
#Prevent negative predicted rates
if my_lambda < 10**-10:
my_lambda = 10**-10
#Poisson term. Ignore the factorial piece which will cancel in likelihood ratio
to_return = to_return + (obs[i]*np.log(my_lambda) - my_lambda)
#Gaussian nuisance term
nuisance_term = -0.5*np.dot(np.dot(expected_nuis_arr,inv_x_signal_cov),expected_nuis_arr)
to_return = to_return + nuisance_term
return -to_return
def jac_min_log_numerator(expected_nuis_arr):
#expected_nuis_arr is the array of systematic background uncertainty nuisance parameters
#These are event rate densities
expected_nuis_arr = np.array(expected_nuis_arr)
to_return = np.array([0.])
#Poisson terms
#Poisson lambda. Have to rescale nuisance contribution by bin width
my_lambda = expected+expected_nuis_arr
dmy_lambda = np.array([1.])
#Prevent negative predicted rates
my_lambda[my_lambda < 10**-10] = np.ones(len(my_lambda[my_lambda < 10**-10])) * 10**-10
dmy_lambda[my_lambda < 10**-10] = 0
#Poisson term. Ignore the factorial piece which will cancel in likelihood ratio
to_return = to_return + (obs*dmy_lambda/my_lambda - dmy_lambda)
#Gaussian nuisance term
nuisance_term = -np.dot(inv_x_signal_cov,expected_nuis_arr)
to_return = to_return + nuisance_term
return -to_return
#Initialization of nuisance params
expected_nuis_array_init = [0.02]
#shift log likelihood to help the minimization algorithm
def rescaled_min_log_numerator(expected_nuis_arr):
return min_log_numerator(expected_nuis_arr) - min_log_numerator(expected_nuis_array_init)
#Perform minimization over nuisance parameters. Set bounds for bg nuisance at around 8 sigma.
bnds=[[-8*y_unc[mask[0]],8*y_unc[mask[0]]]]
minimize_log_numerator = minimize(rescaled_min_log_numerator,
expected_nuis_array_init,
jac=jac_min_log_numerator,
bounds=bnds)
if verbose:
print("numerator: ", minimize_log_numerator.items(),'\n')
#Now get likelihood ratio denominator
def min_log_denom(nuis_arr):
#nuis_arr contains the bg systematics and also the signal rate
expected_nuis_arr = np.array(nuis_arr)[:1]
#print(expected_nuis_arr)
mu = nuis_arr[1]
#Signal prediction
pred = [mu]
to_return = 0
#Poisson terms
for i, expected_nuis in enumerate(expected_nuis_arr):
#Poisson lambda
my_lambda = expected[i]+expected_nuis_arr[i] + pred[i]
#Prevent prediction from going negative
if my_lambda < 10**-10:
my_lambda = 10**-10
#Poisson term. Ignore the factorial piece which will cancel in likelihood ratio
to_return = to_return + (obs[i]*np.log(my_lambda) - my_lambda)
#Gaussian nuisance term
nuisance_term = -0.5*np.dot(np.dot(expected_nuis_arr,inv_x_signal_cov),expected_nuis_arr)
to_return = to_return + nuisance_term
return -to_return
def jac_min_log_denom(nuis_arr):
#expected_nuis_arr is the array of systematic background uncertainty nuisance parameters
#These are event rate densities
expected_nuis_arr = np.array(nuis_arr)[:1]
mu = nuis_arr[1]
pred = [mu]
to_return_first = np.array([0.])
#Poisson terms
#Poisson lambda. Have to rescale nuisance contribution by bin width
my_lambda = expected+expected_nuis_arr+pred
dmy_lambda = np.array([1.])
#Prevent prediction from going negative
my_lambda[my_lambda < 10**-10] = np.ones(len(my_lambda[my_lambda < 10**-10])) * 10**-10
dmy_lambda[my_lambda < 10**-10] = 0
#Poisson term. Ignore the factorial piece which will cancel in likelihood ratio
to_return_first = to_return_first + (obs*dmy_lambda/my_lambda - dmy_lambda)
#Gaussian nuisance term
nuisance_term = -np.dot(inv_x_signal_cov,expected_nuis_arr)
to_return_first = to_return_first + nuisance_term
to_return_last = np.array([0.])
dpred = np.array([[1.]])
my_lambda = expected+expected_nuis_arr+pred
dmy_lambda = dpred
to_return_last = np.dot((obs/my_lambda),dmy_lambda.T) - np.sum(dmy_lambda,axis=1)
return -np.append(to_return_first, to_return_last)
#initialization for minimization
nuis_array_init = [0.01,1.]
#Shift log likelihood to help the minimization algorithm.
def rescaled_min_log_denom(nuis_arr):
return min_log_denom(nuis_arr) - min_log_denom(nuis_array_init)
bnds = ((None,None),(None,None))
minimize_log_denominator = minimize(rescaled_min_log_denom,nuis_array_init,
jac=jac_min_log_denom,
bounds=bnds)
if verbose:
print("Denominator: ", minimize_log_denominator.items(),'\n')
if minimize_log_denominator.x[-1] < 0:
Zval = 0
neglognum = 0
neglogden = 0
else:
neglognum = min_log_numerator(minimize_log_numerator.x)
neglogden = min_log_denom(minimize_log_denominator.x)
Zval = np.sqrt(2*(neglognum - neglogden))
p0 = 1-norm.cdf(Zval)
if verbose:
print("z = ", Zval)
print("p0 = ", p0)
#plt.title(str(p0))
# if plotfile == 'show':
# plt.show()
# elif plotfile:
# plt.savefig(plotfile)
if return_teststat:
return p0, 2*(neglognum - neglogden)
else:
return p0
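# Usage sketch (illustrative, left as a comment so nothing extra runs here):
#   p0 = get_p_value(ydata, binvals, mask=[6, 7, 8], verbose=0)
# where `ydata` holds the observed counts per bin, `binvals` the bin edges, and `mask`
# the indices of the bins treated as the signal region (excluded from the sideband fit).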
def add_mjjplot(ydata,binvals,mask=[],verbose=0,plotfile=None,yerr=None,plotsys=True,myax=None,plotfit=True):
ydata = np.array(ydata)
#Assume poisson is gaussian with N+1 variance
if not yerr:
yerr = np.sqrt(ydata+1)
else:
yerr=np.array(yerr)
def fit_func(x,p1,p2,p3):
#see the ATLAS diboson resonance search: https://arxiv.org/pdf/1708.04445.pdf.
xi = 0.
y = x/13000.
return p1*(1.-y)**(p2-xi*p3)*y**-p3
xdata = np.array([0.5*(binvals[i]+binvals[i+1]) for i in range(0,len(binvals)-1)])
xwidths = np.array([-binvals[i]+binvals[i+1] for i in range(0,len(binvals)-1)])
#Assuming inputs are bin counts, this is needed to get densities. Important for variable-width bins
ydata = np.array(ydata) * 100 / xwidths
yerr = np.array(yerr)*100/ np.array(xwidths)
#Least square fit, masking out the signal region
popt, pcov = curve_fit(fit_func, np.delete(xdata,mask),
np.delete(ydata,mask),
sigma=np.delete(yerr,mask),maxfev=10000)
if verbose:
print('fit params: ', popt)
ydata_fit = np.array([fit_func(x,popt[0],popt[1],popt[2]) for x in xdata])
#Check that the function is a good fit to the sideband
residuals = np.delete((ydata - ydata_fit)/yerr,mask)
if verbose > 0:
print("Goodness: ",kstest(residuals, norm(loc=0,scale=1).cdf))
print(residuals)
print(((ydata - ydata_fit)/yerr)[mask])
print('\n')
#The following code is used to get the bin errors by propagating the errors on the fit params
def fit_func_array(parr):
#see the ATLAS diboson resonance search: https://arxiv.org/pdf/1708.04445.pdf.
p1, p2, p3 = parr
xi = 0.
return np.array([p1*(1.-(x/13000.))**(p2-xi*p3)*(x/13000.)**-p3 for x in xdata])
jac=numdifftools.core.Jacobian(fit_func_array)
x_cov=np.dot(np.dot(jac(popt),pcov),jac(popt).T)
#For plot, take systematic error band as the diagonal of the covariance matrix
y_unc=np.sqrt([row[i] for i, row in enumerate(x_cov)])
if (plotfile != None) & (plotfile != 'ax'):
if plotfit:
if plotsys:
plt.fill_between(xdata,ydata_fit+y_unc,ydata_fit-y_unc,color='gray',alpha=0.4)
plt.plot(xdata, ydata_fit, 'r--', label='data')
plt.errorbar(xdata, ydata,yerr,None, 'bo', label='data',markersize=4)
plt.yscale('log', nonposy='clip')
if plotfile == 'ax':
if plotfit:
if plotsys:
myax.fill_between(xdata,ydata_fit+y_unc,ydata_fit-y_unc,color='gray',alpha=0.4)
myax.plot(xdata, ydata_fit, 'r--', label='data')
myax.errorbar(xdata, ydata,yerr,None, 'bo', label='data',markersize=4)
myax.set_yscale('log', nonposy='clip')
if plotfile == 'show':
plt.show()
elif plotfile:
plt.savefig(plotfile)
###Output
_____no_output_____
###Markdown
Define the binning
###Code
bins
binvals = bins
#binvals = [#1900.,2001.,2107.,2219.,2337.,2461.,2592.,2730.,
# 2875.,3028.,3189.,3358.,3536.,3724.,3922.,4131.,4350.,4500,4700,4900,5100,5300,5500,5700,5900,6100]
bincenters = [0.5*(binvals[i] + binvals[i+1]) for i in range(len(binvals)-1)]
len(bincenters)
masks = [[bin_i-1,bin_i, bin_i+1] for bin_i in range(1,len(binvals)-2)]
print(masks)
###Output
[[0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 6], [5, 6, 7], [6, 7, 8], [7, 8, 9], [8, 9, 10], [9, 10, 11], [10, 11, 12], [11, 12, 13], [12, 13, 14], [13, 14, 15], [14, 15, 16], [15, 16, 17], [16, 17, 18], [17, 18, 19], [18, 19, 20], [19, 20, 21], [20, 21, 22], [21, 22, 23], [22, 23, 24], [23, 24, 25], [24, 25, 26], [25, 26, 27], [26, 27, 28]]
###Markdown
Calculate and plot p-values for mass scan. Figure 8, right
###Code
pvalues_sig = [get_p_value(datasets_sig[0:],binvals,mask=mask,verbose=0,plotfile=None) for i, mask in enumerate(masks)]
pvalues_nosig = [get_p_value(datasets_nosig[0:],binvals,mask=mask,verbose=0,plotfile=None) for i, mask in enumerate(masks)]
plt.plot(bincenters[:-2],pvalues_nosig)
plt.semilogy()
f.text(0.5,-0.01,r'$m_{JJ} \, / \, \mathrm{GeV}$',ha='center',fontsize=fontsize)
plt.ylabel(r'$p_0$',fontsize=fontsize)
plt.title(r'No signal')
#plt.title(r'With signal')
# plt.xlabel(r'$m_{JJ}$')
#plt.ylim([0.5*10**(-13),1])
#plt.tight_layout()
#plt.savefig('pvalue_plots.pdf', bbox_inches='tight')
plt.show()
plt.plot(bincenters[:-2],pvalues_sig)
plt.semilogy()
f.text(0.5,-0.01,r'$m_{JJ} \, / \, \mathrm{GeV}$',ha='center',fontsize=fontsize)
plt.ylabel(r'$p_0$',fontsize=fontsize)
#plt.title(r'No signal')
plt.title(r'With signal')
plt.axvline(x=3823,color='b')
# plt.xlabel(r'$m_{JJ}$')
#plt.ylim([0.5*10**(-13),1])
#plt.tight_layout()
#plt.savefig('pvalue_plots.pdf', bbox_inches='tight')
plt.show()
fontsize=22
smfontsize=16
#binvals = [#1900.,
# 2001.,2107.,2219.,2337.,2461.,2592.,2730.,2875.,3028.,3189.,3358.,3536.,3724.,3922.,4131.,4350.]
#bincenters = [0.5*(binvals[i] + binvals[i+1]) for i in range(len(binvals)-1)]
bincenters = bincenters[3:18]
masks = [[bin_i-1,bin_i, bin_i+1] for bin_i in range(3,18)]
plt.close('all')
f, axarr = plt.subplots(1,2, sharex=True, sharey=True,figsize=(5*1.4,5))
# plt.figure(figsize=(5,5))
linestyles = [{'dashes':[5,5]},{'linestyle':'-'},{'linestyle':'-'},{'linestyle':'-'},{'linestyle':'-'}]
choices = [0,1,4,6]
colors = ['black']
colors.extend([plt.cm.magma(i) for i in np.linspace(0.1,0.8,len(choices)-1)])
# colors = ['black']
# colors.extend([plt.cm.viridis(i) for i in np.linspace(0.1,0.95,len(choices)-1)])
effs = [r"100\%",r"10\%",r"1\%",r"0.2\%"]
dashes = [5,5]
color='0.5'
linewidth=1.2
for ax in axarr:
for sigma in range(1,8):
ax.axhline(1-norm.cdf(sigma),dashes=dashes,color=color,linewidth=linewidth)
if sigma > 2:
sigmastring = r'$' + str(sigma) + r'\sigma$'
axarr[0].text(2500,(1-norm.cdf(sigma))*1.1,sigmastring,va='bottom',ha='center',fontsize=smfontsize)
for eff_i, effchoice in enumerate(choices):
pvalues_nosig = [get_p_value(ydata[1:],binvals,mask=masks[i],verbose=0,plotfile=None)
for i, ydata in enumerate(datasets_nosig)]
pvalues_sig = [get_p_value(ydata[1:],binvals,mask=masks[i],verbose=0,plotfile=None)
for i, ydata in enumerate(datasets_sig)]
# plt.subplot(1,2,1)
axarr[0].plot(bincenters,pvalues_nosig,
color=colors[eff_i],
**linestyles[eff_i])
#plt.subplot(1,2,2)
axarr[1].plot(bincenters,pvalues_sig,
color=colors[eff_i],
**linestyles[eff_i])
if eff_i > 0:
axarr[1].annotate(effs[eff_i],fontsize=smfontsize,
xy=(bincenters[4],pvalues_sig[4]), xycoords='data',
xytext=(bincenters[4]+200, pvalues_sig[4]/10), textcoords='data',
arrowprops=dict(arrowstyle="-|>", #linestyle="dashed",
color="0.0",
#patchB=el,
shrinkB=5,
connectionstyle="arc3,rad=0.3",
),
)
# axarr[1].annotate("Test",
# xy=(3000, 10**-7), xycoords='data',
# xytext=(3200, 10**-8), textcoords='data',
# arrowprops=dict(arrowstyle="->", #linestyle="dashed",
# color="0.0",
# #patchB=el,
# shrinkB=5,
# connectionstyle="arc3,rad=0.3",
# ),
# )
f.subplots_adjust(wspace=0)
#plt.setp([a.get_yticklabels() for a in f.axes[1:]], visible=False)
plt.semilogy()
f.text(0.5,-0.01,r'$m_{JJ} \, / \, \mathrm{GeV}$',ha='center',fontsize=fontsize)
axarr[0].set_ylabel(r'$p_0$',fontsize=fontsize)
axarr[0].set_title(r'No signal')
axarr[1].set_title(r'With signal')
# plt.xlabel(r'$m_{JJ}$')
plt.ylim([0.5*10**(-13),1])
#plt.tight_layout()
#plt.savefig('pvalue_plots.pdf', bbox_inches='tight')
plt.show()
fontsize=22
smfontsize=16
#binvals = [#1900.,
# 2001.,2107.,2219.,2337.,2461.,2592.,2730.,2875.,3028.,3189.,3358.,3536.,3724.,3922.,4131.,4350.]
#bincenters = [0.5*(binvals[i] + binvals[i+1]) for i in range(len(binvals)-1)]
bincenters = bincenters[3:18]
masks = [[bin_i-1,bin_i, bin_i+1] for bin_i in range(3,18)]
plt.close('all')
f, axarr = plt.subplots(1,2, sharex=True, sharey=True,figsize=(5*1.4,5))
# plt.figure(figsize=(5,5))
linestyles = [{'dashes':[5,5]},{'linestyle':'-'},{'linestyle':'-'},{'linestyle':'-'},{'linestyle':'-'}]
choices = [0,1,4,6]
colors = ['black']
colors.extend([plt.cm.magma(i) for i in np.linspace(0.1,0.8,len(choices)-1)])
# colors = ['black']
# colors.extend([plt.cm.viridis(i) for i in np.linspace(0.1,0.95,len(choices)-1)])
effs = [r"100\%",r"10\%",r"1\%",r"0.2\%"]
dashes = [5,5]
color='0.5'
linewidth=1.2
for ax in axarr:
for sigma in range(1,8):
ax.axhline(1-norm.cdf(sigma),dashes=dashes,color=color,linewidth=linewidth)
if sigma > 2:
sigmastring = r'$' + str(sigma) + r'\sigma$'
axarr[0].text(2500,(1-norm.cdf(sigma))*1.1,sigmastring,va='bottom',ha='center',fontsize=smfontsize)
for eff_i, effchoice in enumerate(choices):
pvalues_nosig = [get_p_value(ydata[1:],binvals,mask=masks[i],verbose=0,plotfile=None)
for i, ydata in enumerate(datasets_nosig[:,effchoice])]
pvalues_sig = [get_p_value(ydata[1:],binvals,mask=masks[i],verbose=0,plotfile=None)
for i, ydata in enumerate(datasets_sig[:,effchoice])]
# plt.subplot(1,2,1)
axarr[0].plot(bincenters,pvalues_nosig,
color=colors[eff_i],
**linestyles[eff_i])
#plt.subplot(1,2,2)
axarr[1].plot(bincenters,pvalues_sig,
color=colors[eff_i],
**linestyles[eff_i])
if eff_i > 0:
axarr[1].annotate(effs[eff_i],fontsize=smfontsize,
xy=(bincenters[4],pvalues_sig[4]), xycoords='data',
xytext=(bincenters[4]+200, pvalues_sig[4]/10), textcoords='data',
arrowprops=dict(arrowstyle="-|>", #linestyle="dashed",
color="0.0",
#patchB=el,
shrinkB=5,
connectionstyle="arc3,rad=0.3",
),
)
# axarr[1].annotate("Test",
# xy=(3000, 10**-7), xycoords='data',
# xytext=(3200, 10**-8), textcoords='data',
# arrowprops=dict(arrowstyle="->", #linestyle="dashed",
# color="0.0",
# #patchB=el,
# shrinkB=5,
# connectionstyle="arc3,rad=0.3",
# ),
# )
f.subplots_adjust(wspace=0)
#plt.setp([a.get_yticklabels() for a in f.axes[1:]], visible=False)
plt.semilogy()
f.text(0.5,-0.01,r'$m_{JJ} \, / \, \mathrm{GeV}$',ha='center',fontsize=fontsize)
axarr[0].set_ylabel(r'$p_0$',fontsize=fontsize)
axarr[0].set_title(r'No signal')
axarr[1].set_title(r'With signal')
# plt.xlabel(r'$m_{JJ}$')
plt.ylim([0.5*10**(-13),1])
#plt.tight_layout()
#plt.savefig('pvalue_plots.pdf', bbox_inches='tight')
plt.show()
###Output
_____no_output_____
###Markdown
Injected signal
###Code
sighist=np.array([ 7, 22, 31, 54, 75, 106, 192, 217, 113, 40, 8, 5, 4, 1, 0])
mjjmin=2001
mjjmax=4350
mybinboundaries = np.round(np.logspace(np.log10(mjjmin), np.log10(mjjmax), num=16))
mybincenters = np.array([0.5*(mybinboundaries[i+1] + mybinboundaries[i]) for i in range(0,len(mybinboundaries)-1)])
mybinwidths = np.array([mybinboundaries[i+1] - mybinboundaries[i] for i in range(0,len(mybinboundaries)-1)])
plt.fill_between(mybinboundaries[:-1],sighist*100/mybinwidths,step='post',color='0.8')
###Output
_____no_output_____
###Markdown
Figure 8, left
###Code
plt.figure(figsize=(5*1.4,5))
for line_i in [4,6,9,11]:
plt.axvline(binvals[line_i],color='0.4',dashes=[5,3])
plt.fill_between(mybinboundaries[:-1],sighist*100/mybinwidths,step='post',
color='palegoldenrod',alpha=1.)
signallabel_y=0.15*10**6
plt.annotate("",xy=(binvals[6],signallabel_y), xytext = (binvals[9],signallabel_y),
arrowprops=dict(arrowstyle='<->'))
plt.text(0.5*(binvals[6] + binvals[9]),signallabel_y,r"Signal"'\n'r"region",va='center',ha='center',fontsize=smfontsize,
bbox=dict(facecolor='white',edgecolor='none', alpha=1.0))
sidebandlabel_y=1.0*10**-1
plt.annotate("",xy=(binvals[4],sidebandlabel_y), xytext = (binvals[6],sidebandlabel_y),
arrowprops=dict(arrowstyle='<->'))
plt.annotate("",xy=(binvals[9],sidebandlabel_y), xytext = (binvals[11],sidebandlabel_y),
arrowprops=dict(arrowstyle='<->'))
plt.annotate(r"Sideband", xytext=(2000,sidebandlabel_y*4/3), xy = (0.5*(binvals[9] + binvals[11]),sidebandlabel_y),
fontsize = smfontsize,
color='white',
arrowprops=dict(arrowstyle="-|>", #linestyle="dashed",
color="0.0",
#patchB=el,
shrinkB=5,
connectionstyle="arc3,rad=-0.2",
),)
plt.annotate(r"Sideband", xytext=(2000,sidebandlabel_y*4/3), xy = (0.5*(binvals[4] + binvals[6]),sidebandlabel_y),
fontsize = smfontsize,
arrowprops=dict(arrowstyle="-|>", #linestyle="dashed",
color="0.0",
#patchB=el,
shrinkB=5,
connectionstyle="arc3,rad=-0.3",
),)
chosen_set = [0,1,4,6,9]
plt.ylabel(r'Events / 100 GeV',fontsize=fontsize)
plt.xlabel(r'$m_{JJ} \, / \, \mathrm{GeV}$',fontsize=fontsize)
plt.ylim([2*10**-2,10**6])
for i in chosen_set:
add_mjjplot(datasets_sig[4,i,1:],binvals,mask=[6,7,8],verbose=0,plotfile='tmp.png')
plt.savefig('/mnt/c/Users/Jack/Physics/jj_for_NN/mJJplots_' + str(i) + '.png', bbox_inches='tight')
#plt.xlim([2001,4350])
plt.show()
###Output
_____no_output_____
###Markdown
Figure 8
###Code
import matplotlib.gridspec as gridspec
fontsize=20
smfontsize=16
binvals = [#1900.,
2001.,2107.,2219.,2337.,2461.,2592.,2730.,2875.,3028.,3189.,3358.,3536.,3724.,3922.,4131.,4350.]
bincenters = [0.5*(binvals[i] + binvals[i+1]) for i in range(len(binvals)-1)]
bincenters = bincenters[3:12]
masks = [[bin_i-1,bin_i, bin_i+1] for bin_i in range(3,12)]
plt.close('all')
fig = plt.figure(figsize=(16,5))
outer = gridspec.GridSpec(1,2,wspace=0.3,width_ratios=[5,5])
plt_i = 0
inner = gridspec.GridSpecFromSubplotSpec(1, 1,
subplot_spec=outer[0])
ax = plt.Subplot(fig, inner[0])
ax.fill_between(mybinboundaries[:-1],sighist*100/mybinwidths,step='post',
color='palegoldenrod',alpha=1.)
for line_i in [4,6,9,11]:
ax.axvline(binvals[line_i],color='0.4',dashes=[5,3])
signallabel_y=0.15*10**6
ax.annotate("",xy=(binvals[6],signallabel_y), xytext = (binvals[9],signallabel_y),
arrowprops=dict(arrowstyle='<->'))
ax.text(0.5*(binvals[6] + binvals[9]),signallabel_y,r"Signal"'\n'r"region",va='center',ha='center',fontsize=smfontsize,
bbox=dict(facecolor='white',edgecolor='none', alpha=1.0))
sidebandlabel_y=0.8*10**-1
ax.annotate("",xy=(binvals[4],sidebandlabel_y), xytext = (binvals[6],sidebandlabel_y),
arrowprops=dict(arrowstyle='<->'))
ax.annotate("",xy=(binvals[9],sidebandlabel_y), xytext = (binvals[11],sidebandlabel_y),
arrowprops=dict(arrowstyle='<->'))
ax.annotate(r"Sideband", xytext=(2040,sidebandlabel_y*4/3), xy = (0.5*(binvals[9] + binvals[11]),sidebandlabel_y),
fontsize = smfontsize,
color='white',
arrowprops=dict(arrowstyle="-|>", #linestyle="dashed",
color="0.0",
#patchB=el,
shrinkB=5,
connectionstyle="arc3,rad=-0.2",
),)
ax.annotate(r"Sideband", xytext=(2040,sidebandlabel_y*4/3), xy = (0.5*(binvals[4] + binvals[6]),sidebandlabel_y),
fontsize = smfontsize,
arrowprops=dict(arrowstyle="-|>", #linestyle="dashed",
color="0.0",
#patchB=el,
shrinkB=5,
connectionstyle="arc3,rad=-0.3",
),)
chosen_set = [0,1,4,6,9]
[get_p_value(datasets_sig[4,i,1:],binvals,mask=[6,7,8],verbose=0,plotfile='ax',myax=ax)
for i in chosen_set]
ax.set_ylabel(r'Events / 100 GeV',fontsize=fontsize)
ax.set_xlabel(r'$m_{JJ} \, / \, \mathrm{GeV}$',fontsize=fontsize)
ax.set_ylim([2*10**-2,10**6])
ax.set_xlim([2001,4350])
fig.add_subplot(ax)
inner = gridspec.GridSpecFromSubplotSpec(1, 2, subplot_spec=outer[1], wspace=0)
axarr = [plt.Subplot(fig, inner[0]),plt.Subplot(fig, inner[1])]
dashes = [5,5]
color='0.5'
linewidth=1.2
linestyles = [{'dashes':[5,5]},{'linestyle':'-'},{'linestyle':'-'},{'linestyle':'-'},{'linestyle':'-'}]
choices = [0,1,4,6]
colors = ['black']
colors.extend([plt.cm.magma(i) for i in np.linspace(0.1,0.8,len(choices)-1)])
# colors = ['black']
# colors.extend([plt.cm.viridis(i) for i in np.linspace(0.1,0.95,len(choices)-1)])
effs = [r"100\%",r"10\%",r"1\%",r"0.2\%"]
for ax in axarr:
ax.set_yscale('log')
ax.set_ylim([2.*10**-14,1])
for sigma in range(1,8):
ax.axhline(1-norm.cdf(sigma),dashes=dashes,color=color,linewidth=linewidth)
if sigma > 2:
sigmastring = r'$' + str(sigma) + r'\sigma$'
axarr[0].text(2500,(1-norm.cdf(sigma))*1.1,sigmastring,va='bottom',ha='center',fontsize=smfontsize)
for eff_i, effchoice in enumerate(choices):
pvalues_nosig = [get_p_value(ydata[1:],binvals,mask=masks[i],verbose=0,plotfile=None)
for i, ydata in enumerate(datasets_nosig[:,effchoice])]
pvalues_sig = [get_p_value(ydata[1:],binvals,mask=masks[i],verbose=0,plotfile=None)
for i, ydata in enumerate(datasets_sig[:,effchoice])]
axarr[0].plot(bincenters,pvalues_nosig,
color=colors[eff_i],
**linestyles[eff_i])
axarr[1].plot(bincenters,pvalues_sig,
color=colors[eff_i],
**linestyles[eff_i])
if eff_i > 0:
axarr[1].annotate(effs[eff_i],fontsize=smfontsize,
xy=(bincenters[4],pvalues_sig[4]), xycoords='data',
xytext=(bincenters[4]+200, pvalues_sig[4]/10), textcoords='data',
arrowprops=dict(arrowstyle="-|>", #linestyle="dashed",
color="0.0",
#patchB=el,
shrinkB=5,
connectionstyle="arc3,rad=0.3",
),
)
# axarr[1].annotate("Test",
# xy=(3000, 10**-7), xycoords='data',
# xytext=(3200, 10**-8), textcoords='data',
# arrowprops=dict(arrowstyle="->", #linestyle="dashed",
# color="0.0",
# #patchB=el,
# shrinkB=5,
# connectionstyle="arc3,rad=0.3",
# ),
# )
# f.subplots_adjust(wspace=0)
# plt.setp([a.get_yticklabels() for a in f.axes[1:]], visible=False)
# plt.semilogy()
#f.text(0.5,-0.01,r'$m_{JJ} \, / \, \mathrm{GeV}$',ha='center',fontsize=fontsize)
#axarr.text(0.5,-0.01,r'$m_{JJ} \, / \, \mathrm{GeV}$',ha='center',fontsize=fontsize)
axarr[0].set_ylabel(r'$p_0$',fontsize=fontsize)
axarr[0].set_title(r'No signal',fontsize=fontsize)
axarr[1].set_title(r'With signal',fontsize=fontsize)
axarr[0].text(3700,2.75*10**-15,r'$m_{JJ} \, / \, \mathrm{GeV} $',va='top',ha='center',fontsize=fontsize)
axarr[1].set_yticklabels([])
fig.add_subplot(axarr[0])
fig.add_subplot(axarr[1])
#plt.savefig('pvalplots.pdf', bbox_inches='tight')
plt.show()
###Output
_____no_output_____
###Markdown
Figure 14
###Code
spacing=0.05
f, axarr = plt.subplots(3,3,figsize=(5*1.4*2.5,5*2.5),sharex=True,sharey=True)
for bin_i in range(3,12):
# plt.close('all')
row = int((bin_i-3)/3)
col = (bin_i-3)%3
ax = axarr[row,col]
ax.fill_between(mybinboundaries[:-1],sighist*100/mybinwidths,step='post',
color='palegoldenrod',alpha=1.)
if row == 2:
ax.set_xlabel(r'$m_{JJ} \, / \, \mathrm{GeV}$',fontsize=fontsize)
if col == 0:
ax.set_ylabel(r'Events / 100 GeV',fontsize=fontsize)
for line_i in [bin_i-3,bin_i-1,bin_i+2,bin_i+4]:
ax.axvline(binvals[line_i],color='0.4',dashes=[5,3])
chosen_set = [0,1,4,6,9]
def plotfit(i):
if i == 9:
return False
else:
return True
[add_mjjplot(datasets_sig[bin_i-3,i,1:],binvals,mask=[bin_i-1,bin_i,bin_i+1],verbose=0,plotfile='ax',myax=ax,
plotsys = plotfit(i))
for i in chosen_set]
ax.set_ylim([2*10**-1,2*10**5])
f.subplots_adjust(hspace=spacing)
f.subplots_adjust(wspace=spacing)
for axrow in axarr:
for ax in axrow:
ax.label_outer()
#plt.savefig('mJJarr_sig.pdf', bbox_inches='tight')
plt.show()
###Output
_____no_output_____
###Markdown
Figure 13
###Code
spacing=0.05
f, axarr = plt.subplots(3,3,figsize=(5*1.4*2.5,5*2.5),sharex=True,sharey=True)
for bin_i in range(3,12):
# plt.close('all')
row = int((bin_i-3)/3)
col = (bin_i-3)%3
ax = axarr[row,col]
if row == 2:
ax.set_xlabel(r'$m_{JJ} \, / \, \mathrm{GeV}$',fontsize=fontsize)
if col == 0:
ax.set_ylabel(r'Events / 100 GeV',fontsize=fontsize)
for line_i in [bin_i-3,bin_i-1,bin_i+2,bin_i+4]:
ax.axvline(binvals[line_i],color='0.4',dashes=[5,3])
chosen_set = [0,1,4,6,9]
def plotfit(i):
if i == 9:
return False
else:
return True
[add_mjjplot(datasets_nosig[bin_i-3,i,1:],binvals,mask=[bin_i-1,bin_i,bin_i+1],verbose=0,plotfile='ax',myax=ax,
plotsys = plotfit(i))
for i in chosen_set]
ax.set_ylim([2*10**-1,2*10**5])
f.subplots_adjust(hspace=spacing)
f.subplots_adjust(wspace=spacing)
for axrow in axarr:
for ax in axrow:
ax.label_outer()
#plt.savefig('mJJarr_nosig.pdf', bbox_inches='tight')
plt.show()
###Output
_____no_output_____ |
experiments/paper_eval.ipynb | ###Markdown
EvaluationThis notebook reproduces the evaluation results from the paper.Note: As of version 2.1b6.dev234, the Essentia library has a [bug](https://github.com/MTG/essentia/issues/1054) that causes an infinite loop for some inputs.To avoid this, you have to build our patched version of Essentia: https://github.com/cifkao/essentia/tree/patchedCopyright 2020 InterDigital R&D and Télécom Paris. Author: Ondřej Cífka Obtaining the outputsBefore running the evaluation, we need to obtain the outputs of all the systems on both of our test sets and place them in the `outputs/synth` and `outputs/real` directories (for the artificial and real inputs, respectively). The commands are different for each system: VQ-VAE```shpython -m ss_vq_vae.models.vqvae_oneshot --logdir=model run ../data/lmd/audio_test/pairs \ outputs/synth/vqvae_list outputs/synth/vqvaepython -m ss_vq_vae.models.vqvae_oneshot --logdir=model run ../data/mixing_secrests/test/pairs \ outputs/real/vqvae_list outputs/real/vqvae```The first command runs the model on all audio file pairs listed in the `../data/lmd/audio_test/pairs` file, writes the output files to the `outputs/synth/vqvae` directory and their paths to the file `outputs/synth/vqvae_list`. The second command does the same for the other test set. U+L (Ulyanov and Lebedev)```shpython -m ss_vq_vae.models.ulyanov --style-weight-log=-2.1 ../data/lmd/audio_test/pairs \ outputs/synth/ulyanov_swopt_list outputs/synth/ulyanovpython -m ss_vq_vae.models.ulyanov --style-weight-log=-2.1 ../data/mixing_secrets/test/pairs \ outputs/real/ulyanov_swopt_list outputs/real/ulyanov``` Musaicing (Driedger et al.)Clone Chris Tralie's [LetItBee repo](https://github.com/ctralie/LetItBee) and run the `Musaicing.py` script on each pair of audio files according to the instructions. Specify the content file using the `--target` option and the style file using the `--source` option, e.g.:```shpython LetItBee/Musaicing.py --sr 16000 \ --source ../data/lmd/audio_test/wav_16kHz/voices1_pitch1/00484d071147e49551de9ffb141e8b9e.style.wav \ --target ../data/lmd/audio_test/wav_16kHz/voices1_pitch1/00484d071147e49551de9ffb141e8b9e.content.wav \ --result outputs/synth/driedger/00000.wav```You might want to run these commands in parallel as they are time-consuming. Remember to write the list of output files to the `outputs/{synth,real}/driedger_list` file in the correct order, so that the evaluation code can pick them up.
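For convenience, the sketch below shows one possible way to generate these musaicing commands together with the ordered `driedger_list` file; the directory layout mirrors the examples above, but the helper itself (including the `driedger_cmds.sh` script name) is an illustrative assumption rather than part of the original pipeline.
```python
# Sketch only: emit one Musaicing.py command per (content, style) pair and record the
# corresponding output paths, in the same order, in the *_list file read by this notebook.
# The pairs file is tab-separated and its paths are relative to its own directory
# (matching read_paths() below); list entries are written relative to the list file's
# directory for the same reason. The script/output names here are assumptions.
import csv
import os

pairs_file = '../data/lmd/audio_test/pairs'
out_dir = 'outputs/synth/driedger'
list_file = 'outputs/synth/driedger_list'
pairs_dir = os.path.dirname(pairs_file)
os.makedirs(out_dir, exist_ok=True)

with open(pairs_file) as f_pairs, open(list_file, 'w') as f_list, \
        open('driedger_cmds.sh', 'w') as f_cmd:
    for i, row in enumerate(csv.reader(f_pairs, delimiter='\t')):
        content = os.path.join(pairs_dir, row[0])
        style = os.path.join(pairs_dir, row[1])
        result = os.path.join(out_dir, '%05d.wav' % i)
        f_cmd.write('python LetItBee/Musaicing.py --sr 16000 --source %s --target %s --result %s\n'
                    % (style, content, result))
        f_list.write(os.path.relpath(result, os.path.dirname(list_file)) + '\n')
```
Running the generated `driedger_cmds.sh` (serially or split across several workers) then produces the outputs in exactly the order this notebook expects.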
###Code
import os
import pickle
import essentia.standard as estd
import librosa
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import pandas as pd
import pretty_midi
import re
import seaborn as sns
from tqdm.auto import tqdm
from ss_vq_vae.models import triplet_network
SR = 16000
MFCC_KWARGS = dict(
n_mfcc=13,
hop_length=500
)
triplet_model, triplet_backbone = triplet_network.build_model(num_features=12)
triplet_model.load_weights('timbre_metric/checkpoint.ckpt')
def read_paths(tsv_path, column_names):
parent_dir = os.path.dirname(tsv_path)
df = pd.read_csv(tsv_path, sep='\t', names=column_names)
df = df.applymap(lambda x: os.path.join(parent_dir, x))
return df
def normalize_power(audio):
return audio / (np.sqrt(np.mean(audio ** 2)) + np.finfo(audio.dtype).eps)
def get_pitches(audio):
input_sr, sr = SR, 8000 # Need to resample because of EqualLoudness
audio = estd.Resample(inputSampleRate=input_sr, outputSampleRate=sr)(audio)
audio = estd.EqualLoudness(sampleRate=sr)(audio)
rng = np.random.default_rng(seed=(audio > 0).sum())
audio = rng.normal(loc=audio, scale=1e-4).astype(audio.dtype) # To prevent Melodia from crashing
pitches = estd.MultiPitchMelodia(sampleRate=sr)(audio)
pitches = [[pretty_midi.utilities.hz_to_note_number(p) for p in pl if not np.isclose(0, p)]
for pl in pitches]
pitches = [[int(p + 0.5) for p in pl] for pl in pitches]
return pitches
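# get_pitches returns, for every analysis frame, the list of active pitches as rounded
# MIDI note numbers (an empty list where the multi-pitch estimator reports no pitch).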
def eval_example_target(output, reference):
def spec(audio):
audio = normalize_power(audio)
s = librosa.feature.melspectrogram(audio, sr=SR)
return librosa.power_to_db(s)
s_out, s_ref = spec(output), spec(reference)
lsd = np.mean(np.sqrt(np.mean((s_out - s_ref) ** 2, axis=1)))
return {'lsd': lsd}
def eval_example_style(output, reference):
mfcc_out = librosa.feature.mfcc(output, sr=SR, **MFCC_KWARGS)[1:]
mfcc_ref = librosa.feature.mfcc(reference, sr=SR, **MFCC_KWARGS)[1:]
mfcc_triplet_cos, _ = 1 - triplet_model.predict([
(mfcc_ref.T[None, :, :], mfcc_out.T[None, :, :], mfcc_out.T[None, :, :])]).reshape(2)
return {'mfcc_triplet_cos': mfcc_triplet_cos}
def eval_example_content(output, reference):
pitches_output, pitches_reference = get_pitches(output), get_pitches(reference)
assert len(pitches_output) == len(pitches_reference)
jaccard = []
for pl_output, pl_reference in zip(pitches_output, pitches_reference):
matches = len(set(pl_output) & set(pl_reference))
total = len(set(pl_output) | set(pl_reference))
if total == 0:
jaccard.append(0)
else:
jaccard.append(1 - matches / total)
jaccard = np.mean(jaccard)
return {'pitch_jaccard': jaccard}
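# Worked example with illustrative values: if one output frame contains MIDI pitches
# {60, 64} and the corresponding reference frame contains {60, 67}, then matches = 1
# and total = 3, so the per-frame distance is 1 - 1/3 = 2/3; 'pitch_jaccard' is the
# mean of this quantity over all frames.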
def pad_or_truncate(audio, reference):
if len(audio) < len(reference):
return np.pad(audio, (0, max(0, len(reference) - len(audio))))
return audio[:len(reference)]
def eval_row_synth(row):
audio = row.apply(lambda path: librosa.load(path, sr=SR)[0])
audio = audio.apply(pad_or_truncate, reference=audio['target'])
return pd.DataFrame({
key: {
**eval_example_target(audio[key], audio['target']),
**eval_example_style(audio[key], audio['target']),
**eval_example_content(audio[key], audio['target'])
}
for key in row.keys() if key != 'target'
}).stack()
def eval_row_real(row):
audio = row.apply(lambda path: librosa.load(path, sr=SR)[0])
audio_ref = audio[['content', 'style']]
audio = audio.apply(pad_or_truncate, reference=audio_ref['content'])
return pd.DataFrame({
key: {
**eval_example_style(audio[key], audio_ref['style']),
**eval_example_content(audio[key], audio_ref['content'])
}
for key in row.keys()
}).stack()
paths_synth_df = pd.concat([
read_paths('../data/lmd/audio_test/triplets',
['content', 'style', 'target']),
read_paths('outputs/synth/vq-vae_list',
['vq-vae']),
read_paths('outputs/synth/driedger_list',
['driedger']),
read_paths('outputs/synth/ulyanov_list',
['ulyanov']),
], axis=1)
paths_real_df = pd.concat([
read_paths('../data/mixing_secrets/test/pairs',
['content', 'style']),
read_paths('outputs/real/vq-vae_list',
['vq-vae']),
read_paths('outputs/real/driedger_list',
['driedger']),
read_paths('outputs/real/ulyanov_list',
['ulyanov'])
], axis=1)
with tqdm(total=len(paths_synth_df)) as pbar:
pbar.update(-1)
def fn(x):
y = eval_row_synth(x)
pbar.update(1)
return y
results_synth = paths_synth_df.apply(fn, axis=1)
with tqdm(total=len(paths_real_df)) as pbar:
pbar.update(-1)
def fn(x):
y = eval_row_real(x)
pbar.update(1)
return y
results_real = paths_real_df.apply(fn, axis=1)
results_synth.to_pickle('results_synth.pickle')
results_real.to_pickle('results_real.pickle')
results_synth = pd.read_pickle('results_synth.pickle')
results_real = pd.read_pickle('results_real.pickle')
results_all = pd.concat([results_synth, results_real], axis=1, keys=['synth', 'real'])
pd.DataFrame(results_all.mean()).unstack(level=0).unstack(level=0).droplevel(axis=1, level=0).drop(('real', 'lsd'), axis=1)
latex = (pd.DataFrame(results_all.mean())
.unstack(level=0).unstack(level=0)
.droplevel(axis=1, level=0)
.drop(('real', 'lsd'), axis=1)
.loc[['content', 'style', 'ulyanov', 'driedger', 'vq-vae']]
.to_latex(formatters=[x.format for x in ['{:0.2f}', '{:0.4f}', '{:0.4f}', '{:0.4f}', '{:0.4f}']]))
latex = re.sub(r' +', ' ', latex)
print(latex)
###Output
\begin{tabular}{lrrrrr}
\toprule
{} & \multicolumn{3}{l}{synth} & \multicolumn{2}{l}{real} \\
{} & lsd & mfcc\_triplet\_cos & pitch\_jaccard & mfcc\_triplet\_cos & pitch\_jaccard \\
\midrule
content & 14.62 & 0.3713 & 0.5365 & 0.4957 & 0.0000 \\
style & 20.36 & 0.2681 & 0.8729 & 0.0000 & 0.9099 \\
ulyanov & 14.50 & 0.3483 & 0.5441 & 0.4792 & 0.1315 \\
driedger & 14.51 & 0.2933 & 0.6445 & 0.2319 & 0.6297 \\
vq-vae & 12.16 & 0.2063 & 0.5500 & 0.2278 & 0.6197 \\
\bottomrule
\end{tabular}
|
src/00_appToCategory.ipynb | ###Markdown
Data preparation
###Code
import numpy as np
import pandas as pd
from sklearn.impute import SimpleImputer
import re
import gc
from tqdm import tqdm
age_test_data = pd.read_csv(r'../data/age_test.csv',header=None,names=['uId'])
age_train_data = pd.read_csv( r'../data/age_train.csv',header=None,names=['uId','age_group'])
age = age_train_data['age_group']
age_train_data.drop(columns=['age_group'],inplace=True)
print(age_train_data.shape) #(2010000, 1)
print(age_test_data.shape)
user_app_actived_data = pd.read_csv(r'../data/user_app_actived.csv',header=None,names=['uId','appId'])
print(user_app_actived_data.head());print(user_app_actived_data.shape)
user_numAppList = {} # {uId:[num,appList]}
for user in tqdm(user_app_actived_data.values):
appList = re.split(r'\#',user[1])
if appList[0] == '\\N':
user_numAppList[user[0]] = []
l = 0
else:
user_numAppList[user[0]] = appList
l = len(appList)
user_numAppList[user[0]].insert(0,l)
# print(user_numAppList[1002181])
# print(user_numAppList[1002179])
del user_app_actived_data
gc.collect()
app_info_data = pd.read_csv(r'../data/app_info.csv',header=None,names=['appId','category'])
data = app_info_data['category'].value_counts(dropna=False)
print(data.index)
print(data.values)
app_category = {} #{appId:[category]}
for aid in app_info_data.values:
app_category.setdefault(aid[0],[]).append(aid[1])
print('Total number of apps: ',len(app_category))
category_num = {}
def initDict(categorylist):
for cate in categorylist:
category_num[cate] = 0
return category_num
categorylist = data.index
category_num = initDict(categorylist)
print(category_num.values());print(categorylist)
cate = list(np.zeros(40))
i = 1
batch_size = 10000 # flush accumulated rows to disk every 10000 users
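# For every user, count how many of their activated apps fall into each app-store
# category; the per-user counts (ordered as in data.index) are accumulated in `cate`
# and appended to category_num.csv in batches of `batch_size`, with the column header
# written only on the first flush.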
for numAppList in tqdm(user_numAppList.values()):
# print(i)
i += 1
categorylist = data.index
category_num = initDict(categorylist)
if len(numAppList)==1:
cate = np.vstack((cate,list(category_num.values())))
else:
appList = numAppList[1:-1]
for appId in appList:
if appId in app_category:
cateList = app_category[appId]
for category in cateList:
category_num[category] += 1
cate = np.vstack((cate,list(category_num.values())))
if i%batch_size==0:
if i==batch_size:
cate = pd.DataFrame(cate, columns=['实用工具', '便捷生活', '教育', '金融理财', '购物比价', '社交通讯', '影音娱乐', '新闻阅读', '休闲益智',
'商务', '运动健康', '出行导航', '经营策略', '动作射击', '儿童', '角色扮演', '拍摄美化', '棋牌桌游',
'旅游住宿', '汽车', '主题个性', '美食', '体育竞速', '网络游戏', '休闲游戏', '休闲娱乐', '动作冒险',
'学习办公', '益智棋牌', '表盘个性', '电子书籍', '模拟游戏', '策略游戏', '棋牌天地', '体育射击',
'图书阅读','主题铃声', '角色游戏', '合作壁纸*', '医疗健康'])
else:
cate = pd.DataFrame(cate)
cate.drop(0,inplace=True)
if i==batch_size:
cate.to_csv(r'../processed/category_num.csv', index=False, header=True, encoding='utf_8_sig',mode='a')
else:
cate.to_csv(r'../processed/category_num.csv', index=False, header=False, encoding='utf_8_sig', mode='a')
cate = list(np.zeros(40))
cate = pd.DataFrame(cate)
cate.drop(0,inplace=True)
# print(cate);print(cate.head())
cate.to_csv(r'../processed/category_num.csv', index=False, header=False, encoding='utf_8_sig',mode='a')
del user_numAppList
gc.collect()
###Output
_____no_output_____
###Markdown
Feature generation
###Code
import numpy as np
import pandas as pd
from sklearn import preprocessing
category_num = pd.read_csv(r'../processed/category_num.csv')
print(category_num.shape)
# print(category_num.head())
col = pd.DataFrame(category_num.sum())
col.shape
category_num = pd.DataFrame(category_num.values[:,col[0]>1000])
category_num.shape
user_app_actived = pd.read_csv(r'../data/user_app_actived.csv',header=None)
print(user_app_actived.shape)
print(user_app_actived.head())
category_num = preprocessing.StandardScaler().fit_transform(category_num.values)
uId = user_app_actived[0].values
uId = uId.reshape(len(uId),1)
category_num.shape
category_num = np.column_stack((uId,category_num))
category_num = pd.DataFrame(category_num)
print(category_num.shape)
print(category_num.head())
age_train = pd.read_csv(r'../data/age_train.csv',header=None,usecols=[0])
print(age_train.shape)
age_test = pd.read_csv(r'../data/age_test.csv',header=None)
print(age_test.shape)
age_train = pd.merge(age_train,category_num,how='inner',on=0)
# print(age_train.head(10))
age_test = pd.merge(age_test,category_num,how='inner',on=0)
# print(age_test.head(10))
age_train.drop(labels=[0],axis=1,inplace=True)
age_test.drop(labels=[0],axis=1,inplace=True)
from scipy import sparse
age_train = sparse.csr_matrix(age_train,dtype=np.float32)
age_test = sparse.csr_matrix(age_test,dtype=np.float32)
print(age_train.shape)
print(age_test.shape)
sparse.save_npz(r'../trainTestData/trainData30.npz',age_train)
sparse.save_npz(r'../trainTestData/testData30.npz',age_test)
category_num.to_csv(r'../processed/category_num.csv',header=None,index=False)
###Output
_____no_output_____ |
bindings/python/tutorials/CNTK_202_Language_Understanding.ipynb | ###Markdown
Hands-On Lab: Language Understanding with Recurrent NetworksThis hands-on lab shows how to implement a recurrent network to process text,for the Air Travel Information Services (ATIS) tasks of slot tagging and intent classification.We will start with a straight-forward embedding followed by a recurrent LSTM.We will then extend it to include neighbor words and run bidirectionally.Lastly, we will turn this system into an intent classifier. The techniques you will practice include:* model description by composing layer blocks instead of writing formulas* creating your own layer block* variables with different sequence lengths in the same network* parallel trainingWe assume that you are familiar with basics of deep learning, and these specific concepts:* recurrent networks ([Wikipedia page](https://en.wikipedia.org/wiki/Recurrent_neural_network))* text embedding ([Wikipedia page](https://en.wikipedia.org/wiki/Word_embedding)) PrerequisitesWe assume that you have already [installed CNTK](https://www.cntk.ai/pythondocs/setup.html).This tutorial requires CNTK V2. We strongly recommend to run this tutorial on a machine with a capable CUDA-compatible GPU. Deep learning without GPUs is not fun.Finally you need to download the training and test set. The following piece of code does that for you. If you get an error, please follow the manual instructions below it.We also list the imports we will need for this tutorial
###Code
import os
import math
from cntk.blocks import * # non-layer like building blocks such as LSTM()
from cntk.layers import * # layer-like stuff such as Linear()
from cntk.models import * # higher abstraction level, e.g. entire standard models and also operators like Sequential()
from cntk.utils import *
from cntk.io import MinibatchSource, CTFDeserializer, StreamDef, StreamDefs, INFINITELY_REPEAT, FULL_DATA_SWEEP
from cntk import Trainer
from cntk.ops import cross_entropy_with_softmax, classification_error, splice
from cntk.learner import adam_sgd, learning_rate_schedule, momentum_schedule
from cntk.persist import load_model, save_model
from _cntk_py import set_fixed_random_seed
set_fixed_random_seed(1) # to become invariant to initialization order
try:
from tqdm import tqdm
except:
tqdm = lambda x: x
import requests
def download(data):
url = "https://github.com/Microsoft/CNTK/blob/master/Examples/Tutorials/SLUHandsOn/atis.%s.ctf?raw=true"
response = requests.get(url%data, stream=True)
with open("atis.%s.ctf"%data, "wb") as handle:
for data in tqdm(response.iter_content()):
handle.write(data)
for t in "train","test":
try:
f=open("atis.%s.ctf"%t)
f.close()
except:
download(t)
###Output
_____no_output_____
###Markdown
Fallback manual instructionsPlease download the ATIS [training](https://github.com/Microsoft/CNTK/blob/master/Tutorials/SLUHandsOn/atis.train.ctf) and [test](https://github.com/Microsoft/CNTK/blob/master/Tutorials/SLUHandsOn/atis.test.ctf) files and put them at the same folder as this notebook.
###Code
# load dictionaries
query_wl = [line.rstrip('\n') for line in open('query.wl')]
slots_wl = [line.rstrip('\n') for line in open('slots.wl')]
query_dict = {query_wl[i]:i for i in range(len(query_wl))}
slots_dict = {slots_wl[i]:i for i in range(len(slots_wl))}
###Output
_____no_output_____
###Markdown
Task and Model StructureThe task we want to approach in this tutorial is slot tagging.We use the [ATIS corpus](https://catalog.ldc.upenn.edu/LDC95S26).ATIS contains human-computer queries from the domain of Air Travel Information Services,and our task will be to annotate (tag) each word of a query whether it belongs to aspecific item of information (slot), and which one.The data in your working folder has already been converted into the "CNTK Text Format."Let's look at an example from the test-set file `atis.test.ctf`: 19 |S0 178:1 | BOS |S1 14:1 | flight |S2 128:1 | O 19 |S0 770:1 | show |S2 128:1 | O 19 |S0 429:1 | flights |S2 128:1 | O 19 |S0 444:1 | from |S2 128:1 | O 19 |S0 272:1 | burbank |S2 48:1 | B-fromloc.city_name 19 |S0 851:1 | to |S2 128:1 | O 19 |S0 789:1 | st. |S2 78:1 | B-toloc.city_name 19 |S0 564:1 | louis |S2 125:1 | I-toloc.city_name 19 |S0 654:1 | on |S2 128:1 | O 19 |S0 601:1 | monday |S2 26:1 | B-depart_date.day_name 19 |S0 179:1 | EOS |S2 128:1 | OThis file has 7 columns:* a sequence id (19). There are 11 entries with this sequence id. This means that sequence 19 consistsof 11 tokens;* column `S0`, which contains numeric word indices;* a comment column denoted by ``, to allow a human reader to know what the numeric word index stands for;Comment columns are ignored by the system. `BOS` and `EOS` are special wordsto denote beginning and end of sentence, respectively;* column `S1` is an intent label, which we will only use in the last part of the tutorial;* another comment column that shows the human-readable label of the numeric intent index;* column `S2` is the slot label, represented as a numeric index; and* another comment column that shows the human-readable label of the numeric label index.The task of the neural network is to look at the query (column `S0`) and predict theslot label (column `S2`).As you can see, each word in the input gets assigned either an empty label `O`or a slot label that begins with `B-` for the first word, and with `I-` for anyadditional consecutive word that belongs to the same slot.The model we will use is a recurrent model consisting of an embedding layer,a recurrent LSTM cell, and a dense layer to compute the posterior probabilities: slot label "O" "O" "O" "O" "B-fromloc.city_name" ^ ^ ^ ^ ^ | | | | | +-------+ +-------+ +-------+ +-------+ +-------+ | Dense | | Dense | | Dense | | Dense | | Dense | ... +-------+ +-------+ +-------+ +-------+ +-------+ ^ ^ ^ ^ ^ | | | | | +------+ +------+ +------+ +------+ +------+ 0 -->| LSTM |-->| LSTM |-->| LSTM |-->| LSTM |-->| LSTM |-->... +------+ +------+ +------+ +------+ +------+ ^ ^ ^ ^ ^ | | | | | +-------+ +-------+ +-------+ +-------+ +-------+ | Embed | | Embed | | Embed | | Embed | | Embed | ... +-------+ +-------+ +-------+ +-------+ +-------+ ^ ^ ^ ^ ^ | | | | | w ------>+--------->+--------->+--------->+--------->+------... BOS "show" "flights" "from" "burbank"Or, as a CNTK network description. Please have a quick look and match it with the description above:(descriptions of these functions can be found at: [the layers reference](http://cntk.ai/pythondocs/layerref.html)
###Code
vocab_size = 943 ; num_labels = 129 ; num_intents = 26 # number of words in vocab, slot labels, and intent labels
model_dir = "./Models"
data_dir = "."
# model dimensions
input_dim = vocab_size
label_dim = num_labels
emb_dim = 150
hidden_dim = 300
def create_model():
with default_options(initial_state=0.1):
return Sequential([
Embedding(emb_dim),
Recurrence(LSTM(hidden_dim), go_backwards=False),
Dense(num_labels)
])
# peek
model = create_model()
print(len(model.layers))
print(model.layers[0].E.shape)
print(model.layers[2].b.value)
###Output
_____no_output_____
###Markdown
CNTK ConfigurationTo train and test a model in CNTK, we need to create a model and specify how to read data and perform training and testing. In order to train we need to specify:* how to read the data * the model function and its inputs and outputs* hyper-parameters for the learner[comment]: (For testing ...) A Brief Look at Data and Data ReadingWe already looked at the data.But how do you generate this format?For reading text, this tutorial uses the `CNTKTextFormatReader`. It expects the input data to beof a specific format, which is described [here](https://github.com/Microsoft/CNTK/wiki/CNTKTextFormat-Reader).For this tutorial, we created the corpora by two steps:* convert the raw data into a plain text file that contains of TAB-separated columns of space-separated text. For example: ``` BOS show flights from burbank to st. louis on monday EOS (TAB) flight (TAB) O O O O B-fromloc.city_name O B-toloc.city_name I-toloc.city_name O B-depart_date.day_name O ``` This is meant to be compatible with the output of the `paste` command.* convert it to CNTK Text Format (CTF) with the following command: ``` python Scripts/txt2ctf.py --map query.wl intent.wl slots.wl --annotated True --input atis.test.txt --output atis.test.ctf ``` where the three `.wl` files give the vocabulary as plain text files, one line per word.In these CTF files, our columns are labeled `S0`, `S1`, and `S2`.These are connected to the actual network inputs by the corresponding lines in the reader definition:
###Code
def create_reader(path, is_training):
return MinibatchSource(CTFDeserializer(path, StreamDefs(
query = StreamDef(field='S0', shape=vocab_size, is_sparse=True),
intent_unused = StreamDef(field='S1', shape=num_intents, is_sparse=True),
slot_labels = StreamDef(field='S2', shape=num_labels, is_sparse=True)
)), randomize=is_training, epoch_size = INFINITELY_REPEAT if is_training else FULL_DATA_SWEEP)
# peek
reader = create_reader(data_dir + "/atis.train.ctf", is_training=True)
reader.streams
###Output
_____no_output_____
###Markdown
TrainerWe also must define the training criterion (loss function), and also an error metric to track.
###Code
def create_criterion_function(model):
labels = Placeholder()
ce = cross_entropy_with_softmax(model, labels)
errs = classification_error (model, labels)
return combine ([ce, errs]) # (features, labels) -> (loss, metric)
def train(reader, model, max_epochs=16):
# criterion: (model args, labels) -> (loss, metric)
# here (query, slot_labels) -> (ce, errs)
criterion = create_criterion_function(model)
# declare argument types
#criterion.set_signature(vocab_size, num_labels)
criterion.replace_placeholders({criterion.placeholders[0]: Input(vocab_size),
criterion.placeholders[1]: Input(num_labels)})
# training config
epoch_size = 18000
minibatch_size = 70
# learner
momentum_as_time_constant = minibatch_size / -math.log(0.9) # TODO: Change to round number. This is 664.39. 700?
lr_per_sample = [0.003]*4+[0.0015]*24+[0.0003] # LR schedule over epochs (we don't run that many epochs, but if we did, these are good values)
lr_schedule = learning_rate_schedule(lr_per_sample, units=epoch_size)
learner = adam_sgd(criterion.parameters,
lr_per_sample=lr_schedule, momentum_time_constant=momentum_as_time_constant,
low_memory=True,
gradient_clipping_threshold_per_sample=15, gradient_clipping_with_truncation=True)
# trainer
trainer = Trainer(model, criterion.outputs[0], criterion.outputs[1], learner)
# process minibatches and perform model training
log_number_of_parameters(model)
#progress_printer = ProgressPrinter(freq=100, first=10, tag='Training') # more detailed logging
progress_printer = ProgressPrinter(tag='Training')
t = 0
for epoch in range(max_epochs): # loop over epochs
epoch_end = (epoch+1) * epoch_size
while t < epoch_end: # loop over minibatches on the epoch
data = reader.next_minibatch(minibatch_size, input_map={ # fetch minibatch
criterion.arguments[0]: reader.streams.query,
criterion.arguments[1]: reader.streams.slot_labels
})
trainer.train_minibatch(data) # update model with it
t += data[criterion.arguments[1]].num_samples # count samples processed so far
progress_printer.update_with_trainer(trainer, with_metric=True) # log progress
loss, metric, actual_samples = progress_printer.epoch_summary(with_metric=True)
return loss, metric
###Output
_____no_output_____
###Markdown
Running itYou can find the complete recipe below.
###Code
def do_train():
global model
model = create_model()
reader = create_reader(data_dir + "/atis.train.ctf", is_training=True)
train(reader, model)
do_train()
###Output
_____no_output_____
###Markdown
This shows how learning proceeds over epochs (passes through the data).For example, after four epochs, the loss, which is the cross-entropy criterion, has reached 0.22 as measured on the ~18000 samples of this epoch,and that the error rate is 5.0% on those same 18000 training samples.The epoch size is the number of samples--counted as *word tokens*, not sentences--toprocess between model checkpoints.Once the training has completed (a little less than 2 minutes on a Titan-X or a Surface Book),you will see an output like this```(0.06193035719939996, 0.014038397514149373)```which is a tuple containing the loss (cross entropy) and the metric (classification error) averaged over the final epoch.On a CPU-only machine, it can be 4 or more times slower. Evaluating the modelLike the train() function, we also define a function to measure accuracy on a test set.
###Code
def evaluate(reader, model):
criterion = create_criterion_function(model)
#criterion.set_signature(None, Input(num_labels))
criterion.replace_placeholders({criterion.placeholders[0]: Input(num_labels)})
# process minibatches and perform evaluation
dummy_learner = adam_sgd(criterion.parameters, lr_per_sample=1, momentum_time_constant=0, low_memory=True)
evaluator = Trainer(model, criterion.outputs[0], criterion.outputs[1], dummy_learner)
progress_printer = ProgressPrinter(tag='Evaluation')
while True:
minibatch_size = 1000
data = reader.next_minibatch(minibatch_size, input_map={ # fetch minibatch
criterion.arguments[0]: reader.streams.query,
criterion.arguments[1]: reader.streams.slot_labels
})
#data = reader.next_minibatch(minibatch_size) # fetch minibatch
if not data: # until we hit the end
break
metric = evaluator.test_minibatch(data)
progress_printer.update(0, data[criterion.arguments[1]].num_samples, metric) # log progress
loss, metric, actual_samples = progress_printer.epoch_summary(with_metric=True)
return loss, metric
###Output
_____no_output_____
###Markdown
Now we can measure the model accuracy.
###Code
def do_test():
reader = create_reader(data_dir + "/atis.test.ctf", is_training=False)
evaluate(reader, model)
do_test()
model.layers[2].b.value
# let's run a sequence through
w = [query_dict[w] for w in 'BOS flights from new york to seattle EOS'.split()] # convert to word indices
print(w)
onehot = np.zeros([len(w),len(query_dict)], np.float32)
for t in range(len(w)):
onehot[t,w[t]] = 1
pred = model.eval({model.arguments[0]:onehot})
print(pred.shape)
best = np.argmax(pred,axis=2)
print(best[0])
[slots_wl[s] for s in best[0]]
###Output
_____no_output_____
###Markdown
Modifying the ModelIn the following, you will be given tasks to practice modifying CNTK configurations.The solutions are given at the end of this document... but please try without! A Word About [`Sequential()`](https://www.cntk.ai/pythondocs/layerref.htmlsequential)Before jumping to the tasks, let's have a look again at the model we just ran.The model is described in what we call *function-composition style*.```python Sequential([ Embedding(emb_dim), Recurrence(LSTM(hidden_dim), go_backwards=False), Dense(num_labels) ])```You may be familiar with the "sequential" notation from other neural-network toolkits.If not, [`Sequential()`](https://www.cntk.ai/pythondocs/layerref.htmlsequential) is a powerful operation that,in a nutshell, allows to compactly express a very common situation in neural networkswhere an input is processed by propagating it through a progression of layers.`Sequential()` takes an list of functions as its argument,and returns a *new* function that invokes these functions in order,each time passing the output of one to the next.For example,```python FGH = Sequential ([F,G,H]) y = FGH (x)```means the same as``` y = H(G(F(x))) ```This is known as ["function composition"](https://en.wikipedia.org/wiki/Function_composition),and is especially convenient for expressing neural networks, which often have this form: +-------+ +-------+ +-------+ x -->| F |-->| G |-->| H |--> y +-------+ +-------+ +-------+Coming back to our model at hand, the `Sequential` expression simplysays that our model has this form: +-----------+ +----------------+ +------------+ x -->| Embedding |-->| Recurrent LSTM |-->| DenseLayer |--> y +-----------+ +----------------+ +------------+ Task 1: Add Batch NormalizationWe now want to add new layers to the model, specifically batch normalization.Batch normalization is a popular technique for speeding up convergence.It is often used for image-processing setups, for example our other [hands-on lab on imagerecognition](./Hands-On-Labs-Image-Recognition).But could it work for recurrent models, too? So your task will be to insert batch-normalization layers before and after the recurrent LSTM layer.If you have completed the [hands-on labs on image processing](https://github.com/Microsoft/CNTK/blob/master/bindings/python/tutorials/CNTK_201B_CIFAR-10_ImageHandsOn.ipynb),you may remember that the [batch-normalization layer](https://www.cntk.ai/pythondocs/layerref.htmlbatchnormalization-layernormalization-stabilizer) has this form:``` BatchNormalization()```So please go ahead and modify the configuration and see what happens.If everything went right, you will notice improved convergence speed (`loss` and `metric`)compared to the previous configuration.
###Code
# TODO: Add batch normalization
def create_model():
with default_options(initial_state=0.1):
return Sequential([
Embedding(emb_dim),
Recurrence(LSTM(hidden_dim), go_backwards=False),
Dense(num_labels)
])
do_train()
do_test()
###Output
_____no_output_____
###Markdown
Task 2: Add a Lookahead

Our recurrent model suffers from a structural deficit: since the recurrence runs from left to right, the decision for a slot label has no information about upcoming words. The model is a bit lopsided. Your task will be to modify the model such that the input to the recurrence consists not only of the current word, but also of the next one (lookahead).

Your solution should be in function-composition style. Hence, you will need to write a Python function that does the following:
* takes no input arguments
* creates a placeholder sequence variable
* computes the "next value" in this sequence using the `Delay()` layer (use this specific form: `Delay(T=-1)`); and
* concatenates the current and the next value into a vector of twice the embedding dimension using `splice()`

and then insert this function into `Sequential()`'s list between the embedding and the recurrent layer.
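For orientation, one possible shape of such a function is sketched below (a sketch only, built from the layer names mentioned in the list above; the reference solution at the end of this notebook uses `future_value()` instead of `Delay(T=-1)`):
```python
def OneWordLookahead():
    x = Placeholder()                       # placeholder sequence variable
    apply_x = splice([x, Delay(T=-1)(x)])   # current word joined with the next word
    return apply_x
```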
###Code
# TODO: Add lookahead
def create_model():
with default_options(initial_state=0.1):
return Sequential([
Embedding(emb_dim),
BatchNormalization(),
Recurrence(LSTM(hidden_dim), go_backwards=False),
BatchNormalization(),
Dense(num_labels)
])
do_train()
do_test()
###Output
_____no_output_____
###Markdown
Task 3: Bidirectional Recurrent Model

Aha, knowledge of future words helps. So instead of a one-word lookahead, why not look ahead all the way to the end of the sentence, through a backward recurrence? Let us create a bidirectional model!

Your task is to implement a new layer that performs both a forward and a backward recursion over the data, and concatenates the output vectors. Note, however, that this differs from the previous task in that the bidirectional layer contains learnable model parameters.

In function-composition style, the pattern to implement a layer with model parameters is to write a *factory function* that creates a *function object*. A function object, also known as a *functor*, is an object that is both a function and an object. This means nothing other than that it contains data yet can still be invoked as if it were a function. For example, `Dense(outDim)` is a factory function that returns a function object that contains a weight matrix `W`, a bias `b`, and another function to compute `input @ W + b`. E.g. saying `Dense(1024)` will create this function object, which can then be used like any other function, also immediately: `Dense(1024)(x)`.

Confused? Let's take an example: let us implement a new layer that combines a linear layer with a subsequent batch normalization. To allow function composition, the layer needs to be realized as a factory function, which could look like this:
```python
def DenseLayerWithBN(dim):
    F = Dense(dim)
    G = BatchNormalization()
    x = Placeholder()
    apply_x = G(F(x))
    return apply_x
```
Invoking this factory function will create `F`, `G`, `x`, and `apply_x`. In this example, `F` and `G` are function objects themselves, and `apply_x` is the function to be applied to the data. Thus, e.g. calling `DenseLayerWithBN(1024)` will create an object containing a linear-layer function object called `F`, a batch-normalization function object `G`, and `apply_x`, which is the function that implements the actual operation of this layer using `F` and `G`. It will then return `apply_x`. To the outside, `apply_x` looks and behaves like a function. Under the hood, however, `apply_x` retains access to its specific instances of `F` and `G`.

Now back to our task at hand. You will need to create a factory function, very much like the example above. You shall create a factory function that creates two recurrent layer instances (one forward, one backward), and then defines an `apply_x` function which applies both layer instances to the same `x` and concatenates the two results.

Alright, give it a try! To know how to realize a backward recursion in CNTK, please take a hint from how the forward recursion is done. Please also do the following (a plain-Python illustration of the factory/functor pattern follows this list):
* remove the one-word lookahead you added in the previous task, which we aim to replace; and
* change the `hidden_dim` parameter from 300 to 150, to keep the total number of model parameters limited.
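A plain-Python illustration of the factory/functor idea (no CNTK involved; illustration only):
```python
# A factory function returns a "function object" that keeps access to its own data.
def make_scaler(factor):
    def apply_x(x):
        return factor * x   # apply_x retains access to its specific `factor`
    return apply_x

double = make_scaler(2)     # create the function object...
print(double(21))           # ...and use it like any other function: prints 42
```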
###Code
# TODO: Add bidirectional recurrence
def create_model():
with default_options(initial_state=0.1): # inject an option to mimic the BrainScript version identically; remove some day
return Sequential([
Embedding(emb_dim),
BatchNormalization(),
Recurrence(LSTM(hidden_dim), go_backwards=False),
BatchNormalization(),
Dense(num_labels)
])
do_train()
do_test()
###Output
_____no_output_____
###Markdown
Works like a charm! This model achieves 1.83%, a tiny bit better than the lookahead model above. The bidirectional model has 40% fewer parameters than the lookahead one. However, if you go back and look closely at the complete log output (not shown on this web page), you may find that the lookahead one trained about 30% faster. This is because the lookahead model has both fewer horizontal dependencies (one recurrence instead of two) and larger matrix products, and can thus achieve higher parallelism.

Solution 1: Adding Batch Normalization
###Code
def create_model():
with default_options(initial_state=0.1): # inject an option to mimic the BrainScript version identically; remove some day
return Sequential([
Embedding(emb_dim),
BatchNormalization(),
Recurrence(LSTM(hidden_dim), go_backwards=False),
BatchNormalization(),
Dense(num_labels)
])
reader = create_reader(data_dir + "/atis.train.ctf", is_training=True)
model = create_model()
train(reader, model, max_epochs=8)
###Output
_____no_output_____
###Markdown
Solution 2: Add a Lookahead
###Code
def OneWordLookahead():
x = Placeholder()
apply_x = splice ([x, future_value(x)])
return apply_x
def create_model():
with default_options(initial_state=0.1): # inject an option to mimic the BrainScript version identically; remove some day
return Sequential([
Embedding(emb_dim),
OneWordLookahead(),
BatchNormalization(),
Recurrence(LSTM(hidden_dim), go_backwards=False),
BatchNormalization(),
Dense(num_labels)
])
reader = create_reader(data_dir + "/atis.train.ctf", is_training=True)
model = create_model()
train(reader, model, max_epochs=1)
###Output
_____no_output_____
###Markdown
Solution 3: Bidirectional Recurrent Model
###Code
def BiRecurrence(fwd, bwd):
F = Recurrence(fwd)
G = Recurrence(bwd, go_backwards=True)
x = Placeholder()
apply_x = splice ([F(x), G(x)])
return apply_x
def create_model():
with default_options(initial_state=0.1): # inject an option to mimic the BrainScript version identically; remove some day
return Sequential([
Embedding(emb_dim),
BatchNormalization(),
BiRecurrence(LSTM(hidden_dim), LSTM(hidden_dim)),
BatchNormalization(),
Dense(num_labels)
])
reader = create_reader(data_dir + "/atis.train.ctf", is_training=True)
model = create_model()
train(reader, model, max_epochs=8)
###Output
_____no_output_____ |
analysis/.ipynb_checkpoints/Milestone2Task3-5-checkpoint.ipynb | ###Markdown
Task3P1 Step 1:
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
"""
If blue team wins(1), then red team win will be 0. For such paired columns, we can combine them into one column; the value can be "blue"/"red"/"noteam"
If there is a tie (the game duration will certainly be smaller than 300s), the row is an outlier and will be dropped.
KDA=(kill+assist)/death; it is usually computed for a single player, but in this project it will be calculated and used from a team's perspective.
KDA is useful to combine and represent the information from kills, assists and deaths. But when death=0, KDA should not be calculated (division by 0).
To handle such a situation, we can simply drop such rows, because death=0 happens when one team is overpowering or someone disconnected during the game.
Such situations can be considered outliers and should be dropped. The same thing can happen if the game is too short (<16 min (960s)).
"""
"""
This chain will first read the dataset, rename the game duration column, and drop null values. After that, it will create two
columns to save the KDA for both teams using blue's/red's kill, assist, and death columns. Next, it will drop outliers and combine the info for paired columns.
Finally, it will drop useless columns (including columns of no interest).
"""
lol1 = (pd.read_csv('../data/raw/Master_Ranked_Games.csv')
.rename(columns={'gameDuraton':'duration'})
.dropna()
)
lol2 = (lol1.assign(blueKDA=lambda x: (x['blueKills']+x['blueAssist'])/x['blueDeath'])
.assign(redKDA=lambda x: (x['redKills']+x['redAssist'])/x['redDeath'])
.drop(lol1[(lol1['blueDeath']==0)|(lol1['redDeath']==0)|(lol1['blueKills']==0)|(lol1['redKills']==0)|(lol1['duration']<960)].index) #drop outliers
.assign(teamFirstBlood = lambda x: np.where(x.blueFirstBlood==1,'blue','red'))
.assign(teamFirstTower = lambda x: np.where(x.blueFirstTower==1,'blue',(np.where(x.redFirstTower==1,'red','noTeam'))))
.assign(teamFirstBaron = lambda x: np.where(x.blueFirstBaron==1,'blue',(np.where(x.redFirstBaron==1,'red','noTeam'))))
.assign(teamFirstDragon = lambda x: np.where(x.blueFirstDragon==1,'blue',(np.where(x.redFirstDragon==1,'red','noTeam'))))
.assign(teamFirstInhibitor = lambda x: np.where(x.blueFirstInhibitor==1,'blue',(np.where(x.redFirstInhibitor==1,'red','noTeam'))))
.assign(teamWins = lambda x: np.where(x.blueWins==1,'blue','red'))
.drop(columns={'gameId','blueWins', 'blueFirstBlood', 'blueFirstTower', 'blueFirstBaron', 'blueFirstDragon', 'blueFirstInhibitor',
'redWins', 'redFirstBlood', 'redFirstTower', 'redFirstBaron', 'redFirstDragon', 'redFirstInhibitor', 'blueJungleMinionKills',
'redJungleMinionKills','blueKills', 'blueAssist', 'blueDeath', 'redKills', 'redAssist', 'redDeath',
'blueTotalLevel', 'redTotalLevel'}) #drop now useless or no interesting columns
.reset_index()
.drop(columns='index')
)
lol2
###Output
_____no_output_____
###Markdown
Step 2:
###Code
#If blue team wins(1), then red team win will be 0. For such paired columns, we can combine them into one column(eg. teamWins).
#0 for red team, 1 for blue team
"""
If blue team wins(1), then red team win will be 0. For such paired columns, we can combine them into one column; the value can be "blue"/"red"/"noteam"
If there is a tie (the game duration will certainly be smaller than 300s), the row is an outlier and will be dropped.
KDA=(kill+assist)/death; it is usually computed for a single player, but in this project it will be calculated and used from a team's perspective.
KDA is useful to combine and represent the information from kills, assists and deaths. But when death=0, KDA should not be calculated (division by 0).
To handle such a situation, we can simply drop such rows, because death=0 happens when one team is overpowering or someone disconnected during the game.
Such situations can be considered outliers and should be dropped. The same thing can happen if the game is too short (<16 min (960s)).
"""
def load_and_process(url_or_path_to_csv_file):
# Method Chain 1 (Load data and deal with missing data)
lol1 = (pd.read_csv('../data/raw/Master_Ranked_Games.csv')
.rename(columns={'gameDuraton':'duration'})
.dropna()
)
# Method Chain 2 (Create new columns, drop others, and do processing)
lol2 = (lol1.assign(blueKDA=lambda x: (x['blueKills']+x['blueAssist'])/x['blueDeath'])
.assign(redKDA=lambda x: (x['redKills']+x['redAssist'])/x['redDeath'])
.drop(lol1[(lol1['blueDeath']==0)|(lol1['redDeath']==0)|(lol1['blueKills']==0)|(lol1['redKills']==0)|(lol1['duration']<960)].index) #drop outliers
.assign(teamFirstBlood = lambda x: np.where(x.blueFirstBlood==1,'blue','red'))
.assign(teamFirstTower = lambda x: np.where(x.blueFirstTower==1,'blue',(np.where(x.redFirstTower==1,'red','noTeam'))))
.assign(teamFirstBaron = lambda x: np.where(x.blueFirstBaron==1,'blue',(np.where(x.redFirstBaron==1,'red','noTeam'))))
.assign(teamFirstDragon = lambda x: np.where(x.blueFirstDragon==1,'blue',(np.where(x.redFirstDragon==1,'red','noTeam'))))
.assign(teamFirstInhibitor = lambda x: np.where(x.blueFirstInhibitor==1,'blue',(np.where(x.redFirstInhibitor==1,'red','noTeam'))))
.assign(teamWins = lambda x: np.where(x.blueWins==1,'blue','red'))
.drop(columns={'gameId','blueWins', 'blueFirstBlood', 'blueFirstTower', 'blueFirstBaron', 'blueFirstDragon', 'blueFirstInhibitor',
'redWins', 'redFirstBlood', 'redFirstTower', 'redFirstBaron', 'redFirstDragon', 'redFirstInhibitor', 'blueJungleMinionKills',
'redJungleMinionKills','blueKills', 'blueAssist', 'blueDeath', 'redKills', 'redAssist', 'redDeath',
'blueTotalLevel', 'redTotalLevel'}) #drop now useless or no interesting columns
.reset_index()
.drop(columns='index')
)
return lol2
load_and_process('../data/raw/Master_Ranked_Games.csv')
###Output
_____no_output_____
###Markdown
Task3P2_Hexuan
###Code
from scripts import project_functions
df = project_functions.load_and_process('../data/raw/Master_Ranked_Games.csv')
df
###Output
_____no_output_____
###Markdown
M2 Task4(EDA):
###Code
# import pandas as pd
# import numpy as np
# import matplotlib.pyplot as plt
# import seaborn as sns
# from scripts import project_functions
# df = project_functions.load_and_process('../data/raw/Master_Ranked_Games.csv')
df.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 90498 entries, 0 to 90497
Data columns (total 35 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 duration 90498 non-null int64
1 blueDragonKills 90498 non-null int64
2 blueBaronKills 90498 non-null int64
3 blueTowerKills 90498 non-null int64
4 blueInhibitorKills 90498 non-null int64
5 blueWardPlaced 90498 non-null int64
6 blueWardkills 90498 non-null int64
7 blueChampionDamageDealt 90498 non-null int64
8 blueTotalGold 90498 non-null int64
9 blueTotalMinionKills 90498 non-null int64
10 blueAvgLevel 90498 non-null float64
11 blueKillingSpree 90498 non-null int64
12 blueTotalHeal 90498 non-null int64
13 blueObjectDamageDealt 90498 non-null int64
14 redDragonKills 90498 non-null int64
15 redBaronKills 90498 non-null int64
16 redTowerKills 90498 non-null int64
17 redInhibitorKills 90498 non-null int64
18 redWardPlaced 90498 non-null int64
19 redWardkills 90498 non-null int64
20 redChampionDamageDealt 90498 non-null int64
21 redTotalGold 90498 non-null int64
22 redTotalMinionKills 90498 non-null int64
23 redAvgLevel 90498 non-null float64
24 redKillingSpree 90498 non-null int64
25 redTotalHeal 90498 non-null int64
26 redObjectDamageDealt 90498 non-null int64
27 blueKDA 90498 non-null float64
28 redKDA 90498 non-null float64
29 teamFirstBlood 90498 non-null object
30 teamFirstTower 90498 non-null object
31 teamFirstBaron 90498 non-null object
32 teamFirstDragon 90498 non-null object
33 teamFirstInhibitor 90498 non-null object
34 teamWins 90498 non-null object
dtypes: float64(4), int64(25), object(6)
memory usage: 24.2+ MB
###Markdown
From the info, we can tell that all columns are numeric values. The code "df = project_functions.load_and_process('../data/raw/Master_Ranked_Games.csv')" has already dropped nulls, dropped useless columns (including columns of no interest), and created two new columns for KDA, which is a better representation (only one column needed) of kills, assists, and deaths.
###Code
df.columns
###Output
_____no_output_____
###Markdown
Column explanations:
- duration: length of the game in seconds
- blueDragonKills: number of dragons killed by the blue team
- blueBaronKills: number of Barons killed by the blue team
- blueTowerKills: number of towers killed by the blue team
- blueInhibitorKills: number of inhibitors killed by the blue team
- blueWardPlaced: number of wards placed by the blue team
- blueWardkills: number of wards killed by the blue team
- blueChampionDamageDealt: amount of damage caused by the blue team to the opposing team's champions
- blueTotalGold: amount of gold gained by the blue team
- blueTotalMinionKills: number of minions killed by the blue team
- blueAvgLevel: average level of the blue team when the game ends
- blueKillingSpree: number of times the blue team gets a killing-spree record
- blueTotalHeal: amount of healing done by the blue team
- blueObjectDamageDealt: amount of damage caused by the blue team to objects
- blueKDA: blue team's (kill+assist)/death ratio
(the red-prefixed columns are defined the same way)
- teamFirstBlood: team that kills a champion first in the game
- teamFirstTower: team that kills a tower first in the game
- teamFirstBaron: team that kills a Baron first in the game
- teamFirstDragon: team that kills a dragon first in the game
- teamFirstInhibitor: team that kills an Inhibitor first in the game
- teamWins: team that wins the game
###Code
df.head()
df.describe().T
###Output
_____no_output_____
###Markdown
We double-check that there are no outliers now, and get a better view of the dataset.
###Code
plt.figure(figsize=(10,6))
sns.countplot(y=df['teamWins']).set_title('Count of team wins for blue and red side')
plt.ylabel('team wins')
###Output
_____no_output_____
###Markdown
Without other information, there is not much difference between the number of wins for the two teams. We can tell that the game is fair for both sides.
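One way to quantify this, using the processed `df` from above:
```python
# share of games won by each side; values close to 0.5 support the "fair game" reading
df['teamWins'].value_counts(normalize=True)
```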
###Code
hisPlot1 = sns.displot(df, x='duration', aspect=2, palette='pastel')
hisPlot1
###Output
_____no_output_____
###Markdown
If a game's duration is more than about 1200s, the distribution looks similar to a normal distribution, but this needs further proof. Many games also end early (some of them are dropped as outliers, but we can still tell this from the plot).
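One possible follow-up check (a sketch; `scipy.stats.normaltest` tests the null hypothesis that a sample comes from a normal distribution):
```python
from scipy import stats

long_games = df.loc[df['duration'] > 1200, 'duration']
stat, p_value = stats.normaltest(long_games)
print(p_value)   # a small p-value argues against normality
```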
###Code
sns.displot(df, x="duration", hue="teamWins", multiple="dodge", aspect=3)
###Output
_____no_output_____
###Markdown
For any duration of the game, both teams seem to have about the same number of wins.
###Code
sns.countplot(y=df['teamFirstBaron']).set_title('Count of team that takes the first Baron')
plt.ylabel('team kills first Baron')
###Output
_____no_output_____
###Markdown
Many games end without a Baron getting killed. We need to keep this in mind when doing further analysis.
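The share of such games can be checked directly (a quick sketch using the `teamFirstBaron` column created earlier):
```python
# fraction of games in which no team took a Baron
(df['teamFirstBaron'] == 'noTeam').mean()
```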
###Code
## M2 Task5:
###Output
_____no_output_____
###Markdown
From EDA, we can tell that the game is fair; both sides have about the same chance to win the game at start. RQ1: Does the team takes the first blood has more chance to win? RQ2: Does the team takes the first Baron has more chance to win?(Keep in mind that many games end without first Baron) RQ3: Does the team takes the first dragon has more chance to win? RQ4: What is relationship between team KDA and chance to win? RQ5: What is relationship between two team's total champion damage difference and chance to win? RQ6: What is relationship between two team's total heal difference and chance to win?
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scripts import project_functions
df = project_functions.load_and_process('../data/raw/Master_Ranked_Games.csv')
df.head()
df1 = df.assign(temp1= lambda x: np.where(x.teamFirstBlood==x.teamWins,True,False))
sns.countplot(data=df1, y='temp1').set_title('Team with first blood wins')
plt.ylabel('')
###Output
_____no_output_____
###Markdown
The team with first blood does win the game clearly more often, but the chance doesn't differ **very** much.
###Code
df2 = df.assign(temp2= lambda x: np.where((x.teamFirstBaron!='noTeam')&(x.teamFirstBaron==x.teamWins),
'True',np.where((x.teamFirstBaron!='noTeam'),'False','noTeam')))
sns.countplot(data=df2, y='temp2').set_title('Team with first Baron wins')
plt.ylabel('')
###Output
_____no_output_____
###Markdown
According to the plot, if a team takes the first Baron, it is very likely to win the game. This may be either because the 'better' team is more likely to take the first Baron, or because the first Baron gives that team a very strong buff. Either way, the relationship exists.
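The conditional win rates can also be tabulated directly (a sketch using pandas' crosstab on the processed `df`):
```python
# win rate of each side, conditioned on which team took the first Baron
pd.crosstab(df['teamFirstBaron'], df['teamWins'], normalize='index')
```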
###Code
df3 = df.assign(temp3= lambda x: np.where((x.teamFirstDragon!='noTeam')&(x.teamFirstDragon==x.teamWins),
'True',np.where((x.teamFirstDragon!='noTeam'),'False','noTeam')))
sns.countplot(data=df3, y='temp3').set_title('Team with first dragon wins')
plt.ylabel('')
###Output
_____no_output_____
###Markdown
According to the plot, if a team takes the first dragon, it is also more likely to win the game, but the difference in chance is smaller than for the first Baron. Here the ratio is about 5.2/3, whereas for the first Baron it is about 4.4/0.9 and for first blood about 5.2/3.5.
###Code
sns.displot(df, x="blueKDA", y="teamWins",aspect=3)
sns.displot(df, x="redKDA", y="teamWins",aspect=3)
###Output
_____no_output_____
###Markdown
According to the plot, as a team's KDA goes higher, that team is more likely to win. But there are times when a team loses with a high KDA (in the plots, there are lines for the opposite team even when the team's KDA is high). Generally speaking, looking at the two darkest areas, teams that win the game usually have a KDA larger than 2; otherwise, teams are more likely to lose.
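This reading can be checked numerically (a sketch; `blueKDA` and `teamWins` come from the processed `df`):
```python
# blue-side win rate when blue's KDA is above vs. below 2
high = df.loc[df['blueKDA'] > 2, 'teamWins'].eq('blue').mean()
low = df.loc[df['blueKDA'] <= 2, 'teamWins'].eq('blue').mean()
print(high, low)
```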
###Code
#We are using blue's-red's
df5 = df.assign(dmgD= df['blueChampionDamageDealt']-df['redChampionDamageDealt'])
sns.displot(df5, x="dmgD", y="teamWins",aspect=3)
###Output
_____no_output_____
###Markdown
According to the plot, the team that wins usually deals more damage than the other team (dark area). But there are also many cases in which a team wins without dealing more damage.
###Code
#We are using blue's-red's
df6 = df.assign(healD= df['blueTotalHeal']-df['redTotalHeal'])
sns.displot(df6, x="healD", y="teamWins",aspect=4)
###Output
_____no_output_____ |
notebooks/part-i-data-exploration.ipynb | ###Markdown
Welcome to introduction to dashboards with Plotly and Dash------------------------------------------------------------------------------------------------------------------------------- Workshop facilitators: Laura Gutierrez Funderburk, Hanh Tong About this workshopIn this workshop we will explore some characteristics of the housing market in Canada. It is important to note that this workshop assumes:1. Data cleaning and exploration was completed prior to developing the dashboard2. Some comfort with `pandas` and visualization is assumed3. Comfort navigating the Jupyter environment is needed Workshop schedule:------------------------------------------------------------------------------------------------------------------------------- 1. Part I: Data explorationIn this section, we will first spend time getting familiar with the data. We will use the `pandas` and `plotly` libraries, we will also explore the `DEX` feature within Noteable to ease getting a good sense for what the data contains.In this section, we will also explore the notion of factoring code into functions, and the notion of writing a Python script that we can use to easily recreate our results. 2. Part II: Dashboard componentsIn this section, we will take what we built together in part I and explore the main components in a Dash dashboard. Part I: Data exploration
###Code
import pandas as pd
import plotly.express as px
# Read data
url = 'https://raw.githubusercontent.com/Vancouver-Datajam/dashboard-workshop-dash/main/data/delinquency_mortgage_population_2021_2020.csv'
data_pop_del_mort_df = pd.read_csv(url, index_col=0)
data_pop_del_mort_df.head(10)
###Output
_____no_output_____
###Markdown
Exercise: Get familiar with the table------------------------------------------------------------------------------------------------------------------------------- Run the cell below. Questions: a) What are relevant variables in the data? b) What is the extent (range), mean and median of the columns `DelinquencyRate`, `AverageMortgageAmount` and `PopulationSize`? c) What is the time range and frequency of the data?
###Code
data_pop_del_mort_df.info()
data_pop_del_mort_df.describe()
###Output
_____no_output_____
###Markdown
Using Python and Plotly to generate interactive plots-------------------------------------------------------------------------------------------------------------------------------In this section we are going to write a few commands to get started with visualizations.
###Code
# First attempt
px.line(data_pop_del_mort_df,
x = "Time",
y="DelinquencyRate")
###Output
_____no_output_____
###Markdown
The plot above is quite difficult to read. Let's colour the values by Geography, and add a title.
###Code
# Second attempt
px.line(data_pop_del_mort_df,
x = "Time",
y="DelinquencyRate",
color="Geography",
title = "Chart: line plot of Time and DelinquencyRate by Geography")
###Output
_____no_output_____
###Markdown
Exercise: Let's take a look at the average mortgage amount and population size. Complete the code below to visualize the average mortgage amount. Change the code to visualize changes in population size.
###Code
variable = "AverageMortgageAmount"  # one possible answer; change to "PopulationSize" to see population
px.line(data_pop_del_mort_df,
x = "Time",
y=variable,
color="Geography",
title = f"Chart: line plot of Time and {variable} by Geography")
###Output
_____no_output_____
###Markdown
Let's take a look at their distribution by using a box plot.
###Code
px.box(data_pop_del_mort_df,
x = 'Geography',
y = 'DelinquencyRate',
color = 'Geography',
       title = 'Chart: box plot of Delinquency rate by Geography.')
###Output
_____no_output_____
###Markdown
Exercise: Let's take a look at the distribution of average mortgage amount and population size. Complete the code below to visualize the average mortgage amount and population size.
###Code
variable = "AverageMortgageAmount"  # one possible answer; try "PopulationSize" as well
px.box(data_pop_del_mort_df,
x = 'Geography',
y = variable,
color = 'Geography',
       title = f'Chart: box plot of {variable} by Geography.')
###Output
_____no_output_____
###Markdown
Let's work on a scatter plot to see if there is a relationship between average mortgage amount and delinquency.
###Code
px.scatter(data_frame=data_pop_del_mort_df,
y = "AverageMortgageAmount",
x = "DelinquencyRate",
title="Average mortgage rate to delinquency rate")
###Output
_____no_output_____
###Markdown
Exercise: modify the code above to colour the dots by Geography, add hover name with Time
###Code
px.scatter(data_frame=data_pop_del_mort_df,
y = "AverageMortgageAmount",
x = "DelinquencyRate",
title="Average mortgage rate to delinquency rate",
           color="Geography",    # colour the dots by Geography, as asked in the exercise
           hover_name="Time")    # show Time on hover
###Output
_____no_output_____
###Markdown
Using dictionaries to access different kinds of functions------------------------------------------------------------------------------------------------------------------------------- We need to do quite a bit of work refactoring our code in preparation for our dashboard. We will use dictionaries to access different plotting functions. Recall, a dictionary is a data structure with `keys` and `values`. The syntax of a dictionary is as follows: dictionary = { key1 : value1, key2 : value2, key3 : value3} Where keys are typically strings, and values can be a data structure such as a string, list, set, tuple, or a function.
###Code
sample_dictionary = {"list_numbers" : [1, 2, 3, 4, 5],
"set_numbers": set([1, 2, 3, 4, 5]),
"tuple_numbers": tuple([1, 2, 3, 4, 5]),
"function_sum": sum}
###Output
_____no_output_____
###Markdown
To access the values within a dictionary, we use the following notation dictionary[key] For example
###Code
sample_dictionary['list_numbers']
sample_dictionary['set_numbers']
sample_dictionary['tuple_numbers']
sample_dictionary['function_sum']
###Output
_____no_output_____
###Markdown
To use the function `sum`, simply pass a list of numbers you want to add.
###Code
sum([1,2,3])
###Output
_____no_output_____
###Markdown
We can obtain the same result with our dictionary as follows:
###Code
sample_dictionary['function_sum']([1,2,3])
###Output
_____no_output_____
###Markdown
We can use the following dictionary to generate different kinds of plots.
###Code
# Dictionary
plot_dict = {'box': px.box,'violin': px.violin, 'scatter': px.scatter, 'line':px.line}
###Output
_____no_output_____
###Markdown
We can then use the dictionary to try different kinds of plots.
###Code
plot_dict['scatter'](data_pop_del_mort_df,
x = "Time",
y="DelinquencyRate",
color="Geography",
title = "Chart: line plot of Time and DelinquencyRate by Geography")
###Output
_____no_output_____
###Markdown
Exercise 1: change the key `scatter` to `line`, `box` and `violin` and run the cell Exercise 2: change the `x` variable to be one of `Geography` or `Time` Exercise 3: Change the `y` variable to be one of `PopulationSize`, `DelinquencyRate` or `AverageMortgageAmount`
###Code
plot_dict['scatter'](data_pop_del_mort_df,
x = "Time",
y="DelinquencyRate",
color="Geography",
title = "Playing with several kinds of charts")
###Output
_____no_output_____
###Markdown
Refactoring code into functions-------------------------------------------------------------------------------------------------------------------------------In the next section we will refactor our code to ease reproducibility and also to ensure our Dash app is cleaner. We can then put our function dictionary into a Python function.
###Code
def graph_region(region_df, graph_type: str, dimension1: str, dimension2: str):
"""
Parameters
----------
    region_df: (dataframe object) reshaped data frame object with mortgage, delinquency and population data
graph_type: (string) "box", "violin", "scatter", "line"
dimension1: (str) one of 'Time' or 'Geography'
    dimension2: (str) one of 'AverageMortgageAmount', 'DelinquencyRate' or 'PopulationSize'
Returns:
--------
Plotly figure
"""
plot_dict = {'box': px.box,'violin': px.violin, 'scatter': px.scatter, 'line':px.line}
try:
# Initialize function
fig = plot_dict[graph_type](region_df,
x=dimension1,
y=dimension2,
color = "Geography",
hover_name = "Time")
# Format figure
title_string = f'Chart: {graph_type} plot of {dimension1} and {dimension2} by Geography'
fig.update_layout(title = title_string)
fig.update_xaxes(tickangle=-45)
return fig
except KeyError:
print("Key not found. Make sure that 'graph_type' is in ['box','violin', 'scatter', 'line']")
except ValueError:
print("Dimension is not valid. dimension1 is one of 'Time' or 'Geography'")
print("dimension2 is one of 'AverageMortgageAmount', 'DelinquencyRate', 'PopulationSize'")
graph_region(data_pop_del_mort_df, 'line', "Time", "AverageMortgageAmount")
graph_region(data_pop_del_mort_df, 'box', "Geography", "PopulationSize")
graph_region(data_pop_del_mort_df, 'scatter', "AverageMortgageAmount", "DelinquencyRate")
###Output
_____no_output_____
###Markdown
Bonus, incorporating time series plots
###Code
# Optional to have regions
fig = px.scatter(data_frame=data_pop_del_mort_df,
y = "AverageMortgageAmount",
x = "DelinquencyRate",
size= "PopulationSize",
color= "Geography",
animation_frame="Time",
animation_group="Geography",
title = "Delinquency rate vs average mortgage over time"
)
fig.update_layout(yaxis_range=[100000,500000])
fig.update_layout(xaxis_range=[0,1])
fig.show()
###Output
_____no_output_____ |
notebooks/01_ensemble.ipynb | ###Markdown
Dev comments

Project definitions:
- Set a method called dfit or overwrite fit method in order to have sklearn pipeline support?
- Delegates for an instance of estimator inside class or increment classes through factories (better support in sklearn pipelines)?

- [X] TODO: Solve Tree Predict mystery
- [ ] TODO: make meta bagged randomized prior class
- [X] TODO: make a multivariate joint dist estimator for AdaBoostingRegressor
- [X] TODO: make boosting and bagging tree estimator classes
- [X] TODO: implement bagging and boosting meta estimator (include sampling from sub models)
- [X] TODO: include target scaling in pipeline
- [ ] TODO: Create DensityEstimator Base Class containing sample and density methods (density calls sample and returns RV CLASS)
- [ ] TODO: Mean, Variance and Entropy explainer tool (LIME, SHAP, forests...)
- [X] TODO: Decide whether to allow multi output (doesn't model well multivariate bimodal joint probabilities, cheeky to sample (two different random samples))
- [X] Entropy based regression with random forest embeddings + entropy of targets in each node
- [X] enhance multioutput estimator class
- [X] make possible for user defined sample weights for y_ in self.sample (allow time exponential decay sampling, for example) WE MAY GET THE DISTRIBUTION FOR FREE WITH self.proba_preds
- [X] Make EntropyEstimator class to handle any estimator turning regression problem into a classification one
- [X] fix resolution to any number of bins
- [X] Make EntropyEstimator Ensemble (Bagging and Boosting) - Came for free with BaggingClassifier (predict_proba)
- [ ] Make _ChainedJointEstimator, define joint estimation strategy, maybe using mu
- [X] Update EntropyEstimator sampling method encompassing KDE sampling
- [ ] Incorporate _ChainedJointEstimator on MultiOutputEntropyEstimator
- [ ] Reorganize 01_ensemble in 01_entropy_estimator, 02_joint_estimator and 03_ensemble_estimator
- [X] Change name of MultiOutputEstimator to JointOutputEstimator
- [ ] Make cov_add_noise in estimators (not needed for kde since data is whitened with PCA)
- [ ] Make chained/stacked estimator (good for time series) (creates a new feature as the predicted bin for each estim); make parallel (with kernel tree) and sequential estimators (timeseries vs joint)
- [ ] Make quantile calibration of probability densities (to make it uniform)
- [X] Make Probability calibrations for entropy estimation

Imports -
###Code
#export
from warnings import warn
from functools import partial
import copy
from tqdm.notebook import tqdm
import numpy as np
import pandas as pd
from sklearn import ensemble
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.preprocessing import OneHotEncoder, normalize, QuantileTransformer, FunctionTransformer, MinMaxScaler
from sklearn.calibration import CalibratedClassifierCV
from sklearn.multioutput import MultiOutputRegressor, MultiOutputClassifier
from sklearn.utils.fixes import _joblib_parallel_args
from sklearn.metrics import pairwise
from sklearn.decomposition import TruncatedSVD
from sklearn.pipeline import Pipeline
from sklearn.utils.class_weight import compute_sample_weight
from sklearn.neighbors import NearestNeighbors
from numpy.linalg import LinAlgError
from scipy.spatial.distance import cdist
import scipy
from joblib import Parallel, delayed
from skdensity.utils import (cos_sim_query, sample_multi_dim, ctqdm, add_noise,sample_from_dist_array,
DelegateEstimatorMixIn, _fix_X_1d, _fix_one_dist_1d, _fix_one_dist_2d,
_add_n_dists_axis,_add_n_samples_axis,_add_n_dims_axis,sample_idxs, make_batches
)
from skdensity.metrics import kde_entropy, quantile, marginal_variance, bimodal_variance, kde_likelihood, kde_quantile, agg_smallest_distance, cdf
from skdensity.core.random_variable import KDE, RandomVariable, RVArray
###Output
_____no_output_____
###Markdown
Ensemble density estimators - Density estimators based on ensemble methods, such as bagging, boosting and some decision tree algorithms. All 'classes' are actually factories that dynamically extend the functionality of the original sklearn class with methods such as sample. Example data
###Code
import seaborn as sns
from sklearn.datasets import make_regression
from matplotlib import cm
def sigmoid(x):
    # note: this is the mirrored logistic 1/(1+exp(x)), i.e. sigmoid(-x); it is only used to turn a feature into a mode weight in (0, 1)
    return 1/(1+np.exp(x))
X,y = make_regression(
n_samples=100000,
n_features=15,
n_informative=6,
n_targets=2,
bias=500,
effective_rank=None,
tail_strength=10,
noise=150,
shuffle=True,
coef=False,
random_state=None
)
#make one of X[1] feature mode weightening
bimodal_factor_wieght = 2
bimodal_factors = (sigmoid(bimodal_factor_wieght*X[:,-1]) > np.random.random(size = X.shape[0])).astype(int)
bimodal_factors[bimodal_factors == 0] = -1
bimodal_factors = bimodal_factors.reshape(-1,1)
y = bimodal_factors*y
colors = cm.get_cmap('binary')(256*(sigmoid(bimodal_factor_wieght*X[:,-1]) > np.random.random(size = X.shape[0])).astype(int))
sns.jointplot(y[:,0],y[:,1], joint_kws = {'color': colors}, alpha = 0.01)
sns.jointplot(X[:,-1], y[:,1], alpha = 0.1)
X_train, X_test = X[:int(0.8*len(X))], X[int(0.8*len(X)):]
y_train, y_test = y[:int(0.8*len(X))], y[int(0.8*len(X)):]
###Output
_____no_output_____
###Markdown
QuantileCalibrator> A class that calibrates samples in order to bring the quantile distribution closer to a uniform distribution
###Code
#export
class QuantileCalibrator(BaseEstimator):
def __init__(self, estimator, bins = 100):
self.estimator = estimator
self.bins = bins
return
def fit(self, X, y, **sampling_kws):
'''
        X is the feature matrix of a held-out (calibration) set; samples are drawn from the wrapped estimator for each row
        y is the true value, aligned with its respective distribution (the row of X it belongs to)
'''
samples = self.estimator.sample(X, **sampling_kws)
q_x = quantile(y, samples)
q_dist, _ = np.histogram(q_x, bins=self.bins, range=[0,1], weights=None, density=False)
#fill zeros with 1 to avoid problems in division
q_dist = np.where(q_dist == 0, 1, q_dist)
self.q_dist = q_dist
return self
def _make_resampling_weights(self, X):
'''
define new sampling wieghts for each set of samples
'''
cdfs = cdf(X)
cdfs = cdfs[:,:,0] #works only for 1d dists
weights = []
for dist in cdfs:
dg = np.digitize(dist, bins=np.linspace(0,1,self.bins-1, endpoint = False))
weights.append(self.q_dist[dg])
#create weights as the bin count and then normalize
weights = normalize(np.array(weights), norm = 'l1')
return weights
def sample(self, X, sample_size = 1000, weight_func = None,
alpha = None, replace = True, noise_factor = 0, **sampling_kws):
'''
resamples values in each dist taking into account quantile calibration factor learned from
training set
'''
        # pass the caller's weighting parameters through to the wrapped estimator (the raw pool size stays at 1000)
        X = self.estimator.sample(X, sample_size = 1000, weight_func = weight_func,
                              alpha = alpha, replace = replace, noise_factor = 0, **sampling_kws)
p = self._make_resampling_weights(X)
samples = sample_from_dist_array(X, sample_size, weights = p)
noise = agg_smallest_distance(samples, agg_func = np.std)
noise = _add_n_dims_axis(noise)
return add_noise(samples, noise_factor*noise)
###Output
_____no_output_____
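###Markdown
A minimal usage sketch for `QuantileCalibrator` (hypothetical names: `base_estimator` is any fitted estimator exposing the `sample` API used in this notebook, and `X_cal`, `y_cal` are a held-out calibration set):
```python
calibrator = QuantileCalibrator(base_estimator, bins=100)
calibrator.fit(X_cal, y_cal)                          # learn the quantile histogram on held-out data
samples = calibrator.sample(X_cal, sample_size=500)   # resample, reweighted by the learned histogram
```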
###Markdown
HistogramEstimator> An estimator that performs a classification on a discretized transformation of a continuous space using QuantileTransformer, predicts a probability distribution using predict_proba, and maps it back to the continuous domain. The base estimator can be any estimator that implements the predict_proba method.
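The discretization round trip at the heart of this class can be sketched in isolation as follows (a standalone illustration with sklearn's `QuantileTransformer`; here `resolution` plays the role of the constructor argument):
```python
import numpy as np
from sklearn.preprocessing import QuantileTransformer

y = np.random.randn(1000, 1)                     # some continuous target
resolution = 30
qt = QuantileTransformer(n_quantiles=resolution).fit(y)
bins = np.around(qt.transform(y) * (resolution - 1)).astype(int)   # continuous -> class labels 0..29
y_back = qt.inverse_transform(bins / (resolution - 1))             # class labels -> approximate continuous values
```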
###Code
#export
#TESTE
def identity_func(x):
return x
IDENTITY_TRANSFORMER = FunctionTransformer(
func = identity_func,
inverse_func = identity_func,
validate=False,
accept_sparse=True,
check_inverse=True,
kw_args=None,
inv_kw_args=None,
)
class HistogramEstimator(BaseEstimator, ClassifierMixin, DelegateEstimatorMixIn):
'''
    Currently only performs marginal density estimation, not joint; thus, only 1-dimensional y.
For joint, should try something using RegressionChain (to pass dimension information to the prediction of other dims)
'''
def __init__(self,estimator, resolution = 'auto' ,alpha = 1, calibrated_classifier = None, calibration_cv = 4,rv_bins_kws = {}):
'''
resolution can be int (number of bins of uniform quantile transformation) or hist array
'''
        self.cumulative_target = False #used only in ClassificationKernelEstimator through inheritance
assert hasattr(estimator, 'predict_proba') or ('predict_proba' in dir(estimator)), 'estimator should implement `predict_proba` method'
self.estimator = estimator
self.alpha = alpha
assert isinstance(resolution, (np.ndarray, int, str)), f'resolution should be Array of bin edges, str or int, got {resolution.__class__}'
self.resolution = resolution
self.rv_bins_kws = rv_bins_kws
self.calibration_cv = calibration_cv
if calibrated_classifier == 'default':
self.calibrated_classifier = CalibratedClassifierCV(base_estimator=self.estimator, method='isotonic', cv = calibration_cv, ensemble = False)
elif calibrated_classifier is None:
self.calibrated_classifier = None
else:
assert hasattr(calibrated_classifier, 'predict_proba') or ('predict_proba' in dir(calibrated_classifier)), f'calibrated_classifier should implement `predict_proba method`'
assert not isinstance(calibrated_classifier, type), f'calibrated_classifier should be an instance, not type'
self.calibrated_classifier = calibrated_classifier
return
def _q_transformer_fit(self, y):
'''
fits self.q_transformer
'''
y = _fix_X_1d(y)
if type(self.resolution) == str:
self.bin_edges = np.histogram_bin_edges(y, bins = self.resolution)
print(f'base classifier will be trained with {len(self.bin_edges)} classes')
return self.bin_edges
elif type(self.resolution) == np.ndarray:
self.bin_edges = self.resolution
elif type(self.resolution) == int:
self.q_transformer = QuantileTransformer(n_quantiles = self.resolution)
self._q_minmax_scaler = MinMaxScaler()
y = self.q_transformer.fit_transform(y)
#for case when output_distribution != uniform
self._q_minmax_scaler.fit(y)
return self.q_transformer
elif isinstance(self.resolution, list):
return self.resolution
else: raise TypeError(f'self.resolution should be np.array of bin edges, str or int, got {self.resolution.__class__}')
def _q_transformer_transform(self, y, cumulative = False):
'''
maps floats to int (bin_id in histogram)
'''
y = _fix_X_1d(y)
if self.cumulative_target:
if type(self.resolution) in (str, np.ndarray):
hist_bins = np.digitize(y, self.bin_edges)
max_bin = len(self.bin_edges)
elif type(self.resolution) == int:
hist_bins = self.q_transformer.transform(y)
#scale between 0 and 1
hist_bins = self._q_minmax_scaler.transform(hist_bins)
hist_bins = np.around(hist_bins*(self.resolution - 1), decimals = 0).astype(int)
max_bin = self.resolution
elif isinstance(self.resolution,np.ndarray):
hist_bins = np.digitize(y, self.resolution)
max_bin = self.resolution
y_transformed = np.zeros((y.shape[0],max_bin), dtype = 'int8')
for i in range(len(y_transformed)):
bin_idx = int(hist_bins[i])
y_transformed[i, :bin_idx] = 1
y_transformed = y_transformed[:,:-1]
else:
if type(self.resolution) in (str, np.ndarray):
y_transformed = np.digitize(y, self.bin_edges)
elif type(self.resolution) == int:
y_transformed = self.q_transformer.transform(y)
#scale between 0 and 1
y_transformed = self._q_minmax_scaler.transform(y_transformed)
y_transformed = np.around(y_transformed*(self.resolution - 1), decimals = 0).astype(int)
elif isinstance(self.resolution,np.ndarray):
y_transformed = np.digitize(y, self.resolution)
y_transformed = y_transformed.flatten()
return y_transformed
def _q_transformer_inverse_transform(self,y):
'''
maps from bin_id in histogram (int) to float.
beware that during transform, information is lost due to downsampling, so inverse_transform will
not be an exact inverse_transform.
'''
y = _fix_X_1d(y)
if type(self.resolution) == int:
y_transformed = (y/(self.resolution - 1)).astype(float)
y_transformed = self._q_minmax_scaler.inverse_transform(y_transformed)
return self.q_transformer.inverse_transform(y_transformed).flatten() #1d asserted already
else: raise NotImplementedError('inverse transform only implemented for case when self.resolution == int')
def _preprocess_y_fit(self, y):
#set y_dim
if len(y.shape) == 1:
self.y_dim = 1
elif len(y.shape) == 2:
# assert 1d
assert y.shape[-1] == 1, 'y should be 1d. For joint estimation use KernelTreeHistogramEstimator or joint estimators'
self.y_dim = y.shape[-1]
else:
raise AssertionError('y should be 1d vector or 2d column array (n_samples,1)')
#reshape when y.dim == 1 and array dim equals 2
if self.y_dim == 1:
y = y.reshape(y.shape[0])
self._q_transformer_fit(y)
return self
def _preprocess_y_transform(self, y):
#set y_dim
if len(y.shape) == 1:
self.y_dim = 1
elif len(y.shape) == 2:
# assert 1d
assert y.shape[-1] == 1, 'y should be 1d. For joint estimation use KernelTreeHistogramEstimator or joint estimators'
self.y_dim = y.shape[-1]
else:
raise AssertionError('y should be 1d vector or 2d column array (n_samples,1)')
#reshape when y.dim == 1 and array dim equals 2
if self.y_dim == 1:
y = y.reshape(y.shape[0])
# Fit one instance of RandomVariable or KDE for each bin:
y_transformed = self._q_transformer_transform(y)
return y_transformed
def _preprocess_y_fit_transform(self, y):
self._preprocess_y_fit(y)
return self._preprocess_y_transform(y)
def fit(self, X, y = None, **estimator_fit_kws):
#fit y transformer
self._preprocess_y_fit(y)
#transform y
y_transformed = self._preprocess_y_transform(y)
# fit kdes
bin_ids = list(set(y_transformed))
bins_data_mapper = [y[y_transformed == i] for i in bin_ids]
print('fitting RandomVariable for each bin')
self._bin_dist_rvs = [RandomVariable(**self.rv_bins_kws).fit(d) for d in bins_data_mapper]
#fit calibrated classifier
if not self.calibrated_classifier is None:
self.calibrated_classifier.fit(X = X, y = y_transformed, **estimator_fit_kws)
self.estimator = self.calibrated_classifier.calibrated_classifiers_[0].base_estimator
else:
#fit classifier
print('fitting estimator')
self.estimator.fit(X = X, y = y_transformed, **estimator_fit_kws)
return self
def _get_bin_pdf(self,X):
'''
returns pdf array of shape (n_dists, n_bins, n_dims)
the values are the probability "density" for that bin
'''
if not self.calibrated_classifier is None:
probas = self.calibrated_classifier.predict_proba(X)
probas = np.array(probas)
return np.array(probas)
else:
probas = self.estimator.predict_proba(X)
return np.array(probas)
def custom_predict(self, X, agg_func = np.mean, sample_size = 1000, weight_func = None, alpha = None, replace = True, noise_factor = 0):
'''
performs aggregation in a samples drawn for a specific X and returns the custom predicted value
as the result of the aggregation. Could be mean, mode, median, std, entropy, likelihood...
note that agg_func recieves an array of shape (n_samples, n_dims). If you want to perform
aggregation along dimensions, dont forget to tell agg_func to perform operations along axis = 0
'''
samples = self.sample(X, sample_size, weight_func, alpha, replace, noise_factor)
return np.array([agg_func(sample) for sample in samples])
def _rv_bin_sample(self, bin_probas, sample_size):
'''
Generate RV samples from bins of 1 observation
'''
assert len(bin_probas.shape) == 2, f'Passed weights array should be 2d not {bin_probas.shape}'
        #SAMPLE ALL KDES AND THEN SAMPLE FROM THE SAMPLED ARRAY
samples_dist = np.array([bin_dist.sample(sample_size) for bin_dist in self._bin_dist_rvs])
samples_dist = _add_n_dims_axis(samples_dist)
samples_dist = samples_dist[:,:,0]
idxs = sample_idxs(bin_probas, sample_size = sample_size)
samples = []
print('Sampling data from bins...')
for i in tqdm(np.arange(bin_probas.shape[0])):
idx = idxs[i]
idx, counts = np.unique(idx, return_counts = True)
s = [np.random.choice(samples_dist[i],c, replace = True) for i,c in zip(idx,counts)]
samples.append(np.concatenate(s))
return np.array(samples)
def sample(self, X, sample_size = 1000, weight_func = None, alpha = None, replace = True, noise_factor = 0):
'''
        weight_func is a function that takes a weight array (n_dists, n_bins) and returns
        an array of the same shape with the desired processing of the weights. If weight_func is not None,
alpha is ignored
'''
#set alpha if not None, else use self.alpha
alpha = alpha if not alpha is None else self.alpha
#apply weight_func if not None, else, power to alpha
bins_probas = self._get_bin_pdf(X)
if self.y_dim == 1:
bins_probas = _add_n_dists_axis(bins_probas)
# for 1d case
bins_probas = bins_probas[0,:,:]
if not weight_func is None:
bins_probas = normalize(weight_func(bins_probas), norm = 'l1')
else:
bins_probas = normalize(bins_probas**alpha, norm = 'l1')
samples = self._rv_bin_sample(bins_probas, sample_size)
samples = _add_n_dims_axis(samples) # make a 3d sample array with dim axis = 1
noise = agg_smallest_distance(samples, agg_func = np.std)
noise = _add_n_dims_axis(noise)
return add_noise(samples, noise_factor*noise)
def density(self, X, dist = 'empirical', sample_size = 1000, weight_func = None, alpha = None, replace = True, noise_factor = 1e-7, **dist_kws):
'''
returns a RVArray instance of RandomVariable objects fitted on sampled data based on X and other sample params
'''
samples = self.sample(X, sample_size, weight_func, alpha, replace, noise_factor)
        print('Fitting random variable objects for each distribution...')
rv_objects = [RandomVariable(keep_samples = False).fit(sample, dist, **dist_kws) for sample in tqdm(samples)]
return RVArray(rv_objects)
def score(self, X, y = None, **score_kws):
return self.estimator.score(X, self._q_transformer_transform(y), **score_kws)
def predict_proba(self, X):
'''
predict proba handling multilabel outputs
'''
probas = self.estimator.predict_proba(X)
if self.cumulative_target:
probas = np.hstack([i for i in probas])
return probas
###Output
_____no_output_____
###Markdown
Usage Example`HistogramEstimator` turns the regression problem into a classification one, predicts the expected bins and then turns the prediction back to the continuous domain. It accepts any estimator with a `predict_proba` method as a base estimator. It works well for any kind of distribution, but only supports marginal distribution estimation. A minimal conceptual sketch follows.
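A minimal, self-contained sketch of that idea using plain scikit-learn and NumPy (synthetic data; the bin edges, classifier and resampling step below are illustrative stand-ins, not the estimator's internals):
```python
import numpy as np
from sklearn.linear_model import LogisticRegression

rng = np.random.RandomState(0)
X_demo = rng.normal(size=(1000, 3))
y_demo = X_demo[:, 0] + rng.normal(scale=0.3, size=1000)

edges = np.histogram_bin_edges(y_demo, bins=20)              # 1. discretise the continuous target
y_bin = np.digitize(y_demo, edges)
clf = LogisticRegression(max_iter=1000).fit(X_demo, y_bin)   # 2. classify the bin

proba = clf.predict_proba(X_demo[:1])[0]                     # 3. predicted bin distribution for one row
centers = (edges[:-1] + edges[1:]) / 2
drawn = rng.choice(clf.classes_, size=500, p=proba)          # 4. sample bins, map back to values
cond_sample = centers[np.clip(drawn - 1, 0, len(centers) - 1)]
```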
###Code
from sklearn.linear_model import LogisticRegression
import matplotlib.pyplot as plt
estim1 = HistogramEstimator(LogisticRegression(tol = 0.001, solver = 'sag'), 'auto', rv_bins_kws = {'default_dist':['empirical']})
#estim1 = JointEntropyEstimator(LogisticRegression(tol = 0.001, solver = 'sag'), resolution = 'auto', )
estim2 = HistogramEstimator(LogisticRegression(tol = 0.001, solver = 'sag'), 30, rv_bins_kws = {'default_dist':['empirical']})
estim1.fit(X_train, y_train[:,1])
estim2.fit(X_train, y_train[:,1])
i = np.random.choice(np.arange(y_test.shape[0]))
alpha = 1
noise_factor = 0.2
samples2 = estim2.sample(X_test[i:i+1], sample_size = 200, alpha = alpha, noise_factor = noise_factor)
#prediction = density_estimator1.custom_predict(
# X_test[i:i+1], agg_func = lambda x: np.mean(x,axis = 0), alpha = alpha, beta = beta, gamma = gamma)
#naive_prediction = density_estimator1.predict(X_test[i:i+1])
if (len(samples2.shape) > 1) and (samples2.shape[-1] == 2):
jntplot = sns.jointplot(samples2[0,:,0], samples2[0,:,1], joint_kws = {'label':'Model Samples', 'alpha':0.1})
jntplot.ax_joint.scatter(y[:,0], y[:,1], color = 'orange', alpha = 0.01, label = 'Target Distribution')
jntplot.ax_joint.scatter(y_test[i,0], y_test[i,1], color = 'red', label = 'Target Value')
jntplot.ax_joint.scatter(prediction[0,0], prediction[0,1], color = 'yellow', label = 'Predicted Value')
jntplot.ax_joint.scatter(naive_prediction[0,0], naive_prediction[0,1], color = 'cyan', label = 'Naive Predicted Value')
jntplot.ax_joint.legend()
else:
dst = sns.distplot(samples2, kde = True, bins = 20, hist_kws = {'label':'Inferred Conditional Distribution'})
dst = sns.distplot(y_test[:,1], kde = True, bins = 20, hist_kws = {'label':'Target Total Distribution'})
dst._axes.axvline(y_test[i,1], color = 'r', label = 'True Value')
dst._axes.legend()
alpha = 1
noise_factor = 0.0
samples1 = estim1.sample(X_test, 1000, alpha = alpha, noise_factor = noise_factor)
samples2 = estim2.sample(X_test, 1000, alpha = alpha, noise_factor = noise_factor)
ll1 = np.log2(kde_likelihood(y_test[:,1:2],samples1))
ll2 = np.log2(kde_likelihood(y_test[:,1:2],samples2))
sns.distplot(ll1[ll1 > -30], label = 'model1')
sns.distplot(ll2[ll2 > -30], label = 'model2')
plt.legend()
print(np.median(ll1[ll1 > -1e10]), np.median(ll2[ll2 > -1e10]))
entr1 = kde_entropy(samples1, frac = 0.2,sample_size = 1000)
entr2 = kde_entropy(samples2, frac = 0.2,sample_size = 1000)
sns.distplot(entr1, label = 'model1')
sns.distplot(entr2, label = 'model2')
plt.legend()
print(entr1.mean(),entr2.mean())
q2 = quantile(y_test[:,1:],samples2)
q1 = quantile(y_test[:,1:],samples1)
sns.distplot(q1)
sns.distplot(q2)
###Output
_____no_output_____
###Markdown
`ClassificationKernelEstimator`> Performs a classification task and, during density estimation, queries the n_neighbors training points closest to the predict_proba output (a distribution vector); their target values form the conditional sample. The idea is sketched below.
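A hedged sketch of that idea with plain scikit-learn (synthetic data; the class below reuses the `HistogramEstimator` machinery and adds optional scaling and similarity functions on top):
```python
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import NearestNeighbors

rng = np.random.RandomState(0)
X_demo = rng.normal(size=(800, 3))
y_demo = X_demo[:, 0] + rng.normal(scale=0.3, size=800)
y_bin = np.digitize(y_demo, np.histogram_bin_edges(y_demo, bins=15))

clf = LogisticRegression(max_iter=1000).fit(X_demo, y_bin)
proba_space = clf.predict_proba(X_demo)                   # embed every training row as a bin distribution
nn = NearestNeighbors(n_neighbors=30).fit(proba_space)
_, idx = nn.kneighbors(clf.predict_proba(X_demo[:1]))     # neighbours of one query in probability space
cond_sample = y_demo[idx[0]]                              # empirical conditional sample for that query
```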
###Code
#export
def minkowski_similarity(X):
X = normalize(X, norm = 'l2')
return 2/(1+np.exp(X))
class ClassificationKernelEstimator(HistogramEstimator):
'''
Estimator that uses the predicted proba vector of the estimator as a kernel and then performs knn search
in order to estimate the distribution
'''
def __init__(
self, estimator, resolution = 'auto', cumulative_target = True,alpha = 1, calibrated_classifier = None, calibration_cv = None,
prefit_estimator=False, n_neighbors=30, scale_query_space=True, knn_indexer=None,
knn_metric='euclidean', similarity_function=None, noise_factor = 0, n_jobs = None,
):
self.n_jobs = n_jobs
if cumulative_target:
estimator = MultiOutputClassifier(estimator, n_jobs = n_jobs)
super().__init__(estimator, resolution, alpha, calibrated_classifier, calibration_cv)
self.n_neighbors = n_neighbors
self.prefit_estimator = prefit_estimator
self.scale_query_space = scale_query_space
if knn_indexer is None:
self.knn_indexer = NearestNeighbors(
n_neighbors=n_neighbors, metric=knn_metric, algorithm='kd_tree')
else:
self.knn_indexer = knn_indexer
self.knn_metric = knn_metric
self.similarity_function = similarity_function
self.alpha = alpha
self.noise_factor = noise_factor
self.cumulative_target = cumulative_target
return
def fit(self, X, y=None, **estimator_fit_kws):
#fit y transformer
self._preprocess_y_fit(y)
#transform y
y_transformed = self._q_transformer_transform(y)
if not self.prefit_estimator:
print('fitting estimator')
if not self.calibrated_classifier is None:
#fit calibrated classifier
self.calibrated_classifier.fit(X = X, y = y_transformed, **estimator_fit_kws)
self.estimator = self.calibrated_classifier.calibrated_classifiers_[0].base_estimator
else:
#fit classifier
self.estimator.fit(X, y_transformed, **estimator_fit_kws)
#get probas for query space
if self.cumulative_target:
probas = self.estimator.predict_proba(X)
probas = np.hstack([i for i in probas])
else:
probas = self.estimator.predict_proba(X)
#set space transformer for probability space
if self.scale_query_space and not self.cumulative_target:
self._query_space_scaler = QuantileTransformer().fit(probas)
probas = self._query_space_scaler.transform(probas)
probas = normalize(probas)
else:
# make a identity transformer in case of no scalling
self._query_space_scaler = FunctionTransformer()
self.knn_indexer.fit(probas)
self.y_ = y
return self
def _query_idx_and_sim(self, query_vector, n_neighbors):
# apply scaler
query_vector = self._query_space_scaler.transform(query_vector)
# query distances and indexes
dist, idx = [], []
batches = make_batches(query_vector, batch_size = np.ceil(query_vector.shape[0]/100).astype(int))
print('Querying neighbors...')
for batch in tqdm(batches):
dist_i, idx_i = self.knn_indexer.kneighbors(batch, n_neighbors)
dist.append(dist_i)
idx.append(idx_i)
dist = np.vstack(dist)
idx = np.vstack(idx)
if (self.knn_metric in ('minkowski', 'euclidean')) and (self.similarity_function is None):
sim = minkowski_similarity(dist)
else:
sim = self.similarity_function(dist)
# take l1 norm of similarity vectors to make a valid probability distribution
sim = normalize(sim, norm='l1')
return idx, sim + 1e-9 #ensure vector is not null
def _sample_from_idx_sim(self, idx, sim, sample_size, noise_factor):
samples = []
for i in np.arange(len(idx)):
ys = self.y_[sample_multi_dim(idx[i], sample_size = sample_size, weights = sim[i], axis = 0)]
if len(ys.shape) == 1:
ys = ys.reshape(-1,1)
if abs(noise_factor) > 0:
noise = agg_smallest_distance(ys.reshape(1,*ys.shape), agg_func = np.median)
ys = add_noise(ys, noise_factor*noise)
samples.append(ys)
return np.array(samples)
def sample(self, X, sample_size=1000, n_neighbors=None,
alpha=None, noise_factor=None):
#handle args:
n_neighbors, alpha, noise_factor = self._handle_similarity_sample_parameters(
n_neighbors = n_neighbors, alpha = alpha, noise_factor = noise_factor)
#get probas
probas = self.estimator.predict_proba(X)
if isinstance(probas, list):
#handle multilabel probas
probas = np.hstack([i for i in probas])
# get idx and sim using proba vector as query vector
idx, sim = self._query_idx_and_sim(probas, n_neighbors)
# sample indexes and data, add noise
if not alpha is None:
sim = normalize(sim**alpha, norm = 'l1')
return self._sample_from_idx_sim(idx, sim, sample_size, noise_factor)
def density(self, X, dist = 'empirical', sample_size=1000, n_neighbors=None,
alpha=None, noise_factor=None, **dist_kws):
samples = self.sample(X, sample_size, n_neighbors, alpha, noise_factor)
print('fitting distribution objects...')
rv_objects = [RandomVariable(keep_samples = False).fit(sample, dist, **dist_kws) for sample in tqdm(samples)]
return RVArray(rv_objects)
def _handle_similarity_sample_parameters(self, **kwargs):
args = []
for key in kwargs:
if kwargs[key] is None:
if hasattr(self, key):
args.append(getattr(self, key))
else:
args.append(kwargs[key])
else:
args.append(kwargs[key])
return args
###Output
_____no_output_____
###Markdown
Which distance to use? A quick empirical check of how different transforms of the L2 distance track Jensen-Shannon similarity on random probability vectors; this is what motivates the `minkowski_similarity` transform defined above.
###Code
from scipy.spatial.distance import jensenshannon
from sklearn.preprocessing import normalize
from sklearn.metrics import pairwise_distances
import numpy as np
import matplotlib.pyplot as plt
random_X = normalize(np.abs(np.random.random((10000,10))), norm = 'l2')
js = 1- np.array([jensenshannon(random_X[0], i) for i in random_X])
cossim = (normalize(random_X)[0:1]@(normalize(random_X).T)).flatten()
euclidean = pairwise_distances(random_X[0:1], random_X).flatten()
msk = js > 0.0
js, cossim, euclidean = js[msk], cossim[msk], euclidean[msk]
plt.scatter(js, 2/(1+np.exp(euclidean)), alpha = 0.02, label = r'$\frac{2}{1+exp(L2)}$')
plt.scatter(js, 1/(1+euclidean), alpha = 0.02, label = r'$\frac{1}{1+L2}$')
plt.scatter(js, cossim, alpha = 0.02, label = 'cossim similarity')
plt.plot([0,1], [0,1])
plt.legend()
###Output
_____no_output_____
###Markdown
Usage Example
###Code
estim1 = ClassificationKernelEstimator(estim2.estimator, 20, prefit_estimator = False, n_neighbors = 100, scale_query_space = False)
estim1.fit(X_train, y_train[:,1])
i = np.random.choice(np.arange(y_test.shape[0]))
alpha = 1
noise_factor = 0.2
n_neighbors = 100
samples2 = estim1.sample(X_test[i:i+1], sample_size = 300, alpha = alpha, noise_factor = noise_factor, n_neighbors = n_neighbors)
#prediction = density_estimator1.custom_predict(
# X_test[i:i+1], agg_func = lambda x: np.mean(x,axis = 0), alpha = alpha, beta = beta, gamma = gamma)
#naive_prediction = density_estimator1.predict(X_test[i:i+1])
if (len(samples2.shape) > 1) and (samples2.shape[-1] == 2):
jntplot = sns.jointplot(samples2[0,:,0], samples2[0,:,1], joint_kws = {'label':'Model Samples', 'alpha':0.1})
jntplot.ax_joint.scatter(y[:,0], y[:,1], color = 'orange', alpha = 0.01, label = 'Target Distribution')
jntplot.ax_joint.scatter(y_test[i,0], y_test[i,1], color = 'red', label = 'Target Value')
jntplot.ax_joint.scatter(prediction[0,0], prediction[0,1], color = 'yellow', label = 'Predicted Value')
jntplot.ax_joint.scatter(naive_prediction[0,0], naive_prediction[0,1], color = 'cyan', label = 'Naive Predicted Value')
jntplot.ax_joint.legend()
else:
dst = sns.distplot(samples2, kde = True, bins = 20, hist_kws = {'label':'Inferred Conditional Distribution'})
dst = sns.distplot(y_test[:,1], kde = True, bins = 20, hist_kws = {'label':'Target Total Distribution'})
dst._axes.axvline(y_test[i,1], color = 'r', label = 'True Value')
dst._axes.legend()
alpha = 1
noise_factor = 0.0
n_neighbors = 300
samples1 = estim1.sample(X_test, 1000, alpha = alpha, noise_factor = noise_factor, n_neighbors = n_neighbors)
#samples2 = estim2.sample(X_test, 1000, alpha = alpha, noise_factor = noise_factor)
ll1 = np.log2(kde_likelihood(y_test[:,1:2],samples1, frac = 0.1))
ll2 = np.log2(kde_likelihood(y_test[:,1:2],samples2, frac = 0.1))
sns.distplot(ll1[ll1 > -30], label = 'model1')
sns.distplot(ll2[ll2 > -30], label = 'model2')
plt.legend()
print(np.median(ll1[ll1 > -1e10]), np.median(ll2[ll2 > -1e10]))
entr1 = kde_entropy(samples1, frac = 0.1,sample_size = 1000)
entr2 = kde_entropy(samples2, frac = 0.1,sample_size = 1000)
sns.distplot(entr1, label = 'model1')
sns.distplot(entr2, label = 'model2')
plt.legend()
print(entr1.mean(),entr2.mean())
q2 = quantile(y_test[:,1:],samples2)
q1 = quantile(y_test[:,1:],samples1)
sns.distplot(q1)
sns.distplot(q2)
###Output
_____no_output_____
###Markdown
Ensemble Tree methods TreeEstimatorMixin class>Provides the shared functionality for similarity-sampling and naive-sampling random forest estimators. The forest-embedding idea it builds on is sketched below.
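The shared building block is a sparse forest embedding: one-hot encode the leaf id each tree assigns to each row, so rows that land in many of the same leaves look similar. A minimal sketch with plain scikit-learn on synthetic data (the mixin below adds node and datapoint weighting on top of this):
```python
import numpy as np
from sklearn.ensemble import RandomForestRegressor
from sklearn.preprocessing import OneHotEncoder
from sklearn.metrics.pairwise import cosine_similarity

rng = np.random.RandomState(0)
X_demo = rng.normal(size=(500, 3))
y_demo = X_demo[:, 0] + rng.normal(scale=0.3, size=500)

rf = RandomForestRegressor(n_estimators=20, min_samples_leaf=10).fit(X_demo, y_demo)
leaf_ids = rf.apply(X_demo)                                    # (n_samples, n_trees) leaf indices
leaf_matrix = OneHotEncoder(handle_unknown='ignore').fit_transform(leaf_ids)

sim = cosine_similarity(leaf_matrix[:1], leaf_matrix).ravel()  # similarity of row 0 to every row
p = sim / sim.sum()
cond_sample = y_demo[rng.choice(len(y_demo), size=300, p=p)]   # similarity-weighted resample of y
```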
###Code
#export
#node quality functions
def expected_likelihood(node_data, sample_size = 100):
kde = KDE().fit(node_data)
return np.mean(kde.evaluate(kde.rvs(size = sample_size)))
def inverese_log_node_var(node_data): #does not make sense for multivariate distributions
centroid = node_data.mean(axis = 0).reshape(1,-1)
distances = cdist(node_data, centroid, 'seuclidean').flatten()
return 1/np.log1p(np.mean(distances))
# datapoint-node functions
def datapoint_pdf(node_data):
return KDE().fit(node_data).pdf(node_data)
def datapoint_gaussian_likelihood(node_data):
centroid = node_data.mean(axis = 0).reshape(1,-1)
distances = cdist(node_data, centroid, 'seuclidean').flatten()
distance_std = distances.std()
#if distance_std == 0:
# return 1
z = (distances - distances.mean())/distance_std
return 1/(distance_std*np.pi**(1/2))*np.exp(-1/2*z**2)
def _bimodal_variance_fix_dim(x):
if len(x.shape) == 1:
return 1/np.log1p(bimodal_variance(_fix_one_dist_1d(x)))
else:
return 1/np.log1p(bimodal_variance(_fix_one_dist_2d(x)))
AVALIBLE_NODE_AGG_FUNC = {
'expected_likelihood':expected_likelihood,
'inverse_log_variance':inverese_log_node_var,
'inverse_log_bimodal_variance': _bimodal_variance_fix_dim
}
AVALIBLE_DATAPOINT_WEIGHT_FUNC = {
'kde_likelihood': datapoint_pdf,
'gaussian_likelihood': datapoint_gaussian_likelihood
}
#export
class TreeEstimatorMixin():
'''Base Class containing important methods for building Naive and Similarity Density Tree estimators'''
@property
def _node_data_generator(self):
return self._make_node_data_generator(self.y_, self._raw_leaf_node_matrix)
def _make_node_data_generator(self, y, node_matrix):
'''
creates a generator over the sparse node matrix; each iteration yields the y values of the samples that fall in one node (column)
'''
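# cumulative column counts of the CSC layout give, for each node (column), the slice of `indices`
# that lists the row ids of the samples in that node; the generator then yields their y values per node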
s1 = node_matrix.sum(axis = 0).cumsum().A.astype(int).flatten()
s2 = np.concatenate([[0],s1[:-1]])
slices = [slice(i[0],i[1]) for i in zip(s2,s1)]
idxs = node_matrix.tocsc().indices
idxs = [idxs[s] for s in slices]
return (y[idx] for idx in idxs)
def _make_node_kde_array(self): #<- since kde estimation is the best approach, save fitted kde instances for each node
#to make use of them during node and node_data weight inference
#maybe it's better to get data from multiple nodes before fitting the kde
raise NotImplementedError
def _make_node_cdist_array(self): #<- gaussian likelihood works fine as well, so save cdist matrix for each node
raise NotImplementedError
def _apply(self, X):
'''
A substitute for estimator.apply in case it returns 3d arrays (such as sklearn's gradient boosting classifier)
instead of 2d. If estimator.apply already returns a 2d array, the output of this function
is identical to that of self.estimator.apply
'''
applied_arr = self.estimator.apply(X)
dim1_shape = applied_arr.shape[0]
dim2_shape = np.prod(applied_arr.shape[1:])
return applied_arr.reshape(dim1_shape, dim2_shape)
def _fit_leaf_node_matrix(self, X, y, node_rank_func, node_data_rank_func, max_nodes = None, max_data = None, sample_weight = None):
nodes_array = self._apply(X)
self._leaf_node_transformer = OneHotEncoder(handle_unknown = 'ignore')
leaf_node_matrix = self._leaf_node_transformer.fit_transform(nodes_array)
if max_nodes is None:
self._keep_nodes_in_query = slice(None)
else:
if 0 < max_nodes <= 1:
#case max_nodes is fraction
max_nodes = max(1,int(max_nodes*leaf_node_matrix.shape[1]))
self._keep_nodes_in_query = np.random.choice(np.arange(leaf_node_matrix.shape[1]), size = max_nodes, replace = False)
if max_data is None:
self._keep_data_in_query = slice(None)
else:
if 0 < max_data <= 1:
#case max_data is fraction
max_data = max(1,int(max_data*leaf_node_matrix.shape[0]))
self._keep_data_in_query = np.random.choice(np.arange(leaf_node_matrix.shape[0]), size = max_data, replace = False, p = sample_weight)
leaf_node_matrix = leaf_node_matrix[self._keep_data_in_query, :]
leaf_node_matrix = leaf_node_matrix[:, self._keep_nodes_in_query]
self._raw_leaf_node_matrix = leaf_node_matrix
#self._node_data_generator = self.#self._make_node_data_generator(y, leaf_node_matrix)
self._leaf_node_weights = self._calculate_node_weights(y, leaf_node_matrix, node_rank_func)
self._leaf_node_matrix = self._make_weighted_query_space(y, leaf_node_matrix, node_data_rank_func)# <- try making this a property
return self
def _transform_query_matrix(self, X):
node_matrix = self._leaf_node_transformer.transform(self._apply(X))
node_matrix = node_matrix[:, self._keep_nodes_in_query]
return self._make_weighted_query_vector(
agg_node_weights = self._leaf_node_weights,
node_matrix = node_matrix)
def _query_idx_and_sim(self, X, n_neighbors, lower_bound, beta, gamma):
idx, sim = cos_sim_query(
self._transform_query_matrix(X), self._leaf_node_matrix, n_neighbors=n_neighbors,
lower_bound=lower_bound, beta = beta, gamma = gamma)
return idx, sim + 1e-9 #ensure sim vector is not null
def _entropy_estimator_sample(self, X, sample_size, weight_func,alpha, noise_factor):
'''
samples from a linear entropy estimator fitted on the forest embedding.
Works only for marginal distributions
'''
nodes_array = self._apply(X)
forest_embeddings = self._leaf_node_transformer.transform(nodes_array)
samples = self.entropy_estimator_sampler.sample(forest_embeddings, sample_size, weight_func, alpha, noise_factor)
return samples
def _fit_entropy_estimator_sampler(self, X, y = None, **fit_kws):
'''
fit a linear entropy estimator
Works only for marginal distributions
'''
nodes_array = self._apply(X)
self._leaf_node_transformer = OneHotEncoder()
forest_embeddings = self._leaf_node_transformer.fit_transform(nodes_array)
self.entropy_estimator_sampler.fit(forest_embeddings, y, **fit_kws)
return self
def _kde_similarity_sample(self, X, sample_size, weight_func, n_neighbors,
lower_bound, alpha, beta, gamma, noise_factor, **rv_kwargs):
idx, sim = self._query_idx_and_sim(X ,n_neighbors=n_neighbors, lower_bound=lower_bound,beta = beta, gamma = gamma)
idx, sim = np.array(idx), np.array(sim)
p = self._handle_sample_weights(weight_func = weight_func, sim = sim, alpha = alpha)
samples = []
for i in np.arange(len(idx)):
ys = self.y_[sample_multi_dim(idx[i], sample_size = sample_size, weights = p[i], axis = 0)]
if len(ys.shape) == 1:
ys = ys.reshape(-1,1)
noise = agg_smallest_distance(ys.reshape(1,*ys.shape), agg_func = np.std)
ys = add_noise(ys, noise_factor*noise)
samples.append(RandomVariable(**rv_kwargs).fit(ys, sample_weight = None).sample(sample_size = sample_size))
return np.array(samples)
def _similarity_sample(self, X, sample_size, weights, n_neighbors,
lower_bound, alpha, beta, gamma, noise_factor):
idx, sim = self._query_idx_and_sim(
X ,n_neighbors=n_neighbors, lower_bound=lower_bound,beta = beta, gamma = gamma)
idx, sim = np.array(idx), np.array(sim)
p = self._handle_sample_weights(weight_func = weights, sim = sim, alpha = alpha)
samples = []
for i in np.arange(len(idx)):
ys = self.y_[sample_multi_dim(idx[i], sample_size = sample_size, weights = p[i], axis = 0)]
if len(ys.shape) == 1:
ys = ys.reshape(-1,1)
noise = agg_smallest_distance(ys.reshape(1,*ys.shape), agg_func = np.std)
ys = add_noise(ys, noise_factor*noise)
samples.append(ys)
return np.array(samples)
def _density(self, X, dist, sample_size, weights, n_neighbors,
lower_bound, alpha, beta, gamma, noise_factor, **dist_kws):
'''
returns a RVArray instance of RandomVariable objects fitted on sampled data based on X and other sample params
'''
samples = self._similarity_sample(X, sample_size, weights, n_neighbors,
lower_bound, alpha, beta, gamma, noise_factor)
rv_objects = [RandomVariable(keep_samples = False).fit(sample, dist, **dist_kws) for sample in tqdm(samples)]
return RVArray(rv_objects)
def _similarity_sample_idx(self, X, sample_size, weight_func, n_neighbors,
lower_bound, alpha, beta, gamma):
idxs, sim = self._query_idx_and_sim(X ,n_neighbors=n_neighbors, lower_bound=lower_bound,beta = beta, gamma = gamma)
idxs, sim = np.array(idxs), np.array(sim)
p = self._handle_sample_weights(weight_func = weight_func, sim = sim, alpha = alpha)
samples_idxs = sample_from_dist_array(idxs.reshape(*idxs.shape,1), sample_size, p)
samples_idxs = samples_idxs.reshape(samples_idxs.shape[:-1])
return samples_idxs
def _similarity_empirical_pdf(self, X, weights, n_neighbors, lower_bound, alpha, beta, gamma):
idx, sim = cos_sim_query(
self._transform_query_matrix(X),
self._leaf_node_matrix,
n_neighbors=n_neighbors,
lower_bound=lower_bound,
beta = beta,
gamma = gamma)
p = self._handle_sample_weights(weight_func = weights, sim = sim, alpha = alpha)
return np.array([self.y_[i] for i in idx]), p
def _custom_predict(self, X, agg_func, sample_size, weights, n_neighbors, lower_bound, alpha, beta, gamma, noise_factor):
'''
performs aggregation on the samples drawn for a specific X and returns the custom predicted value
as the result of the aggregation. Could be mean, mode, median, std, entropy, likelihood...
note that agg_func receives an array of shape (n_samples, n_dims). If you want to perform
aggregation along dimensions, don't forget to tell agg_func to operate along axis = 0
'''
samples = self._similarity_sample(X, sample_size, weights, n_neighbors, lower_bound, alpha, beta, gamma, noise_factor)
return np.array([agg_func(sample) for sample in samples])
def _calculate_node_weights(self, y, node_matrix, node_rank_func):
'''
calculates node weights that multiply the query space matrix, in order to make some nodes more relevant
according to some aggregate metric of the target data in each node.
input should be a list whose elements are arrays of node samples
'''
if not node_rank_func is None:
# cannot call in a vectorized fashion because data from nodes may have different sizes
#node_weights = Parallel(n_jobs=-1, verbose=0,
# **_joblib_parallel_args(prefer="threads"))(
# delayed(node_rank_func)(X)
# for X in self._node_data_generator)
node_weights = [node_rank_func(X) for X in self._node_data_generator]
else:
node_weights = np.ones(node_matrix.shape[1])
return np.array(node_weights)
def _calculate_node_datapoint_weights(self, y, node_matrix, node_data_rank_func):
'''
Calculates node-datapoint (y values) weights. Higher values mean a datapoint "belongs tighter"
to that node and is more likely to be sampled when that node is reached. Examples of node-datapoint weights
are the likelihood of the point given the node pdf, or some median/mean deviance from the point to the node samples
'''
#datapoint_node_weights = Parallel(n_jobs=1, verbose=0,
# **_joblib_parallel_args(prefer="threads"))(
# delayed(node_data_rank_func)(X)
# for X in node_data_generator)
datapoint_node_weights = [node_data_rank_func(node_data) for node_data in self._node_data_generator]
return datapoint_node_weights
def _handle_sample_weights(self, weight_func, sim, alpha):
'''
sampling weights should sum to 1, since they form a sampling probability distribution
'''
if weight_func is None:
return np.array([normalize((i**alpha).reshape(1,-1), norm = 'l1').flatten() for i in sim])
else:
return np.array([normalize((weight_func(i)).reshape(1,-1), norm = 'l1').flatten() for i in sim])
def _make_weighted_query_vector(self, agg_node_weights, node_matrix):
'''
multiplies elements of query vector by their respective weights
the greater the weights, the better the "quality" of the nodes
'''
if not isinstance(node_matrix, scipy.sparse.csr_matrix):
node_matrix = scipy.sparse.csr_matrix(node_matrix)
node_matrix.data = node_matrix.data*np.take(agg_node_weights, node_matrix.indices)
return node_matrix
def _make_weighted_query_space(self, y, node_matrix, node_data_rank_func = None):
'''
query space is the leaf_node_matrix multiplied by node_data_weights
the greater the value in the matrix, the better the "quality" of that data point
'''
if not isinstance(node_matrix, scipy.sparse.csr_matrix):
node_matrix = scipy.sparse.csr_matrix(node_matrix)
if not node_data_rank_func is None:
# datapoint_node_weights multiplication (columns)
#make copy
node_matrix = copy.deepcopy(node_matrix)
#cast to csc to make .data order columnwise
node_matrix = node_matrix.tocsc()
datapoint_node_weights = self._calculate_node_datapoint_weights(y, node_matrix, node_data_rank_func)
node_matrix.data = node_matrix.data*np.concatenate(datapoint_node_weights)
#convert back to csr
node_matrix = node_matrix.tocsr()
else:
pass
return node_matrix
###Output
_____no_output_____
###Markdown
KernelTreeEstimator - Estimates the conditional distribution based on samples from the dataset, taking into account the `leaf_node_matrix`. A quick hedged usage sketch follows.
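A quick hedged usage sketch before the implementation (it assumes the `X_train`, `y_train` and `X_test` arrays prepared in earlier cells; any estimator exposing an `apply` method should work as the base forest):
```python
from sklearn import ensemble

rf = ensemble.RandomForestRegressor(n_estimators=10, min_samples_leaf=5)
kte = KernelTreeEstimator(rf, n_neighbors=50).fit(X_train, y_train)
cond_samples = kte.sample(X_test[:2], sample_size=300, noise_factor=0.1)   # (2, 300, n_dims) array
cond_dists = kte.density(X_test[:2], dist='kde', sample_size=300)          # RVArray of fitted RVs
```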
###Code
#export
#MAKE WARNING REGARDING NUMBER OF NODES IN TREE TAKING KNEIGHBORS QUERY INTO ACCOUNT, maybe set max_leaf_nodes automatically
class KernelTreeEstimator(BaseEstimator, ClassifierMixin, DelegateEstimatorMixIn ,TreeEstimatorMixin):
def __init__(self, estimator, entropy_estimator_sampler = None, alpha = 1, beta = 1, gamma = 1, node_rank_func = None,
node_data_rank_func = None,n_neighbors = 30, lower_bound = 0.0):
#assert estimator.min_samples_leaf >= 3, 'min_samples_leaf should be greater than 2'
assert hasattr(estimator, 'apply'), 'estimator should have `apply` method'
self.estimator = estimator
self.n_neighbors = n_neighbors
self.lower_bound = lower_bound
self.alpha = alpha
self.beta = beta
self.gamma = gamma
if node_rank_func is None:
self.node_rank_func = node_rank_func
else:
try: self.node_rank_func = node_rank_func if callable(node_rank_func) else AVALIBLE_NODE_AGG_FUNC[node_rank_func]
except KeyError: raise KeyError(f'if not callable, node_rank_func should be one of {list(AVALIBLE_NODE_AGG_FUNC)}, not {node_rank_func}')
if node_data_rank_func is None:
self.node_data_rank_func = node_data_rank_func
else:
try: self.node_data_rank_func = node_data_rank_func if callable(node_data_rank_func) else AVALIBLE_DATAPOINT_WEIGHT_FUNC[node_data_rank_func]
except KeyError: raise KeyError(f'if not callable, node_data_rank_func should be one of {list(AVALIBLE_DATAPOINT_WEIGHT_FUNC)}, not {node_data_rank_func}')
if not entropy_estimator_sampler is None:
assert hasattr(entropy_estimator_sampler, 'sample'), f'entropy_estimator_sampler should implement `sample` method'
self.entropy_estimator_sampler = entropy_estimator_sampler
else:
self.entropy_estimator_sampler = entropy_estimator_sampler
return
def __repr__(self):
return self.__class__.__name__
def fit(self, X, y = None, sample_weight = None, **fit_kws):
#fix y shape
if len(y.shape) == 1:
y = y.reshape(-1,1)
try:
self.estimator.fit(X, y, sample_weight = sample_weight, **fit_kws)
except TypeError:
self.estimator.fit(X, y, **fit_kws)
if self.entropy_estimator_sampler is None:
self._fit_leaf_node_matrix(
X, y, node_rank_func = self.node_rank_func, node_data_rank_func = self.node_data_rank_func)# <- MAKE NODE WEIGHTED VERSION
else:
self._fit_entropy_estimator_sampler(X, y)
self.y_ = y
return self
def density(self, X, dist = 'kde', sample_size = 1000, weight_func = None, n_neighbors = None,
lower_bound = None, alpha = None, beta = None, gamma = None, noise_factor = 1e-7, **dist_kwargs):
n_neighbors, lower_bound, alpha, beta, gamma = self._handle_similarity_sample_parameters(
n_neighbors, lower_bound, alpha, beta, gamma)
return super()._density(X, dist, sample_size, weight_func, n_neighbors,
lower_bound, alpha, beta, gamma, noise_factor, **dist_kwargs)
def sample(self, X, sample_size = 1000, weight_func = None, n_neighbors = None,
lower_bound = None, alpha = None, beta = None, gamma = None, noise_factor = 0):
'''weight_func should be callable (receives an array and returns an array of the same shape) or None'''
n_neighbors, lower_bound, alpha, beta, gamma = self._handle_similarity_sample_parameters(
n_neighbors, lower_bound, alpha, beta, gamma)
if self.entropy_estimator_sampler is None:
samples = super()._similarity_sample(
X = X, sample_size = sample_size, weights = weight_func, n_neighbors = n_neighbors,
lower_bound = lower_bound, alpha = alpha, beta = beta, gamma = gamma, noise_factor = noise_factor
)
else:
samples = super()._entropy_estimator_sample(X, sample_size, weight_func,alpha, noise_factor)
return samples
def custom_predict(
self, X, agg_func, sample_size = 1000, weights = None, n_neighbors = None,
lower_bound = None, alpha = None, beta = None, gamma = None, noise_factor = 0
):
n_neighbors, lower_bound, alpha, beta, gamma = self._handle_similarity_sample_parameters(n_neighbors, lower_bound, alpha, beta, gamma)
return self._custom_predict(X, agg_func, sample_size, weights, n_neighbors, lower_bound, alpha, beta, gamma, noise_factor)
def sample_histogram(self, X, weights, n_neighbors, lower_bound, alpha, beta, gamma):
n_neighbors, lower_bound, alpha, beta, gamma = self._handle_similarity_sample_parameters(n_neighbors, lower_bound, alpha, beta, gamma)
return self._similarity_empirical_pdf(X, weights, n_neighbors, lower_bound, alpha, beta, gamma)
def _handle_similarity_sample_parameters(self, n_neighbors, lower_bound, alpha, beta, gamma):
if n_neighbors is None:
n_neighbors = self.n_neighbors
if lower_bound is None:
lower_bound = self.lower_bound
if alpha is None:
alpha = self.alpha
if beta is None:
beta = self.beta
if gamma is None:
gamma = self.gamma
return n_neighbors, lower_bound, alpha, beta, gamma
###Output
_____no_output_____
###Markdown
Usage Example: We can see that forest estimators deal better with bimodal data
###Code
estimator1 = ensemble.RandomForestRegressor(n_estimators = 10,min_samples_leaf = 5, warm_start = False)
#estimator = ensemble.ExtraTreesRegressor(n_estimators = 10,min_samples_leaf = 10, warm_start = False)
density_estimator1 = KernelTreeEstimator(
estimator1,node_rank_func = None, node_data_rank_func = None).fit(X_train,y_train[:,:])
i = np.random.choice(np.arange(y_test.shape[0]))
alpha, beta, gamma = 1,1,0
noise_factor = 1
samples = density_estimator1.sample(X_test[i:i+1], sample_size = 700, alpha = alpha, beta = beta,gamma = gamma, noise_factor = noise_factor)
prediction = density_estimator1.custom_predict(
X_test[i:i+1], agg_func = lambda x: np.mean(x,axis = 0), alpha = alpha, beta = beta, gamma = gamma, noise_factor = 1)
naive_prediction = density_estimator1.predict(X_test[i:i+1])
if (len(samples.shape) > 1) and (samples.shape[-1] == 2):
jntplot = sns.jointplot(samples[0,:,0], samples[0,:,1], joint_kws = {'label':'Model Samples', 'alpha':0.1})
jntplot.ax_joint.scatter(y[:,0], y[:,1], color = 'orange', alpha = 0.01, label = 'Target Distribution')
jntplot.ax_joint.scatter(y_test[i,0], y_test[i,1], color = 'red', label = 'Target Value')
jntplot.ax_joint.scatter(prediction[0,0], prediction[0,1], color = 'yellow', label = 'Predicted Value')
jntplot.ax_joint.scatter(naive_prediction[0,0], naive_prediction[0,1], color = 'cyan', label = 'Naive Predicted Value')
jntplot.ax_joint.legend()
else:
sns.distplot(samples, kde = True, bins = 20, hist_kws = {'label':'Model Samples'})
dst = sns.distplot(y_test, kde = True, bins = 20, hist_kws = {'label':'Target Distribution'})
dst._axes.axvline(y_test[i,1], color = 'r')
dst._axes.legend()
alpha, beta, gamma = 1,1,1
samples1 = density_estimator1.sample(X_test, sample_size = 70, alpha = alpha, beta = beta, gamma = gamma)
kde_entropy(quantile(y_test,samples1)[:,0,:]), bimodal_variance(samples1).mean()
alpha, beta, gamma = 1,1,1
samples1 = density_estimator1.sample(X_test, sample_size = 70, alpha = alpha, beta = beta, gamma = gamma)
sns.jointplot(*quantile(y_test, samples1)[:,0,:].T)
###Output
Querying 30 nearest neighbors, this can take a while...
###Markdown
KernelTreeHistogramEstimator- apart from the similarity factor $\alpha$, include a node relevance factor $\beta$ in order to use $(\frac{NodeAggMetric}{NodeAggMetric_{max}})^\beta$ as node multipliers in the node-data adjacency matrix. NodeAggMetric could be variance, entropy, or a user-defined metric such as the likelihood under some distribution. Then sample according to $CosSim(A,B)^\alpha$. May include KDE sampling for nodes, depending on the amount of nodes. Make this framework the default for every DensityTree (NaiveTree is a special case with alpha = 0 and beta = 0). A toy illustration of the two factors follows.
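A toy numeric illustration of the two factors described above (plain NumPy with made-up numbers, not the estimator's API):
```python
import numpy as np

node_metric = np.array([0.2, 1.0, 0.5])                     # some NodeAggMetric per leaf node
beta = 2
node_weights = (node_metric / node_metric.max()) ** beta    # multipliers for the node-data adjacency matrix

cos_sim = np.array([0.9, 0.1, 0.4])                         # CosSim(query, stored rows)
alpha = 3
p = cos_sim ** alpha
p = p / p.sum()                                             # sampling probabilities proportional to CosSim^alpha
```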
###Code
# export
class KernelTreeHistogramEstimator(KernelTreeEstimator):
'''
An ensemble that learns representations of the data by turning the target into bins
'''
def __init__(self, estimator, entropy_estimator_sampler=None, resolution='auto', cumulative_target = False, class_weight=None, alpha=1, beta=1, gamma=1, node_rank_func=None,
node_data_rank_func=None, n_neighbors=30, lower_bound=0.0):
assert hasattr(estimator, 'predict_proba') or 'predict_proba' in dir(
estimator), 'estimator should implement `predict_proba` method'
super().__init__(estimator, entropy_estimator_sampler, alpha, beta, gamma, node_rank_func,
node_data_rank_func, n_neighbors, lower_bound)
self.cumulative_target = cumulative_target
self.class_weight = class_weight
self.resolution = resolution
def _q_transformer_fit(self, y):
'''
fits self.q_transformer
'''
if len(y.shape) == 1:
y = _fix_X_1d(y)
if type(self.resolution) == str:
self.bin_edges = [np.histogram_bin_edges(
col, bins=self.resolution) for col in y.T]
print(
f'base classifier will be trained with {[len(i) for i in self.bin_edges]} classes')
return self.bin_edges
elif type(self.resolution) == np.ndarray:
self.bin_edges = [self.resolution for col in y.T]
elif type(self.resolution) == int:
self.q_transformer = QuantileTransformer(
n_quantiles=self.resolution)
self._q_minmax_scaler = MinMaxScaler()
y = self.q_transformer.fit_transform(y)
# for case when output_distribution != uniform
self._q_minmax_scaler.fit(y)
return self.q_transformer
elif isinstance(self.resolution, list):
assert len(self.resolution) == y.shape[-1], f'len of resolution list should be equal n_dims of y. got {len(self.resolution)} and {y.shape[-1]}'
return self.resolution
else:
raise TypeError(
f'self.resolution should be np.array of bin edges, str or int, got {self.resolution.__class__}')
def _q_transformer_transform(self, y):
'''
maps floats to int (bin_id in histogram)
'''
if len(y.shape) == 1:
y = _fix_X_1d(y)
if type(self.resolution) in (str, np.ndarray):
y_transformed = [np.digitize(
y[:, i:i+1], self.bin_edges[i]) for i in range(y.shape[-1])]
y_transformed = np.hstack(y_transformed)
max_bin = [len(edges) for edges in self.bin_edges]
elif type(self.resolution) == int:
y_transformed = self.q_transformer.transform(y)
# scale between 0 and 1
y_transformed = self._q_minmax_scaler.transform(y_transformed)
y_transformed = np.around(
y_transformed*(self.resolution - 1), decimals=0).astype(int)
max_bin = [self.resolution for _ in range(y.shape[-1])]
elif isinstance(self.resolution, list):
y_transformed = [np.digitize(
y[:, i:i+1], self.resolution[i]) for i in range(y.shape[-1])]
y_transformed = np.hstack(y_transformed)
max_bin = [len(resolution) if isinstance(resolution, (list, np.ndarray)) else resolution for resolution in self.resolution]
else:
raise TypeError(
f'self.resolution should be np.array of bin edges, str or int, got {self.resolution.__class__}')
if self.cumulative_target:
#make cumulative vector
y_transformed_list = []
for i in range(y_transformed.shape[-1]):
y_transformed_i = np.zeros((y_transformed.shape[0],max_bin[i]), dtype = 'int8')
for idx in range(len(y_transformed_i)):
bin_idx = int(y_transformed[idx, i])
y_transformed_i[idx, :bin_idx] = 1
y_transformed_list.append(y_transformed_i[:,:-1]) #drop last percentile to avoid all zeros
y_transformed = np.hstack(y_transformed_list)
return y_transformed
def _q_transformer_inverse_transform(self, y):
'''
maps from bin_id in histogram (int) to float.
beware that during transform, information is lost due to downsampling, so inverse_transform will
not be an exact inverse_transform.
'''
if len(y.shape) == 1:
y = _fix_X_1d(y)
if type(self.resolution) == int:
y_transformed = (y/(self.resolution - 1)).astype(float)
y_transformed = self._q_minmax_scaler.inverse_transform(
y_transformed)
# 1d asserted already
return self.q_transformer.inverse_transform(y_transformed).flatten()
else:
raise NotImplementedError(
'inverse transform only implemented for case when self.resolution == int')
def _preprocess_y(self, y):
if len(y.shape) == 1:
y = y.reshape(-1, 1)
# make uniform quantile bins
self._q_transformer_fit(y)
y = self._q_transformer_transform(y)
return y
def _handle_sample_weight(self, sample_weight, y, sample_alpha):
if self.class_weight == 'balanced':
class_weight = compute_sample_weight(class_weight='balanced', y=y)
if not sample_weight is None:
sample_weight = sample_weight*class_weight**sample_alpha
else:
sample_weight = class_weight**sample_alpha
return sample_weight
def fit(self, X, y=None, y_prep=None, sample_weight=None, sample_alpha=1, **fit_kws):
# digitize y
if y_prep is None:
y_prep = self._preprocess_y(y)
else:
assert y_prep.shape[0] == y.shape[0], f'y_prep and y should have same shape. got {y_prep.shape[0]} and {y.shape[0]}'
sample_weight = self._handle_sample_weight(
sample_weight, y_prep, sample_alpha)
# fit base estimator
try:
self.estimator.fit(
X, y_prep, sample_weight=sample_weight, **fit_kws)
except TypeError:
self.estimator.fit(X, y_prep, **fit_kws)
# save y continuous values
self.y_ = y
# fit leaf node matrix with tree nodes and its respective continuous values (y)
if self.entropy_estimator_sampler is None:
self._fit_leaf_node_matrix(
X, y, node_rank_func=self.node_rank_func, node_data_rank_func=self.node_data_rank_func) # <- MAKE NODE WEIGHTED VERSION
else:
self._fit_entropy_estimator_sampler(X, y)
return self
def predict_proba(self, X):
'''
handling multilabel output
'''
probas = self.estimator.predict_proba(X)
if self.cumulative_target:
probas = np.hstack([i for i in probas])
return probas
###Output
_____no_output_____
###Markdown
Usage Example
###Code
#estimator2 = ensemble.GradientBoostingClassifier(n_estimators = 4,learning_rate = 0.05,min_samples_leaf = 5, subsample = 0.4, max_features = 0.4, verbose = 2,)
estimator2 = ensemble.RandomForestClassifier(n_jobs = -1,n_estimators = 10,min_samples_leaf = 20, warm_start = True, criterion = 'entropy')
density_estimator2 = KernelTreeHistogramEstimator(
estimator2,resolution = 'auto', cumulative_target = True, class_weight = 'balanced')
density_estimator2.fit(X_train,y_train[:,:])
i = np.random.choice(np.arange(y_test.shape[0]))
alpha, beta, gamma = 1,0,0
noise_factor = 1
sample_size = 700
n_neighbors = 100
samples = density_estimator2.sample(X_test[i:i+1], sample_size = 700, alpha = alpha, beta = beta, gamma = gamma,n_neighbors = n_neighbors, noise_factor = noise_factor)
#samples = density_estimator2.sample(X_test[i:i+1], sample_size = 700, alpha = alpha, beta = beta, gamma = gamma)
prediction = density_estimator2.custom_predict(X_test[i:i+1],agg_func = lambda x: np.mean(x, axis = 0) ,alpha = alpha, beta = beta, gamma = gamma)
#naive_prediction = density_estimator.estimator.predict(X_test[i:i+1])
if (len(samples.shape) > 1) and (samples.shape[-1] == 2):
jntplot = sns.jointplot(samples[0,:,0], samples[0,:,1], joint_kws = {'label':'Model Samples', 'alpha':0.01})
jntplot.ax_joint.scatter(y[:,0], y[:,1], color = 'orange', alpha = 0.01, label = 'Target Distribution')
jntplot.ax_joint.scatter(y_test[i,0], y_test[i,1], color = 'red', label = 'Target Value')
jntplot.ax_joint.scatter(prediction[0,0], prediction[0,1], color = 'yellow', label = 'Predicted Value')
#jntplot.ax_joint.scatter(naive_prediction[0,0], naive_prediction[0,1], color = 'cyan', label = 'Naive Predicted Value')
jntplot.ax_joint.legend()
else:
sns.distplot(samples, kde = True, bins = 20, hist_kws = {'label':'Model Samples'})
dst = sns.distplot(y_test, kde = True, bins = 20, hist_kws = {'label':'Target Distribution'})
dst._axes.axvline(y_test[i,1], color = 'r')
dst._axes.legend()
alpha, beta, gamma = 1,2,0
noise_factor = 0.2
#KernelTreeEstimator
samples1 = density_estimator1.sample(X_test, sample_size = 700, alpha = alpha, beta = beta, gamma = gamma, noise_factor = noise_factor)
#KernelTreeHistogramEstimator
samples2 = density_estimator2.sample(X_test, sample_size = 700, alpha = alpha, beta = beta, gamma = gamma, noise_factor = noise_factor)
###Output
Querying 30 nearest neighbors, this can take a while...
###Markdown
We can plot the entropies of the distributions and the negative log likelihood of the generated KDE against the actual y_test value
###Code
ll1 = np.log2(kde_likelihood(y_test,samples1, frac = 0.2))
ll2 = np.log2(kde_likelihood(y_test,samples2, frac = 0.2))
print(np.median(ll1[ll1 > -10]), np.median(ll2[ll2 > -10]))
sns.distplot(ll1[ll1 > -10])
sns.distplot(ll2[ll2 > -10])
entr1 = kde_entropy(samples1, sample_size = 200 , frac = 0.2)
entr2 = kde_entropy(samples2, sample_size = 200 , frac = 0.2)
print(entr1.mean(),entr2.mean())
sns.distplot(entr1)
sns.distplot(entr2)
#we want the likelihood of our datapoints to be higher than the average likelihood of the distribution
# so we convert entropy and negative log likelihood to likelihoods and divide the point likelihood by the average dist likelihood
t1 = 2**ll1/2**-entr1
t2 = 2**ll2/2**-entr2
sns.distplot(t1[t1 < 10], hist_kws = dict(cumulative = True))
sns.distplot(t2[t2 < 10], hist_kws = dict(cumulative = True))
np.mean(t1[t1 < 1000]),np.mean(t2[t2 < 1000])
q2 = quantile(y_test,samples2)
q1 = quantile(y_test,samples1)
sns.jointplot(q1[:,0,0],q1[:,0,1])
f'{round(((q1 == 0).mean()+(q1 == 1).mean())*100,2)}% of data points out of suggested boundaries'
sns.jointplot(q2[:,0,0],q2[:,0,1])
f'{round(((q2 == 0).mean()+(q2 == 1).mean())*100,2)}% of data points out of suggested boundaries'
###Output
_____no_output_____
###Markdown
We can check the estimated KDE of the samples of each model against the y_test(target) distribution and the actual y_test value
###Code
i+=1
kde = KDE().fit(samples1[i])
kde_cloud = kde.sample(100)
jnt = sns.jointplot(kde_cloud[:,0],kde_cloud[:,1], kind = 'kde')
jnt.ax_joint.scatter(density_estimator2.y_[:,0], density_estimator2.y_[:,1], color = 'r', alpha = 0.1)
jnt.ax_joint.scatter(samples1[i,:,0], samples1[i,:,1], alpha = 0.05)
jnt.ax_joint.scatter(y_test[i,0], y_test[i,1], color = 'yellow')
kde = KDE().fit(samples2[i])
kde_cloud = kde.sample(100)
jnt = sns.jointplot(kde_cloud[:,0],kde_cloud[:,1], kind = 'kde')
jnt.ax_joint.scatter(density_estimator2.y_[:,0], density_estimator2.y_[:,1], color = 'r', alpha = 0.1)
jnt.ax_joint.scatter(samples2[i,:,0], samples2[i,:,1], alpha = 0.05)
jnt.ax_joint.scatter(y_test[i,0], y_test[i,1], color = 'yellow')
###Output
_____no_output_____
###Markdown
CDFEstimator> Tries to classify values as lower/higher than each bin edge (maybe percentile bins?) in a multilabel setting; the distribution is then estimated by querying the probability vectors. This might address the problem of disjoint bins in the naive histogram setting. A toy illustration of the encoding follows.
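A toy illustration of the cumulative (CDF-style) multilabel encoding described above:
```python
import numpy as np

bin_ids = np.array([0, 2, 4])                    # histogram bin of three y values, 5 bins in total
n_bins = 5
cdf_targets = np.zeros((len(bin_ids), n_bins), dtype='int8')
for row, b in enumerate(bin_ids):
    cdf_targets[row, :b + 1] = 1                 # 1 for every bin up to and including the observed one
# -> [[1 0 0 0 0]
#     [1 1 1 0 0]
#     [1 1 1 1 1]]
```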
###Code
class CDFEstimator(HistogramEstimator):
def _q_transformer_transform(self, y):
'''
maps floats to vector of binary variables (bin in cdf)
'''
y = _fix_X_1d(y)
if type(self.resolution) in (str, np.ndarray):
hist_bins = np.digitize(y, self.bin_edges)
max_bin = len(self.bin_edges)
elif type(self.resolution) == int:
hist_bins = self.q_transformer.transform(y)
#scale between 0 and 1
hist_bins = self._q_minmax_scaler.transform(hist_bins)
hist_bins = np.around(hist_bins*(self.resolution - 1), decimals = 0).astype(int)
max_bin = self.resolution
elif isinstance(self.resolution,np.ndarray):
hist_bins = np.digitize(y, self.resolution)
max_bin = self.resolution
y_transformed = np.zeros((y.shape[0],max_bin), dtype = 'int8')
for i in range(len(y_transformed)):
bin_idx = int(hist_bins[i] + 1)
y_transformed[i, :bin_idx] = 1
print(y_transformed.mean(axis = 0))
return y_transformed
def fit(self, X, y = None, **estimator_fit_kws):
#fit y transformer
self._preprocess_y_fit(y)
#transform y
y_transformed = self._preprocess_y_transform(y)
#fit calibrated classifier
if not self.calibrated_classifier is None:
self.calibrated_classifier.fit(X = X, y = y_transformed, **estimator_fit_kws)
self.estimator = self.calibrated_classifier.calibrated_classifiers_[0].base_estimator
else:
#fit classifier
print('fitting estimator')
self.estimator.fit(X = X, y = y_transformed, **estimator_fit_kws)
return self
###Output
_____no_output_____
###Markdown
Joint and Chained Estimators
###Code
# export
class JointHistogramEstimator(MultiOutputClassifier):
'''
Performs a joint entropy estimation based on a stacked model of marginal distribution estimators.
All the marginal distributions are merged using a KernelTreeHistogramEstimator, which natively supports
joint estimation
'''
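# overall flow: (1) fit (or reuse, when prefit=True) one marginal classifier per target dimension via
# MultiOutputClassifier, (2) hstack their predict_proba outputs into a stacked feature space,
# (3) fit a KernelTreeHistogramEstimator on that representation so sample/density work on the joint target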
def __init__(self, estimator, resolution = 'auto', joint_tree_estimator=None, stacking_method='auto', prefit = False, n_jobs=None, **joint_tree_kwargs):
# make estimator iterable
if not estimator.__class__ in (list,tuple,set):
estimator = [estimator]
else:
estimator = list(estimator)
#check if estimator is valid
for estim in estimator:
assert hasattr(
estim, 'predict_proba') or ('predict_proba' in dir(estim)), f'Estimator {estim} should have `predict_proba` method'
#instantiate MultiOutputClassifier
super().__init__(estimator[0], n_jobs)
#save fitted estimators if prefit
if prefit:
self.estimators_ = estimator
#set joint_tree_estimator as default
if joint_tree_estimator is None:
rf = ensemble.RandomForestClassifier(
n_estimators=100, max_leaf_nodes = 10000, n_jobs = -1)
self.joint_tree_estimator = KernelTreeHistogramEstimator(
rf, resolution=resolution, **joint_tree_kwargs)
else:
self.joint_tree_estimator = KernelTreeHistogramEstimator(
joint_tree_estimator, resolution=resolution, **joint_tree_kwargs)
self.prefit = prefit
self.stacking_method = stacking_method
self.resolution = resolution
return
def _make_stacked_predictors(self, X, stacking_method):
if stacking_method == 'auto':
attr_hierarchy = (
'predict_proba', 'decision_function', 'predict', 'transform')
predictors = []
for estim in self.estimators_:
passed = False
for attr in attr_hierarchy:
if hasattr(estim, attr):
predictors.append(getattr(estim, attr)(X))
passed = True
break
if passed == False:
raise AttributeError(
f'{estim} does not have any of these methods: {attr_hierarchy}')
else:
predictors = [getattr(estim, stacking_method)(X)
for estim in self.estimators_]
return np.hstack(predictors)
def fit(self, X, y=None, sample_weight=None):
y_prep = self.joint_tree_estimator._preprocess_y(y)
if not self.prefit:
super().fit(X, y_prep, sample_weight)
marginal_results = self._make_stacked_predictors(
X, self.stacking_method)
self.joint_tree_estimator.fit(marginal_results, y, y_prep = y_prep)
return self
def sample(self, X, sample_size=10, weight_func=None, n_neighbors=None, lower_bound=None,
alpha=None, beta=None, gamma=None, noise_factor=0,):
marginal_results = self._make_stacked_predictors(
X, self.stacking_method)
return self.joint_tree_estimator.sample(marginal_results, sample_size, weight_func, n_neighbors, lower_bound,
alpha, beta, gamma, noise_factor,)
def density(self, X, dist='kde', sample_size=10, weight_func=None, n_neighbors=None, lower_bound=None,
alpha=None, beta=None, gamma=None, noise_factor=1e-07, **dist_kwargs,):
marginal_results = self._make_stacked_predictors(
X, self.stacking_method)
return self.joint_tree_estimator.density(marginal_results, dist, sample_size, weight_func, n_neighbors, lower_bound,
alpha, beta, gamma, noise_factor, **dist_kwargs,)
def custom_predict(self, X, agg_func, sample_size=100, weights=None, n_neighbors=None,
lower_bound=None, alpha=None, beta=None, gamma=None, noise_factor=0,):
marginal_results = self._make_stacked_predictors(
X, self.stacking_method)
return self.joint_tree_estimator.custom_predict(marginal_results, agg_func, sample_size, weights,
n_neighbors, lower_bound, alpha, beta, gamma, noise_factor,)
class ChainedHistogramEstimator(MultiOutputClassifier):
'''
makes a chained joint estimator based on previous estimations.
there are three ways to pass predictors to the next estimator in the chain:
pass only predictions from previous estimator based on stacking_method
pass predictions from all previous estimators based on stacking_method
pass predictions from all previous estimators based on stacking_method and also the features used in all estimators
'''
def __init__(self):
raise NotImplementedError('ChainedHistogramEstimator is not implemented yet')
class JointKernelTreeEstimator(MultiOutputClassifier):
'''Custom multioutput for multioutput estimator for `KernelTreeEstimator`s'''
@property
def y_(self,):
'''stacked y_ attributes of each estimator (one for each dim)'''
return np.hstack([_fix_X_1d(estim.y_) for estim in self.estimators_])
def _similarity_sample_idx(self, X, sample_size=100, weights=None, n_neighbors=10,
lower_bound=0.0, alpha=1, beta=0, gamma=0):
sampled_idxs = np.hstack([
_fix_X_1d(estim._similarity_sample_idx(X, sample_size, weights, n_neighbors,
lower_bound, alpha, beta, gamma)
) for estim in self.estimators_
])
return sampled_idxs
def sample(self, X, sample_size=100, weights=None, n_neighbors=10,
lower_bound=0.0, alpha=1, beta=0, gamma=0, noise_factor=0):
idxs = self._similarity_sample_idx(
X, sample_size, weights, n_neighbors, lower_bound, alpha, beta, gamma)
samples = self.y_[[idx for idx in idxs]]
# fix ndim if sampling for a single value (1, n_samples, n_dims) instead of (n_samples, n_dims)
samples = samples if len(
samples.shape) != 2 else _add_n_dists_axis(samples)
# samples will have n_dims*sample_size samples, resample with no replacement to match sample_size
samples = sample_from_dist_array(
samples, sample_size=sample_size, weights=None, replace=False)
# define noise to be added
noise = agg_smallest_distance(samples, agg_func=np.std)
noise = _add_n_samples_axis(noise)
print(noise.shape, samples.shape)
return add_noise(samples, noise_factor*noise)
def custom_predict(self, X, agg_func, sample_size=100, weights=None, n_neighbors=10,
lower_bound=0.0, alpha=1, beta=0, gamma=0, noise_factor=0):
samples = self.sample(X, sample_size, weights, n_neighbors,
lower_bound, alpha, beta, gamma, noise_factor)
return np.array([agg_func(sample) for sample in samples])
###Output
_____no_output_____
###Markdown
Usage Example
###Code
#base_estim = LogisticRegression(tol = 0.001, solver = 'sag')
joint_estim = JointHistogramEstimator(estim1, resolution = 20, class_weight = 'balanced', prefit = True, cumulative_target = True, n_jobs = -1)
joint_estim.fit(X_train, y_train[:,:])
i = np.random.choice(np.arange(y_test.shape[0]))
alpha, beta, gamma = 1,0,0
noise_factor = 0
sample_size = 300
n_neighbors = 100
density = joint_estim.density(X_test[i:i+1], dist = 'empirical',sample_size = sample_size, n_neighbors= n_neighbors, alpha = alpha, beta = beta, gamma = gamma, noise_factor = noise_factor)
samples = density.sample(sample_size)
#samples = density_estimator2.sample(X_test[i:i+1], sample_size = 700, alpha = alpha, beta = beta, gamma = gamma)
prediction = np.median(samples, axis = 1)
#naive_prediction = density_estimator.estimator.predict(X_test[i:i+1])
if (len(samples.shape) > 1) and (samples.shape[-1] == 2):
jntplot = sns.jointplot(samples[0,:,0], samples[0,:,1], joint_kws = {'label':'Model Samples', 'alpha':0.05})
jntplot.ax_joint.scatter(y[:,0], y[:,1], color = 'orange', alpha = 0.01, label = 'Target Distribution')
jntplot.ax_joint.scatter(y_test[i,0], y_test[i,1], color = 'red', label = 'Target Value')
jntplot.ax_joint.scatter(prediction[0,0], prediction[0,1], color = 'yellow', label = 'Dist Median')
#jntplot.ax_joint.scatter(naive_prediction[0,0], naive_prediction[0,1], color = 'cyan', label = 'Naive Predicted Value')
jntplot.ax_joint.legend()
else:
sns.distplot(samples, kde = True, bins = 20, hist_kws = {'label':'Model Samples'})
dst = sns.distplot(y_test, kde = True, bins = 20, hist_kws = {'label':'Target Distribution'})
dst._axes.axvline(y_test[i,1], color = 'r')
dst._axes.legend()
###Output
Querying 100 nearest neighbors, this can take a while...
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
###Output
Converted 01_ensemble.ipynb.
Converted 02_core.random_variable.ipynb.
Converted 03_utils.ipynb.
Converted 04_metrics.ipynb.
Converted 05_neighbors.ipynb.
Converted 06_kde_baesyan_nets.ipynb.
Converted index.ipynb.
|
docs/user_guide/extending/extending_elementwise_expr.ipynb | ###Markdown
Adding an Elementwise Operation This notebook will show you how to add a new elementwise operation to an existing backend. We are going to add `julianday`, a function supported by the SQLite database, to the SQLite Ibis backend. The Julian day of a date is the number of days since January 1st, 4713 BC. For more information check the [Julian day](https://en.wikipedia.org/wiki/Julian_day) wikipedia page. Step 1: Define the Operation Let's define the `julianday` operation as a function that takes one string input argument and returns a float.
```python
def julianday(date: str) -> float:
    """Julian date"""
```
###Code
import ibis.expr.datatypes as dt
import ibis.expr.rules as rlz
from ibis.expr.operations import ValueOp
class JulianDay(ValueOp):
arg = rlz.string
output_type = rlz.shape_like('arg', 'float')
###Output
_____no_output_____
###Markdown
We just defined a `JulianDay` class that takes one argument of type string or binary, and returns a float. Step 2: Define the API Because we know the output type of the operation, to make an expression out of ``JulianDay`` we simply need to construct it and call its `ibis.expr.types.Node.to_expr` method. We still need to add a method to `StringValue` and `BinaryValue` (this needs to work on both scalars and columns). When you add a method to any of the expression classes whose name matches `*Value` both the scalar and column child classes will pick it up, making it easy to define operations for both scalars and columns in one place. We can do this by defining a function and assigning it to the appropriate class of expressions.
###Code
from ibis.expr.types import StringValue, BinaryValue
def julianday(string_value):
return JulianDay(string_value).to_expr()
StringValue.julianday = julianday
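# (hedged) the same assignment pattern would expose the method on binary values as well, but only if the
# JulianDay rule accepted binary input; with `arg = rlz.string` this line is illustrative only:
# BinaryValue.julianday = julianday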
###Output
_____no_output_____
###Markdown
Interlude: Create some expressions with `julianday`
###Code
import ibis
t = ibis.table([('string_col', 'string')], name='t')
t.string_col.julianday()
###Output
_____no_output_____
###Markdown
Step 3: Turn the Expression into SQL
###Code
import sqlalchemy as sa
@ibis.sqlite.add_operation(JulianDay)
def _julianday(translator, expr):
# pull out the arguments to the expression
arg, = expr.op().args
# compile the argument
compiled_arg = translator.translate(arg)
# return a SQLAlchemy expression that calls into the SQLite julianday function
return sa.func.julianday(compiled_arg)
###Output
_____no_output_____
###Markdown
Step 4: Putting it all Together
###Code
!curl -LsS -o $TEMPDIR/geography.db 'https://storage.googleapis.com/ibis-tutorial-data/geography.db'
import os
import tempfile
import ibis
db_fname = os.path.join(tempfile.gettempdir(), 'geography.db')
con = ibis.sqlite.connect(db_fname)
###Output
_____no_output_____
###Markdown
Create and execute a `julianday` expression
###Code
independence = con.table('independence')
independence
day = independence.independence_date.cast('string')
day
julianday_expr = day.julianday()
julianday_expr
sql_expr = julianday_expr.compile()
print(sql_expr)
result = julianday_expr.execute()
result.head()
###Output
_____no_output_____
###Markdown
Because we've defined our operation on `StringValue`, and not just on `StringColumn` we get operations on both string scalars *and* string columns for free
###Code
scalar = ibis.literal('2010-03-14')
scalar
julianday_scalar = scalar.julianday()
con.execute(julianday_scalar)
###Output
_____no_output_____ |
_notebooks/2020-04-07-Tutorial_MNIST_Data_Aug.ipynb | ###Markdown
"MNIST - Data Augmentation Gone Wrong"> What happens to MNIST accuracy when input data is horizontally flipped.- toc: false- branch: master- badges: true- comments: true- categories: [fastpages, jupyter]- image: images/some_folder/your_image.png 1) Import libraries, and setup file paths
###Code
#collapse-hide
from fastai2.vision.all import *
from utils import *
path = untar_data(URLs.MNIST)
train_dir = path/'training'
#val_dir = path/'testing'
fns_train = get_image_files(train_dir)
#fns_val = get_image_files(val_dir)
print('train files: ', len(fns_train))
#print('val files: ', len(fns_val))
###Output
train files: 60000
###Markdown
2) Setup two dataloaders: baseline, horizontal flip
###Code
batch_tfms = [Flip(p=1)] # horizontal flip
db = DataBlock(
blocks = (ImageBlock, CategoryBlock),
get_items = get_image_files,
splitter = RandomSplitter(valid_pct=0.2, seed=42),
get_y = parent_label,
batch_tfms = None
)
db_flip = DataBlock(
blocks = (ImageBlock, CategoryBlock),
get_items = get_image_files,
splitter = RandomSplitter(valid_pct=0.2, seed=42),
get_y = parent_label,
batch_tfms = batch_tfms
)
dls = db.dataloaders(train_dir, bs=256)
dls_flip = db_flip.dataloaders(train_dir, bs=256)
###Output
_____no_output_____
###Markdown
3) Check each dataloader is working
###Code
dls.show_batch(ncols=5,nrows=1)
dls_flip.show_batch(ncols=5,nrows=1)
###Output
_____no_output_____
###Markdown
4) Train resnet18 on baseline, and check accuracy
###Code
learn = cnn_learner(dls, resnet18,
pretrained=False,
metrics=accuracy)
lr_min = learn.lr_find()[0]
f'lr_min: {lr_min:0.05f}'
# no horizontal flip
learn.fit_one_cycle(5, lr_min)
###Output
_____no_output_____
###Markdown
- With baseline MNIST, resnet18 is getting 99% accuracy - Note: train_loss and valid_loss are both low 5) Train new resnet18 on horizontally flipped dataset
###Code
learn = cnn_learner(dls_flip, resnet18,
pretrained=False,
metrics=accuracy)
lr_min = learn.lr_find()[0]
f'lr_min: {lr_min:0.05f}'
# yes horizontal flip
learn.fit_one_cycle(5, lr_min)
###Output
_____no_output_____
###Markdown
- With horizontally flipped numbers, accuracy dropped to ~41% - Note: train_loss is a lot lower than valid_loss -> overfitting 6) What happened?
###Code
interp = ClassificationInterpretation.from_learner(learn)
interp.plot_top_losses(4, nrows=1)
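# (Added sketch, not part of the original notebook) The note above points out
# that train_loss is much lower than valid_loss; fastai2's Recorder can plot
# both loss curves for a quick visual check of that gap:
learn.recorder.plot_loss()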
###Output
_____no_output_____
###Markdown
- The model predicts a 5 when it sees a 2
###Code
interp.plot_confusion_matrix()
###Output
_____no_output_____
###Markdown
- Model is predicting 0, 1, 4, and 8 correctly -> 40% accuracy - Model confuses 5 for 2 | 6 for 2 | 3 for 8 | 9 for 8 - Does this make sense?
###Code
interp.most_confused()[:5]
# top number is actual, bottom number is prediction
learn.show_results(max_n=12)
###Output
_____no_output_____ |
ai-platform-unified/notebooks/unofficial/pipelines/google-cloud-pipeline-components_automl_tabular.ipynb | ###Markdown
Run in Colab View on GitHub Open in Google Cloud Notebooks Vertex Pipelines: AutoML Tabular pipelines using google-cloud-pipeline-components OverviewThis notebook shows how to use the components defined in [`google_cloud_pipeline_components`](https://github.com/kubeflow/pipelines/tree/master/components/google-cloud) to build an AutoML Tabular workflow on [Vertex Pipelines](https://cloud.google.com/vertex-ai/docs/pipelines). ObjectiveIn this example, you'll learn how to use components from `google_cloud_pipeline_components` to:- create a _Dataset_- train an AutoML Tabular model- deploy the trained model to an _endpoint_ for servingThe components are [documented here](https://google-cloud-pipeline-components.readthedocs.io/en/latest/google_cloud_pipeline_components.aiplatform.htmlmodule-google_cloud_pipeline_components.aiplatform). Costs This tutorial uses billable components of Google Cloud:* Vertex AI Training and Serving* Cloud StorageLearn about [Vertex AI pricing](https://cloud.google.com/ai-platform-unified/pricing) and [Cloud Storagepricing](https://cloud.google.com/storage/pricing), and use the [PricingCalculator](https://cloud.google.com/products/calculator/)to generate a cost estimate based on your projected usage. Set up your local development environment**If you are using Colab or Google Cloud Notebooks**, your environment already meetsall the requirements to run this notebook. You can skip this step. **Otherwise**, make sure your environment meets this notebook's requirements.You need the following:* The Google Cloud SDK* Git* Python 3* virtualenv* Jupyter notebook running in a virtual environment with Python 3The Google Cloud guide to [Setting up a Python developmentenvironment](https://cloud.google.com/python/setup) and the [Jupyterinstallation guide](https://jupyter.org/install) provide detailed instructionsfor meeting these requirements. The following steps provide a condensed set ofinstructions:1. [Install and initialize the Cloud SDK.](https://cloud.google.com/sdk/docs/)1. [Install Python 3.](https://cloud.google.com/python/setupinstalling_python)1. [Install virtualenv](https://cloud.google.com/python/setupinstalling_and_using_virtualenv) and create a virtual environment that uses Python 3. Activate the virtual environment.1. To install Jupyter, run `pip install jupyter` on thecommand-line in a terminal shell.1. To launch Jupyter, run `jupyter notebook` on the command-line in a terminal shell.1. Open this notebook in the Jupyter Notebook Dashboard. Install additional packages
###Code
import sys
if "google.colab" in sys.modules:
USER_FLAG = ""
else:
USER_FLAG = "--user"
!pip3 install {USER_FLAG} kfp google-cloud-pipeline-components --upgrade
###Output
_____no_output_____
###Markdown
Restart the kernel After you install the additional packages, you need to restart the notebook kernel so it can find the packages.
###Code
# Automatically restart kernel after installs
import os
if not os.getenv("IS_TESTING"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
###Output
_____no_output_____
###Markdown
Check the versions of the packages you installed. The KFP SDK version should be >=1.6.
###Code
!python3 -c "import kfp; print('KFP SDK version: {}'.format(kfp.__version__))"
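# (Added sketch) The text above says the KFP SDK should be >=1.6; assuming the
# 'packaging' library is available in the environment, an explicit check could
# look like this (commented out, as it is not part of the original notebook):
# !python3 -c "import kfp; from packaging import version; assert version.parse(kfp.__version__) >= version.parse('1.6')"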
###Output
_____no_output_____
###Markdown
Before you beginThis notebook does not require a GPU runtime. Set up your Google Cloud project**The following steps are required, regardless of your notebook environment.**1. [Select or create a Google Cloud project](https://console.cloud.google.com/cloud-resource-manager). When you first create an account, you get a $300 free credit towards your compute/storage costs.1. [Make sure that billing is enabled for your project](https://cloud.google.com/billing/docs/how-to/modify-project).1. [Enable the AI Platform (Unified), Cloud Storage, and Compute Engine APIs](https://console.cloud.google.com/flows/enableapi?apiid=aiplatform.googleapis.com,compute_component,storage-component.googleapis.com). 1. Follow the "**Configuring your project**" instructions from the AI Platform Pipelines documentation.1. If you are running this notebook locally, you will need to install the [Cloud SDK](https://cloud.google.com/sdk).1. Enter your project ID in the cell below. Then run the cell to make sure theCloud SDK uses the right project for all the commands in this notebook.**Note**: Jupyter runs lines prefixed with `!` as shell commands, and it interpolates Python variables prefixed with `$` into these commands. Set your project ID**If you don't know your project ID**, you may be able to get your project ID using `gcloud`.
###Code
import os
PROJECT_ID = ""
# Get your Google Cloud project ID from gcloud
if not os.getenv("IS_TESTING"):
shell_output=!gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID: ", PROJECT_ID)
###Output
_____no_output_____
###Markdown
Otherwise, set your project ID here.
###Code
if PROJECT_ID == "" or PROJECT_ID is None:
PROJECT_ID = "python-docs-samples-tests" # @param {type:"string"}
###Output
_____no_output_____
###Markdown
TimestampIf you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append it onto the name of resources you create in this tutorial.
###Code
from datetime import datetime
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
###Output
_____no_output_____
###Markdown
Authenticate your Google Cloud account**If you are using AI Platform Notebooks**, your environment is alreadyauthenticated. Skip this step. **If you are using Colab**, run the cell below and follow the instructionswhen prompted to authenticate your account via oAuth.**Otherwise**, follow these steps:1. In the Cloud Console, go to the [**Create service account key** page](https://console.cloud.google.com/apis/credentials/serviceaccountkey).2. Click **Create service account**.3. In the **Service account name** field, enter a name, and click **Create**.4. In the **Grant this service account access to project** section, click the **Role** drop-down list. Type "AI Platform"into the filter box, and select **AI Platform Administrator**. Type "Storage Object Admin" into the filter box, and select **Storage Object Admin**.5. Click *Create*. A JSON file that contains your key downloads to yourlocal environment.6. Enter the path to your service account key as the`GOOGLE_APPLICATION_CREDENTIALS` variable in the cell below and run the cell.
###Code
import os
import sys
# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your GCP account. This provides access to your
# Cloud Storage bucket and lets you submit training jobs and prediction
# requests.
# If on AI Platform, then don't execute this code
if not os.path.exists("/opt/deeplearning/metadata/env_version"):
if "google.colab" in sys.modules:
from google.colab import auth as google_auth
google_auth.authenticate_user()
# If you are running this notebook locally, replace the string below with the
# path to your service account key and run this cell to authenticate your GCP
# account.
elif not os.getenv("IS_TESTING"):
%env GOOGLE_APPLICATION_CREDENTIALS ''
###Output
_____no_output_____
###Markdown
Create a Cloud Storage bucket as necessaryYou will need a Cloud Storage bucket for this example. If you don't have one that you want to use, you can make one now.Set the name of your Cloud Storage bucket below. It must be unique across allCloud Storage buckets.You may also change the `REGION` variable, which is used for operationsthroughout the rest of this notebook. Make sure to [choose a region where AI Platform (Unified) services areavailable](https://cloud.google.com/ai-platform-unified/docs/general/locationsavailable_regions). You maynot use a Multi-Regional Storage bucket for training with AI Platform.
###Code
BUCKET_NAME = "gs://[your-bucket-name]" # @param {type:"string"}
REGION = "us-central1" # @param {type:"string"}
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]":
BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP
###Output
_____no_output_____
###Markdown
**Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket.
###Code
! gsutil mb -l $REGION $BUCKET_NAME
###Output
_____no_output_____
###Markdown
Finally, validate access to your Cloud Storage bucket by examining its contents:
###Code
! gsutil ls -al $BUCKET_NAME
###Output
_____no_output_____
###Markdown
Import libraries and define constants Define some constants.
###Code
PATH=%env PATH
%env PATH={PATH}:/home/jupyter/.local/bin
USER = "your-user-name" # <---CHANGE THIS
PIPELINE_ROOT = "{}/pipeline_root/{}".format(BUCKET_NAME, USER)
PIPELINE_ROOT
###Output
_____no_output_____
###Markdown
Do some imports:
###Code
import kfp
from google_cloud_pipeline_components import aiplatform as gcc_aip
from kfp.v2 import compiler
from kfp.v2.google.client import AIPlatformClient
###Output
_____no_output_____
###Markdown
Define an AutoML Tabular regression pipeline that uses components from `google_cloud_pipeline_components` Create a managed image dataset from a CSV file and train it using AutoML Tabular Training.
###Code
TRAIN_FILE_NAME = "california_housing_train.csv"
!gsutil cp gs://aju-dev-demos-codelabs/sample_data/california_housing_train.csv {PIPELINE_ROOT}/data/
gcs_csv_path = f"{PIPELINE_ROOT}/data/{TRAIN_FILE_NAME}"
###Output
_____no_output_____
###Markdown
Define the pipeline:
###Code
@kfp.dsl.pipeline(name="automl-tab-training-v2")
def pipeline():
dataset_create_op = gcc_aip.TabularDatasetCreateOp(
project=PROJECT_ID, display_name="housing", gcs_source=gcs_csv_path
)
training_op = gcc_aip.AutoMLTabularTrainingJobRunOp(
project=PROJECT_ID,
display_name="train-housing-automl_1",
optimization_prediction_type="regression",
optimization_objective="minimize-rmse",
column_transformations=[
{"numeric": {"column_name": "longitude"}},
{"numeric": {"column_name": "latitude"}},
{"numeric": {"column_name": "housing_median_age"}},
{"numeric": {"column_name": "total_rooms"}},
{"numeric": {"column_name": "total_bedrooms"}},
{"numeric": {"column_name": "population"}},
{"numeric": {"column_name": "households"}},
{"numeric": {"column_name": "median_income"}},
],
dataset=dataset_create_op.outputs["dataset"],
target_column="longitude",
)
deploy_op = gcc_aip.ModelDeployOp( # noqa: F841
model=training_op.outputs["model"],
project=PROJECT_ID,
machine_type="n1-standard-4",
)
###Output
_____no_output_____
###Markdown
Compile and run the pipelineNow, you're ready to compile the pipeline:
###Code
from kfp.v2 import compiler # noqa: F811
compiler.Compiler().compile(
pipeline_func=pipeline, package_path="tab_regression_pipeline.json"
)
###Output
_____no_output_____
###Markdown
The pipeline compilation generates the `tab_regression_pipeline.json` job spec file. Next, instantiate an API client object:
###Code
from kfp.v2.google.client import AIPlatformClient # noqa: F811
api_client = AIPlatformClient(project_id=PROJECT_ID, region=REGION)
###Output
_____no_output_____
###Markdown
Then, you run the defined pipeline like this:
###Code
response = api_client.create_run_from_job_spec(
"tab_regression_pipeline.json", pipeline_root=PIPELINE_ROOT
)
###Output
_____no_output_____
###Markdown
Click on the generated link to see your run in the Cloud Console. It should look something like this as it is running: Cleaning up To clean up all Google Cloud resources used in this project, you can [delete the Google Cloud project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial. Otherwise, you can delete the individual resources you created in this tutorial: - Delete Cloud Storage objects that were created. Uncomment and run the command in the cell below **only if you are not using the `PIPELINE_ROOT` path for any other purpose**. - Delete your deployed model: first, undeploy it from its *endpoint*, then delete the model and endpoint (see the commented sketch in the cell below).
###Code
# Warning: this command will delete ALL Cloud Storage objects under the PIPELINE_ROOT path.
# ! gsutil -m rm -r $PIPELINE_ROOT
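# (Added sketch, not part of the original notebook) Undeploying and deleting the
# model and endpoint programmatically with the google-cloud-aiplatform SDK could
# look roughly like this; the display-name filter is an assumption, so check the
# Cloud Console for the actual resource names before deleting anything.
# from google.cloud import aiplatform
# aiplatform.init(project=PROJECT_ID, location=REGION)
# for endpoint in aiplatform.Endpoint.list():
#     endpoint.undeploy_all()
#     endpoint.delete()
# for model in aiplatform.Model.list(filter='display_name="train-housing-automl_1"'):
#     model.delete()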
###Output
_____no_output_____
###Markdown
Run in Colab View on GitHub Open in Google Cloud Notebooks Vertex Pipelines: AutoML Tabular pipelines using google-cloud-pipeline-components OverviewThis notebook shows how to use the components defined in [`google_cloud_pipeline_components`](https://github.com/kubeflow/pipelines/tree/master/components/google-cloud) to build an AutoML Tabular workflow on [Vertex Pipelines](https://cloud.google.com/vertex-ai/docs/pipelines). ObjectiveIn this example, you'll learn how to use components from `google_cloud_pipeline_components` to:- create a _Dataset_- train an AutoML Tabular model- deploy the trained model to an _endpoint_ for servingThe components are [documented here](https://google-cloud-pipeline-components.readthedocs.io/en/latest/google_cloud_pipeline_components.aiplatform.htmlmodule-google_cloud_pipeline_components.aiplatform). Costs This tutorial uses billable components of Google Cloud:* Vertex AI Training and Serving* Cloud StorageLearn about [Vertex AI pricing](https://cloud.google.com/ai-platform-unified/pricing) and [Cloud Storagepricing](https://cloud.google.com/storage/pricing), and use the [PricingCalculator](https://cloud.google.com/products/calculator/)to generate a cost estimate based on your projected usage. Set up your local development environment**If you are using Colab or Google Cloud Notebooks**, your environment already meetsall the requirements to run this notebook. You can skip this step. **Otherwise**, make sure your environment meets this notebook's requirements.You need the following:* The Google Cloud SDK* Git* Python 3* virtualenv* Jupyter notebook running in a virtual environment with Python 3The Google Cloud guide to [Setting up a Python developmentenvironment](https://cloud.google.com/python/setup) and the [Jupyterinstallation guide](https://jupyter.org/install) provide detailed instructionsfor meeting these requirements. The following steps provide a condensed set ofinstructions:1. [Install and initialize the Cloud SDK.](https://cloud.google.com/sdk/docs/)1. [Install Python 3.](https://cloud.google.com/python/setupinstalling_python)1. [Install virtualenv](https://cloud.google.com/python/setupinstalling_and_using_virtualenv) and create a virtual environment that uses Python 3. Activate the virtual environment.1. To install Jupyter, run `pip install jupyter` on thecommand-line in a terminal shell.1. To launch Jupyter, run `jupyter notebook` on the command-line in a terminal shell.1. Open this notebook in the Jupyter Notebook Dashboard. Install additional packages
###Code
import sys
if "google.colab" in sys.modules:
USER_FLAG = ""
else:
USER_FLAG = "--user"
!python3 -m pip install {USER_FLAG} kfp google-cloud-pipeline-components --upgrade
###Output
_____no_output_____
###Markdown
Restart the kernel After you install the additional packages, you need to restart the notebook kernel so it can find the packages.
###Code
# Automatically restart kernel after installs
import os
if not os.getenv("IS_TESTING"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
###Output
_____no_output_____
###Markdown
Check the versions of the packages you installed. The KFP SDK version should be >=1.6.
###Code
!python3 -c "import kfp; print('KFP SDK version: {}'.format(kfp.__version__))"
###Output
_____no_output_____
###Markdown
Before you beginThis notebook does not require a GPU runtime. Set up your Google Cloud project**The following steps are required, regardless of your notebook environment.**1. [Select or create a Google Cloud project](https://console.cloud.google.com/cloud-resource-manager). When you first create an account, you get a $300 free credit towards your compute/storage costs.1. [Make sure that billing is enabled for your project](https://cloud.google.com/billing/docs/how-to/modify-project).1. [Enable the AI Platform (Unified), Cloud Storage, and Compute Engine APIs](https://console.cloud.google.com/flows/enableapi?apiid=aiplatform.googleapis.com,compute_component,storage-component.googleapis.com). 1. Follow the "**Configuring your project**" instructions from the AI Platform Pipelines documentation.1. If you are running this notebook locally, you will need to install the [Cloud SDK](https://cloud.google.com/sdk).1. Enter your project ID in the cell below. Then run the cell to make sure theCloud SDK uses the right project for all the commands in this notebook.**Note**: Jupyter runs lines prefixed with `!` as shell commands, and it interpolates Python variables prefixed with `$` into these commands. Set your project ID**If you don't know your project ID**, you may be able to get your project ID using `gcloud`.
###Code
import os
PROJECT_ID = ""
# Get your Google Cloud project ID from gcloud
if not os.getenv("IS_TESTING"):
shell_output=!gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID: ", PROJECT_ID)
###Output
_____no_output_____
###Markdown
Otherwise, set your project ID here.
###Code
if PROJECT_ID == "" or PROJECT_ID is None:
PROJECT_ID = "python-docs-samples-tests" # @param {type:"string"}
###Output
_____no_output_____
###Markdown
TimestampIf you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append it onto the name of resources you create in this tutorial.
###Code
from datetime import datetime
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
###Output
_____no_output_____
###Markdown
Authenticate your Google Cloud account**If you are using AI Platform Notebooks**, your environment is alreadyauthenticated. Skip this step. **If you are using Colab**, run the cell below and follow the instructionswhen prompted to authenticate your account via oAuth.**Otherwise**, follow these steps:1. In the Cloud Console, go to the [**Create service account key** page](https://console.cloud.google.com/apis/credentials/serviceaccountkey).2. Click **Create service account**.3. In the **Service account name** field, enter a name, and click **Create**.4. In the **Grant this service account access to project** section, click the **Role** drop-down list. Type "AI Platform"into the filter box, and select **AI Platform Administrator**. Type "Storage Object Admin" into the filter box, and select **Storage Object Admin**.5. Click *Create*. A JSON file that contains your key downloads to yourlocal environment.6. Enter the path to your service account key as the`GOOGLE_APPLICATION_CREDENTIALS` variable in the cell below and run the cell.
###Code
import os
import sys
# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your GCP account. This provides access to your
# Cloud Storage bucket and lets you submit training jobs and prediction
# requests.
# If on AI Platform, then don't execute this code
if not os.path.exists("/opt/deeplearning/metadata/env_version"):
if "google.colab" in sys.modules:
from google.colab import auth as google_auth
google_auth.authenticate_user()
# If you are running this notebook locally, replace the string below with the
# path to your service account key and run this cell to authenticate your GCP
# account.
elif not os.getenv("IS_TESTING"):
%env GOOGLE_APPLICATION_CREDENTIALS ''
###Output
_____no_output_____
###Markdown
Create a Cloud Storage bucket as necessaryYou will need a Cloud Storage bucket for this example. If you don't have one that you want to use, you can make one now.Set the name of your Cloud Storage bucket below. It must be unique across allCloud Storage buckets.You may also change the `REGION` variable, which is used for operationsthroughout the rest of this notebook. Make sure to [choose a region where AI Platform (Unified) services areavailable](https://cloud.google.com/ai-platform-unified/docs/general/locationsavailable_regions). You maynot use a Multi-Regional Storage bucket for training with AI Platform.
###Code
BUCKET_NAME = "gs://[your-bucket-name]" # @param {type:"string"}
REGION = "us-central1" # @param {type:"string"}
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]":
BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP
###Output
_____no_output_____
###Markdown
**Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket.
###Code
! gsutil mb -l $REGION $BUCKET_NAME
###Output
_____no_output_____
###Markdown
Finally, validate access to your Cloud Storage bucket by examining its contents:
###Code
! gsutil ls -al $BUCKET_NAME
###Output
_____no_output_____
###Markdown
Import libraries and define constants Define some constants.
###Code
PATH=%env PATH
%env PATH={PATH}:/home/jupyter/.local/bin
USER = "your-user-name" # <---CHANGE THIS
PIPELINE_ROOT = "{}/pipeline_root/{}".format(BUCKET_NAME, USER)
PIPELINE_ROOT
###Output
_____no_output_____
###Markdown
Do some imports:
###Code
import kfp
from google_cloud_pipeline_components import aiplatform as gcc_aip
from kfp.v2 import compiler
from kfp.v2.google.client import AIPlatformClient
###Output
_____no_output_____
###Markdown
Define an AutoML Tabular regression pipeline that uses components from `google_cloud_pipeline_components` Create a managed image dataset from a CSV file and train it using AutoML Tabular Training.
###Code
TRAIN_FILE_NAME = "california_housing_train.csv"
!gsutil cp gs://aju-dev-demos-codelabs/sample_data/california_housing_train.csv {PIPELINE_ROOT}/data/
gcs_csv_path = f"{PIPELINE_ROOT}/data/{TRAIN_FILE_NAME}"
###Output
_____no_output_____
###Markdown
Define the pipeline:
###Code
@kfp.dsl.pipeline(name="automl-tab-training-v2")
def pipeline():
dataset_create_op = gcc_aip.TabularDatasetCreateOp(
project=PROJECT_ID, display_name="housing", gcs_source=gcs_csv_path
)
training_op = gcc_aip.AutoMLTabularTrainingJobRunOp(
project=PROJECT_ID,
display_name="train-housing-automl_1",
optimization_prediction_type="regression",
optimization_objective="minimize-rmse",
column_transformations=[
{"numeric": {"column_name": "longitude"}},
{"numeric": {"column_name": "latitude"}},
{"numeric": {"column_name": "housing_median_age"}},
{"numeric": {"column_name": "total_rooms"}},
{"numeric": {"column_name": "total_bedrooms"}},
{"numeric": {"column_name": "population"}},
{"numeric": {"column_name": "households"}},
{"numeric": {"column_name": "median_income"}},
],
dataset=dataset_create_op.outputs["dataset"],
target_column="longitude",
)
deploy_op = gcc_aip.ModelDeployOp( # noqa: F841
model=training_op.outputs["model"],
project=PROJECT_ID,
machine_type="n1-standard-4",
)
###Output
_____no_output_____
###Markdown
Compile and run the pipelineNow, you're ready to compile the pipeline:
###Code
from kfp.v2 import compiler # noqa: F811
compiler.Compiler().compile(
pipeline_func=pipeline, package_path="tab_regression_pipeline.json"
)
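# (Added sketch, not part of the original notebook) The compiled job spec is
# plain JSON, so it can be opened directly to eyeball what was generated; the
# exact layout of the spec depends on the KFP SDK version, so this is only a
# rough illustration and is left commented out.
# import json
# with open("tab_regression_pipeline.json") as f:
#     spec = json.load(f)
# print(list(spec.keys()))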
###Output
_____no_output_____
###Markdown
The pipeline compilation generates the `tab_regression_pipeline.json` job spec file. Next, instantiate an API client object:
###Code
from kfp.v2.google.client import AIPlatformClient # noqa: F811
api_client = AIPlatformClient(project_id=PROJECT_ID, region=REGION)
###Output
_____no_output_____
###Markdown
Then, you run the defined pipeline like this:
###Code
response = api_client.create_run_from_job_spec(
"tab_regression_pipeline.json", pipeline_root=PIPELINE_ROOT
)
###Output
_____no_output_____
###Markdown
Click on the generated link to see your run in the Cloud Console. It should look something like this as it is running: Cleaning up To clean up all Google Cloud resources used in this project, you can [delete the Google Cloud project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial. Otherwise, you can delete the individual resources you created in this tutorial: - Delete Cloud Storage objects that were created. Uncomment and run the command in the cell below **only if you are not using the `PIPELINE_ROOT` path for any other purpose**. - Delete your deployed model: first, undeploy it from its *endpoint*, then delete the model and endpoint.
###Code
# Warning: this command will delete ALL Cloud Storage objects under the PIPELINE_ROOT path.
# ! gsutil -m rm -r $PIPELINE_ROOT
###Output
_____no_output_____
###Markdown
Run in Colab View on GitHub Open in Google Cloud Notebooks Vertex Pipelines: AutoML Tabular pipelines using google-cloud-pipeline-components OverviewThis notebook shows how to use the components defined in [`google_cloud_pipeline_components`](https://github.com/kubeflow/pipelines/tree/master/components/google-cloud) to build an AutoML Tabular workflow on [Vertex Pipelines](https://cloud.google.com/vertex-ai/docs/pipelines). ObjectiveIn this example, you'll learn how to use components from `google_cloud_pipeline_components` to:- create a _Dataset_- train an AutoML Tabular model- deploy the trained model to an _endpoint_ for servingThe components are [documented here](https://google-cloud-pipeline-components.readthedocs.io/en/latest/google_cloud_pipeline_components.aiplatform.htmlmodule-google_cloud_pipeline_components.aiplatform). Costs This tutorial uses billable components of Google Cloud:* Vertex AI Training and Serving* Cloud StorageLearn about [Vertex AI pricing](https://cloud.google.com/vertex-ai/pricing) and [Cloud Storagepricing](https://cloud.google.com/storage/pricing), and use the [PricingCalculator](https://cloud.google.com/products/calculator/)to generate a cost estimate based on your projected usage. Set up your local development environment**If you are using Colab or Google Cloud Notebooks**, your environment already meetsall the requirements to run this notebook. You can skip this step. **Otherwise**, make sure your environment meets this notebook's requirements.You need the following:* The Google Cloud SDK* Git* Python 3* virtualenv* Jupyter notebook running in a virtual environment with Python 3The Google Cloud guide to [Setting up a Python developmentenvironment](https://cloud.google.com/python/setup) and the [Jupyterinstallation guide](https://jupyter.org/install) provide detailed instructionsfor meeting these requirements. The following steps provide a condensed set ofinstructions:1. [Install and initialize the Cloud SDK.](https://cloud.google.com/sdk/docs/)1. [Install Python 3.](https://cloud.google.com/python/setupinstalling_python)1. [Install virtualenv](https://cloud.google.com/python/setupinstalling_and_using_virtualenv) and create a virtual environment that uses Python 3. Activate the virtual environment.1. To install Jupyter, run `pip install jupyter` on thecommand-line in a terminal shell.1. To launch Jupyter, run `jupyter notebook` on the command-line in a terminal shell.1. Open this notebook in the Jupyter Notebook Dashboard. Install additional packages
###Code
import os
# The Google Cloud Notebook product has specific requirements
IS_GOOGLE_CLOUD_NOTEBOOK = os.path.exists("/opt/deeplearning/metadata/env_version")
# Google Cloud Notebook requires dependencies to be installed with '--user'
USER_FLAG = ""
if IS_GOOGLE_CLOUD_NOTEBOOK:
USER_FLAG = "--user"
!pip3 install {USER_FLAG} kfp google-cloud-pipeline-components --upgrade
###Output
_____no_output_____
###Markdown
Restart the kernel After you install the additional packages, you need to restart the notebook kernel so it can find the packages.
###Code
# Automatically restart kernel after installs
import os
if not os.getenv("IS_TESTING"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
###Output
_____no_output_____
###Markdown
Check the versions of the packages you installed. The KFP SDK version should be >=1.6.
###Code
!python3 -c "import kfp; print('KFP SDK version: {}'.format(kfp.__version__))"
###Output
_____no_output_____
###Markdown
Before you beginThis notebook does not require a GPU runtime. Set up your Google Cloud project**The following steps are required, regardless of your notebook environment.**1. [Select or create a Google Cloud project](https://console.cloud.google.com/cloud-resource-manager). When you first create an account, you get a $300 free credit towards your compute/storage costs.1. [Make sure that billing is enabled for your project](https://cloud.google.com/billing/docs/how-to/modify-project).1. [Enable the Vertex AI, Cloud Storage, and Compute Engine APIs](https://console.cloud.google.com/flows/enableapi?apiid=aiplatform.googleapis.com,compute_component,storage-component.googleapis.com). 1. Follow the "**Configuring your project**" instructions from the Vertex Pipelines documentation.1. If you are running this notebook locally, you will need to install the [Cloud SDK](https://cloud.google.com/sdk).1. Enter your project ID in the cell below. Then run the cell to make sure theCloud SDK uses the right project for all the commands in this notebook.**Note**: Jupyter runs lines prefixed with `!` as shell commands, and it interpolates Python variables prefixed with `$` into these commands. Set your project ID**If you don't know your project ID**, you may be able to get your project ID using `gcloud`.
###Code
import os
PROJECT_ID = ""
# Get your Google Cloud project ID from gcloud
if not os.getenv("IS_TESTING"):
shell_output=!gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID: ", PROJECT_ID)
###Output
_____no_output_____
###Markdown
Otherwise, set your project ID here.
###Code
if PROJECT_ID == "" or PROJECT_ID is None:
PROJECT_ID = "python-docs-samples-tests" # @param {type:"string"}
###Output
_____no_output_____
###Markdown
TimestampIf you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append it onto the name of resources you create in this tutorial.
###Code
from datetime import datetime
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
###Output
_____no_output_____
###Markdown
Authenticate your Google Cloud account**If you are using Google Cloud Notebooks**, your environment is alreadyauthenticated. Skip this step. **If you are using Colab**, run the cell below and follow the instructionswhen prompted to authenticate your account via oAuth.**Otherwise**, follow these steps:1. In the Cloud Console, go to the [**Create service account key** page](https://console.cloud.google.com/apis/credentials/serviceaccountkey).2. Click **Create service account**.3. In the **Service account name** field, enter a name, and click **Create**.4. In the **Grant this service account access to project** section, click the **Role** drop-down list. Type "Vertex AI"into the filter box, and select **Vertex AI Administrator**. Type "Storage Object Admin" into the filter box, and select **Storage Object Admin**.5. Click *Create*. A JSON file that contains your key downloads to yourlocal environment.6. Enter the path to your service account key as the`GOOGLE_APPLICATION_CREDENTIALS` variable in the cell below and run the cell.
###Code
import os
import sys
# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your GCP account. This provides access to your
# Cloud Storage bucket and lets you submit training jobs and prediction
# requests.
# The Google Cloud Notebook product has specific requirements
IS_GOOGLE_CLOUD_NOTEBOOK = os.path.exists("/opt/deeplearning/metadata/env_version")
# If on Google Cloud Notebooks, then don't execute this code
if not IS_GOOGLE_CLOUD_NOTEBOOK:
if "google.colab" in sys.modules:
from google.colab import auth as google_auth
google_auth.authenticate_user()
# If you are running this notebook locally, replace the string below with the
# path to your service account key and run this cell to authenticate your GCP
# account.
elif not os.getenv("IS_TESTING"):
%env GOOGLE_APPLICATION_CREDENTIALS ''
###Output
_____no_output_____
###Markdown
Create a Cloud Storage bucket as necessaryYou will need a Cloud Storage bucket for this example. If you don't have one that you want to use, you can make one now.Set the name of your Cloud Storage bucket below. It must be unique across allCloud Storage buckets.You may also change the `REGION` variable, which is used for operationsthroughout the rest of this notebook. Make sure to [choose a region where Vertex AI services areavailable](https://cloud.google.com/vertex-ai/docs/general/locationsavailable_regions). You maynot use a Multi-Regional Storage bucket for training with Vertex AI.
###Code
BUCKET_NAME = "gs://[your-bucket-name]" # @param {type:"string"}
REGION = "us-central1" # @param {type:"string"}
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]":
BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP
###Output
_____no_output_____
###Markdown
**Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket.
###Code
! gsutil mb -l $REGION $BUCKET_NAME
###Output
_____no_output_____
###Markdown
Finally, validate access to your Cloud Storage bucket by examining its contents:
###Code
! gsutil ls -al $BUCKET_NAME
###Output
_____no_output_____
###Markdown
Import libraries and define constants Define some constants.
###Code
PATH=%env PATH
%env PATH={PATH}:/home/jupyter/.local/bin
USER = "your-user-name" # <---CHANGE THIS
PIPELINE_ROOT = "{}/pipeline_root/{}".format(BUCKET_NAME, USER)
PIPELINE_ROOT
###Output
_____no_output_____
###Markdown
Do some imports:
###Code
import kfp
from google_cloud_pipeline_components import aiplatform as gcc_aip
from kfp.v2 import compiler
from kfp.v2.google.client import AIPlatformClient
###Output
_____no_output_____
###Markdown
Define an AutoML Tabular regression pipeline that uses components from `google_cloud_pipeline_components` Create a managed image dataset from a CSV file and train it using AutoML Tabular Training.
###Code
TRAIN_FILE_NAME = "california_housing_train.csv"
!gsutil cp gs://aju-dev-demos-codelabs/sample_data/california_housing_train.csv {PIPELINE_ROOT}/data/
gcs_csv_path = f"{PIPELINE_ROOT}/data/{TRAIN_FILE_NAME}"
###Output
_____no_output_____
###Markdown
Define the pipeline:
###Code
@kfp.dsl.pipeline(name="automl-tab-training-v2")
def pipeline(project: str = PROJECT_ID):
dataset_create_op = gcc_aip.TabularDatasetCreateOp(
project=project, display_name="housing", gcs_source=gcs_csv_path
)
training_op = gcc_aip.AutoMLTabularTrainingJobRunOp(
project=project,
display_name="train-housing-automl_1",
optimization_prediction_type="regression",
optimization_objective="minimize-rmse",
column_transformations=[
{"numeric": {"column_name": "longitude"}},
{"numeric": {"column_name": "latitude"}},
{"numeric": {"column_name": "housing_median_age"}},
{"numeric": {"column_name": "total_rooms"}},
{"numeric": {"column_name": "total_bedrooms"}},
{"numeric": {"column_name": "population"}},
{"numeric": {"column_name": "households"}},
{"numeric": {"column_name": "median_income"}},
{"numeric": {"column_name": "median_house_value"}},
],
dataset=dataset_create_op.outputs["dataset"],
target_column="median_house_value",
)
deploy_op = gcc_aip.ModelDeployOp( # noqa: F841
model=training_op.outputs["model"],
project=project,
machine_type="n1-standard-4",
)
###Output
_____no_output_____
###Markdown
Compile and run the pipelineNow, you're ready to compile the pipeline:
###Code
from kfp.v2 import compiler # noqa: F811
compiler.Compiler().compile(
pipeline_func=pipeline, package_path="tab_regression_pipeline.json"
)
###Output
_____no_output_____
###Markdown
The pipeline compilation generates the `tab_regression_pipeline.json` job spec file. Next, instantiate an API client object:
###Code
from kfp.v2.google.client import AIPlatformClient # noqa: F811
api_client = AIPlatformClient(project_id=PROJECT_ID, region=REGION)
###Output
_____no_output_____
###Markdown
Then, you run the defined pipeline like this:
###Code
response = api_client.create_run_from_job_spec(
"tab_regression_pipeline.json",
pipeline_root=PIPELINE_ROOT,
parameter_values={"project": PROJECT_ID},
)
###Output
_____no_output_____
###Markdown
Click on the generated link to see your run in the Cloud Console. It should look something like this as it is running: Cleaning up To clean up all Google Cloud resources used in this project, you can [delete the Google Cloud project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial. Otherwise, you can delete the individual resources you created in this tutorial: - Delete Cloud Storage objects that were created. Uncomment and run the command in the cell below **only if you are not using the `PIPELINE_ROOT` path for any other purpose**. - Delete your deployed model: first, undeploy it from its *endpoint*, then delete the model and endpoint.
###Code
# Warning: this command will delete ALL Cloud Storage objects under the PIPELINE_ROOT path.
# ! gsutil -m rm -r $PIPELINE_ROOT
###Output
_____no_output_____
###Markdown
Run in Colab View on GitHub Open in Google Cloud Notebooks Vertex Pipelines: AutoML Tabular pipelines using google-cloud-pipeline-components OverviewThis notebook shows how to use the components defined in [`google_cloud_pipeline_components`](https://github.com/kubeflow/pipelines/tree/master/components/google-cloud) to build an AutoML Tabular workflow on [Vertex Pipelines](https://cloud.google.com/vertex-ai/docs/pipelines). ObjectiveIn this example, you'll learn how to use components from `google_cloud_pipeline_components` to:- create a _Dataset_- train an AutoML Tabular model- deploy the trained model to an _endpoint_ for servingThe components are [documented here](https://google-cloud-pipeline-components.readthedocs.io/en/latest/google_cloud_pipeline_components.aiplatform.htmlmodule-google_cloud_pipeline_components.aiplatform). Costs This tutorial uses billable components of Google Cloud:* Vertex AI Training and Serving* Cloud StorageLearn about [Vertex AI pricing](https://cloud.google.com/ai-platform-unified/pricing) and [Cloud Storagepricing](https://cloud.google.com/storage/pricing), and use the [PricingCalculator](https://cloud.google.com/products/calculator/)to generate a cost estimate based on your projected usage. Set up your local development environment**If you are using Colab or Google Cloud Notebooks**, your environment already meetsall the requirements to run this notebook. You can skip this step. **Otherwise**, make sure your environment meets this notebook's requirements.You need the following:* The Google Cloud SDK* Git* Python 3* virtualenv* Jupyter notebook running in a virtual environment with Python 3The Google Cloud guide to [Setting up a Python developmentenvironment](https://cloud.google.com/python/setup) and the [Jupyterinstallation guide](https://jupyter.org/install) provide detailed instructionsfor meeting these requirements. The following steps provide a condensed set ofinstructions:1. [Install and initialize the Cloud SDK.](https://cloud.google.com/sdk/docs/)1. [Install Python 3.](https://cloud.google.com/python/setupinstalling_python)1. [Install virtualenv](https://cloud.google.com/python/setupinstalling_and_using_virtualenv) and create a virtual environment that uses Python 3. Activate the virtual environment.1. To install Jupyter, run `pip install jupyter` on thecommand-line in a terminal shell.1. To launch Jupyter, run `jupyter notebook` on the command-line in a terminal shell.1. Open this notebook in the Jupyter Notebook Dashboard. Install additional packages
###Code
import sys
if "google.colab" in sys.modules:
USER_FLAG = ""
else:
USER_FLAG = "--user"
!pip3 install {USER_FLAG} kfp google-cloud-pipeline-components --upgrade
###Output
_____no_output_____
###Markdown
Restart the kernel After you install the additional packages, you need to restart the notebook kernel so it can find the packages.
###Code
# Automatically restart kernel after installs
import os
if not os.getenv("IS_TESTING"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
###Output
_____no_output_____
###Markdown
Check the versions of the packages you installed. The KFP SDK version should be >=1.6.
###Code
!python3 -c "import kfp; print('KFP SDK version: {}'.format(kfp.__version__))"
###Output
_____no_output_____
###Markdown
Before you beginThis notebook does not require a GPU runtime. Set up your Google Cloud project**The following steps are required, regardless of your notebook environment.**1. [Select or create a Google Cloud project](https://console.cloud.google.com/cloud-resource-manager). When you first create an account, you get a $300 free credit towards your compute/storage costs.1. [Make sure that billing is enabled for your project](https://cloud.google.com/billing/docs/how-to/modify-project).1. [Enable the AI Platform (Unified), Cloud Storage, and Compute Engine APIs](https://console.cloud.google.com/flows/enableapi?apiid=aiplatform.googleapis.com,compute_component,storage-component.googleapis.com). 1. Follow the "**Configuring your project**" instructions from the AI Platform Pipelines documentation.1. If you are running this notebook locally, you will need to install the [Cloud SDK](https://cloud.google.com/sdk).1. Enter your project ID in the cell below. Then run the cell to make sure theCloud SDK uses the right project for all the commands in this notebook.**Note**: Jupyter runs lines prefixed with `!` as shell commands, and it interpolates Python variables prefixed with `$` into these commands. Set your project ID**If you don't know your project ID**, you may be able to get your project ID using `gcloud`.
###Code
import os
PROJECT_ID = ""
# Get your Google Cloud project ID from gcloud
if not os.getenv("IS_TESTING"):
shell_output=!gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID: ", PROJECT_ID)
###Output
_____no_output_____
###Markdown
Otherwise, set your project ID here.
###Code
if PROJECT_ID == "" or PROJECT_ID is None:
PROJECT_ID = "python-docs-samples-tests" # @param {type:"string"}
###Output
_____no_output_____
###Markdown
TimestampIf you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append it onto the name of resources you create in this tutorial.
###Code
from datetime import datetime
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
###Output
_____no_output_____
###Markdown
Authenticate your Google Cloud account**If you are using AI Platform Notebooks**, your environment is alreadyauthenticated. Skip this step. **If you are using Colab**, run the cell below and follow the instructionswhen prompted to authenticate your account via oAuth.**Otherwise**, follow these steps:1. In the Cloud Console, go to the [**Create service account key** page](https://console.cloud.google.com/apis/credentials/serviceaccountkey).2. Click **Create service account**.3. In the **Service account name** field, enter a name, and click **Create**.4. In the **Grant this service account access to project** section, click the **Role** drop-down list. Type "AI Platform"into the filter box, and select **AI Platform Administrator**. Type "Storage Object Admin" into the filter box, and select **Storage Object Admin**.5. Click *Create*. A JSON file that contains your key downloads to yourlocal environment.6. Enter the path to your service account key as the`GOOGLE_APPLICATION_CREDENTIALS` variable in the cell below and run the cell.
###Code
import os
import sys
# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your GCP account. This provides access to your
# Cloud Storage bucket and lets you submit training jobs and prediction
# requests.
# If on AI Platform, then don't execute this code
if not os.path.exists("/opt/deeplearning/metadata/env_version"):
if "google.colab" in sys.modules:
from google.colab import auth as google_auth
google_auth.authenticate_user()
# If you are running this notebook locally, replace the string below with the
# path to your service account key and run this cell to authenticate your GCP
# account.
elif not os.getenv("IS_TESTING"):
%env GOOGLE_APPLICATION_CREDENTIALS ''
###Output
_____no_output_____
###Markdown
Create a Cloud Storage bucket as necessaryYou will need a Cloud Storage bucket for this example. If you don't have one that you want to use, you can make one now.Set the name of your Cloud Storage bucket below. It must be unique across allCloud Storage buckets.You may also change the `REGION` variable, which is used for operationsthroughout the rest of this notebook. Make sure to [choose a region where AI Platform (Unified) services areavailable](https://cloud.google.com/ai-platform-unified/docs/general/locationsavailable_regions). You maynot use a Multi-Regional Storage bucket for training with AI Platform.
###Code
BUCKET_NAME = "gs://[your-bucket-name]" # @param {type:"string"}
REGION = "us-central1" # @param {type:"string"}
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]":
BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP
###Output
_____no_output_____
###Markdown
**Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket.
###Code
! gsutil mb -l $REGION $BUCKET_NAME
###Output
_____no_output_____
###Markdown
Finally, validate access to your Cloud Storage bucket by examining its contents:
###Code
! gsutil ls -al $BUCKET_NAME
###Output
_____no_output_____
###Markdown
Import libraries and define constants Define some constants.
###Code
PATH=%env PATH
%env PATH={PATH}:/home/jupyter/.local/bin
USER = "your-user-name" # <---CHANGE THIS
PIPELINE_ROOT = "{}/pipeline_root/{}".format(BUCKET_NAME, USER)
PIPELINE_ROOT
###Output
_____no_output_____
###Markdown
Do some imports:
###Code
import kfp
from google_cloud_pipeline_components import aiplatform as gcc_aip
from kfp.v2 import compiler
from kfp.v2.google.client import AIPlatformClient
###Output
_____no_output_____
###Markdown
Define an AutoML Tabular regression pipeline that uses components from `google_cloud_pipeline_components` Create a managed image dataset from a CSV file and train it using AutoML Tabular Training.
###Code
TRAIN_FILE_NAME = "california_housing_train.csv"
!gsutil cp gs://aju-dev-demos-codelabs/sample_data/california_housing_train.csv {PIPELINE_ROOT}/data/
gcs_csv_path = f"{PIPELINE_ROOT}/data/{TRAIN_FILE_NAME}"
###Output
_____no_output_____
###Markdown
Define the pipeline:
###Code
@kfp.dsl.pipeline(name="automl-tab-training-v2")
def pipeline(project: str = PROJECT_ID):
dataset_create_op = gcc_aip.TabularDatasetCreateOp(
project=project, display_name="housing", gcs_source=gcs_csv_path
)
training_op = gcc_aip.AutoMLTabularTrainingJobRunOp(
project=project,
display_name="train-housing-automl_1",
optimization_prediction_type="regression",
optimization_objective="minimize-rmse",
column_transformations=[
{"numeric": {"column_name": "longitude"}},
{"numeric": {"column_name": "latitude"}},
{"numeric": {"column_name": "housing_median_age"}},
{"numeric": {"column_name": "total_rooms"}},
{"numeric": {"column_name": "total_bedrooms"}},
{"numeric": {"column_name": "population"}},
{"numeric": {"column_name": "households"}},
{"numeric": {"column_name": "median_income"}},
],
dataset=dataset_create_op.outputs["dataset"],
target_column="longitude",
)
deploy_op = gcc_aip.ModelDeployOp( # noqa: F841
model=training_op.outputs["model"],
project=project,
machine_type="n1-standard-4",
)
###Output
_____no_output_____
###Markdown
Compile and run the pipelineNow, you're ready to compile the pipeline:
###Code
from kfp.v2 import compiler # noqa: F811
compiler.Compiler().compile(
pipeline_func=pipeline, package_path="tab_regression_pipeline.json"
)
###Output
_____no_output_____
###Markdown
The pipeline compilation generates the `tab_regression_pipeline.json` job spec file. Next, instantiate an API client object:
###Code
from kfp.v2.google.client import AIPlatformClient # noqa: F811
api_client = AIPlatformClient(project_id=PROJECT_ID, region=REGION)
###Output
_____no_output_____
###Markdown
Then, you run the defined pipeline like this:
###Code
response = api_client.create_run_from_job_spec(
"tab_regression_pipeline.json",
pipeline_root=PIPELINE_ROOT,
parameter_values={"project": PROJECT_ID},
)
###Output
_____no_output_____
###Markdown
Click on the generated link to see your run in the Cloud Console. It should look something like this as it is running: Cleaning up To clean up all Google Cloud resources used in this project, you can [delete the Google Cloud project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial. Otherwise, you can delete the individual resources you created in this tutorial: - Delete Cloud Storage objects that were created. Uncomment and run the command in the cell below **only if you are not using the `PIPELINE_ROOT` path for any other purpose**. - Delete your deployed model: first, undeploy it from its *endpoint*, then delete the model and endpoint.
###Code
# Warning: this command will delete ALL Cloud Storage objects under the PIPELINE_ROOT path.
# ! gsutil -m rm -r $PIPELINE_ROOT
###Output
_____no_output_____
###Markdown
Run in Colab View on GitHub Open in Google Cloud Notebooks Vertex Pipelines: AutoML Tabular pipelines using google-cloud-pipeline-components OverviewThis notebook shows how to use the components defined in [`google_cloud_pipeline_components`](https://github.com/kubeflow/pipelines/tree/master/components/google-cloud) to build an AutoML Tabular workflow on [Vertex Pipelines](https://cloud.google.com/vertex-ai/docs/pipelines). ObjectiveIn this example, you'll learn how to use components from `google_cloud_pipeline_components` to:- create a _Dataset_- train an AutoML Tabular model- deploy the trained model to an _endpoint_ for servingThe components are [documented here](https://google-cloud-pipeline-components.readthedocs.io/en/latest/google_cloud_pipeline_components.aiplatform.htmlmodule-google_cloud_pipeline_components.aiplatform). Costs This tutorial uses billable components of Google Cloud:* Vertex AI Training and Serving* Cloud StorageLearn about [Vertex AI pricing](https://cloud.google.com/vertex-ai/pricing) and [Cloud Storagepricing](https://cloud.google.com/storage/pricing), and use the [PricingCalculator](https://cloud.google.com/products/calculator/)to generate a cost estimate based on your projected usage. Set up your local development environment**If you are using Colab or Google Cloud Notebooks**, your environment already meetsall the requirements to run this notebook. You can skip this step. **Otherwise**, make sure your environment meets this notebook's requirements.You need the following:* The Google Cloud SDK* Git* Python 3* virtualenv* Jupyter notebook running in a virtual environment with Python 3The Google Cloud guide to [Setting up a Python developmentenvironment](https://cloud.google.com/python/setup) and the [Jupyterinstallation guide](https://jupyter.org/install) provide detailed instructionsfor meeting these requirements. The following steps provide a condensed set ofinstructions:1. [Install and initialize the Cloud SDK.](https://cloud.google.com/sdk/docs/)1. [Install Python 3.](https://cloud.google.com/python/setupinstalling_python)1. [Install virtualenv](https://cloud.google.com/python/setupinstalling_and_using_virtualenv) and create a virtual environment that uses Python 3. Activate the virtual environment.1. To install Jupyter, run `pip install jupyter` on thecommand-line in a terminal shell.1. To launch Jupyter, run `jupyter notebook` on the command-line in a terminal shell.1. Open this notebook in the Jupyter Notebook Dashboard. Install additional packages
###Code
import sys
if "google.colab" in sys.modules:
USER_FLAG = ""
else:
USER_FLAG = "--user"
!pip3 install {USER_FLAG} kfp google-cloud-pipeline-components --upgrade
###Output
_____no_output_____
###Markdown
Restart the kernel After you install the additional packages, you need to restart the notebook kernel so it can find the packages.
###Code
# Automatically restart kernel after installs
import os
if not os.getenv("IS_TESTING"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
###Output
_____no_output_____
###Markdown
Check the versions of the packages you installed. The KFP SDK version should be >=1.6.
###Code
!python3 -c "import kfp; print('KFP SDK version: {}'.format(kfp.__version__))"
###Output
_____no_output_____
###Markdown
Before you beginThis notebook does not require a GPU runtime. Set up your Google Cloud project**The following steps are required, regardless of your notebook environment.**1. [Select or create a Google Cloud project](https://console.cloud.google.com/cloud-resource-manager). When you first create an account, you get a $300 free credit towards your compute/storage costs.1. [Make sure that billing is enabled for your project](https://cloud.google.com/billing/docs/how-to/modify-project).1. [Enable the Vertex AI, Cloud Storage, and Compute Engine APIs](https://console.cloud.google.com/flows/enableapi?apiid=aiplatform.googleapis.com,compute_component,storage-component.googleapis.com). 1. Follow the "**Configuring your project**" instructions from the Vertex Pipelines documentation.1. If you are running this notebook locally, you will need to install the [Cloud SDK](https://cloud.google.com/sdk).1. Enter your project ID in the cell below. Then run the cell to make sure theCloud SDK uses the right project for all the commands in this notebook.**Note**: Jupyter runs lines prefixed with `!` as shell commands, and it interpolates Python variables prefixed with `$` into these commands. Set your project ID**If you don't know your project ID**, you may be able to get your project ID using `gcloud`.
###Code
import os
PROJECT_ID = ""
# Get your Google Cloud project ID from gcloud
if not os.getenv("IS_TESTING"):
shell_output=!gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID: ", PROJECT_ID)
###Output
_____no_output_____
###Markdown
Otherwise, set your project ID here.
###Code
if PROJECT_ID == "" or PROJECT_ID is None:
PROJECT_ID = "python-docs-samples-tests" # @param {type:"string"}
###Output
_____no_output_____
###Markdown
TimestampIf you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append it onto the name of resources you create in this tutorial.
###Code
from datetime import datetime
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
###Output
_____no_output_____
###Markdown
Authenticate your Google Cloud account**If you are using Google Cloud Notebooks**, your environment is alreadyauthenticated. Skip this step. **If you are using Colab**, run the cell below and follow the instructionswhen prompted to authenticate your account via oAuth.**Otherwise**, follow these steps:1. In the Cloud Console, go to the [**Create service account key** page](https://console.cloud.google.com/apis/credentials/serviceaccountkey).2. Click **Create service account**.3. In the **Service account name** field, enter a name, and click **Create**.4. In the **Grant this service account access to project** section, click the **Role** drop-down list. Type "Vertex AI"into the filter box, and select **Vertex AI Administrator**. Type "Storage Object Admin" into the filter box, and select **Storage Object Admin**.5. Click *Create*. A JSON file that contains your key downloads to yourlocal environment.6. Enter the path to your service account key as the`GOOGLE_APPLICATION_CREDENTIALS` variable in the cell below and run the cell.
###Code
import os
import sys
# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your GCP account. This provides access to your
# Cloud Storage bucket and lets you submit training jobs and prediction
# requests.
# If on Google Cloud Notebooks, then don't execute this code
if not os.path.exists("/opt/deeplearning/metadata/env_version"):
if "google.colab" in sys.modules:
from google.colab import auth as google_auth
google_auth.authenticate_user()
# If you are running this notebook locally, replace the string below with the
# path to your service account key and run this cell to authenticate your GCP
# account.
elif not os.getenv("IS_TESTING"):
%env GOOGLE_APPLICATION_CREDENTIALS ''
###Output
_____no_output_____
###Markdown
Create a Cloud Storage bucket as necessaryYou will need a Cloud Storage bucket for this example. If you don't have one that you want to use, you can make one now.Set the name of your Cloud Storage bucket below. It must be unique across allCloud Storage buckets.You may also change the `REGION` variable, which is used for operationsthroughout the rest of this notebook. Make sure to [choose a region where Vertex AI services areavailable](https://cloud.google.com/vertex-ai/docs/general/locationsavailable_regions). You maynot use a Multi-Regional Storage bucket for training with Vertex AI.
###Code
BUCKET_NAME = "gs://[your-bucket-name]" # @param {type:"string"}
REGION = "us-central1" # @param {type:"string"}
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]":
BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP
###Output
_____no_output_____
###Markdown
**Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket.
###Code
! gsutil mb -l $REGION $BUCKET_NAME
###Output
_____no_output_____
###Markdown
Finally, validate access to your Cloud Storage bucket by examining its contents:
###Code
! gsutil ls -al $BUCKET_NAME
###Output
_____no_output_____
###Markdown
Import libraries and define constants Define some constants.
###Code
PATH=%env PATH
%env PATH={PATH}:/home/jupyter/.local/bin
USER = "your-user-name" # <---CHANGE THIS
PIPELINE_ROOT = "{}/pipeline_root/{}".format(BUCKET_NAME, USER)
PIPELINE_ROOT
###Output
_____no_output_____
###Markdown
Do some imports:
###Code
import kfp
from google_cloud_pipeline_components import aiplatform as gcc_aip
from kfp.v2 import compiler
from kfp.v2.google.client import AIPlatformClient
###Output
_____no_output_____
###Markdown
Define an AutoML Tabular regression pipeline that uses components from `google_cloud_pipeline_components` Create a managed tabular dataset from a CSV file and train a model on it using AutoML Tabular Training.
###Code
TRAIN_FILE_NAME = "california_housing_train.csv"
!gsutil cp gs://aju-dev-demos-codelabs/sample_data/california_housing_train.csv {PIPELINE_ROOT}/data/
gcs_csv_path = f"{PIPELINE_ROOT}/data/{TRAIN_FILE_NAME}"
###Output
_____no_output_____
###Markdown
Define the pipeline:
###Code
@kfp.dsl.pipeline(name="automl-tab-training-v2")
def pipeline(project: str = PROJECT_ID):
dataset_create_op = gcc_aip.TabularDatasetCreateOp(
project=project, display_name="housing", gcs_source=gcs_csv_path
)
training_op = gcc_aip.AutoMLTabularTrainingJobRunOp(
project=project,
display_name="train-housing-automl_1",
optimization_prediction_type="regression",
optimization_objective="minimize-rmse",
column_transformations=[
{"numeric": {"column_name": "longitude"}},
{"numeric": {"column_name": "latitude"}},
{"numeric": {"column_name": "housing_median_age"}},
{"numeric": {"column_name": "total_rooms"}},
{"numeric": {"column_name": "total_bedrooms"}},
{"numeric": {"column_name": "population"}},
{"numeric": {"column_name": "households"}},
{"numeric": {"column_name": "median_income"}},
{"numeric": {"column_name": "median_house_value"}},
],
dataset=dataset_create_op.outputs["dataset"],
target_column="median_house_value",
)
deploy_op = gcc_aip.ModelDeployOp( # noqa: F841
model=training_op.outputs["model"],
project=project,
machine_type="n1-standard-4",
)
###Output
_____no_output_____
###Markdown
Compile and run the pipelineNow, you're ready to compile the pipeline:
###Code
from kfp.v2 import compiler # noqa: F811
compiler.Compiler().compile(
pipeline_func=pipeline, package_path="tab_regression_pipeline.json"
)
###Output
_____no_output_____
###Markdown
The pipeline compilation generates the `tab_regression_pipeline.json` job spec file.Next, instantiate an API client object:
###Code
from kfp.v2.google.client import AIPlatformClient # noqa: F811
api_client = AIPlatformClient(project_id=PROJECT_ID, region=REGION)
###Output
_____no_output_____
###Markdown
Then, you run the defined pipeline like this:
###Code
response = api_client.create_run_from_job_spec(
"tab_regression_pipeline.json",
pipeline_root=PIPELINE_ROOT,
parameter_values={"project": PROJECT_ID},
)
###Output
_____no_output_____
###Markdown
Click on the generated link to see your run in the Cloud Console. It should look something like this as it is running: Cleaning upTo clean up all Google Cloud resources used in this project, you can [delete the Google Cloudproject](https://cloud.google.com/resource-manager/docs/creating-managing-projectsshutting_down_projects) you used for the tutorial.Otherwise, you can delete the individual resources you created in this tutorial:- Delete Cloud Storage objects that were created. Uncomment and run the command in the cell below **only if you are not using the `PIPELINE_ROOT` path for any other purpose**.- Delete your deployed model: first, undeploy it from its *endpoint*, then delete the model and endpoint.
###Code
# Warning: this command will delete ALL Cloud Storage objects under the PIPELINE_ROOT path.
# ! gsutil -m rm -r $PIPELINE_ROOT
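# A minimal, hedged sketch (not from the original notebook) of deleting the deployed model and
# endpoint with the Vertex AI SDK. It assumes the endpoint created by this pipeline is the most
# recently created one in the project/region and that the model kept the training display name,
# so verify both before uncommenting.
# from google.cloud import aiplatform
# aiplatform.init(project=PROJECT_ID, location=REGION)
# endpoint = aiplatform.Endpoint.list(order_by="create_time desc")[0]
# endpoint.undeploy_all()
# endpoint.delete()
# for model in aiplatform.Model.list(filter='display_name="train-housing-automl_1"'):
#     model.delete()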
###Output
_____no_output_____ |
Homework 1/.ipynb_checkpoints/hw1-checkpoint.ipynb | ###Markdown
ENVECON 147*Assignment 1: There are two parts to this assignment. The first asks you to work within this Jupyter notebook. The second part can be submitted separately via bCourses.*Credit to Eric Van Dusen for Jupyter notebook support and Q1
###Code
from datascience import *
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
import pandas as pd
plt.style.use('seaborn-muted')
###Output
_____no_output_____
###Markdown
Question 1 `costs` is a table showing the Output, Average Fixed Cost and Total Cost. Use this information to calculate the following and add them to the table `costs`.1. Total Fixed Cost *(Hint: check out [np.ones](https://docs.scipy.org/doc/numpy/reference/generated/numpy.ones.html))* 2. Total Variable Cost 3. Marginal Cost 4. Average Variable Cost 5. Average Cost
###Code
costs = Table.read_table('hw02.csv')
costs
#You can use as many cells as you want. Just add them below.
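# Illustrative sketch only, not the full solution: columns can be appended with `with_column`,
# and np.ones (from the hint) is handy for building constant-valued columns. The labels
# "Output" and "Average Fixed Cost" are assumed to match the table's headers.
# tfc = costs.column("Average Fixed Cost") * costs.column("Output")  # TFC = AFC * Q
# costs = costs.with_column("Total Fixed Cost", tfc.item(0) * np.ones(costs.num_rows))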
#Do not change or delete this cell
costs
###Output
_____no_output_____ |
docs/samples/transformer/image_transformer/kfserving_sdk_transformer.ipynb | ###Markdown
Sample for using a transformer with the KFServing SDK This notebook shows how to use the KFServing SDK to create an InferenceService with a transformer and a predictor.
###Code
from kubernetes import client
from kfserving import KFServingClient
from kfserving import constants
from kfserving import V1alpha2EndpointSpec
from kfserving import V1alpha2PredictorSpec
from kfserving import V1alpha2TransformerSpec
from kfserving import V1alpha2PyTorchSpec
from kfserving import V1alpha2CustomSpec
from kfserving import V1alpha2InferenceServiceSpec
from kfserving import V1alpha2InferenceService
from kubernetes.client import V1Container
from kubernetes.client import V1ResourceRequirements
import kubernetes.client
import os
import requests
import json
import numpy as np
###Output
_____no_output_____
###Markdown
Define InferenceService with Transformer Add predictor and transformer on the endpoint spec
###Code
api_version = constants.KFSERVING_GROUP + '/' + constants.KFSERVING_VERSION
default_endpoint_spec = V1alpha2EndpointSpec(
predictor=V1alpha2PredictorSpec(
min_replicas=1,
pytorch=V1alpha2PyTorchSpec(
storage_uri='gs://kfserving-samples/models/pytorch/cifar10',
model_class_name= "Net",
resources=V1ResourceRequirements(
requests={'cpu':'100m','memory':'1Gi'},
limits={'cpu':'100m', 'memory':'1Gi'}))),
transformer=V1alpha2TransformerSpec(
min_replicas=1,
custom=V1alpha2CustomSpec(
container=V1Container(
image='yuzisun/image-transformer:latest',
name='user-container',
resources=V1ResourceRequirements(
requests={'cpu':'100m','memory':'1Gi'},
limits={'cpu':'100m', 'memory':'1Gi'})))))
isvc = V1alpha2InferenceService(api_version=api_version,
kind=constants.KFSERVING_KIND,
metadata=client.V1ObjectMeta(
name='cifar10', namespace='kubeflow'),
spec=V1alpha2InferenceServiceSpec(default=default_endpoint_spec))
###Output
_____no_output_____
###Markdown
Create InferenceService with Transformer Call KFServingClient to create InferenceService.
###Code
KFServing = KFServingClient()
KFServing.create(isvc)
###Output
_____no_output_____
###Markdown
Check the InferenceService
###Code
KFServing.get('cifar10', namespace='kubeflow', watch=True, timeout_seconds=120)
###Output
NAME READY DEFAULT_TRAFFIC CANARY_TRAFFIC URL
cifar10 Unknown http://cifar10-predict.kubeflow.example.com
cifar10 Unknown http://cifar10-predict.kubeflow.example.com
cifar10 True http://cifar10-predict.kubeflow.example.com
###Markdown
Predict the image
###Code
api_instance = kubernetes.client.CoreV1Api(kubernetes.client.ApiClient())
service = api_instance.read_namespaced_service("istio-ingressgateway", "istio-system", exact='true')
cluster_ip = service.status.load_balancer.ingress[0].ip
url = "http://" + cluster_ip + "/v1/models/cifar10:predict"
headers = { 'Host': 'cifar10-predict.kubeflow.example.com' }
with open('./input.json') as json_file:
data = json.load(json_file)
response = requests.post(url, json.dumps(data), headers=headers)
probs = json.loads(response.content.decode('utf-8'))["predictions"]
print(probs)
print(np.argmax(probs))
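# Optional extra (not in the original notebook): map the predicted index to the standard
# CIFAR-10 class names to make the result human readable.
# classes = ["airplane", "automobile", "bird", "cat", "deer", "dog", "frog", "horse", "ship", "truck"]
# print(classes[np.argmax(probs)])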
###Output
[[-1.6099601984024048, -2.6461076736450195, 0.32844462990760803, 2.4825074672698975, 0.43524616956710815, 2.3108043670654297, 1.00056791305542, -0.4232763648033142, -0.5100948214530945, -1.7978394031524658]]
3
###Markdown
Delete the InferenceService
###Code
KFServing.delete('cifar10', namespace='kubeflow')
###Output
_____no_output_____
###Markdown
Sample for using a transformer with the KFServing SDK This notebook shows how to use the KFServing SDK to create a KFService with a transformer and a predictor.
###Code
from kubernetes import client
from kfserving import KFServingClient
from kfserving import constants
from kfserving import V1alpha2EndpointSpec
from kfserving import V1alpha2PredictorSpec
from kfserving import V1alpha2TransformerSpec
from kfserving import V1alpha2PyTorchSpec
from kfserving import V1alpha2CustomSpec
from kfserving import V1alpha2KFServiceSpec
from kfserving import V1alpha2KFService
from kubernetes.client import V1Container
from kubernetes.client import V1ResourceRequirements
import kubernetes.client
import os
import requests
import json
import numpy as np
###Output
_____no_output_____
###Markdown
Define KFService with Transformer Add predictor and transformer on the endpoint spec
###Code
api_version = constants.KFSERVING_GROUP + '/' + constants.KFSERVING_VERSION
default_endpoint_spec = V1alpha2EndpointSpec(
predictor=V1alpha2PredictorSpec(
min_replicas=1,
pytorch=V1alpha2PyTorchSpec(
storage_uri='gs://kfserving-samples/models/pytorch/cifar10',
model_class_name= "Net",
resources=V1ResourceRequirements(
requests={'cpu':'100m','memory':'1Gi'},
limits={'cpu':'100m', 'memory':'1Gi'}))),
transformer=V1alpha2TransformerSpec(
min_replicas=1,
custom=V1alpha2CustomSpec(
container=V1Container(
image='yuzisun/image-transformer:latest',
name='user-container',
resources=V1ResourceRequirements(
requests={'cpu':'100m','memory':'1Gi'},
limits={'cpu':'100m', 'memory':'1Gi'})))))
kfsvc = V1alpha2KFService(api_version=api_version,
kind=constants.KFSERVING_KIND,
metadata=client.V1ObjectMeta(
name='cifar10', namespace='kubeflow'),
spec=V1alpha2KFServiceSpec(default=default_endpoint_spec))
###Output
_____no_output_____
###Markdown
Create KFService with Transformer Call KFServingClient to create KFService.
###Code
KFServing = KFServingClient()
KFServing.create(kfsvc)
###Output
_____no_output_____
###Markdown
Check the KFService
###Code
KFServing.get('cifar10', namespace='kubeflow', watch=True, timeout_seconds=120)
###Output
NAME READY DEFAULT_TRAFFIC CANARY_TRAFFIC URL
cifar10 Unknown http://cifar10-predict.kubeflow.example.com
cifar10 Unknown http://cifar10-predict.kubeflow.example.com
cifar10 True http://cifar10-predict.kubeflow.example.com
###Markdown
Predict the image
###Code
api_instance = kubernetes.client.CoreV1Api(kubernetes.client.ApiClient())
service = api_instance.read_namespaced_service("istio-ingressgateway", "istio-system", exact='true')
cluster_ip = service.status.load_balancer.ingress[0].ip
url = "http://" + cluster_ip + "/v1/models/cifar10:predict"
headers = { 'Host': 'cifar10-predict.kubeflow.example.com' }
with open('./input.json') as json_file:
data = json.load(json_file)
response = requests.post(url, json.dumps(data), headers=headers)
probs = json.loads(response.content.decode('utf-8'))["predictions"]
print(probs)
print(np.argmax(probs))
###Output
[[-1.6099601984024048, -2.6461076736450195, 0.32844462990760803, 2.4825074672698975, 0.43524616956710815, 2.3108043670654297, 1.00056791305542, -0.4232763648033142, -0.5100948214530945, -1.7978394031524658]]
3
###Markdown
Delete the KFService
###Code
KFServing.delete('cifar10', namespace='kubeflow')
###Output
_____no_output_____
###Markdown
Sample for using a transformer with the KFServing SDK This notebook shows how to use the KFServing SDK to create an InferenceService with a transformer and a predictor.
###Code
from kubernetes import client
from kfserving import KFServingClient
from kfserving import constants
from kfserving import V1alpha2EndpointSpec
from kfserving import V1alpha2PredictorSpec
from kfserving import V1alpha2TransformerSpec
from kfserving import V1alpha2PyTorchSpec
from kfserving import V1alpha2CustomSpec
from kfserving import V1alpha2InferenceServiceSpec
from kfserving import V1alpha2InferenceService
from kubernetes.client import V1Container
from kubernetes.client import V1ResourceRequirements
import kubernetes.client
import os
import requests
import json
import numpy as np
###Output
_____no_output_____
###Markdown
Define InferenceService with Transformer Add predictor and transformer on the endpoint spec
###Code
api_version = constants.KFSERVING_GROUP + '/' + constants.KFSERVING_VERSION
default_endpoint_spec = V1alpha2EndpointSpec(
predictor=V1alpha2PredictorSpec(
min_replicas=1,
pytorch=V1alpha2PyTorchSpec(
storage_uri='gs://kfserving-samples/models/pytorch/cifar10',
model_class_name= "Net",
resources=V1ResourceRequirements(
requests={'cpu':'100m','memory':'1Gi'},
limits={'cpu':'100m', 'memory':'1Gi'}))),
transformer=V1alpha2TransformerSpec(
min_replicas=1,
custom=V1alpha2CustomSpec(
container=V1Container(
image='gcr.io/kubeflow-ci/kfserving/image-transformer:latest',
name='user-container',
resources=V1ResourceRequirements(
requests={'cpu':'100m','memory':'1Gi'},
limits={'cpu':'100m', 'memory':'1Gi'})))))
isvc = V1alpha2InferenceService(api_version=api_version,
kind=constants.KFSERVING_KIND,
metadata=client.V1ObjectMeta(
name='cifar10', namespace='default'),
spec=V1alpha2InferenceServiceSpec(default=default_endpoint_spec))
###Output
_____no_output_____
###Markdown
Create InferenceService with Transformer Call KFServingClient to create InferenceService.
###Code
KFServing = KFServingClient()
KFServing.create(isvc)
###Output
_____no_output_____
###Markdown
Check the InferenceService
###Code
KFServing.get('cifar10', namespace='default', watch=True, timeout_seconds=120)
###Output
NAME READY DEFAULT_TRAFFIC CANARY_TRAFFIC URL
cifar10 False
cifar10 False
cifar10 False
cifar10 False
cifar10 True 100 http://cifar10.default.example.com/v1/models/ci...
###Markdown
Predict the image
###Code
api_instance = kubernetes.client.CoreV1Api(kubernetes.client.ApiClient())
service = api_instance.read_namespaced_service("istio-ingressgateway", "istio-system", exact='true')
cluster_ip = service.status.load_balancer.ingress[0].ip
url = "http://" + cluster_ip + "/v1/models/cifar10:predict"
headers = { 'Host': 'cifar10.default.example.com' }
with open('./input.json') as json_file:
data = json.load(json_file)
print(url, headers)
response = requests.post(url, json.dumps(data), headers=headers)
probs = json.loads(response.content.decode('utf-8'))["predictions"]
print(probs)
print(np.argmax(probs))
###Output
http://9.21.53.162/v1/models/cifar10:predict {'Host': 'cifar10.default.example.com'}
[[-1.6099603176116943, -2.6461076736450195, 0.3284446597099304, 2.4825077056884766, 0.43524616956710815, 2.3108043670654297, 1.00056791305542, -0.4232763946056366, -0.5100947022438049, -1.797839641571045]]
3
###Markdown
Delete the InferenceService
###Code
KFServing.delete('cifar10', namespace='default')
###Output
_____no_output_____ |
sagemaker-lineage/sagemaker-lineage-multihop-queries.ipynb | ###Markdown
Amazon SageMaker Multi-hop Lineage QueriesAmazon SageMaker Lineage tracks events that happen within SageMaker, allowing the relationships between them to be traced via a graph structure. SageMaker Lineage introduces a new API called `LineageQuery` that allows customers to query the lineage graph structure to discover relationships across their Machine Learning entities. Your machine learning workflows can generate deeply nested relationships; the lineage APIs allow you to answer questions about these relationships. For example, find all Data Sets that trained the model deployed to a given Endpoint, or find all Models trained by a Data Set.The lineage graph is created automatically by SageMaker and you can directly create or modify your own lineage.In addition to the `LineageQuery` API, the SageMaker SDK provides wrapper functions that make it easy to run queries that span across multiple hops of the entity relationship graph. These APIs and helper functions are described in this notebook. Key Concepts* **Lineage Graph** - A connected graph tracing your machine learning workflow end to end. * **Artifacts** - Represents a URI addressable object or data. Artifacts are typically inputs or outputs to Actions. * **Actions** - Represents an action taken such as a computation, transformation, or job. * **Contexts** - Provides a method to logically group other entities.* **Associations** - A directed edge in the lineage graph that links two entities.* **Lineage Traversal** - Starting from an arbitrary point, trace the lineage graph to discover and analyze relationships between steps in your workflow.* **Experiments** - Experiment entities (Experiments, Trials, and Trial Components) are also part of the lineage graph and can be associated with Artifacts, Actions, or Contexts. Prerequisites[`sagemaker-experiments`](https://github.com/aws/sagemaker-experiments) and [`pyvis`](https://pyvis.readthedocs.io/en/latest/) are two Python libraries that need to be installed as part of this notebook execution. `pyvis` is a library designed for interactive network visualization and `sagemaker-experiments` gives users the ability to use SageMaker's Experiment Tracking capabilities. This notebook should be run with `Python 3.9` using the SageMaker Studio `Python3 (Data Science)` kernel. The `sagemaker` SDK version required for this notebook is `>2.70.0`.If running in SageMaker Classic Notebooks, use the `conda_python3` kernel. The AWS account running this notebook should have access to provision 2 instances of type `ml.m5.xlarge`. These instances are used for training and deploying a model. Let's start by installing preview wheels of the Python SDK, boto, and the AWS CLI.
###Code
# Fallback in case wheels are unavailable
! pip install sagemaker botocore boto3 awscli --upgrade
import subprocess
def execute_cmd(cmd):
print(cmd)
output = subprocess.getstatusoutput(cmd)
return output
def _download_from_s3(_file_path):
_path = f"s3://reinvent21-sm-rc-wheels/{_file_path}"
print(f"Path is {_path}")
ls_cmd = f"aws s3 ls {_path}"
print(execute_cmd(ls_cmd))
cmd = f"aws s3 cp {_path} /tmp/"
print("Downloading: ", cmd)
return execute_cmd(cmd)
def _install_wheel(wheel_name):
cmd = f"pip install --no-deps --log /tmp/output3.log /tmp/{wheel_name} --force-reinstall"
ret = execute_cmd(cmd)
_name = wheel_name.split(".")[0]
_, _version = execute_cmd(f"python -c 'import {_name}; print({_name}.__version__)'")
for package in ["botocore", "sagemaker", "boto3", "awscli"]:
print(execute_cmd(f"python -c 'import {package}; print({package}.__version__)'"))
print(f"Installed {_name}:{_version}")
return ret
def install_sm_py_sdk():
pySDK_name = "sagemaker.tar.gz"
exit_code, _ = _download_from_s3("dist/sagemaker.tar.gz")
if not exit_code:
_install_wheel(pySDK_name)
else:
print(f"'{pySDK_name}' is not present in S3 Bucket. Installing from public PyPi...")
execute_cmd("pip install sagemaker")
def install_boto_wheels():
WHEELS = ["botocore.tar.gz", "boto3.tar.gz", "awscli.tar.gz"]
for wheel_name in WHEELS:
_path = f"boto3/{wheel_name}"
exit_code, _ = _download_from_s3(_path)
if not exit_code:
_install_wheel(wheel_name)
else:
print(f"'{wheel_name}' is not present in S3 Bucket. Ignoring...")
install_boto_wheels()
install_sm_py_sdk()
!pip install sagemaker-experiments pyvis
###Output
_____no_output_____
###Markdown
Notebook OverviewThis notebook demonstrates how to use SageMaker Lineage APIs to query multi-hop relationships across the lineage graph. Multi-hop relationships are those that span beyond single entity relationships, e.g. Model -> Endpoint, Training Job -> Model. Multi-hop queries allow users to search for distant relationships across the Lineage Graph such as Endpoint -> Data Set.To demonstrate these capabilities, in this notebook we create a training job, register a model to the Model Registry, and deploy the model to an Endpoint.
###Code
import os
import boto3
import sagemaker
import pprint
from botocore.config import Config
boto_session = boto3.Session()
config = Config(retries={"max_attempts": 50, "mode": "adaptive"})
sm_client = boto3.client("sagemaker", config=config)
region = boto_session.region_name
sagemaker_session = sagemaker.Session(sagemaker_client=sm_client, boto_session=boto_session)
default_bucket = sagemaker_session.default_bucket()
role = sagemaker.get_execution_role()
# Helper function to print query outputs
pp = pprint.PrettyPrinter()
from datetime import datetime
training_instance_type = "ml.m5.xlarge"
inference_instance_type = "ml.m5.xlarge"
s3_prefix = "multihop-example"
unique_id = str(datetime.now().timestamp()).split(".")[0]
###Output
_____no_output_____
###Markdown
Create an Experiment and Trial for a training job
###Code
from smexperiments.experiment import Experiment
from smexperiments.trial import Trial
from smexperiments.trial_component import TrialComponent
experiment_name = f"MultihopQueryExperiment-{unique_id}"
exp = Experiment.create(experiment_name=experiment_name, sagemaker_boto_client=sm_client)
trial = Trial.create(
experiment_name=exp.experiment_name,
trial_name=f"MultihopQueryTrial-{unique_id}",
sagemaker_boto_client=sm_client,
)
print(exp.experiment_name)
print(trial.trial_name)
###Output
_____no_output_____
###Markdown
Training DataCreating a `data/` directory to store the preprocessed [UCI Abalone](https://archive.ics.uci.edu/ml/datasets/abalone) dataset. The preprocessing is done using the preprocessing script defined in [this](https://github.com/aws/amazon-sagemaker-examples/blob/master/sagemaker-pipelines/tabular/abalone_build_train_deploy/sagemaker-pipelines-preprocess-train-evaluate-batch-transform.ipynb) notebook. Then training and validation data is uploaded to S3 so that it can be used in the training and inference job.
###Code
if not os.path.exists("./data/"):
os.makedirs("./data/")
print("Directory Created ")
else:
print("Directory already exists")
# Download the processed abalone dataset files
!curl https://sagemaker-sample-files.s3.amazonaws.com/datasets/tabular/uci_abalone/preprocessed/test.csv > ./data/test.csv
!curl https://sagemaker-sample-files.s3.amazonaws.com/datasets/tabular/uci_abalone/preprocessed/train.csv > ./data/train.csv
!curl https://sagemaker-sample-files.s3.amazonaws.com/datasets/tabular/uci_abalone/preprocessed/validation.csv > ./data/validation.csv
# Upload the datasets to the SageMaker session default bucket
!aws s3 cp data/train.csv s3://{default_bucket}/experiments-demo/train.csv
!aws s3 cp data/validation.csv s3://{default_bucket}/experiments-demo/validation.csv
training_data = f"s3://{default_bucket}/experiments-demo/train.csv"
validation_data = f"s3://{default_bucket}/experiments-demo/validation.csv"
###Output
_____no_output_____
###Markdown
Create a training jobWe train a simple XGBoost model on the [Abalone dataset](https://www.google.com/search?client=firefox-b-1-d&q=abalone+dataset). `sagemaker.image_uris.retrieve()` is used to get the SageMaker container for XGBoost so that it can be used in the Estimator. In the `.fit()` function, we pass in a training and validation dataset along with an `experiment_config`. The `experiment_config` ensures that the metrics, parameters, and artifacts associated with this training job are logged to the experiment and trial created above.
###Code
from sagemaker.estimator import Estimator
model_path = f"s3://{default_bucket}/{s3_prefix}/xgb_model"
training_instance_type = "ml.m5.large"
image_uri = sagemaker.image_uris.retrieve(
framework="xgboost",
region=region,
version="1.0-1",
py_version="py3",
instance_type=training_instance_type,
)
xgb_train = Estimator(
image_uri=image_uri,
instance_type=training_instance_type,
instance_count=1,
output_path=model_path,
sagemaker_session=sagemaker_session,
role=role,
)
xgb_train.set_hyperparameters(
objective="reg:linear",
num_round=50,
max_depth=5,
eta=0.2,
gamma=4,
min_child_weight=6,
subsample=0.7,
silent=0,
)
from sagemaker.inputs import TrainingInput
xgb_train.fit(
inputs={
"train": TrainingInput(
s3_data=training_data,
content_type="text/csv",
),
"validation": TrainingInput(
s3_data=validation_data,
content_type="text/csv",
),
},
experiment_config={
"ExperimentName": experiment_name,
"TrialName": trial.trial_name,
"TrialComponentDisplayName": "MultiHopQueryTrialComponent",
},
)
###Output
_____no_output_____
###Markdown
Create a Model Package Group for the trained model to be registeredCreate a new Model Package Group or use an existing one to register the model
###Code
model_package_group_name = "lineage-test-" + unique_id
mpg = sm_client.create_model_package_group(ModelPackageGroupName=model_package_group_name)
mpg_arn = mpg["ModelPackageGroupArn"]
###Output
_____no_output_____
###Markdown
Register the model in the Model RegistryOnce the model is registered, you will see it in the Model Registry tab of the SageMaker Studio UI. The model is registered with the `approval_status` set to "Approved". By default, the model is registered with the `approval_status` set to "PendingManualApproval". Users can then navigate to the Model Registry to manually approve the model based on any criteria set for model evaluation or this can be done via API.
###Code
inference_instance_type = "ml.m5.xlarge"
model_package = xgb_train.register(
model_package_group_name=mpg_arn,
inference_instances=[inference_instance_type],
transform_instances=[inference_instance_type],
content_types=["text/csv"],
response_types=["text/csv"],
approval_status="Approved",
)
model_package_arn = model_package.model_package_arn
print("Model Package ARN : ", model_package_arn)
###Output
_____no_output_____
###Markdown
Deploy the model to a SageMaker EndpointA SageMaker Endpoint is used to host a model that can be used for inference. The type of endpoint deployed in this notebook is a real time inference endpoint. This is ideal for inference workloads where you have real-time, interactive, low latency requirements.
###Code
endpoint_name = "lineage-test-endpoint-" + unique_id
model_package.deploy(
endpoint_name=endpoint_name,
initial_instance_count=1,
instance_type=inference_instance_type,
)
# Get the endpoint ARN
endpoint_arn = sm_client.describe_endpoint(EndpointName=endpoint_name)["EndpointArn"]
print(endpoint_arn)
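# Optional, hedged sketch (not part of the original walkthrough): invoke the deployed endpoint
# with one row from the downloaded validation CSV. This assumes validation.csv has no header
# and that its first column is the label, which is dropped before sending the features.
# from sagemaker.predictor import Predictor
# from sagemaker.serializers import CSVSerializer
# predictor = Predictor(endpoint_name=endpoint_name, sagemaker_session=sagemaker_session, serializer=CSVSerializer())
# with open("data/validation.csv") as f:
#     payload = f.readline().strip().split(",", 1)[1]
# print(predictor.predict(payload))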
###Output
_____no_output_____
###Markdown
SageMaker Lineage QueriesWe explore SageMaker's lineage capabilities to traverse the relationships between the entities created in this notebook - datasets, model, endpoint, and training job.
###Code
from sagemaker.lineage.context import Context, EndpointContext
from sagemaker.lineage.action import Action
from sagemaker.lineage.association import Association
from sagemaker.lineage.artifact import Artifact, ModelArtifact, DatasetArtifact
from sagemaker.lineage.query import (
LineageQuery,
LineageFilter,
LineageSourceEnum,
LineageEntityEnum,
LineageQueryDirectionEnum,
)
###Output
_____no_output_____
###Markdown
Using the LineageQuery API to find entity associationsIn this section we use two APIs, `LineageQuery` and `LineageFilter`, to construct queries to answer questions about the Lineage Graph and extract entity relationships. LineageQuery parameters:* `start_arns`: A list of ARNs that will be used as the starting point for the query.* `direction`: The direction of the query.* `include_edges`: If true, return edges in addition to vertices.* `query_filter`: The query filter.LineageFilter parameters:* `entities`: A list of entity types (Artifact, Association, Action) to filter for when returning the results of LineageQuery* `sources`: A list of source types (Endpoint, Model, Dataset) to filter for when returning the results of LineageQueryA `Context` is automatically created when a SageMaker Endpoint is created, and an `Artifact` is automatically created when a Model is created in SageMaker.
###Code
# Find the endpoint context and model artifact that should be used for the lineage queries.
contexts = Context.list(source_uri=endpoint_arn)
context_name = list(contexts)[0].context_name
endpoint_context = EndpointContext.load(context_name=context_name)
###Output
_____no_output_____
###Markdown
Find all datasets associated with an Endpoint
###Code
# Define the LineageFilter to look for entities of type `ARTIFACT` and the source of type `DATASET`.
query_filter = LineageFilter(
entities=[LineageEntityEnum.ARTIFACT], sources=[LineageSourceEnum.DATASET]
)
# Providing this `LineageFilter` to the `LineageQuery` will construct a query that traverses through the given context `endpoint_context`
# and find all datasets.
query_result = LineageQuery(sagemaker_session).query(
start_arns=[endpoint_context.context_arn],
query_filter=query_filter,
direction=LineageQueryDirectionEnum.ASCENDANTS,
include_edges=False,
)
# Parse through the query results to get the lineage objects corresponding to the datasets
dataset_artifacts = []
for vertex in query_result.vertices:
dataset_artifacts.append(vertex.to_lineage_object().source.source_uri)
pp.pprint(dataset_artifacts)
###Output
_____no_output_____
###Markdown
Find the models associated with an Endpoint
###Code
# Define the LineageFilter to look for entities of type `ARTIFACT` and the source of type `MODEL`.
query_filter = LineageFilter(
entities=[LineageEntityEnum.ARTIFACT], sources=[LineageSourceEnum.MODEL]
)
# Providing this `LineageFilter` to the `LineageQuery` will construct a query that traverses through the given context `endpoint_context`
# and finds all models.
query_result = LineageQuery(sagemaker_session).query(
start_arns=[endpoint_context.context_arn],
query_filter=query_filter,
direction=LineageQueryDirectionEnum.ASCENDANTS,
include_edges=False,
)
# Parse through the query results to get the lineage objects corresponding to the model
model_artifacts = []
for vertex in query_result.vertices:
model_artifacts.append(vertex.to_lineage_object().source.source_uri)
# The results of the `LineageQuery` API call return the ARN of the model deployed to the endpoint along with
# the S3 URI to the model.tar.gz file associated with the model
pp.pprint(model_artifacts)
###Output
_____no_output_____
###Markdown
Find the trial components associated with the endpoint
###Code
# Define the LineageFilter to look for entities of type `TRIAL_COMPONENT` and the source of type `TRAINING_JOB`.
query_filter = LineageFilter(
entities=[LineageEntityEnum.TRIAL_COMPONENT],
sources=[LineageSourceEnum.TRAINING_JOB],
)
# Providing this `LineageFilter` to the `LineageQuery` will construct a query that traverses through the given context `endpoint_context`
# and finds all training jobs (trial components).
query_result = LineageQuery(sagemaker_session).query(
start_arns=[endpoint_context.context_arn],
query_filter=query_filter,
direction=LineageQueryDirectionEnum.ASCENDANTS,
include_edges=False,
)
# Parse through the query results to get the ARNs of the training jobs associated with this Endpoint
trial_components = []
for vertex in query_result.vertices:
trial_components.append(vertex.arn)
pp.pprint(trial_components)
###Output
_____no_output_____
###Markdown
Amazon SageMaker Multi-hop Lineage QueriesAmazon SageMaker Lineage tracks events that happen within SageMaker allowing the relationships between them to be traced via a graph structure. SageMaker Lineage introduces a new API called `LineageQuery` that allows customers to query the lineage graph structure to discover relationship across their Machine Learning entities. Your machine learning workflows can generate deeply nested relationships, the lineage APIs allow you to answer questions about these relationships. For example find all Data Sets that trained the model deployed to a given Endpoint or find all Models trained by a Data Set.The lineage graph is created automatically by SageMaker and you can directly create or modify your own lineage.In addition to the `LineageQuery` API, the SageMaker SDK provides wrapper functions that make it easy to run queries that span across multiple hops of the entity relationship graph. These APIs and helper functions are described in this notebook. RuntimeThis notebook takes approximately 15 minutes to run. Contents1. [Key Concepts](Key-Concepts)1. [Prerequisites](Prerequisites)1. [Notebook Overview](Notebook-Overview)1. [Create an Experiment and Trial for a training job](Create-an-Experiment-and-Trial-for-a-training-job)1. [Training Data](Training-Data)1. [Create a training job](Create-a-training-job)1. [Create a Model Package Group for the trained model to be registered](Create-a-Model-Package-Group-for-the-trained-model-to-be-registered)1. [Register the model in the Model Registry](Register-the-model-in-the-Model-Registry)1. [Deploy the model to a SageMaker Endpoint](Deploy-the-model-to-a-SageMaker-Endpoint)1. [SageMaker Lineage Queries](SageMaker-Lineage-Queries) 1. [Using the LineageQuery API to find entity associations](Using-the-LineageQuery-API-to-find-entity-associations) 1. [Find all datasets associated with an Endpoint](Find-all-datasets-associated-with-an-Endpoint) 1. [Find the models associated with an Endpoint](Find-the-models-associated-with-an-Endpoint) 1. [Find the trial components associated with an Endpoint](Find-the-trial-components-associated-with-an-Endpoint) 1. [Change the focal point of lineage](Change-the-focal-point-of-lineage) 1. [Use LineageQueryDirectionEnum.BOTH](Use-LineageQueryDirectionEnum.BOTH) 1. [Directions in LineageQuery: Ascendants vs. Descendants](Directions-in-LineageQuery:-Ascendants-vs.-Descendants) 1. [SDK helper functions](SDK-helper-functions) 1. [Lineage Graph Visualization](Lineage-Graph-Visualization)1. [Conclusion](Conclusion)1. [Cleanup](Cleanup) Key Concepts* **Lineage Graph** - A connected graph tracing your machine learning workflow end to end. * **Artifacts** - Represents a URI addressable object or data. Artifacts are typically inputs or outputs to Actions. * **Actions** - Represents an action taken such as a computation, transformation, or job. * **Contexts** - Provides a method to logically group other entities.* **Associations** - A directed edge in the lineage graph that links two entities.* **Lineage Traversal** - Starting from an arbitrary point trace the lineage graph to discover and analyze relationships between steps in your workflow.* **Experiments** - Experiment entites (Experiments, Trials, and Trial Components) are also part of the lineage graph and can be associated wtih Artifacts, Actions, or Contexts. 
Prerequisites[`sagemaker-experiments`](https://github.com/aws/sagemaker-experiments) and [`pyvis`]((https://pyvis.readthedocs.io/en/latest/)) are two Python libraries that need to be installed as part of this notebook execution. `pyvis` is a library designed for interactive network visualization and `sagemaker-experiments` gives users the ability to use SageMaker's Experiment Tracking capabilities. This notebook should be run with `Python 3.9` using the SageMaker Studio `Python3 (Data Science)` kernel. The `sagemaker` sdk version required for this notebook is `>2.70.0`.If running in SageMaker Classic Notebooks, use the `conda_python3` kernel. The AWS account running this notebook should have access to provision two instances of type `ml.m5.xlarge`. These instances are used for training and deploying a model. Let's start by installing the Python SDK, boto and AWS CLI.
###Code
!pip install sagemaker botocore boto3 awscli --upgrade
!pip install sagemaker-experiments pyvis
###Output
_____no_output_____
###Markdown
Notebook OverviewThis notebook demonstrates how to use SageMaker Lineage APIs to query multi-hop relationships across the lineage graph. Multi-hop relationships are those that span beyond single entity relationships, e.g. Model -> Endpoint, Training Job -> Model. Multi-hop queries allow users to search for distant relationships across the Lineage Graph such as Endpoint -> Data Set.To demonstrate these capabilities, in this notebook we create a training job, register a model to the Model Registry, and deploy the model to an Endpoint.
###Code
import os
import boto3
import sagemaker
import pprint
from botocore.config import Config
config = Config(retries={"max_attempts": 50, "mode": "adaptive"})
sagemaker_session = sagemaker.Session()
sm_client = sagemaker_session.sagemaker_client
region = sagemaker_session.boto_region_name
default_bucket = sagemaker_session.default_bucket()
role = sagemaker.get_execution_role()
# Helper function to print query outputs
pp = pprint.PrettyPrinter()
from datetime import datetime
training_instance_type = "ml.m5.xlarge"
inference_instance_type = "ml.m5.xlarge"
s3_prefix = "multihop-example"
unique_id = str(datetime.now().timestamp()).split(".")[0]
###Output
_____no_output_____
###Markdown
Create an Experiment and Trial for a training job
###Code
from smexperiments.experiment import Experiment
from smexperiments.trial import Trial
from smexperiments.trial_component import TrialComponent
experiment_name = f"MultihopQueryExperiment-{unique_id}"
exp = Experiment.create(experiment_name=experiment_name, sagemaker_boto_client=sm_client)
trial = Trial.create(
experiment_name=exp.experiment_name,
trial_name=f"MultihopQueryTrial-{unique_id}",
sagemaker_boto_client=sm_client,
)
print(exp.experiment_name)
print(trial.trial_name)
###Output
_____no_output_____
###Markdown
Training DataCreating a `data/` directory to store the preprocessed [UCI Abalone](https://archive.ics.uci.edu/ml/datasets/abalone) dataset. The preprocessing is done using the preprocessing script defined in the notebook [Orchestrating Jobs with Amazon SageMaker Model Building Pipelines](https://github.com/aws/amazon-sagemaker-examples/blob/master/sagemaker-pipelines/tabular/abalone_build_train_deploy/sagemaker-pipelines-preprocess-train-evaluate-batch-transform.ipynb). The training and validation data is then uploaded to S3 so that it can be used in the training and inference jobs.
###Code
default_bucket
if not os.path.exists("./data/"):
os.makedirs("./data/")
print("Directory Created ")
else:
print("Directory already exists")
# Download the processed abalone dataset files
s3 = boto3.client("s3")
s3.download_file(
f"sagemaker-sample-files",
"datasets/tabular/uci_abalone/preprocessed/test.csv",
"./data/test.csv",
)
s3.download_file(
f"sagemaker-sample-files",
"datasets/tabular/uci_abalone/preprocessed/train.csv",
"./data/train.csv",
)
s3.download_file(
f"sagemaker-sample-files",
"datasets/tabular/uci_abalone/preprocessed/validation.csv",
"./data/validation.csv",
)
# Upload the datasets to the SageMaker session default bucket
boto3.Session().resource("s3").Bucket(default_bucket).Object(
"experiments-demo/train.csv"
).upload_file("data/train.csv")
boto3.Session().resource("s3").Bucket(default_bucket).Object(
"experiments-demo/validation.csv"
).upload_file("data/validation.csv")
training_data = f"s3://{default_bucket}/experiments-demo/train.csv"
validation_data = f"s3://{default_bucket}/experiments-demo/validation.csv"
###Output
_____no_output_____
###Markdown
Create a training jobWe train a simple XGBoost model on the Abalone dataset. `sagemaker.image_uris.retrieve()` is used to get the SageMaker container for XGBoost so that it can be used in the Estimator. In the `.fit()` function, we pass in a training and validation dataset along with an `experiment_config`. The `experiment_config` ensures that the metrics, parameters, and artifacts associated with this training job are logged to the experiment and trial created above.
###Code
from sagemaker.estimator import Estimator
model_path = f"s3://{default_bucket}/{s3_prefix}/xgb_model"
training_instance_type = "ml.m5.large"
image_uri = sagemaker.image_uris.retrieve(
framework="xgboost",
region=region,
version="1.0-1",
py_version="py3",
instance_type=training_instance_type,
)
xgb_train = Estimator(
image_uri=image_uri,
instance_type=training_instance_type,
instance_count=1,
output_path=model_path,
sagemaker_session=sagemaker_session,
role=role,
)
xgb_train.set_hyperparameters(
objective="reg:linear",
num_round=50,
max_depth=5,
eta=0.2,
gamma=4,
min_child_weight=6,
subsample=0.7,
silent=0,
)
from sagemaker.inputs import TrainingInput
xgb_train.fit(
inputs={
"train": TrainingInput(
s3_data=training_data,
content_type="text/csv",
),
"validation": TrainingInput(
s3_data=validation_data,
content_type="text/csv",
),
},
experiment_config={
"ExperimentName": experiment_name,
"TrialName": trial.trial_name,
"TrialComponentDisplayName": "MultiHopQueryTrialComponent",
},
)
###Output
_____no_output_____
###Markdown
Create a Model Package Group for the trained model to be registeredCreate a new Model Package Group or use an existing one to register the model.
###Code
model_package_group_name = "lineage-test-" + unique_id
mpg = sm_client.create_model_package_group(ModelPackageGroupName=model_package_group_name)
mpg_arn = mpg["ModelPackageGroupArn"]
###Output
_____no_output_____
###Markdown
Register the model in the Model RegistryOnce the model is registered, it appears in the Model Registry tab of the SageMaker Studio UI. The model is registered with the `approval_status` set to "Approved". By default, the model is registered with the `approval_status` set to "PendingManualApproval". Users can then navigate to the Model Registry to manually approve the model based on any criteria set for model evaluation or this can be done via API.
###Code
inference_instance_type = "ml.m5.xlarge"
model_package = xgb_train.register(
model_package_group_name=mpg_arn,
inference_instances=[inference_instance_type],
transform_instances=[inference_instance_type],
content_types=["text/csv"],
response_types=["text/csv"],
approval_status="Approved",
)
model_package_arn = model_package.model_package_arn
print("Model Package ARN : ", model_package_arn)
###Output
_____no_output_____
###Markdown
Deploy the model to a SageMaker EndpointA SageMaker Endpoint is used to host a model that can be used for inference. The type of endpoint deployed in this notebook is a real time inference endpoint. This is ideal for inference workloads where you have real-time, interactive, low latency requirements.
###Code
endpoint_name = "lineage-test-endpoint-" + unique_id
model_package.deploy(
endpoint_name=endpoint_name,
initial_instance_count=1,
instance_type=inference_instance_type,
)
# Get the endpoint ARN
endpoint_arn = sm_client.describe_endpoint(EndpointName=endpoint_name)["EndpointArn"]
print(endpoint_arn)
###Output
_____no_output_____
###Markdown
SageMaker Lineage QueriesWe explore SageMaker's lineage capabilities to traverse the relationships between the entities created in this notebook - datasets, model, endpoint, and training job.
###Code
from sagemaker.lineage.context import Context, EndpointContext
from sagemaker.lineage.action import Action
from sagemaker.lineage.association import Association
from sagemaker.lineage.artifact import Artifact, ModelArtifact, DatasetArtifact
from sagemaker.lineage.query import (
LineageQuery,
LineageFilter,
LineageSourceEnum,
LineageEntityEnum,
LineageQueryDirectionEnum,
)
###Output
_____no_output_____
###Markdown
Using the LineageQuery API to find entity associationsIn this section we use two APIs, `LineageQuery` and `LineageFilter`, to construct queries to answer questions about the Lineage Graph and extract entity relationships. LineageQuery parameters:* `start_arns`: A list of ARNs that is used as the starting point for the query.* `direction`: The direction of the query.* `include_edges`: If true, return edges in addition to vertices.* `query_filter`: The query filter.LineageFilter parameters:* `entities`: A list of entity types (Artifact, Association, Action) to filter for when returning the results of LineageQuery* `sources`: A list of source types (Endpoint, Model, Dataset) to filter for when returning the results of LineageQueryA `Context` is automatically created when a SageMaker Endpoint is created, and an `Artifact` is automatically created when a Model is created in SageMaker.
###Code
# Find the endpoint context and model artifact that should be used for the lineage queries.
contexts = Context.list(source_uri=endpoint_arn)
context_name = list(contexts)[0].context_name
endpoint_context = EndpointContext.load(context_name=context_name)
###Output
_____no_output_____
###Markdown
Find all datasets associated with an Endpoint
###Code
# Define the LineageFilter to look for entities of type `ARTIFACT` and the source of type `DATASET`.
query_filter = LineageFilter(
entities=[LineageEntityEnum.ARTIFACT], sources=[LineageSourceEnum.DATASET]
)
# Providing this `LineageFilter` to the `LineageQuery` constructs a query that traverses through the given context `endpoint_context`
# and find all datasets.
query_result = LineageQuery(sagemaker_session).query(
start_arns=[endpoint_context.context_arn],
query_filter=query_filter,
direction=LineageQueryDirectionEnum.ASCENDANTS,
include_edges=False,
)
# Parse through the query results to get the lineage objects corresponding to the datasets
dataset_artifacts = []
for vertex in query_result.vertices:
dataset_artifacts.append(vertex.to_lineage_object().source.source_uri)
pp.pprint(dataset_artifacts)
###Output
_____no_output_____
###Markdown
Find the models associated with an Endpoint
###Code
# Define the LineageFilter to look for entities of type `ARTIFACT` and the source of type `MODEL`.
query_filter = LineageFilter(
entities=[LineageEntityEnum.ARTIFACT], sources=[LineageSourceEnum.MODEL]
)
# Providing this `LineageFilter` to the `LineageQuery` constructs a query that traverses through the given context `endpoint_context`
# and finds all models.
query_result = LineageQuery(sagemaker_session).query(
start_arns=[endpoint_context.context_arn],
query_filter=query_filter,
direction=LineageQueryDirectionEnum.ASCENDANTS,
include_edges=False,
)
# Parse through the query results to get the lineage objects corresponding to the model
model_artifacts = []
for vertex in query_result.vertices:
model_artifacts.append(vertex.to_lineage_object().source.source_uri)
# The results of the `LineageQuery` API call return the ARN of the model deployed to the endpoint along with
# the S3 URI to the model.tar.gz file associated with the model
pp.pprint(model_artifacts)
###Output
_____no_output_____
###Markdown
Find the trial components associated with an Endpoint
###Code
# Define the LineageFilter to look for entities of type `TRIAL_COMPONENT` and the source of type `TRAINING_JOB`.
query_filter = LineageFilter(
entities=[LineageEntityEnum.TRIAL_COMPONENT],
sources=[LineageSourceEnum.TRAINING_JOB],
)
# Providing this `LineageFilter` to the `LineageQuery` constructs a query that traverses through the given context `endpoint_context`
# and finds all training jobs (trial components).
query_result = LineageQuery(sagemaker_session).query(
start_arns=[endpoint_context.context_arn],
query_filter=query_filter,
direction=LineageQueryDirectionEnum.ASCENDANTS,
include_edges=False,
)
# Parse through the query results to get the ARNs of the training jobs associated with this Endpoint
trial_components = []
for vertex in query_result.vertices:
trial_components.append(vertex.arn)
pp.pprint(trial_components)
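# Optional, hedged follow-up: load one of the returned trial components with the
# sagemaker-experiments SDK; the trial component name is assumed to be the last ARN segment.
# tc_name = trial_components[0].split("/")[-1]
# tc = TrialComponent.load(trial_component_name=tc_name)
# print(tc.trial_component_name)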
###Output
_____no_output_____
###Markdown
Change the focal point of lineageThe `LineageQuery` can be modified to have different `start_arns` which changes the focal point of lineage. In addition, the `LineageFilter` can take multiple sources and entities to expand the scope of the query. **Here we use the model as the lineage focal point and find the Endpoints and Datasets associated with it.**
###Code
# Get the ModelArtifact
model_artifact_summary = list(Artifact.list(source_uri=model_package_arn))[0]
model_artifact = ModelArtifact.load(artifact_arn=model_artifact_summary.artifact_arn)
query_filter = LineageFilter(
entities=[LineageEntityEnum.ARTIFACT],
sources=[LineageSourceEnum.ENDPOINT, LineageSourceEnum.DATASET],
)
query_result = LineageQuery(sagemaker_session).query(
start_arns=[model_artifact.artifact_arn], # Model is the starting artifact
query_filter=query_filter,
# Find all the entities that descend from the model, i.e. the endpoint
direction=LineageQueryDirectionEnum.DESCENDANTS,
include_edges=False,
)
associations = []
for vertex in query_result.vertices:
associations.append(vertex.to_lineage_object().source.source_uri)
query_result = LineageQuery(sagemaker_session).query(
start_arns=[model_artifact.artifact_arn], # Model is the starting artifact
query_filter=query_filter,
# Find all the entities that ascend from the model, i.e. the datasets
direction=LineageQueryDirectionEnum.ASCENDANTS,
include_edges=False,
)
for vertex in query_result.vertices:
associations.append(vertex.to_lineage_object().source.source_uri)
pp.pprint(associations)
###Output
_____no_output_____
###Markdown
Use LineageQueryDirectionEnum.BOTHWhen the direction is set to `BOTH`, the query traverses the graph to find ascendant and descendant relationships, and the traversal takes place not only from the starting node but from each node that is visited. For example, if the training job is run twice and both models generated by the training job are deployed to endpoints, the result of the query with direction set to `BOTH` shows both endpoints. This is because the same image is used for training and deploying the model. Since the image is common to the model (`start_arn`) and both the endpoints, it appears in the query result.
###Code
query_filter = LineageFilter(
entities=[LineageEntityEnum.ARTIFACT],
sources=[LineageSourceEnum.ENDPOINT, LineageSourceEnum.DATASET],
)
query_result = LineageQuery(sagemaker_session).query(
start_arns=[model_artifact.artifact_arn], # Model is the starting artifact
query_filter=query_filter,
# This specifies that the query should look for associations both ascending and descending for the start
direction=LineageQueryDirectionEnum.BOTH,
include_edges=False,
)
associations = []
for vertex in query_result.vertices:
associations.append(vertex.to_lineage_object().source.source_uri)
pp.pprint(associations)
###Output
_____no_output_____
###Markdown
Directions in LineageQuery: Ascendants vs. DescendantsTo understand the direction in the Lineage Graph, take the following entity relationship graph - Dataset -> Training Job -> Model -> EndpointThe endpoint is a **descendant** of the model, and the model is a **descendant** of the dataset. Similarly, the model is an **ascendant** of the endpoint. The `direction` parameter can be used to specify whether the query should return entities that are descendants or ascendants of the entity in `start_arns`. If `start_arns` contains a model and the direction is `DESCENDANTS`, the query returns the endpoint. If the direction is `ASCENDANTS`, the query returns the dataset.
###Code
# In this example, we'll look at the impact of specifying the direction as ASCENDANT or DESCENDANT in a `LineageQuery`.
query_filter = LineageFilter(
entities=[LineageEntityEnum.ARTIFACT],
sources=[
LineageSourceEnum.ENDPOINT,
LineageSourceEnum.MODEL,
LineageSourceEnum.DATASET,
LineageSourceEnum.TRAINING_JOB,
],
)
query_result = LineageQuery(sagemaker_session).query(
start_arns=[model_artifact.artifact_arn],
query_filter=query_filter,
direction=LineageQueryDirectionEnum.ASCENDANTS,
include_edges=False,
)
ascendant_artifacts = []
# The lineage entity returned for the Training Job is a TrialComponent which can't be converted to a
# lineage object using the method `to_lineage_object()` so we extract the TrialComponent ARN.
for vertex in query_result.vertices:
try:
ascendant_artifacts.append(vertex.to_lineage_object().source.source_uri)
except:
ascendant_artifacts.append(vertex.arn)
print("Ascendant artifacts:")
pp.pprint(ascendant_artifacts)
query_result = LineageQuery(sagemaker_session).query(
start_arns=[model_artifact.artifact_arn],
query_filter=query_filter,
direction=LineageQueryDirectionEnum.DESCENDANTS,
include_edges=False,
)
descendant_artifacts = []
for vertex in query_result.vertices:
try:
descendant_artifacts.append(vertex.to_lineage_object().source.source_uri)
except:
# Handling TrialComponents.
descendant_artifacts.append(vertex.arn)
print("Descendant artifacts:")
pp.pprint(descendant_artifacts)
###Output
_____no_output_____
###Markdown
SDK helper functionsThe classes `EndpointContext`, `ModelArtifact`, and `DatasetArtifact`have helper functions that are wrappers over the `LineageQuery` API to make certain lineage queries easier to leverage.
###Code
# Find all the datasets associated with the endpoint
datasets = []
dataset_artifacts = endpoint_context.dataset_artifacts()
for dataset in dataset_artifacts:
datasets.append(dataset.source.source_uri)
print("Datasets : ", datasets)
# Find the training jobs associated with the endpoint
training_job_artifacts = endpoint_context.training_job_arns()
training_jobs = []
for training_job in training_job_artifacts:
training_jobs.append(training_job)
print("Training Jobs : ", training_jobs)
# Get the ARN for the pipeline execution associated with this endpoint (if any)
pipeline_executions = endpoint_context.pipeline_execution_arn()
if pipeline_executions:
    for pipeline in pipeline_executions:
print(pipeline)
# Here we use the `ModelArtifact` class to find all the datasets and endpoints associated with the model
dataset_artifacts = model_artifact.dataset_artifacts()
endpoint_contexts = model_artifact.endpoint_contexts()
datasets = [dataset.source.source_uri for dataset in dataset_artifacts]
endpoints = [endpoint.source.source_uri for endpoint in endpoint_contexts]
print("Datasets associated with this model : ")
pp.pprint(datasets)
print("Endpoints associated with this model : ")
pp.pprint(endpoints)
# Here we use the `DatasetArtifact` class to find all the endpoints hosting models that were trained with a particular dataset
# Find the artifact associated with the dataset
dataset_artifact_arn = list(Artifact.list(source_uri=training_data))[0].artifact_arn
dataset_artifact = DatasetArtifact.load(artifact_arn=dataset_artifact_arn)
# Find the endpoints that used this training dataset
endpoint_contexts = dataset_artifact.endpoint_contexts()
endpoints = [endpoint.source.source_uri for endpoint in endpoint_contexts]
print("Endpoints associated with the training dataset {}".format(training_data))
pp.pprint(endpoints)
###Output
_____no_output_____
###Markdown
Lineage Graph VisualizationA helper class `Visualizer()` is provided in `visualizer.py` to help plot the lineage graph. When the query response is rendered, a graph with the lineage relationships from the `StartArns` is displayed. From the `StartArns` the visualization shows the relationships with the other lineage entities returned in the `query_lineage` API call.
###Code
# Graph APIs
# Here we use the boto3 `query_lineage` API to generate the query response to plot.
from visualizer import Visualizer
query_response = sm_client.query_lineage(
StartArns=[endpoint_context.context_arn], Direction="Ascendants", IncludeEdges=True
)
viz = Visualizer()
viz.render(query_response, "Endpoint")
query_response = sm_client.query_lineage(
StartArns=[model_artifact.artifact_arn], Direction="Ascendants", IncludeEdges=True
)
viz.render(query_response, "Model")
###Output
_____no_output_____
###Markdown
ConclusionThis notebook demonstrated the capabilities of SageMaker Lineage that make it easy for users to keep track of their complex ML workflows. Users can construct their own lineage queries using the `LineageQuery` API and `LineageFilter`, or they can use the functions provided on the `EndpointContext`, `ModelArtifact`, and `DatasetArtifact` classes. In addition, the responses from lineage queries can be plotted using the helper class `Visualizer()` to better understand the relationships between the lineage entities. When using SageMaker Pipelines as part of their ML workflows, users can find Pipeline execution ARNs using the lineage APIs described in this notebook. CleanupIn this section we clean up the resources created in this notebook.
###Code
# Delete endpoint
sm_client.delete_endpoint(EndpointName=endpoint_name)
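# (Optional) The deploy step also created an endpoint configuration. Assuming the
# configuration shares the endpoint's name (the SDK default when an explicit
# endpoint_name is passed to deploy), it could be cleaned up as well:
# sm_client.delete_endpoint_config(EndpointConfigName=endpoint_name)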
# # Delete the model package
sm_client.delete_model_package(ModelPackageName=model_package.model_package_arn)
# Delete the model package group
sm_client.delete_model_package_group(ModelPackageGroupName=model_package_group_name)
# Delete the experiment and trial within it
import time
def delete_experiment(experiment):
for trial_summary in experiment.list_trials():
trial = Trial.load(trial_name=trial_summary.trial_name)
for trial_component_summary in trial.list_trial_components():
tc = TrialComponent.load(
trial_component_name=trial_component_summary.trial_component_name
)
trial.remove_trial_component(tc)
try:
# comment out to keep trial components
tc.delete()
except:
# tc is associated with another trial
continue
# to prevent throttling
time.sleep(0.5)
trial.delete()
experiment_name = experiment.experiment_name
experiment.delete()
print(f"\nExperiment {experiment_name} deleted")
# Delete the Experiment and Trials within it
experiment = Experiment.load(experiment_name=exp.experiment_name)
delete_experiment(experiment)
###Output
_____no_output_____
###Markdown
Amazon SageMaker Multi-hop Lineage QueriesAmazon SageMaker Lineage tracks events that happen within SageMaker allowing the relationships between them to be traced via a graph structure. SageMaker Lineage introduces a new API called `LineageQuery` that allows customers to query the lineage graph structure to discover relationships across their Machine Learning entities. Your machine learning workflows can generate deeply nested relationships; the lineage APIs allow you to answer questions about these relationships. For example, find all Data Sets that trained the model deployed to a given Endpoint, or find all Models trained by a Data Set.The lineage graph is created automatically by SageMaker and you can directly create or modify your own lineage.In addition to the `LineageQuery` API, the SageMaker SDK provides wrapper functions that make it easy to run queries that span across multiple hops of the entity relationship graph. These APIs and helper functions are described in this notebook. Key Concepts* **Lineage Graph** - A connected graph tracing your machine learning workflow end to end. * **Artifacts** - Represents a URI addressable object or data. Artifacts are typically inputs or outputs to Actions. * **Actions** - Represents an action taken such as a computation, transformation, or job. * **Contexts** - Provides a method to logically group other entities.* **Associations** - A directed edge in the lineage graph that links two entities.* **Lineage Traversal** - Starting from an arbitrary point, trace the lineage graph to discover and analyze relationships between steps in your workflow.* **Experiments** - Experiment entities (Experiments, Trials, and Trial Components) are also part of the lineage graph and can be associated with Artifacts, Actions, or Contexts. Prerequisites[`sagemaker-experiments`](https://github.com/aws/sagemaker-experiments) and [`pyvis`](https://pyvis.readthedocs.io/en/latest/) are two Python libraries that need to be installed as part of this notebook execution. `pyvis` is a library designed for interactive network visualization and `sagemaker-experiments` gives users the ability to use SageMaker's Experiment Tracking capabilities. This notebook should be run with `Python 3.9` using the SageMaker Studio `Python3 (Data Science)` kernel. The `sagemaker` SDK version required for this notebook is `>2.70.0`.If running in SageMaker Classic Notebooks, use the `conda_python3` kernel. The AWS account running this notebook should have access to provision 2 instances of type `ml.m5.xlarge`. These instances are used for training and deploying a model. Let's start by installing preview wheels of the Python SDK, boto3, and the AWS CLI
###Code
# Fallback in case wheels are unavailable
! pip install sagemaker botocore boto3 awscli --upgrade
import subprocess
def execute_cmd(cmd):
print(cmd)
output = subprocess.getstatusoutput(cmd)
return output
def _download_from_s3(_file_path):
_path = f"s3://reinvent21-sm-rc-wheels/{_file_path}"
print(f"Path is {_path}")
ls_cmd = f"aws s3 ls {_path}"
print(execute_cmd(ls_cmd))
cmd = f"aws s3 cp {_path} /tmp/"
print("Downloading: ", cmd)
return execute_cmd(cmd)
def _install_wheel(wheel_name):
cmd = f"pip install --no-deps --log /tmp/output3.log /tmp/{wheel_name} --force-reinstall"
ret = execute_cmd(cmd)
_name = wheel_name.split(".")[0]
_, _version = execute_cmd(f"python -c 'import {_name}; print({_name}.__version__)'")
for package in ["botocore", "sagemaker", "boto3", "awscli"]:
print(execute_cmd(f"python -c 'import {package}; print({package}.__version__)'"))
print(f"Installed {_name}:{_version}")
return ret
def install_sm_py_sdk():
pySDK_name = "sagemaker.tar.gz"
exit_code, _ = _download_from_s3("dist/sagemaker.tar.gz")
if not exit_code:
_install_wheel(pySDK_name)
else:
print(f"'{pySDK_name}' is not present in S3 Bucket. Installing from public PyPi...")
execute_cmd("pip install sagemaker")
def install_boto_wheels():
WHEELS = ["botocore.tar.gz", "boto3.tar.gz", "awscli.tar.gz"]
for wheel_name in WHEELS:
_path = f"boto3/{wheel_name}"
exit_code, _ = _download_from_s3(_path)
if not exit_code:
_install_wheel(wheel_name)
else:
print(f"'{wheel_name}' is not present in S3 Bucket. Ignoring...")
install_boto_wheels()
install_sm_py_sdk()
!pip install sagemaker-experiments pyvis
###Output
_____no_output_____
###Markdown
Notebook OverviewThis notebook demonstrates how to use SageMaker Lineage APIs to query multi-hop relationships across the lineage graph. Multi-hop relationships are those that span beyond single entity relationships, e.g. Model -> Endpoint, Training Job -> Model. Multi-hop queries allow users to search for distant relationships across the Lineage Graph such as Endpoint -> Data Set.To demonstrate these capabilities, in this notebook we create a training job, register a model to the Model Registry, and deploy the model to an Endpoint.
###Code
import os
import boto3
import sagemaker
import pprint
from botocore.config import Config
config = Config(retries={"max_attempts": 50, "mode": "adaptive"})
# Apply the retry configuration to the SageMaker client used throughout the notebook
sm_client = boto3.client("sagemaker", config=config)
sagemaker_session = sagemaker.Session(sagemaker_client=sm_client)
region = sagemaker_session.boto_region_name
default_bucket = sagemaker_session.default_bucket()
role = sagemaker.get_execution_role()
# Helper function to print query outputs
pp = pprint.PrettyPrinter()
from datetime import datetime
training_instance_type = "ml.m5.xlarge"
inference_instance_type = "ml.m5.xlarge"
s3_prefix = "multihop-example"
unique_id = str(datetime.now().timestamp()).split(".")[0]
###Output
_____no_output_____
###Markdown
Create an Experiment and Trial for a training job
###Code
from smexperiments.experiment import Experiment
from smexperiments.trial import Trial
from smexperiments.trial_component import TrialComponent
experiment_name = f"MultihopQueryExperiment-{unique_id}"
exp = Experiment.create(experiment_name=experiment_name, sagemaker_boto_client=sm_client)
trial = Trial.create(
experiment_name=exp.experiment_name,
trial_name=f"MultihopQueryTrial-{unique_id}",
sagemaker_boto_client=sm_client,
)
print(exp.experiment_name)
print(trial.trial_name)
###Output
_____no_output_____
###Markdown
Training DataCreating a `data/` directory to store the preprocessed [UCI Abalone](https://archive.ics.uci.edu/ml/datasets/abalone) dataset. The preprocessing is done using the preprocessing script defined in [this](https://github.com/aws/amazon-sagemaker-examples/blob/master/sagemaker-pipelines/tabular/abalone_build_train_deploy/sagemaker-pipelines-preprocess-train-evaluate-batch-transform.ipynb) notebook. Then training and validation data is uploaded to S3 so that it can be used in the training and inference job.
###Code
default_bucket
if not os.path.exists("./data/"):
os.makedirs("./data/")
print("Directory Created ")
else:
print("Directory already exists")
# Download the processed abalone dataset files
s3 = boto3.client("s3")
s3.download_file(
f"sagemaker-sample-files",
"datasets/tabular/uci_abalone/preprocessed/test.csv",
"./data/test.csv",
)
s3.download_file(
f"sagemaker-sample-files",
"datasets/tabular/uci_abalone/preprocessed/train.csv",
"./data/train.csv",
)
s3.download_file(
f"sagemaker-sample-files",
"datasets/tabular/uci_abalone/preprocessed/validation.csv",
"./data/validation.csv",
)
# Upload the datasets to the SageMaker session default bucket
boto3.Session().resource("s3").Bucket(default_bucket).Object(
"experiments-demo/train.csv"
).upload_file("data/train.csv")
boto3.Session().resource("s3").Bucket(default_bucket).Object(
"experiments-demo/validation.csv"
).upload_file("data/validation.csv")
training_data = f"s3://{default_bucket}/experiments-demo/train.csv"
validation_data = f"s3://{default_bucket}/experiments-demo/validation.csv"
###Output
_____no_output_____
###Markdown
Create a training jobWe train a simple XGBoost model on the [Abalone dataset](https://www.google.com/search?client=firefox-b-1-d&q=abalone+dataset). `sagemaker.image_uris.retrieve()` is used to get the SageMaker container image for XGBoost so that it can be used in the Estimator. In the `.fit()` function, we pass in a training and validation dataset along with an `experiment_config`. The `experiment_config` ensures that the metrics, parameters, and artifacts associated with this training job are logged to the experiment and trial created above.
###Code
from sagemaker.estimator import Estimator
model_path = f"s3://{default_bucket}/{s3_prefix}/xgb_model"
training_instance_type = "ml.m5.large"
image_uri = sagemaker.image_uris.retrieve(
framework="xgboost",
region=region,
version="1.0-1",
py_version="py3",
instance_type=training_instance_type,
)
xgb_train = Estimator(
image_uri=image_uri,
instance_type=training_instance_type,
instance_count=1,
output_path=model_path,
sagemaker_session=sagemaker_session,
role=role,
)
xgb_train.set_hyperparameters(
objective="reg:linear",
num_round=50,
max_depth=5,
eta=0.2,
gamma=4,
min_child_weight=6,
subsample=0.7,
silent=0,
)
from sagemaker.inputs import TrainingInput
xgb_train.fit(
inputs={
"train": TrainingInput(
s3_data=training_data,
content_type="text/csv",
),
"validation": TrainingInput(
s3_data=validation_data,
content_type="text/csv",
),
},
experiment_config={
"ExperimentName": experiment_name,
"TrialName": trial.trial_name,
"TrialComponentDisplayName": "MultiHopQueryTrialComponent",
},
)
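# The training job that was just created is the entity the lineage graph will link the
# model back to. As a quick check (a sketch that assumes the estimator has finished
# fitting), its name and final status can be read back from the estimator and the
# SageMaker client:
training_job_name = xgb_train.latest_training_job.name
print("Training job : ", training_job_name)
print(
    "Status : ",
    sm_client.describe_training_job(TrainingJobName=training_job_name)["TrainingJobStatus"],
)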
###Output
_____no_output_____
###Markdown
Create a Model Package Group for the trained model to be registeredCreate a new Model Package Group or use an existing one to register the model
###Code
model_package_group_name = "lineage-test-" + unique_id
mpg = sm_client.create_model_package_group(ModelPackageGroupName=model_package_group_name)
mpg_arn = mpg["ModelPackageGroupArn"]
###Output
_____no_output_____
###Markdown
Register the model in the Model RegistryOnce the model is registered, you will see it in the Model Registry tab of the SageMaker Studio UI. Here the model is registered with the `approval_status` set to "Approved". By default, a model is registered with the `approval_status` set to "PendingManualApproval"; users can then navigate to the Model Registry to manually approve the model based on any criteria set for model evaluation, or this can be done via the API.
###Code
inference_instance_type = "ml.m5.xlarge"
model_package = xgb_train.register(
model_package_group_name=mpg_arn,
inference_instances=[inference_instance_type],
transform_instances=[inference_instance_type],
content_types=["text/csv"],
response_types=["text/csv"],
approval_status="Approved",
)
model_package_arn = model_package.model_package_arn
print("Model Package ARN : ", model_package_arn)
###Output
_____no_output_____
###Markdown
Deploy the model to a SageMaker EndpointA SageMaker Endpoint is used to host a model that can be used for inference. The type of endpoint deployed in this notebook is a real time inference endpoint. This is ideal for inference workloads where you have real-time, interactive, low latency requirements.
###Code
endpoint_name = "lineage-test-endpoint-" + unique_id
model_package.deploy(
endpoint_name=endpoint_name,
initial_instance_count=1,
instance_type=inference_instance_type,
)
# Get the endpoint ARN
endpoint_arn = sm_client.describe_endpoint(EndpointName=endpoint_name)["EndpointArn"]
print(endpoint_arn)
###Output
_____no_output_____
###Markdown
SageMaker Lineage QueriesWe explore SageMaker's lineage capabilities to traverse the relationships between the entities created in this notebook - datasets, model, endpoint, and training job.
###Code
from sagemaker.lineage.context import Context, EndpointContext
from sagemaker.lineage.action import Action
from sagemaker.lineage.association import Association
from sagemaker.lineage.artifact import Artifact, ModelArtifact, DatasetArtifact
from sagemaker.lineage.query import (
LineageQuery,
LineageFilter,
LineageSourceEnum,
LineageEntityEnum,
LineageQueryDirectionEnum,
)
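# The lineage graph is built automatically, but these same classes can also extend it by
# hand. A minimal (not executed) sketch, assuming an artifact ARN such as
# `model_artifact.artifact_arn` is available and that "ContributedTo" is an accepted
# association type; the names and URI below are hypothetical:
#
#   report_artifact = Artifact.create(
#       artifact_name="evaluation-report",              # hypothetical name
#       source_uri="s3://my-bucket/reports/eval.html",  # hypothetical URI
#       artifact_type="Report",
#       sagemaker_session=sagemaker_session,
#   )
#   Association.create(
#       source_arn=model_artifact.artifact_arn,
#       destination_arn=report_artifact.artifact_arn,
#       association_type="ContributedTo",
#       sagemaker_session=sagemaker_session,
#   )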
###Output
_____no_output_____
###Markdown
Using the LineageQuery API to find entity associationsIn this section we use two APIs, `LineageQuery` and `LineageFilter`, to construct queries to answer questions about the Lineage Graph and extract entity relationships. LineageQuery parameters:* `start_arns`: A list of ARNs that will be used as the starting point for the query.* `direction`: The direction of the query.* `include_edges`: If true, return edges in addition to vertices.* `query_filter`: The query filter.LineageFilter parameters:* `entities`: A list of entity types (Artifact, Association, Action) to filter for when returning the results of LineageQuery* `sources`: A list of source types (Endpoint, Model, Dataset) to filter for when returning the results of LineageQueryA `Context` is automatically created when a SageMaker Endpoint is created, and an `Artifact` is automatically created when a Model is created in SageMaker.
###Code
# Find the endpoint context and model artifact that should be used for the lineage queries.
contexts = Context.list(source_uri=endpoint_arn)
context_name = list(contexts)[0].context_name
endpoint_context = EndpointContext.load(context_name=context_name)
###Output
_____no_output_____
###Markdown
Find all datasets associated with an Endpoint
###Code
# Define the LineageFilter to look for entities of type `ARTIFACT` and the source of type `DATASET`.
query_filter = LineageFilter(
entities=[LineageEntityEnum.ARTIFACT], sources=[LineageSourceEnum.DATASET]
)
# Providing this `LineageFilter` to the `LineageQuery` will construct a query that traverses through the given context `endpoint_context`
# and finds all datasets.
query_result = LineageQuery(sagemaker_session).query(
start_arns=[endpoint_context.context_arn],
query_filter=query_filter,
direction=LineageQueryDirectionEnum.ASCENDANTS,
include_edges=False,
)
# Parse through the query results to get the lineage objects corresponding to the datasets
dataset_artifacts = []
for vertex in query_result.vertices:
dataset_artifacts.append(vertex.to_lineage_object().source.source_uri)
pp.pprint(dataset_artifacts)
###Output
_____no_output_____
###Markdown
Find the models associated with an Endpoint
###Code
# Define the LineageFilter to look for entities of type `ARTIFACT` and the source of type `MODEL`.
query_filter = LineageFilter(
entities=[LineageEntityEnum.ARTIFACT], sources=[LineageSourceEnum.MODEL]
)
# Providing this `LineageFilter` to the `LineageQuery` will construct a query that traverses through the given context `endpoint_context`
# and finds all the models associated with the endpoint.
query_result = LineageQuery(sagemaker_session).query(
start_arns=[endpoint_context.context_arn],
query_filter=query_filter,
direction=LineageQueryDirectionEnum.ASCENDANTS,
include_edges=False,
)
# Parse through the query results to get the lineage objects corresponding to the model
model_artifacts = []
for vertex in query_result.vertices:
model_artifacts.append(vertex.to_lineage_object().source.source_uri)
# The results of the `LineageQuery` API call return the ARN of the model deployed to the endpoint along with
# the S3 URI to the model.tar.gz file associated with the model
pp.pprint(model_artifacts)
###Output
_____no_output_____
###Markdown
Find the trial components associated with the endpoint
###Code
# Define the LineageFilter to look for entities of type `TRIAL_COMPONENT` and the source of type `TRAINING_JOB`.
query_filter = LineageFilter(
entities=[LineageEntityEnum.TRIAL_COMPONENT],
sources=[LineageSourceEnum.TRAINING_JOB],
)
# Providing this `LineageFilter` to the `LineageQuery` will construct a query that traverses through the given context `endpoint_context`
# and finds the trial components associated with the endpoint.
query_result = LineageQuery(sagemaker_session).query(
start_arns=[endpoint_context.context_arn],
query_filter=query_filter,
direction=LineageQueryDirectionEnum.ASCENDANTS,
include_edges=False,
)
# Parse through the query results to get the ARNs of the training jobs associated with this Endpoint
trial_components = []
for vertex in query_result.vertices:
trial_components.append(vertex.arn)
pp.pprint(trial_components)
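# The ARNs above point at TrialComponents rather than lineage Artifacts. As a rough
# sketch (assuming the trial component name is the final segment of its ARN), the
# smexperiments SDK imported earlier can load one of them for closer inspection:
if trial_components:
    tc_name = trial_components[0].split("/")[-1]
    tc = TrialComponent.load(trial_component_name=tc_name)
    print("Loaded trial component : ", tc.trial_component_name)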
###Output
_____no_output_____
###Markdown
Changing the focal point of lineageThe `LineageQuery` can be modified to have different `start_arns` which will change the focal point of lineage. In addition, the `LineageFilter` can take multiple sources and entities to expand the scope of the query. **Here we use the model as the lineage focal point and find the Endpoints and Datasets associated with it.**
###Code
# Get the ModelArtifact
model_artifact_summary = list(Artifact.list(source_uri=model_package_arn))[0]
model_artifact = ModelArtifact.load(artifact_arn=model_artifact_summary.artifact_arn)
query_filter = LineageFilter(
entities=[LineageEntityEnum.ARTIFACT],
sources=[LineageSourceEnum.ENDPOINT, LineageSourceEnum.DATASET],
)
query_result = LineageQuery(sagemaker_session).query(
start_arns=[model_artifact.artifact_arn], # Model is the starting artifact
query_filter=query_filter,
# Find all the entities that descend from the model, i.e. the endpoint
direction=LineageQueryDirectionEnum.DESCENDANTS,
include_edges=False,
)
associations = []
for vertex in query_result.vertices:
associations.append(vertex.to_lineage_object().source.source_uri)
query_result = LineageQuery(sagemaker_session).query(
start_arns=[model_artifact.artifact_arn], # Model is the starting artifact
query_filter=query_filter,
# Find all the entities that ascend from the model, i.e. the datasets
direction=LineageQueryDirectionEnum.ASCENDANTS,
include_edges=False,
)
for vertex in query_result.vertices:
associations.append(vertex.to_lineage_object().source.source_uri)
pp.pprint(associations)
###Output
_____no_output_____
###Markdown
Using LineageQueryDirectionEnum.BOTHWhen the direction is set to `BOTH`, the query traverses the graph to find ascendant and descendant relationships, and the traversal takes place not only from the starting node but from each node that is visited. For example, if the training job is run twice and both models generated by the training job are deployed to endpoints, the result of the query with direction set to `BOTH` will show both endpoints. This is because the same image is used for training and deploying the model. Since the image is common to the model (`start_arn`) and both the endpoints, it will appear in the query result.
###Code
query_filter = LineageFilter(
entities=[LineageEntityEnum.ARTIFACT],
sources=[LineageSourceEnum.ENDPOINT, LineageSourceEnum.DATASET],
)
query_result = LineageQuery(sagemaker_session).query(
start_arns=[model_artifact.artifact_arn], # Model is the starting artifact
query_filter=query_filter,
# This specifies that the query should look for associations both ascending and descending for the start
direction=LineageQueryDirectionEnum.BOTH,
include_edges=False,
)
associations = []
for vertex in query_result.vertices:
associations.append(vertex.to_lineage_object().source.source_uri)
pp.pprint(associations)
###Output
_____no_output_____
###Markdown
Directions in `LineageQuery` - `ASCENDANTS` vs. `DESCENDANTS`To understand the direction in the Lineage Graph, take the following entity relationship graph - Dataset -> Training Job -> Model -> EndpointThe endpoint is a **descendant** of the model, and the model is a **descendant** of the dataset. Similarly, the model is an **ascendant** of the endpoint. The `direction` parameter can be used to specify whether the query should return entities that are descendants or ascendants of the entity in `start_arns`. If `start_arns` contains a model and the direction is `DESCENDANTS`, the query will return the endpoint. If the direction is `ASCENDANTS`, the query will return the dataset.
###Code
# In this example, we'll look at the impact of specifying the direction as ASCENDANT or DESCENDANT in a `LineageQuery`.
query_filter = LineageFilter(
entities=[LineageEntityEnum.ARTIFACT],
sources=[
LineageSourceEnum.ENDPOINT,
LineageSourceEnum.MODEL,
LineageSourceEnum.DATASET,
LineageSourceEnum.TRAINING_JOB,
],
)
query_result = LineageQuery(sagemaker_session).query(
start_arns=[model_artifact.artifact_arn],
query_filter=query_filter,
direction=LineageQueryDirectionEnum.ASCENDANTS,
include_edges=False,
)
ascendant_artifacts = []
# The lineage entity returned for the Training Job is a TrialComponent which can't be converted to a
# lineage object using the method `to_lineage_object()` so we extract the TrialComponent ARN.
for vertex in query_result.vertices:
try:
ascendant_artifacts.append(vertex.to_lineage_object().source.source_uri)
except:
ascendant_artifacts.append(vertex.arn)
print("Ascendant artifacts : ")
pp.pprint(ascendant_artifacts)
query_result = LineageQuery(sagemaker_session).query(
start_arns=[model_artifact.artifact_arn],
query_filter=query_filter,
direction=LineageQueryDirectionEnum.DESCENDANTS,
include_edges=False,
)
descendant_artifacts = []
for vertex in query_result.vertices:
try:
descendant_artifacts.append(vertex.to_lineage_object().source.source_uri)
except:
# Handling TrialComponents.
descendant_artifacts.append(vertex.arn)
print("Descendant artifacts : ")
pp.pprint(descendant_artifacts)
###Output
_____no_output_____
###Markdown
SDK helper FunctionsThe classes `EndpointContext`, `ModelArtifact`, and `DatasetArtifact` have helper functions that are wrappers over the `LineageQuery` API to make certain lineage queries easier to leverage.
###Code
# Find all the datasets associated with this endpoint
datasets = []
dataset_artifacts = endpoint_context.dataset_artifacts()
for dataset in dataset_artifacts:
datasets.append(dataset.source.source_uri)
print("Datasets : ", datasets)
# Find the training jobs associated with the endpoint
training_job_artifacts = endpoint_context.training_job_arns()
training_jobs = []
for training_job in training_job_artifacts:
training_jobs.append(training_job)
print("Training Jobs : ", training_jobs)
# Get the ARN for the pipeline execution associated with this endpoint (if any)
pipeline_executions = endpoint_context.pipeline_execution_arn()
if pipeline_executions:
    for pipeline in pipeline_executions:
print(pipeline)
# Here we use the `ModelArtifact` class to find all the datasets and endpoints associated with the model
dataset_artifacts = model_artifact.dataset_artifacts()
endpoint_contexts = model_artifact.endpoint_contexts()
datasets = [dataset.source.source_uri for dataset in dataset_artifacts]
endpoints = [endpoint.source.source_uri for endpoint in endpoint_contexts]
print("Datasets associated with this model : ")
pp.pprint(datasets)
print("Endpoints associated with this model : ")
pp.pprint(endpoints)
# Here we use the `DatasetArtifact` class to find all the endpoints hosting models that were trained with a particular dataset
# Find the artifact associated with the dataset
dataset_artifact_arn = list(Artifact.list(source_uri=training_data))[0].artifact_arn
dataset_artifact = DatasetArtifact.load(artifact_arn=dataset_artifact_arn)
# Find the endpoints that used this training dataset
endpoint_contexts = dataset_artifact.endpoint_contexts()
endpoints = [endpoint.source.source_uri for endpoint in endpoint_contexts]
print("Endpoints associated with the training dataset {}".format(training_data))
pp.pprint(endpoints)
###Output
_____no_output_____
###Markdown
Lineage Graph VisualizationA helper class `Visualizer()` is provided in `visualizer.py` to help plot the lineage graph. When the query response is rendered, a graph with the lineage relationships from the `StartArns` will be displayed. From the `StartArns` the visualization will show the relationships with the other lineage entities returned in the `query_lineage` API call.
###Code
# Graph APIs
# Here we use the boto3 `query_lineage` API to generate the query response to plot.
from visualizer import Visualizer
query_response = sm_client.query_lineage(
StartArns=[endpoint_context.context_arn], Direction="Ascendants", IncludeEdges=True
)
viz = Visualizer()
viz.render(query_response, "Endpoint")
query_response = sm_client.query_lineage(
StartArns=[model_artifact.artifact_arn], Direction="Ascendants", IncludeEdges=True
)
viz.render(query_response, "Model")
###Output
_____no_output_____
###Markdown
ConclusionThis notebook demonstrated the capabilities of SageMaker Lineage that make it easy for users to keep track of their complex ML workflows. Users can construct their own lineage queries using the `LineageQuery` API and `LineageFilter`, or they can use the functions provided on the `EndpointContext`, `ModelArtifact`, and `DatasetArtifact` classes. In addition, the responses from lineage queries can be plotted using the helper class `Visualizer()` to better understand the relationships between the lineage entities. When using SageMaker Pipelines as part of their ML workflows, users can find Pipeline execution ARNs using the lineage APIs described in this notebook. CleanupIn this section we will clean up the resources created in this notebook.
###Code
# Delete endpoint
sm_client.delete_endpoint(EndpointName=endpoint_name)
# # Delete the model package
sm_client.delete_model_package(ModelPackageName=model_package.model_package_arn)
# Delete the model package group
sm_client.delete_model_package_group(ModelPackageGroupName=model_package_group_name)
# Delete the experiment and trial within it
import time
def delete_experiment(experiment):
for trial_summary in experiment.list_trials():
trial = Trial.load(trial_name=trial_summary.trial_name)
for trial_component_summary in trial.list_trial_components():
tc = TrialComponent.load(
trial_component_name=trial_component_summary.trial_component_name
)
trial.remove_trial_component(tc)
try:
# comment out to keep trial components
tc.delete()
except:
# tc is associated with another trial
continue
# to prevent throttling
time.sleep(0.5)
trial.delete()
experiment_name = experiment.experiment_name
experiment.delete()
print(f"\nExperiment {experiment_name} deleted")
# Delete the Experiment and Trials within it
experiment = Experiment.load(experiment_name=exp.experiment_name)
delete_experiment(experiment)
###Output
_____no_output_____
###Markdown
Amazon SageMaker Multi-hop Lineage QueriesAmazon SageMaker Lineage tracks events that happen within SageMaker allowing the relationships between them to be traced via a graph structure. SageMaker Lineage introduces a new API called `LineageQuery` that allows customers to query the lineage graph structure to discover relationships across their Machine Learning entities. Your machine learning workflows can generate deeply nested relationships; the lineage APIs allow you to answer questions about these relationships. For example, find all Data Sets that trained the model deployed to a given Endpoint, or find all Models trained by a Data Set.The lineage graph is created automatically by SageMaker and you can directly create or modify your own lineage.In addition to the `LineageQuery` API, the SageMaker SDK provides wrapper functions that make it easy to run queries that span across multiple hops of the entity relationship graph. These APIs and helper functions are described in this notebook. Key Concepts* **Lineage Graph** - A connected graph tracing your machine learning workflow end to end. * **Artifacts** - Represents a URI addressable object or data. Artifacts are typically inputs or outputs to Actions. * **Actions** - Represents an action taken such as a computation, transformation, or job. * **Contexts** - Provides a method to logically group other entities.* **Associations** - A directed edge in the lineage graph that links two entities.* **Lineage Traversal** - Starting from an arbitrary point, trace the lineage graph to discover and analyze relationships between steps in your workflow.* **Experiments** - Experiment entities (Experiments, Trials, and Trial Components) are also part of the lineage graph and can be associated with Artifacts, Actions, or Contexts. Prerequisites[`sagemaker-experiments`](https://github.com/aws/sagemaker-experiments) and [`pyvis`](https://pyvis.readthedocs.io/en/latest/) are two Python libraries that need to be installed as part of this notebook execution. `pyvis` is a library designed for interactive network visualization and `sagemaker-experiments` gives users the ability to use SageMaker's Experiment Tracking capabilities. This notebook should be run with `Python 3.9` using the SageMaker Studio `Python3 (Data Science)` kernel. The `sagemaker` SDK version required for this notebook is `>2.70.0`.If running in SageMaker Classic Notebooks, use the `conda_python3` kernel. The AWS account running this notebook should have access to provision 2 instances of type `ml.m5.xlarge`. These instances are used for training and deploying a model. Let's start by installing preview wheels of the Python SDK, boto3, and the AWS CLI
###Code
# Fallback in case wheels are unavailable
! pip install sagemaker botocore boto3 awscli --upgrade
import subprocess
def execute_cmd(cmd):
print(cmd)
output = subprocess.getstatusoutput(cmd)
return output
def _download_from_s3(_file_path):
_path = f"s3://reinvent21-sm-rc-wheels/{_file_path}"
print(f"Path is {_path}")
ls_cmd = f"aws s3 ls {_path}"
print(execute_cmd(ls_cmd))
cmd = f"aws s3 cp {_path} /tmp/"
print("Downloading: ", cmd)
return execute_cmd(cmd)
def _install_wheel(wheel_name):
cmd = f"pip install --no-deps --log /tmp/output3.log /tmp/{wheel_name} --force-reinstall"
ret = execute_cmd(cmd)
_name = wheel_name.split(".")[0]
_, _version = execute_cmd(f"python -c 'import {_name}; print({_name}.__version__)'")
for package in ["botocore", "sagemaker", "boto3", "awscli"]:
print(execute_cmd(f"python -c 'import {package}; print({package}.__version__)'"))
print(f"Installed {_name}:{_version}")
return ret
def install_sm_py_sdk():
pySDK_name = "sagemaker.tar.gz"
exit_code, _ = _download_from_s3("dist/sagemaker.tar.gz")
if not exit_code:
_install_wheel(pySDK_name)
else:
print(f"'{pySDK_name}' is not present in S3 Bucket. Installing from public PyPi...")
execute_cmd("pip install sagemaker")
def install_boto_wheels():
WHEELS = ["botocore.tar.gz", "boto3.tar.gz", "awscli.tar.gz"]
for wheel_name in WHEELS:
_path = f"boto3/{wheel_name}"
exit_code, _ = _download_from_s3(_path)
if not exit_code:
_install_wheel(wheel_name)
else:
print(f"'{wheel_name}' is not present in S3 Bucket. Ignoring...")
install_boto_wheels()
install_sm_py_sdk()
!pip install sagemaker-experiments pyvis
###Output
_____no_output_____
###Markdown
Notebook OverviewThis notebook demonstrates how to use SageMaker Lineage APIs to query multi-hop relationships across the lineage graph. Multi-hop relationships are those that span beyond single entity relationships, e.g. Model -> Endpoint, Training Job -> Model. Multi-hop queries allow users to search for distant relationships across the Lineage Graph such as Endpoint -> Data Set.To demonstrate these capabilities, in this notebook we create a training job, register a model to the Model Registry, and deploy the model to an Endpoint.
###Code
import os
import boto3
import sagemaker
import pprint
from botocore.config import Config
boto_session = boto3.Session()
config = Config(retries={"max_attempts": 50, "mode": "adaptive"})
sm_client = boto3.client("sagemaker", config=config)
region = boto_session.region_name
sagemaker_session = sagemaker.Session(sagemaker_client=sm_client, boto_session=boto_session)
default_bucket = sagemaker_session.default_bucket()
role = sagemaker.get_execution_role()
# Helper function to print query outputs
pp = pprint.PrettyPrinter()
from datetime import datetime
training_instance_type = "ml.m5.xlarge"
inference_instance_type = "ml.m5.xlarge"
s3_prefix = "multihop-example"
unique_id = str(datetime.now().timestamp()).split(".")[0]
###Output
_____no_output_____
###Markdown
Create an Experiment and Trial for a training job
###Code
from smexperiments.experiment import Experiment
from smexperiments.trial import Trial
from smexperiments.trial_component import TrialComponent
experiment_name = f"MultihopQueryExperiment-{unique_id}"
exp = Experiment.create(experiment_name=experiment_name, sagemaker_boto_client=sm_client)
trial = Trial.create(
experiment_name=exp.experiment_name,
trial_name=f"MultihopQueryTrial-{unique_id}",
sagemaker_boto_client=sm_client,
)
print(exp.experiment_name)
print(trial.trial_name)
###Output
_____no_output_____
###Markdown
Training DataUpload the training data provided in `data/` to S3 so that it can be used in the training job. The data in the folder has been created by preprocessing the [UCI Abalone](https://archive.ics.uci.edu/ml/datasets/abalone) dataset using the preprocessing script defined in [this](https://github.com/aws/amazon-sagemaker-examples/blob/master/sagemaker-pipelines/tabular/abalone_build_train_deploy/sagemaker-pipelines-preprocess-train-evaluate-batch-transform.ipynb) notebook.
###Code
# Download the processed abalone dataset files
!aws s3 cp s3://sagemaker-sample-files/datasets/tabular/uci_abalone/preprocessed/train.csv data/train.csv
!aws s3 cp s3://sagemaker-sample-files/datasets/tabular/uci_abalone/preprocessed/test.csv data/test.csv
!aws s3 cp s3://sagemaker-sample-files/datasets/tabular/uci_abalone/preprocessed/validation.csv data/validation.csv
# Upload the datasets to the SageMaker session default bucket
!aws s3 cp data/train.csv s3://{default_bucket}/experiments-demo/train.csv
!aws s3 cp data/validation.csv s3://{default_bucket}/experiments-demo/validation.csv
training_data = f"s3://{default_bucket}/experiments-demo/train.csv"
validation_data = f"s3://{default_bucket}/experiments-demo/validation.csv"
###Output
_____no_output_____
###Markdown
Create a training jobWe train a simple XGBoost model on the [Abalone dataset](https://www.google.com/search?client=firefox-b-1-d&q=abalone+dataset). `sagemaker.image_uris.retrieve()` is used to get the SageMaker container image for XGBoost so that it can be used in the Estimator. In the `.fit()` function, we pass in a training and validation dataset along with an `experiment_config`. The `experiment_config` ensures that the metrics, parameters, and artifacts associated with this training job are logged to the experiment and trial created above.
###Code
from sagemaker.estimator import Estimator
model_path = f"s3://{default_bucket}/{s3_prefix}/xgb_model"
training_instance_type = "ml.m5.large"
image_uri = sagemaker.image_uris.retrieve(
framework="xgboost",
region=region,
version="1.0-1",
py_version="py3",
instance_type=training_instance_type,
)
xgb_train = Estimator(
image_uri=image_uri,
instance_type=training_instance_type,
instance_count=1,
output_path=model_path,
sagemaker_session=sagemaker_session,
role=role,
)
xgb_train.set_hyperparameters(
objective="reg:linear",
num_round=50,
max_depth=5,
eta=0.2,
gamma=4,
min_child_weight=6,
subsample=0.7,
silent=0,
)
from sagemaker.inputs import TrainingInput
xgb_train.fit(
inputs={
"train": TrainingInput(
s3_data=training_data,
content_type="text/csv",
),
"validation": TrainingInput(
s3_data=validation_data,
content_type="text/csv",
),
},
experiment_config={
"ExperimentName": experiment_name,
"TrialName": trial.trial_name,
"TrialComponentDisplayName": "MultiHopQueryTrialComponent",
},
)
###Output
_____no_output_____
###Markdown
Create a Model Package Group for the trained model to be registeredCreate a new Model Package Group or use an existing one to register the model
###Code
model_package_group_name = "lineage-test-" + unique_id
mpg = sm_client.create_model_package_group(ModelPackageGroupName=model_package_group_name)
mpg_arn = mpg["ModelPackageGroupArn"]
###Output
_____no_output_____
###Markdown
Register the model in the Model RegistryOnce the model is registered, you will see it in the Model Registry tab of the SageMaker Studio UI. Here the model is registered with the `approval_status` set to "Approved". By default, a model is registered with the `approval_status` set to "PendingManualApproval"; users can then navigate to the Model Registry to manually approve the model based on any criteria set for model evaluation, or this can be done via the API.
###Code
inference_instance_type = "ml.m5.xlarge"
model_package = xgb_train.register(
model_package_group_name=mpg_arn,
inference_instances=[inference_instance_type],
transform_instances=[inference_instance_type],
content_types=["text/csv"],
response_types=["text/csv"],
approval_status="Approved",
)
model_package_arn = model_package.model_package_arn
print("Model Package ARN : ", model_package_arn)
###Output
_____no_output_____
###Markdown
Deploy the model to a SageMaker EndpointA SageMaker Endpoint is used to host a model that can be used for inference. The type of endpoint deployed in this notebook is a real time inference endpoint. This is ideal for inference workloads where you have real-time, interactive, low latency requirements.
###Code
endpoint_name = "lineage-test-endpoint-" + unique_id
model_package.deploy(
endpoint_name=endpoint_name,
initial_instance_count=1,
instance_type=inference_instance_type,
)
# Get the endpoint ARN
endpoint_arn = sm_client.describe_endpoint(EndpointName=endpoint_name)["EndpointArn"]
print(endpoint_arn)
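# As a quick sanity check (a sketch that assumes the preprocessed Abalone CSV keeps the
# label in its first column, the layout expected by the built-in XGBoost algorithm), the
# real-time endpoint can be invoked with a single CSV row:
smrt_client = boto3.client("sagemaker-runtime", region_name=region)
with open("data/validation.csv") as f:
    # Drop the label column before sending the row for inference
    payload = ",".join(f.readline().strip().split(",")[1:])
response = smrt_client.invoke_endpoint(
    EndpointName=endpoint_name, ContentType="text/csv", Body=payload
)
print("Sample prediction : ", response["Body"].read().decode("utf-8"))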
###Output
_____no_output_____
###Markdown
SageMaker Lineage QueriesWe explore SageMaker's lineage capabilities to traverse the relationships between the entities created in this notebook - datasets, model, endpoint, and training job.
###Code
from sagemaker.lineage.context import Context, EndpointContext
from sagemaker.lineage.action import Action
from sagemaker.lineage.association import Association
from sagemaker.lineage.artifact import Artifact, ModelArtifact, DatasetArtifact
from sagemaker.lineage.query import (
LineageQuery,
LineageFilter,
LineageSourceEnum,
LineageEntityEnum,
LineageQueryDirectionEnum,
)
###Output
_____no_output_____
###Markdown
Using the LineageQuery API to find entity associationsIn this section we use two APIs, `LineageQuery` and `LineageFilter`, to construct queries to answer questions about the Lineage Graph and extract entity relationships. LineageQuery parameters:* `start_arns`: A list of ARNs that will be used as the starting point for the query.* `direction`: The direction of the query.* `include_edges`: If true, return edges in addition to vertices.* `query_filter`: The query filter.LineageFilter parameters:* `entities`: A list of entity types (Artifact, Association, Action) to filter for when returning the results of LineageQuery* `sources`: A list of source types (Endpoint, Model, Dataset) to filter for when returning the results of LineageQueryA `Context` is automatically created when a SageMaker Endpoint is created, and an `Artifact` is automatically created when a Model is created in SageMaker.
###Code
# Find the endpoint context and model artifact that should be used for the lineage queries.
contexts = Context.list(source_uri=endpoint_arn)
context_name = list(contexts)[0].context_name
endpoint_context = EndpointContext.load(context_name=context_name)
###Output
_____no_output_____
###Markdown
Find all datasets associated with an Endpoint
###Code
# Define the LineageFilter to look for entities of type `ARTIFACT` and the source of type `DATASET`.
query_filter = LineageFilter(
entities=[LineageEntityEnum.ARTIFACT], sources=[LineageSourceEnum.DATASET]
)
# Providing this `LineageFilter` to the `LineageQuery` will construct a query that traverses through the given context `endpoint_context`
# and finds all datasets.
query_result = LineageQuery(sagemaker_session).query(
start_arns=[endpoint_context.context_arn],
query_filter=query_filter,
direction=LineageQueryDirectionEnum.ASCENDANTS,
include_edges=False,
)
# Parse through the query results to get the lineage objects corresponding to the datasets
dataset_artifacts = []
for vertex in query_result.vertices:
dataset_artifacts.append(vertex.to_lineage_object().source.source_uri)
pp.pprint(dataset_artifacts)
###Output
_____no_output_____
###Markdown
Find the models associated with an Endpoint
###Code
# Define the LineageFilter to look for entities of type `ARTIFACT` and the source of type `MODEL`.
query_filter = LineageFilter(
entities=[LineageEntityEnum.ARTIFACT], sources=[LineageSourceEnum.MODEL]
)
# Providing this `LineageFilter` to the `LineageQuery` will construct a query that traverses through the given context `endpoint_context`
# and finds all the models associated with the endpoint.
query_result = LineageQuery(sagemaker_session).query(
start_arns=[endpoint_context.context_arn],
query_filter=query_filter,
direction=LineageQueryDirectionEnum.ASCENDANTS,
include_edges=False,
)
# Parse through the query results to get the lineage objects corresponding to the model
model_artifacts = []
for vertex in query_result.vertices:
model_artifacts.append(vertex.to_lineage_object().source.source_uri)
# The results of the `LineageQuery` API call return the ARN of the model deployed to the endpoint along with
# the S3 URI to the model.tar.gz file associated with the model
pp.pprint(model_artifacts)
###Output
_____no_output_____
###Markdown
Find the trial components associated with the endpoint
###Code
# Define the LineageFilter to look for entities of type `TRIAL_COMPONENT` and the source of type `TRAINING_JOB`.
query_filter = LineageFilter(
entities=[LineageEntityEnum.TRIAL_COMPONENT],
sources=[LineageSourceEnum.TRAINING_JOB],
)
# Providing this `LineageFilter` to the `LineageQuery` will construct a query that traverses through the given context `endpoint_context`
# and finds the trial components associated with the endpoint.
query_result = LineageQuery(sagemaker_session).query(
start_arns=[endpoint_context.context_arn],
query_filter=query_filter,
direction=LineageQueryDirectionEnum.ASCENDANTS,
include_edges=False,
)
# Parse through the query results to get the ARNs of the training jobs associated with this Endpoint
trial_components = []
for vertex in query_result.vertices:
trial_components.append(vertex.arn)
pp.pprint(trial_components)
###Output
_____no_output_____
###Markdown
Changing the focal point of lineageThe `LineageQuery` can be modified to have different `start_arns` which will change the focal point of lineage. In addition, the `LineageFilter` can take multiple sources and entities to expand the scope of the query. **Here we use the model as the lineage focal point and find the Endpoints and Datasets associated with it.**
###Code
# Get the ModelArtifact
model_artifact_summary = list(Artifact.list(source_uri=model_package_arn))[0]
model_artifact = ModelArtifact.load(artifact_arn=model_artifact_summary.artifact_arn)
query_filter = LineageFilter(
entities=[LineageEntityEnum.ARTIFACT],
sources=[LineageSourceEnum.ENDPOINT, LineageSourceEnum.DATASET],
)
query_result = LineageQuery(sagemaker_session).query(
start_arns=[model_artifact.artifact_arn], # Model is the starting artifact
query_filter=query_filter,
# Find all the entities that descend from the model, i.e. the endpoint
direction=LineageQueryDirectionEnum.DESCENDANTS,
include_edges=False,
)
associations = []
for vertex in query_result.vertices:
associations.append(vertex.to_lineage_object().source.source_uri)
query_result = LineageQuery(sagemaker_session).query(
start_arns=[model_artifact.artifact_arn], # Model is the starting artifact
query_filter=query_filter,
# Find all the entities that ascend from the model, i.e. the datasets
direction=LineageQueryDirectionEnum.ASCENDANTS,
include_edges=False,
)
for vertex in query_result.vertices:
associations.append(vertex.to_lineage_object().source.source_uri)
pp.pprint(associations)
###Output
_____no_output_____
###Markdown
Using LineageQueryDirectionEnum.BOTHWhen the direction is set to `BOTH`, the query traverses the graph to find ascendant and descendant relationships, and the traversal takes place not only from the starting node but from each node that is visited. For example, if the training job is run twice and both models generated by the training job are deployed to endpoints, the result of the query with direction set to `BOTH` will show both endpoints. This is because the same image is used for training and deploying the model. Since the image is common to the model (`start_arn`) and both the endpoints, it will appear in the query result.
###Code
query_filter = LineageFilter(
entities=[LineageEntityEnum.ARTIFACT],
sources=[LineageSourceEnum.ENDPOINT, LineageSourceEnum.DATASET],
)
query_result = LineageQuery(sagemaker_session).query(
start_arns=[model_artifact.artifact_arn], # Model is the starting artifact
query_filter=query_filter,
# This specifies that the query should look for associations both ascending and descending for the start
direction=LineageQueryDirectionEnum.BOTH,
include_edges=False,
)
associations = []
for vertex in query_result.vertices:
associations.append(vertex.to_lineage_object().source.source_uri)
pp.pprint(associations)
###Output
_____no_output_____
###Markdown
Directions in `LineageQuery` - `ASCENDANTS` vs. `DESCENDANTS`To understand the direction in the Lineage Graph, take the following entity relationship graph - Dataset -> Training Job -> Model -> EndpointThe endpoint is a **descendant** of the model, and the model is a **descendant** of the dataset. Similarly, the model is an **ascendant** of the endpoint. The `direction` parameter can be used to specify whether the query should return entities that are descendants or ascendants of the entity in `start_arns`. If `start_arns` contains a model and the direction is `DESCENDANTS`, the query will return the endpoint. If the direction is `ASCENDANTS`, the query will return the dataset.
###Code
# In this example, we'll look at the impact of specifying the direction as ASCENDANT or DESCENDANT in a `LineageQuery`.
query_filter = LineageFilter(
entities=[LineageEntityEnum.ARTIFACT],
sources=[
LineageSourceEnum.ENDPOINT,
LineageSourceEnum.MODEL,
LineageSourceEnum.DATASET,
LineageSourceEnum.TRAINING_JOB,
],
)
query_result = LineageQuery(sagemaker_session).query(
start_arns=[model_artifact.artifact_arn],
query_filter=query_filter,
direction=LineageQueryDirectionEnum.ASCENDANTS,
include_edges=False,
)
ascendant_artifacts = []
# The lineage entity returned for the Training Job is a TrialComponent which can't be converted to a
# lineage object using the method `to_lineage_object()` so we extract the TrialComponent ARN.
for vertex in query_result.vertices:
try:
ascendant_artifacts.append(vertex.to_lineage_object().source.source_uri)
except:
ascendant_artifacts.append(vertex.arn)
print("Ascendant artifacts : ")
pp.pprint(ascendant_artifacts)
query_result = LineageQuery(sagemaker_session).query(
start_arns=[model_artifact.artifact_arn],
query_filter=query_filter,
direction=LineageQueryDirectionEnum.DESCENDANTS,
include_edges=False,
)
descendant_artifacts = []
for vertex in query_result.vertices:
try:
descendant_artifacts.append(vertex.to_lineage_object().source.source_uri)
except:
# Handling TrialComponents.
descendant_artifacts.append(vertex.arn)
print("Descendant artifacts : ")
pp.pprint(descendant_artifacts)
###Output
_____no_output_____
###Markdown
SDK helper FunctionsThe classes `EndpointContext`, `ModelArtifact`, and `DatasetArtifact` have helper functions that are wrappers over the `LineageQuery` API to make certain lineage queries easier to leverage.
###Code
# Find all the datasets associated with this endpoint
datasets = []
dataset_artifacts = endpoint_context.dataset_artifacts()
for dataset in dataset_artifacts:
datasets.append(dataset.source.source_uri)
print("Datasets : ", datasets)
# Find the training jobs associated with the endpoint
training_job_artifacts = endpoint_context.training_job_arns()
training_jobs = []
for training_job in training_job_artifacts:
training_jobs.append(training_job)
print("Training Jobs : ", training_jobs)
# Get the ARN for the pipeline execution associated with this endpoint (if any)
pipeline_executions = endpoint_context.pipeline_execution_arn()
if pipeline_executions:
    for pipeline in pipeline_executions:
print(pipeline)
# Here we use the `ModelArtifact` class to find all the datasets and endpoints associated with the model
dataset_artifacts = model_artifact.dataset_artifacts()
endpoint_contexts = model_artifact.endpoint_contexts()
datasets = [dataset.source.source_uri for dataset in dataset_artifacts]
endpoints = [endpoint.source.source_uri for endpoint in endpoint_contexts]
print("Datasets associated with this model : ")
pp.pprint(datasets)
print("Endpoints associated with this model : ")
pp.pprint(endpoints)
# Here we use the `DatasetArtifact` class to find all the endpoints hosting models that were trained with a particular dataset
# Find the artifact associated with the dataset
dataset_artifact_arn = list(Artifact.list(source_uri=training_data))[0].artifact_arn
dataset_artifact = DatasetArtifact.load(artifact_arn=dataset_artifact_arn)
# Find the endpoints that used this training dataset
endpoint_contexts = dataset_artifact.endpoint_contexts()
endpoints = [endpoint.source.source_uri for endpoint in endpoint_contexts]
print("Endpoints associated with the training dataset {}".format(training_data))
pp.pprint(endpoints)
###Output
_____no_output_____
###Markdown
Lineage Graph VisualizationA helper class `Visualizer()` is provided in `visualizer.py` to help plot the lineage graph. When the query response is rendered, a graph with the lineage relationships from the `StartArns` will be displayed. From the `StartArns` the visualization will show the relationships with the other lineage entities returned in the `query_lineage` API call.
###Code
# Graph APIs
# Here we use the boto3 `query_lineage` API to generate the query response to plot.
from visualizer import Visualizer
query_response = sm_client.query_lineage(
StartArns=[endpoint_context.context_arn], Direction="Ascendants", IncludeEdges=True
)
viz = Visualizer()
viz.render(query_response, "Endpoint")
query_response = sm_client.query_lineage(
StartArns=[model_artifact.artifact_arn], Direction="Ascendants", IncludeEdges=True
)
viz.render(query_response, "Model")
###Output
_____no_output_____
###Markdown
ConclusionThis notebook demonstrated the capabilities of SageMaker Lineage that make it easy for users to keep track of their complex ML workflows. Users can construct their own lineage queries using the `LineageQuery` API and `LineageFilter`, or they can use the functions provided on the `EndpointContext`, `ModelArtifact`, and `DatasetArtifact` classes. In addition, the responses from lineage queries can be plotted using the helper class `Visualizer()` to better understand the relationships between the lineage entities. When using SageMaker Pipelines as part of their ML workflows, users can find Pipeline execution ARNs using the lineage APIs described in this notebook. CleanupIn this section we will clean up the resources created in this notebook.
###Code
# Delete endpoint
sm_client.delete_endpoint(EndpointName=endpoint_name)
# # Delete the model package
sm_client.delete_model_package(ModelPackageName=model_package.model_package_arn)
# Delete the model package group
sm_client.delete_model_package_group(ModelPackageGroupName=model_package_group_name)
# Delete the experiment and trial within it
import time
def delete_experiment(experiment):
for trial_summary in experiment.list_trials():
trial = Trial.load(trial_name=trial_summary.trial_name)
for trial_component_summary in trial.list_trial_components():
tc = TrialComponent.load(
trial_component_name=trial_component_summary.trial_component_name
)
trial.remove_trial_component(tc)
try:
# comment out to keep trial components
tc.delete()
except:
# tc is associated with another trial
continue
# to prevent throttling
time.sleep(0.5)
trial.delete()
experiment_name = experiment.experiment_name
experiment.delete()
print(f"\nExperiment {experiment_name} deleted")
# Delete the Experiment and Trials within it
experiment = Experiment.load(experiment_name=exp.experiment_name)
delete_experiment(experiment)
###Output
_____no_output_____
###Markdown
Changing the focal point of lineageThe `LineageQuery` can be modified to have different `start_arns` which will change the focal point of lineage. In addition, the `LineageFilter` can take multiple sources and entities to expand the scope of the query. **Here we use the model as the lineage focal point and find the Endpoints and Datasets associated with it.**
###Code
# Get the ModelArtifact
model_artifact_summary = list(Artifact.list(source_uri=model_package_arn))[0]
model_artifact = ModelArtifact.load(artifact_arn=model_artifact_summary.artifact_arn)
query_filter = LineageFilter(
entities=[LineageEntityEnum.ARTIFACT],
sources=[LineageSourceEnum.ENDPOINT, LineageSourceEnum.DATASET],
)
query_result = LineageQuery(sagemaker_session).query(
start_arns=[model_artifact.artifact_arn], # Model is the starting artifact
query_filter=query_filter,
# Find all the entities that descend from the model, i.e. the endpoint
direction=LineageQueryDirectionEnum.DESCENDANTS,
include_edges=False,
)
associations = []
for vertex in query_result.vertices:
associations.append(vertex.to_lineage_object().source.source_uri)
query_result = LineageQuery(sagemaker_session).query(
start_arns=[model_artifact.artifact_arn], # Model is the starting artifact
query_filter=query_filter,
# Find all the entities that ascend from the model, i.e. the datasets
direction=LineageQueryDirectionEnum.ASCENDANTS,
include_edges=False,
)
for vertex in query_result.vertices:
associations.append(vertex.to_lineage_object().source.source_uri)
pp.pprint(associations)
###Output
_____no_output_____
###Markdown
Using LineageQueryDirectionEnum.BOTHWhen the direction is set to `BOTH`, the query traverses the graph to find ascendant and descendant relationships, and the traversal takes place not only from the starting node but from each node that is visited. For example, if the training job is run twice and both models generated by the training job are deployed to endpoints, the result of the query with direction set to `BOTH` will show both endpoints. This is because the same image is used for training and deploying the model. Since the image is common to the model (`start_arn`) and both the endpoints, it will appear in the query result.
###Code
query_filter = LineageFilter(
entities=[LineageEntityEnum.ARTIFACT],
sources=[LineageSourceEnum.ENDPOINT, LineageSourceEnum.DATASET],
)
query_result = LineageQuery(sagemaker_session).query(
start_arns=[model_artifact.artifact_arn], # Model is the starting artifact
query_filter=query_filter,
# This specifies that the query should look for associations both ascending and descending for the start
direction=LineageQueryDirectionEnum.BOTH,
include_edges=False,
)
associations = []
for vertex in query_result.vertices:
associations.append(vertex.to_lineage_object().source.source_uri)
pp.pprint(associations)
###Output
_____no_output_____
###Markdown
Directions in `LineageQuery` - `ASCENDANTS` vs. `DESCENDANTS`To understand the direction in the Lineage Graph, take the following entity relationship graph - Dataset -> Training Job -> Model -> EndpointThe endpoint is a **descendant** of the model, and the model is a **descendant** of the dataset. Similarly, the model is an **ascendant** of the endpoint. The `direction` parameter can be used to specify whether the query should return entities that are descendants or ascendants of the entity in `start_arns`. If `start_arns` contains a model and the direction is `DESCENDANTS`, the query will return the endpoint. If the direction is `ASCENDANTS`, the query will return the dataset.
###Code
# In this example, we'll look at the impact of specifying the direction as ASCENDANT or DESCENDANT in a `LineageQuery`.
query_filter = LineageFilter(
entities=[LineageEntityEnum.ARTIFACT],
sources=[
LineageSourceEnum.ENDPOINT,
LineageSourceEnum.MODEL,
LineageSourceEnum.DATASET,
LineageSourceEnum.TRAINING_JOB,
],
)
query_result = LineageQuery(sagemaker_session).query(
start_arns=[model_artifact.artifact_arn],
query_filter=query_filter,
direction=LineageQueryDirectionEnum.ASCENDANTS,
include_edges=False,
)
ascendant_artifacts = []
# The lineage entity returned for the Training Job is a TrialComponent which can't be converted to a
# lineage object using the method `to_lineage_object()` so we extract the TrialComponent ARN.
for vertex in query_result.vertices:
try:
ascendant_artifacts.append(vertex.to_lineage_object().source.source_uri)
except:
ascendant_artifacts.append(vertex.arn)
print("Ascendant artifacts : ")
pp.pprint(ascendant_artifacts)
query_result = LineageQuery(sagemaker_session).query(
start_arns=[model_artifact.artifact_arn],
query_filter=query_filter,
direction=LineageQueryDirectionEnum.DESCENDANTS,
include_edges=False,
)
descendant_artifacts = []
for vertex in query_result.vertices:
try:
descendant_artifacts.append(vertex.to_lineage_object().source.source_uri)
except:
# Handling TrialComponents.
descendant_artifacts.append(vertex.arn)
print("Descendant artifacts : ")
pp.pprint(descendant_artifacts)
###Output
_____no_output_____
###Markdown
SDK helper FunctionsThe classes `EndpointContext`, `ModelArtifact`, and `DatasetArtifact` have helper functions that are wrappers over the `LineageQuery` API to make certain lineage queries easier to leverage.
###Code
# Find all the datasets associated with this endpoint
datasets = []
dataset_artifacts = endpoint_context.dataset_artifacts()
for dataset in dataset_artifacts:
datasets.append(dataset.source.source_uri)
print("Datasets : ", datasets)
# Find the training jobs associated with the endpoint
training_job_artifacts = endpoint_context.training_job_arns()
training_jobs = []
for training_job in training_job_artifacts:
training_jobs.append(training_job)
print("Training Jobs : ", training_jobs)
# Get the ARN for the pipeline execution associated with this endpoint (if any)
pipeline_executions = endpoint_context.pipeline_execution_arn()
if pipeline_executions:
    for pipeline in pipeline_executions:
print(pipeline)
# Here we use the `ModelArtifact` class to find all the datasets and endpoints associated with the model
dataset_artifacts = model_artifact.dataset_artifacts()
endpoint_contexts = model_artifact.endpoint_contexts()
datasets = [dataset.source.source_uri for dataset in dataset_artifacts]
endpoints = [endpoint.source.source_uri for endpoint in endpoint_contexts]
print("Datasets associated with this model : ")
pp.pprint(datasets)
print("Endpoints associated with this model : ")
pp.pprint(endpoints)
# Here we use the `DatasetArtifact` class to find all the endpoints hosting models that were trained with a particular dataset
# Find the artifact associated with the dataset
dataset_artifact_arn = list(Artifact.list(source_uri=training_data))[0].artifact_arn
dataset_artifact = DatasetArtifact.load(artifact_arn=dataset_artifact_arn)
# Find the endpoints that used this training dataset
endpoint_contexts = dataset_artifact.endpoint_contexts()
endpoints = [endpoint.source.source_uri for endpoint in endpoint_contexts]
print("Endpoints associated with the training dataset {}".format(training_data))
pp.pprint(endpoints)
###Output
_____no_output_____
###Markdown
Lineage Graph VisualizationA helper class `Visualizer()` is provided in `visualizer.py` to help plot the lineage graph. When the query response is rendered, a graph with the lineage relationships from the `StartArns` will be displayed. From the `StartArns` the visualization will show the relationships with the other lineage entities returned in the `query_lineage` API call.
###Code
# Graph APIs
# Here we use the boto3 `query_lineage` API to generate the query response to plot.
from visualizer import Visualizer
query_response = sm_client.query_lineage(
StartArns=[endpoint_context.context_arn], Direction="Ascendants", IncludeEdges=True
)
viz = Visualizer()
viz.render(query_response, "Endpoint")
query_response = sm_client.query_lineage(
StartArns=[model_artifact.artifact_arn], Direction="Ascendants", IncludeEdges=True
)
viz.render(query_response, "Model")
###Output
_____no_output_____
###Markdown
ConclusionThis notebook demonstrated the capabilities of SageMaker Lineage that make it easy for users to keep track of their complex ML workflows. Users can construct their own lineage queries using the `LineageQuery` API and `LineageFilter` or they can use the functions provided on the `EndpointContext`, `ModelArtifact`, and `DatasetArtifact` classes. In addition, the responses from lineage queries can be plotted using the helper class `Visualizer()` to better understand the relationship between the lineage entities. When using SageMaker Pipelines as part of their ML workflows, users can find Pipeline execution ARNs using the lineage APIs described in this notebook. CleanupIn this section we will clean up the resources created in this notebook.
###Code
# Delete endpoint
sm_client.delete_endpoint(EndpointName=endpoint_name)
# Delete the model package
sm_client.delete_model_package(ModelPackageName=model_package.model_package_arn)
# Delete the model package group
sm_client.delete_model_package_group(ModelPackageGroupName=model_package_group_name)
# Delete the experiment and trial within it
import time
def delete_experiment(experiment):
for trial_summary in experiment.list_trials():
trial = Trial.load(trial_name=trial_summary.trial_name)
for trial_component_summary in trial.list_trial_components():
tc = TrialComponent.load(
trial_component_name=trial_component_summary.trial_component_name
)
trial.remove_trial_component(tc)
try:
# comment out to keep trial components
tc.delete()
except:
# tc is associated with another trial
continue
# to prevent throttling
time.sleep(0.5)
trial.delete()
experiment_name = experiment.experiment_name
experiment.delete()
print(f"\nExperiment {experiment_name} deleted")
# Delete the Experiment and Trials within it
experiment = Experiment.load(experiment_name=exp.experiment_name)
delete_experiment(experiment)
###Output
_____no_output_____
###Markdown
Amazon SageMaker Multi-hop Lineage QueriesAmazon SageMaker Lineage tracks events that happen within SageMaker, allowing the relationships between them to be traced via a graph structure. SageMaker Lineage introduces a new API called `LineageQuery` that allows customers to query the lineage graph structure to discover relationships across their Machine Learning entities. Your machine learning workflows can generate deeply nested relationships; the lineage APIs allow you to answer questions about these relationships. For example, find all Data Sets that trained the model deployed to a given Endpoint or find all Models trained by a Data Set.The lineage graph is created automatically by SageMaker and you can directly create or modify your own lineage.In addition to the `LineageQuery` API, the SageMaker SDK provides wrapper functions that make it easy to run queries that span across multiple hops of the entity relationship graph. These APIs and helper functions are described in this notebook. Key Concepts* **Lineage Graph** - A connected graph tracing your machine learning workflow end to end. * **Artifacts** - Represents a URI addressable object or data. Artifacts are typically inputs or outputs to Actions. * **Actions** - Represents an action taken such as a computation, transformation, or job. * **Contexts** - Provides a method to logically group other entities.* **Associations** - A directed edge in the lineage graph that links two entities.* **Lineage Traversal** - Starting from an arbitrary point, trace the lineage graph to discover and analyze relationships between steps in your workflow.* **Experiments** - Experiment entities (Experiments, Trials, and Trial Components) are also part of the lineage graph and can be associated with Artifacts, Actions, or Contexts. Prerequisites[`sagemaker-experiments`](https://github.com/aws/sagemaker-experiments) and [`pyvis`](https://pyvis.readthedocs.io/en/latest/) are two Python libraries that need to be installed as part of this notebook execution. `pyvis` is a library designed for interactive network visualization and `sagemaker-experiments` gives users the ability to use SageMaker's Experiment Tracking capabilities. This notebook should be run with `Python 3.9` using the SageMaker Studio `Python3 (Data Science)` kernel. The `sagemaker` SDK version required for this notebook is `>2.70.0`.If running in SageMaker Classic Notebooks, use the `conda_python3` kernel. The AWS account running this notebook should have access to provision 2 instances of type `ml.m5.xlarge`. These instances are used for training and deploying a model. Let's start by installing preview wheels of the Python SDK, boto, and the AWS CLI
###Code
# Fallback in case wheels are unavailable
! pip install sagemaker botocore boto3 awscli --upgrade
import subprocess
def execute_cmd(cmd):
print(cmd)
output = subprocess.getstatusoutput(cmd)
return output
def _download_from_s3(_file_path):
_path = f"s3://reinvent21-sm-rc-wheels/{_file_path}"
print(f"Path is {_path}")
ls_cmd = f"aws s3 ls {_path}"
print(execute_cmd(ls_cmd))
cmd = f"aws s3 cp {_path} /tmp/"
print("Downloading: ", cmd)
return execute_cmd(cmd)
def _install_wheel(wheel_name):
cmd = f"pip install --no-deps --log /tmp/output3.log /tmp/{wheel_name} --force-reinstall"
ret = execute_cmd(cmd)
_name = wheel_name.split(".")[0]
_, _version = execute_cmd(f"python -c 'import {_name}; print({_name}.__version__)'")
for package in ["botocore", "sagemaker", "boto3", "awscli"]:
print(execute_cmd(f"python -c 'import {package}; print({package}.__version__)'"))
print(f"Installed {_name}:{_version}")
return ret
def install_sm_py_sdk():
pySDK_name = "sagemaker.tar.gz"
exit_code, _ = _download_from_s3("dist/sagemaker.tar.gz")
if not exit_code:
_install_wheel(pySDK_name)
else:
print(f"'{pySDK_name}' is not present in S3 Bucket. Installing from public PyPi...")
execute_cmd("pip install sagemaker")
def install_boto_wheels():
WHEELS = ["botocore.tar.gz", "boto3.tar.gz", "awscli.tar.gz"]
for wheel_name in WHEELS:
_path = f"boto3/{wheel_name}"
exit_code, _ = _download_from_s3(_path)
if not exit_code:
_install_wheel(wheel_name)
else:
print(f"'{wheel_name}' is not present in S3 Bucket. Ignoring...")
install_boto_wheels()
install_sm_py_sdk()
!pip install sagemaker-experiments pyvis
###Output
_____no_output_____
###Markdown
Notebook OverviewThis notebook demonstrates how to use SageMaker Lineage APIs to query multi-hop relationships across the lineage graph. Multi-hop relationships are those that span beyond single entity relationships, e.g. Model -> Endpoint, Training Job -> Model. Multi-hop queries allow users to search for distant relationships across the Lineage Graph such as Endpoint -> Data Set.To demonstrate these capabilities, in this notebook we create a training job, register a model to the Model Registry, and deploy the model to an Endpoint.
###Code
import os
import boto3
import sagemaker
import pprint
from botocore.config import Config
boto_session = boto3.Session()
config = Config(retries={"max_attempts": 50, "mode": "adaptive"})
sm_client = boto3.client("sagemaker", config=config)
region = boto_session.region_name
sagemaker_session = sagemaker.Session(sagemaker_client=sm_client, boto_session=boto_session)
default_bucket = sagemaker_session.default_bucket()
role = sagemaker.get_execution_role()
# Helper function to print query outputs
pp = pprint.PrettyPrinter()
from datetime import datetime
training_instance_type = "ml.m5.xlarge"
inference_instance_type = "ml.m5.xlarge"
s3_prefix = "multihop-example"
unique_id = str(datetime.now().timestamp()).split(".")[0]
###Output
_____no_output_____
###Markdown
Create an Experiment and Trial for a training job
###Code
from smexperiments.experiment import Experiment
from smexperiments.trial import Trial
from smexperiments.trial_component import TrialComponent
experiment_name = f"MultihopQueryExperiment-{unique_id}"
exp = Experiment.create(experiment_name=experiment_name, sagemaker_boto_client=sm_client)
trial = Trial.create(
experiment_name=exp.experiment_name,
trial_name=f"MultihopQueryTrial-{unique_id}",
sagemaker_boto_client=sm_client,
)
print(exp.experiment_name)
print(trial.trial_name)
###Output
_____no_output_____
###Markdown
Training DataCreating a `data/` directory to store the preprocessed [UCI Abalone](https://archive.ics.uci.edu/ml/datasets/abalone) dataset. The preprocessing is done using the preprocessing script defined in [this](https://github.com/aws/amazon-sagemaker-examples/blob/master/sagemaker-pipelines/tabular/abalone_build_train_deploy/sagemaker-pipelines-preprocess-train-evaluate-batch-transform.ipynb) notebook. Then training and validation data is uploaded to S3 so that it can be used in the training and inference job.
###Code
default_bucket
if not os.path.exists("./data/"):
os.makedirs("./data/")
print("Directory Created ")
else:
print("Directory already exists")
# Download the processed abalone dataset files
s3 = boto3.client("s3")
s3.download_file(
f"sagemaker-sample-files",
"datasets/tabular/uci_abalone/preprocessed/test.csv",
"./data/test.csv",
)
s3.download_file(
f"sagemaker-sample-files",
"datasets/tabular/uci_abalone/preprocessed/train.csv",
"./data/train.csv",
)
s3.download_file(
f"sagemaker-sample-files",
"datasets/tabular/uci_abalone/preprocessed/validation.csv",
"./data/validation.csv",
)
# Upload the datasets to the SageMaker session default bucket
boto3.Session().resource("s3").Bucket(default_bucket).Object(
"experiments-demo/train.csv"
).upload_file("data/train.csv")
boto3.Session().resource("s3").Bucket(default_bucket).Object(
"experiments-demo/validation.csv"
).upload_file("data/validation.csv")
training_data = f"s3://{default_bucket}/experiments-demo/train.csv"
validation_data = f"s3://{default_bucket}/experiments-demo/validation.csv"
###Output
_____no_output_____
###Markdown
Create a training jobWe train a simple XGBoost model on the [Abalone dataset](https://www.google.com/search?client=firefox-b-1-d&q=abalone+dataset). `sagemaker.image_uris.retrieve()` is used to get the sagemaker container for XGBoost so that it can be used in the Estimator. In the `.fit()` function, we pass in a training and validation dataset along with an `experiment_config`. The `experiment_config` ensures that the metrics, parameters, and artifacts associated with this training job are logged to the experiment and trial created above.
###Code
from sagemaker.estimator import Estimator
model_path = f"s3://{default_bucket}/{s3_prefix}/xgb_model"
training_instance_type = "ml.m5.xlarge"
image_uri = sagemaker.image_uris.retrieve(
framework="xgboost",
region=region,
version="1.0-1",
py_version="py3",
instance_type=training_instance_type,
)
xgb_train = Estimator(
image_uri=image_uri,
instance_type=training_instance_type,
instance_count=1,
output_path=model_path,
sagemaker_session=sagemaker_session,
role=role,
)
xgb_train.set_hyperparameters(
objective="reg:linear",
num_round=50,
max_depth=5,
eta=0.2,
gamma=4,
min_child_weight=6,
subsample=0.7,
silent=0,
)
from sagemaker.inputs import TrainingInput
xgb_train.fit(
inputs={
"train": TrainingInput(
s3_data=training_data,
content_type="text/csv",
),
"validation": TrainingInput(
s3_data=validation_data,
content_type="text/csv",
),
},
experiment_config={
"ExperimentName": experiment_name,
"TrialName": trial.trial_name,
"TrialComponentDisplayName": "MultiHopQueryTrialComponent",
},
)
###Output
_____no_output_____
###Markdown
Create a Model Package Group for the trained model to be registeredCreate a new Model Package Group or use an existing one to register the model
###Code
model_package_group_name = "lineage-test-" + unique_id
mpg = sm_client.create_model_package_group(ModelPackageGroupName=model_package_group_name)
mpg_arn = mpg["ModelPackageGroupArn"]
###Output
_____no_output_____
###Markdown
Register the model in the Model RegistryOnce the model is registered, you will see it in the Model Registry tab of the SageMaker Studio UI. Here the model is registered with the `approval_status` set to "Approved". By default, a model is registered with the `approval_status` set to "PendingManualApproval"; users can then navigate to the Model Registry and manually approve the model based on any criteria set for model evaluation, or this can be done via the API.
###Code
inference_instance_type = "ml.m5.xlarge"
model_package = xgb_train.register(
model_package_group_name=mpg_arn,
inference_instances=[inference_instance_type],
transform_instances=[inference_instance_type],
content_types=["text/csv"],
response_types=["text/csv"],
approval_status="Approved",
)
model_package_arn = model_package.model_package_arn
print("Model Package ARN : ", model_package_arn)
###Output
_____no_output_____
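###Markdown
The cell above registers the package as already approved. The short cell below is an illustrative addition (not part of the original notebook) showing how a package registered with "PendingManualApproval" could later be approved through the API instead of the Studio UI; with the "Approved" status used above, the call is effectively a no-op.
###Code
# Illustrative sketch: approve a pending model package via the API.
# (Assumption: since approval_status="Approved" was used above, this is a no-op here.)
sm_client.update_model_package(
    ModelPackageArn=model_package_arn,
    ModelApprovalStatus="Approved",
    ApprovalDescription="Approved after offline evaluation",
)
###Output
_____no_output_____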
###Markdown
Deploy the model to a SageMaker EndpointA SageMaker Endpoint is used to host a model that can be used for inference. The type of endpoint deployed in this notebook is a real time inference endpoint. This is ideal for inference workloads where you have real-time, interactive, low latency requirements.
###Code
endpoint_name = "lineage-test-endpoint-" + unique_id
model_package.deploy(
endpoint_name=endpoint_name,
initial_instance_count=1,
instance_type=inference_instance_type,
)
# Get the endpoint ARN
endpoint_arn = sm_client.describe_endpoint(EndpointName=endpoint_name)["EndpointArn"]
print(endpoint_arn)
###Output
_____no_output_____
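###Markdown
The lineage queries below only need the endpoint to exist, but a quick smoke test confirms the deployment is actually serving. The cell below is a sketch added for illustration (it is not part of the original notebook) and it assumes the preprocessed abalone CSVs keep the target variable in the first column, so that column is dropped before a row is sent for prediction.
###Code
# Optional smoke test (illustrative sketch; assumes label-first CSV layout).
import boto3

smr_client = boto3.client("sagemaker-runtime")
with open("data/test.csv") as f:
    first_row = f.readline().strip().split(",")
payload = ",".join(first_row[1:])  # drop the label column (assumption)
response = smr_client.invoke_endpoint(
    EndpointName=endpoint_name,
    ContentType="text/csv",
    Body=payload,
)
print("Prediction:", response["Body"].read().decode("utf-8"))
###Output
_____no_output_____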
###Markdown
SageMaker Lineage QueriesWe explore SageMaker's lineage capabilities to traverse the relationships between the entities created in this notebook - datasets, model, endpoint, and training job.
###Code
from sagemaker.lineage.context import Context, EndpointContext
from sagemaker.lineage.action import Action
from sagemaker.lineage.association import Association
from sagemaker.lineage.artifact import Artifact, ModelArtifact, DatasetArtifact
from sagemaker.lineage.query import (
LineageQuery,
LineageFilter,
LineageSourceEnum,
LineageEntityEnum,
LineageQueryDirectionEnum,
)
###Output
_____no_output_____
###Markdown
Using the LineageQuery API to find entity associationsIn this section we use two APIs, `LineageQuery` and `LineageFilter`, to construct queries to answer questions about the Lineage Graph and extract entity relationships. LineageQuery parameters:* `start_arns`: A list of ARNs that will be used as the starting point for the query.* `direction`: The direction of the query.* `include_edges`: If true, return edges in addition to vertices.* `query_filter`: The query filter.LineageFilter parameters:* `entities`: A list of entity types (Artifact, Association, Action) to filter for when returning the results of `LineageQuery`.* `sources`: A list of source types (Endpoint, Model, Dataset) to filter for when returning the results of `LineageQuery`.A `Context` is automatically created when a SageMaker Endpoint is created, and an `Artifact` is automatically created when a Model is created in SageMaker.
###Code
# Find the endpoint context and model artifact that should be used for the lineage queries.
contexts = Context.list(source_uri=endpoint_arn)
context_name = list(contexts)[0].context_name
endpoint_context = EndpointContext.load(context_name=context_name)
###Output
_____no_output_____
###Markdown
Find all datasets associated with an Endpoint
###Code
# Define the LineageFilter to look for entities of type `ARTIFACT` and the source of type `DATASET`.
query_filter = LineageFilter(
entities=[LineageEntityEnum.ARTIFACT], sources=[LineageSourceEnum.DATASET]
)
# Providing this `LineageFilter` to the `LineageQuery` will construct a query that traverses through the given context `endpoint_context`
# and finds all the datasets associated with it.
query_result = LineageQuery(sagemaker_session).query(
start_arns=[endpoint_context.context_arn],
query_filter=query_filter,
direction=LineageQueryDirectionEnum.ASCENDANTS,
include_edges=False,
)
# Parse through the query results to get the lineage objects corresponding to the datasets
dataset_artifacts = []
for vertex in query_result.vertices:
dataset_artifacts.append(vertex.to_lineage_object().source.source_uri)
pp.pprint(dataset_artifacts)
###Output
_____no_output_____
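###Markdown
The same query can also return the association edges by setting `include_edges=True`. The cell below is an illustrative addition (not part of the original notebook) and assumes the SDK's `Edge` objects expose `source_arn`, `destination_arn`, and `association_type`, as in recent `sagemaker` releases.
###Code
# Illustrative sketch: repeat the dataset query, but also return the edges
# so the connections between vertices can be inspected without the visualizer.
edge_query_result = LineageQuery(sagemaker_session).query(
    start_arns=[endpoint_context.context_arn],
    query_filter=query_filter,
    direction=LineageQueryDirectionEnum.ASCENDANTS,
    include_edges=True,
)
for edge in edge_query_result.edges:
    print(f"{edge.source_arn} --[{edge.association_type}]--> {edge.destination_arn}")
###Output
_____no_output_____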
###Markdown
Find the models associated with an Endpoint
###Code
# Define the LineageFilter to look for entities of type `ARTIFACT` and the source of type `MODEL`.
query_filter = LineageFilter(
entities=[LineageEntityEnum.ARTIFACT], sources=[LineageSourceEnum.MODEL]
)
# Providing this `LineageFilter` to the `LineageQuery` will construct a query that traverses through the given context `endpoint_context`
# and finds all the models associated with it.
query_result = LineageQuery(sagemaker_session).query(
start_arns=[endpoint_context.context_arn],
query_filter=query_filter,
direction=LineageQueryDirectionEnum.ASCENDANTS,
include_edges=False,
)
# Parse through the query results to get the lineage objects corresponding to the model
model_artifacts = []
for vertex in query_result.vertices:
model_artifacts.append(vertex.to_lineage_object().source.source_uri)
# The results of the `LineageQuery` API call return the ARN of the model deployed to the endpoint along with
# the S3 URI to the model.tar.gz file associated with the model
pp.pprint(model_artifacts)
###Output
_____no_output_____
###Markdown
Find the trial components associated with the endpoint
###Code
# Define the LineageFilter to look for entities of type `TRIAL_COMPONENT` and the source of type `TRAINING_JOB`.
query_filter = LineageFilter(
entities=[LineageEntityEnum.TRIAL_COMPONENT],
sources=[LineageSourceEnum.TRAINING_JOB],
)
# Providing this `LineageFilter` to the `LineageQuery` will construct a query that traverses through the given context `endpoint_context`
# and finds all the training jobs (trial components) associated with it.
query_result = LineageQuery(sagemaker_session).query(
start_arns=[endpoint_context.context_arn],
query_filter=query_filter,
direction=LineageQueryDirectionEnum.ASCENDANTS,
include_edges=False,
)
# Parse through the query results to get the ARNs of the training jobs associated with this Endpoint
trial_components = []
for vertex in query_result.vertices:
trial_components.append(vertex.arn)
pp.pprint(trial_components)
###Output
_____no_output_____
###Markdown
Changing the focal point of lineageThe `LineageQuery` can be modified to have different `start_arns` which will change the focal point of lineage. In addition, the `LineageFilter` can take multiple sources and entities to expand the scope of the query. **Here we use the model as the lineage focal point and find the Endpoints and Datasets associated with it.**
###Code
# Get the ModelArtifact
model_artifact_summary = list(Artifact.list(source_uri=model_package_arn))[0]
model_artifact = ModelArtifact.load(artifact_arn=model_artifact_summary.artifact_arn)
query_filter = LineageFilter(
entities=[LineageEntityEnum.ARTIFACT],
sources=[LineageSourceEnum.ENDPOINT, LineageSourceEnum.DATASET],
)
query_result = LineageQuery(sagemaker_session).query(
start_arns=[model_artifact.artifact_arn], # Model is the starting artifact
query_filter=query_filter,
# Find all the entities that descend from the model, i.e. the endpoint
direction=LineageQueryDirectionEnum.DESCENDANTS,
include_edges=False,
)
associations = []
for vertex in query_result.vertices:
associations.append(vertex.to_lineage_object().source.source_uri)
query_result = LineageQuery(sagemaker_session).query(
start_arns=[model_artifact.artifact_arn], # Model is the starting artifact
query_filter=query_filter,
# Find all the entities that ascend from the model, i.e. the datasets
direction=LineageQueryDirectionEnum.ASCENDANTS,
include_edges=False,
)
for vertex in query_result.vertices:
associations.append(vertex.to_lineage_object().source.source_uri)
pp.pprint(associations)
###Output
_____no_output_____
###Markdown
Using LineageQueryDirectionEnum.BOTHWhen the direction is set to `BOTH`, the query traverses the graph to find both ascendant and descendant relationships, and the traversal takes place not only from the starting node but from each node that is visited. For example, if the training job is run twice and both models generated by the training job are deployed to endpoints, the result of the query with direction set to `BOTH` will show both endpoints. This is because the same image is used for training and deploying the model. Since the image is common to the model (`start_arn`) and both the endpoints, it will appear in the query result.
###Code
query_filter = LineageFilter(
entities=[LineageEntityEnum.ARTIFACT],
sources=[LineageSourceEnum.ENDPOINT, LineageSourceEnum.DATASET],
)
query_result = LineageQuery(sagemaker_session).query(
start_arns=[model_artifact.artifact_arn], # Model is the starting artifact
query_filter=query_filter,
# This specifies that the query should look for associations both ascending and descending for the start
direction=LineageQueryDirectionEnum.BOTH,
include_edges=False,
)
associations = []
for vertex in query_result.vertices:
associations.append(vertex.to_lineage_object().source.source_uri)
pp.pprint(associations)
###Output
_____no_output_____
###Markdown
Directions in `LineageQuery` - `ASCENDANTS` vs. `DESCENDANTS`To understand the direction in the Lineage Graph, take the following entity relationship graph - Dataset -> Training Job -> Model -> EndpointThe endpoint is a **descendant** of the model, and the model is a **descendant** of the dataset. Similarly, the model is an **ascendant** of the endpoint. The `direction` parameter can be used to specify whether the query should return entities that are descendants or ascendants of the entity in `start_arns`. If `start_arns` contains a model and the direction is `DESCENDANTS`, the query will return the endpoint. If the direction is `ASCENDANTS`, the query will return the dataset.
###Code
# In this example, we'll look at the impact of specifying the direction as ASCENDANT or DESCENDANT in a `LineageQuery`.
query_filter = LineageFilter(
entities=[LineageEntityEnum.ARTIFACT],
sources=[
LineageSourceEnum.ENDPOINT,
LineageSourceEnum.MODEL,
LineageSourceEnum.DATASET,
LineageSourceEnum.TRAINING_JOB,
],
)
query_result = LineageQuery(sagemaker_session).query(
start_arns=[model_artifact.artifact_arn],
query_filter=query_filter,
direction=LineageQueryDirectionEnum.ASCENDANTS,
include_edges=False,
)
ascendant_artifacts = []
# The lineage entity returned for the Training Job is a TrialComponent which can't be converted to a
# lineage object using the method `to_lineage_object()` so we extract the TrialComponent ARN.
for vertex in query_result.vertices:
try:
ascendant_artifacts.append(vertex.to_lineage_object().source.source_uri)
except:
ascendant_artifacts.append(vertex.arn)
print("Ascendant artifacts : ")
pp.pprint(ascendant_artifacts)
query_result = LineageQuery(sagemaker_session).query(
start_arns=[model_artifact.artifact_arn],
query_filter=query_filter,
direction=LineageQueryDirectionEnum.DESCENDANTS,
include_edges=False,
)
descendant_artifacts = []
for vertex in query_result.vertices:
try:
descendant_artifacts.append(vertex.to_lineage_object().source.source_uri)
except:
# Handling TrialComponents.
descendant_artifacts.append(vertex.arn)
print("Descendant artifacts : ")
pp.pprint(descendant_artifacts)
###Output
_____no_output_____
###Markdown
SDK helper FunctionsThe classes `EndpointContext`, `ModelArtifact`, and `DatasetArtifact` have helper functions that are wrappers over the `LineageQuery` API to make certain lineage queries easier to leverage.
###Code
# Find all the datasets associated with this endpoint
datasets = []
dataset_artifacts = endpoint_context.dataset_artifacts()
for dataset in dataset_artifacts:
datasets.append(dataset.source.source_uri)
print("Datasets : ", datasets)
# Find the training jobs associated with the endpoint
training_job_artifacts = endpoint_context.training_job_arns()
training_jobs = []
for training_job in training_job_artifacts:
training_jobs.append(training_job)
print("Training Jobs : ", training_jobs)
# Get the ARN for the pipeline execution associated with this endpoint (if any)
pipeline_executions = endpoint_context.pipeline_execution_arn()
if pipeline_executions:
    for pipeline in pipeline_executions:
print(pipeline)
# Here we use the `ModelArtifact` class to find all the datasets and endpoints associated with the model
dataset_artifacts = model_artifact.dataset_artifacts()
endpoint_contexts = model_artifact.endpoint_contexts()
datasets = [dataset.source.source_uri for dataset in dataset_artifacts]
endpoints = [endpoint.source.source_uri for endpoint in endpoint_contexts]
print("Datasets associated with this model : ")
pp.pprint(datasets)
print("Endpoints associated with this model : ")
pp.pprint(endpoints)
# Here we use the `DatasetArtifact` class to find all the endpoints hosting models that were trained with a particular dataset
# Find the artifact associated with the dataset
dataset_artifact_arn = list(Artifact.list(source_uri=training_data))[0].artifact_arn
dataset_artifact = DatasetArtifact.load(artifact_arn=dataset_artifact_arn)
# Find the endpoints that used this training dataset
endpoint_contexts = dataset_artifact.endpoint_contexts()
endpoints = [endpoint.source.source_uri for endpoint in endpoint_contexts]
print("Endpoints associated with the training dataset {}".format(training_data))
pp.pprint(endpoints)
###Output
_____no_output_____
###Markdown
Lineage Graph VisualizationA helper class `Visualizer()` is provided in `visualizer.py` to help plot the lineage graph. When the query response is rendered, a graph with the lineage relationships from the `StartArns` will be displayed. From the `StartArns` the visualization will show the relationships with the other lineage entities returned in the `query_lineage` API call.
###Code
# Graph APIs
# Here we use the boto3 `query_lineage` API to generate the query response to plot.
from visualizer import Visualizer
query_response = sm_client.query_lineage(
StartArns=[endpoint_context.context_arn], Direction="Ascendants", IncludeEdges=True
)
viz = Visualizer()
viz.render(query_response, "Endpoint")
query_response = sm_client.query_lineage(
StartArns=[model_artifact.artifact_arn], Direction="Ascendants", IncludeEdges=True
)
viz.render(query_response, "Model")
###Output
_____no_output_____
###Markdown
ConclusionThis notebook demonstrated the capabilities of SageMaker Lineage that make it easy for users to keep track of their complex ML workflows. Users can construct their own lineage queries using the `LineageQuery` API and `LineageFilter` or they can use the functions provided on the `EndpointContext`, `ModelArtifact`, and `DatasetArtifact` classes. In addition, the responses from lineage queries can be plotted using the helper class `Visualizer()` to better understand the relationship between the lineage entities. When using SageMaker Pipelines as part of their ML workflows, users can find Pipeline execution ARNs using the lineage APIs described in this notebook. CleanupIn this section we will clean up the resources created in this notebook.
###Code
# Delete endpoint
sm_client.delete_endpoint(EndpointName=endpoint_name)
# Delete the model package
sm_client.delete_model_package(ModelPackageName=model_package.model_package_arn)
# Delete the model package group
sm_client.delete_model_package_group(ModelPackageGroupName=model_package_group_name)
# Delete the experiment and trial within it
import time
def delete_experiment(experiment):
for trial_summary in experiment.list_trials():
trial = Trial.load(trial_name=trial_summary.trial_name)
for trial_component_summary in trial.list_trial_components():
tc = TrialComponent.load(
trial_component_name=trial_component_summary.trial_component_name
)
trial.remove_trial_component(tc)
try:
# comment out to keep trial components
tc.delete()
except:
# tc is associated with another trial
continue
# to prevent throttling
time.sleep(0.5)
trial.delete()
experiment_name = experiment.experiment_name
experiment.delete()
print(f"\nExperiment {experiment_name} deleted")
# Delete the Experiment and Trials within it
experiment = Experiment.load(experiment_name=exp.experiment_name)
delete_experiment(experiment)
###Output
_____no_output_____ |
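###Markdown
Note that the cleanup above removes the endpoint, model package, model package group, and experiment, but not the endpoint configuration or the SageMaker `Model` that `deploy()` created. The sketch below is an assumption rather than part of the original notebook; because the config name is looked up from the endpoint, the `describe_*` calls need to run before the endpoint itself is deleted.
###Code
# Optional extra cleanup (illustrative sketch; run the describe calls
# before `delete_endpoint` so the endpoint config name can still be resolved).
endpoint_config_name = sm_client.describe_endpoint(EndpointName=endpoint_name)[
    "EndpointConfigName"
]
model_names = [
    variant["ModelName"]
    for variant in sm_client.describe_endpoint_config(
        EndpointConfigName=endpoint_config_name
    )["ProductionVariants"]
]
sm_client.delete_endpoint_config(EndpointConfigName=endpoint_config_name)
for model_name in model_names:
    sm_client.delete_model(ModelName=model_name)
###Output
_____no_output_____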
NeolithicMath.ipynb | ###Markdown
[Digital Mathematics Curriculum](http://wikieducator.org/Digital_Math) Neolithic MathNeolithic Math provides a way to get back to basics, but without sacrificing the heuristics now believed to have guided many an ancient culture. Even though the tools were relatively primitive, we have ample evidence that geographic and astronomical information was embedded in these arts and crafts. We look at Stonehenge and further back to early hominids to establish our direction in time (towards the past); however, our historical approach is welcome to bring the storylines right up to the present. Then we switch to science fiction mode to speculate about the future, in Martian Math.In each age along the timeline, civilization takes risks ([Casino Math](CasinoMath.ipynb)) and expresses some economy or logistics capability ([Supermarket Math](SuperMarketMath.ipynb)).Given the historical approach, expect to focus on the evolution of mathematical concepts and notations, with attention to how they get passed on through various institutions (not only schools). Time & SpaceOur work here involves keeping track of time using calendars, and relating these calendars to astronomical cycles and relationships. With astronomical considerations comes geography and geodesy, and the history of map making. What is a map? What is a calendar?We will study the Gregorian Calendar, but also several others.We will not neglect to share multiple cosmologies right up to those of the present day.
###Code
import datetime
jan1_0001 = datetime.datetime(1,1,1)
jan1_0001.toordinal()
# uncomment the line below to inspect the object's documentation/source (IPython only)
# ??jan1_0001
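# A small illustrative check (not in the original notebook): the proleptic
# Gregorian calendar used by `datetime` repeats every 400 years, so one full
# cycle from 0001-01-01 to 0401-01-01 spans exactly 146097 days (20871 weeks).
jan1_0401 = datetime.datetime(401, 1, 1)
days_per_cycle = jan1_0401.toordinal() - jan1_0001.toordinal()
print(days_per_cycle, days_per_cycle / 7)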
###Output
_____no_output_____ |
notebooks_to_get_image_data_for_cnns/get_images_for_both.ipynb | ###Markdown
Create a new directory to store the images, and store image file names as indices so they correspond to the dataframe entries
###Code
# import urllib.request
# first_reset = False
# anger_count = 0
# non_anger_count = 0
# for i, row in df_anger.iterrows():
# print('image # : ' + str(i) + ' ' + row['image'])
# print(row['label numerical'])
# # train
# if i < 2410:
# if row['label numerical']:
# urllib.request.urlretrieve(row['image'], 'train/anger/anger_{}.jpg'.format(str(anger_count)))
# anger_count += 1
# else:
# urllib.request.urlretrieve(row['image'], 'train/non_anger/non_anger_{}.jpg'.format(str(non_anger_count)))
# non_anger_count += 1
# # test
# else:
# if first_reset == False:
# first_reset = True
# anger_count = 0
# non_anger_count = 0
# if row ['label numerical']:
# urllib.request.urlretrieve(row['image'], 'validation/anger/anger_{}.jpg'.format(str(anger_count)))
# anger_count += 1
# else:
# urllib.request.urlretrieve(row['image'], 'validation/non_anger/non_anger_{}.jpg'.format(str(non_anger_count)))
# non_anger_count += 1
# # urllib.request.urlretrieve(row['image'], 'images_from_dataset/{}.jpg'.format(str(i)))
import urllib.request
first_reset = False
both_count = 0
non_both_count = 0
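# The markdown above mentions creating the output directories; urlretrieve
# does not create them, so make sure they exist first (assumed folder layout).
import os
os.makedirs('both_classification/both', exist_ok=True)
os.makedirs('both_classification/non_both', exist_ok=True)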
for i, row in df_both.iterrows():
print('image # : ' + str(i) + ' ' + row['image'])
print(row['label numerical'])
if row['label numerical']:
urllib.request.urlretrieve(row['image'], 'both_classification/both/both_{}.jpg'.format(str(both_count)))
both_count += 1
else:
urllib.request.urlretrieve(row['image'], 'both_classification/non_both/non_both_{}.jpg'.format(str(non_both_count)))
non_both_count += 1
# from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
# train_datagen = ImageDataGenerator(
# rotation_range = 40,
# width_shift_range = 0.2,
# height_shift_range = 0.2,
# rescale = 1./255,
# shear_range = 0.2,
# zoom_range = 0.2,
# horizontal_flip = True)
# validation_datagen = ImageDataGenerator(rescale=1./255)
# train_generator = train_datagen.flow_from_directory(
# 'train',
# batch_size=32,
# class_mode='binary')
# validation_generator = validation_datagen.flow_from_directory(
# 'validation',
# batch_size=32,
# class_mode='binary')
###Output
_____no_output_____ |
docs/r2/image_summaries.ipynb | ###Markdown
Copyright 2019 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Displaying image data in TensorBoard View on TensorFlow.org Run in Google Colab View source on GitHub OverviewUsing the **TensorFlow Image Summary API,** you can easily log tensors and arbitrary images and view them in TensorBoard. This can be extremely helpful to sample and examine your input data, or to [visualize layer weights](http://cs231n.github.io/understanding-cnn/) and [generated tensors](https://hub.packtpub.com/generative-adversarial-networks-using-keras/). You can also log diagnostic data as images that can be helpful in the course of your model development.In this tutorial, you will learn how to use the Image Summary API to visualize tensors as images. You will also learn how to take an arbitrary image, convert it to a tensor, and visualize it in TensorBoard. You will work through a simple but real example that uses Image Summaries to help you understand how your model is performing. Setup
###Code
# Ensure TensorFlow 2.0 is installed.
!pip install -q tf-nightly-2.0-preview
# Load the TensorBoard notebook extension.
%load_ext tensorboard.notebook
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import io
import itertools
from packaging import version
from six.moves import range
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
import numpy as np
import sklearn.metrics
print("TensorFlow version: ", tf.__version__)
assert version.parse(tf.__version__).release[0] >= 2, \
"This notebook requires TensorFlow 2.0 or above."
###Output
TensorFlow version: 2.0.0-dev20190228
###Markdown
Download the Fashion-MNIST datasetYou're going to construct a simple neural network to classify images in the [Fashion-MNIST](https://research.zalando.com/welcome/mission/research-projects/fashion-mnist/) dataset. This dataset consists of 70,000 28x28 grayscale images of fashion products from 10 categories, with 7,000 images per category.First, download the data:
###Code
# Download the data. The data is already divided into train and test.
# The labels are integers representing classes.
fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = \
fashion_mnist.load_data()
# Names of the integer classes, i.e., 0 -> T-shirt/top, 1 -> Trouser, etc.
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
###Output
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/train-labels-idx1-ubyte.gz
32768/29515 [=================================] - 0s 0us/step
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/train-images-idx3-ubyte.gz
26427392/26421880 [==============================] - 0s 0us/step
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/t10k-labels-idx1-ubyte.gz
8192/5148 [===============================================] - 0s 0us/step
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/t10k-images-idx3-ubyte.gz
4423680/4422102 [==============================] - 0s 0us/step
###Markdown
Visualizing a single imageTo understand how the Image Summary API works, you're now going to simply log the first training image in your training set in TensorBoard.Before you do that, examine the shape of your training data:
###Code
print("Shape: ", train_images[0].shape)
print("Label: ", train_labels[0], "->", class_names[train_labels[0]])
###Output
Shape: (28, 28)
Label: 9 -> Ankle boot
###Markdown
Notice that the shape of each image in the data set is a rank-2 tensor of shape (28, 28), representing the height and the width.However, ```tf.summary.image()``` expects a rank-4 tensor containing ```(batch_size, height, width, channels)```. Therefore, the tensors need to be reshaped. You're logging only one image, so ```batch_size``` is 1. The images are grayscale, so set ```channels``` to 1.
###Code
# Reshape the image for the Summary API.
img = np.reshape(train_images[0], (-1, 28, 28, 1))
###Output
_____no_output_____
###Markdown
You're now ready to log this image and view it in TensorBoard.
###Code
# Clear out any prior log data.
!rm -rf logs
# Sets up a timestamped log directory.
logdir = "logs/train_data/" + datetime.now().strftime("%Y%m%d-%H%M%S")
# Creates a file writer for the log directory.
file_writer = tf.summary.create_file_writer(logdir)
# Using the file writer, log the reshaped image.
with file_writer.as_default():
tf.summary.image("Training data", img, step=0)
###Output
_____no_output_____
###Markdown
Now, use TensorBoard to examine the image. Wait a few seconds for the UI to spin up.
###Code
%tensorboard --logdir logs/train_data
###Output
_____no_output_____
###Markdown
The "Images" tab displays the image you just logged. It's an "ankle boot". The image is scaled to a default size for easier viewing. If you want to view the unscaled original image, check "Show actual image size" at the upper left. Play with the brightness and contrast sliders to see how they affect the image pixels. Visualizing multiple imagesLogging one tensor is great, but what if you wanted to log multiple training examples?Simply specify the number of images you want to log when passing data to ```tf.summary.image()```.
###Code
with file_writer.as_default():
# Don't forget to reshape.
images = np.reshape(train_images[0:25], (-1, 28, 28, 1))
tf.summary.image("25 training data examples", images, max_outputs=25, step=0)
%tensorboard --logdir logs/train_data
###Output
_____no_output_____
###Markdown
Logging arbitrary image dataWhat if you want to visualize an image that's not a tensor, such as an image generated by [matplotlib](https://matplotlib.org/)?You need some boilerplate code to convert the plot to a tensor, but after that, you're good to go.In the code below, you'll log the first 25 images as a nice grid using matplotlib's ```subplot()``` function. You'll then view the grid in TensorBoard:
###Code
# Clear out prior logging data.
!rm -rf logs/plots
logdir = "logs/plots/" + datetime.now().strftime("%Y%m%d-%H%M%S")
file_writer = tf.summary.create_file_writer(logdir)
def plot_to_image(figure):
"""Converts the matplotlib plot specified by 'figure' to a PNG image and
returns it. The supplied figure is closed and inaccessible after this call."""
# Save the plot to a PNG in memory.
buf = io.BytesIO()
plt.savefig(buf, format='png')
# Closing the figure prevents it from being displayed directly inside
# the notebook.
plt.close(figure)
buf.seek(0)
# Convert PNG buffer to TF image
image = tf.image.decode_png(buf.getvalue(), channels=4)
# Add the batch dimension
image = tf.expand_dims(image, 0)
return image
def image_grid():
"""Return a 5x5 grid of the MNIST images as a matplotlib figure."""
# Create a figure to contain the plot.
figure = plt.figure(figsize=(10,10))
for i in range(25):
# Start next subplot.
plt.subplot(5, 5, i + 1, title=class_names[train_labels[i]])
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(train_images[i], cmap=plt.cm.binary)
return figure
# Prepare the plot
figure = image_grid()
# Convert to image and log
with file_writer.as_default():
tf.summary.image("Training data", plot_to_image(figure), step=0)
%tensorboard --logdir logs/plots
###Output
_____no_output_____
###Markdown
Building an image classifierNow put this all together with a real example. After all, you're here to do machine learning and not plot pretty pictures!You're going to use image summaries to understand how well your model is doing while training a simple classifier for the Fashion-MNIST dataset. First, create a very simple model and compile it, setting up the optimizer and loss function. The compile step also specifies that you want to log the accuracy of the classifier along the way.
###Code
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=(28, 28)),
keras.layers.Dense(32, activation='relu'),
keras.layers.Dense(10, activation='softmax')
])
model.compile(
optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy']
)
###Output
_____no_output_____
###Markdown
When training a classifier, it's useful to see the [confusion matrix](https://en.wikipedia.org/wiki/Confusion_matrix). The confusion matrix gives you detailed knowledge of how your classifier is performing on test data.Define a function that calculates the confusion matrix. You'll use a convenient [Scikit-learn](https://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html) function to do this, and then plot it using matplotlib.
###Code
def plot_confusion_matrix(cm, class_names):
"""
Returns a matplotlib figure containing the plotted confusion matrix.
Args:
cm (array, shape = [n, n]): a confusion matrix of integer classes
class_names (array, shape = [n]): String names of the integer classes
"""
figure = plt.figure(figsize=(8, 8))
plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
plt.title("Confusion matrix")
plt.colorbar()
tick_marks = np.arange(len(class_names))
plt.xticks(tick_marks, class_names, rotation=45)
plt.yticks(tick_marks, class_names)
# Normalize the confusion matrix.
cm = np.around(cm.astype('float') / cm.sum(axis=1)[:, np.newaxis], decimals=2)
# Use white text if squares are dark; otherwise black.
threshold = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
color = "white" if cm[i, j] > threshold else "black"
plt.text(j, i, cm[i, j], horizontalalignment="center", color=color)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
return figure
###Output
_____no_output_____
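###Markdown
Before wiring the helper into training, it can be handy to render it once on a hand-made confusion matrix. The cell below is an illustrative addition (not part of the original tutorial) that simply confirms `plot_confusion_matrix` and `plot_to_image` compose into the rank-4 image tensor that `tf.summary.image` expects.
###Code
# Illustrative sanity check on a tiny hand-made confusion matrix.
demo_cm = sklearn.metrics.confusion_matrix([0, 1, 2, 2], [0, 2, 2, 2], labels=[0, 1, 2])
demo_image = plot_to_image(plot_confusion_matrix(demo_cm, class_names=["a", "b", "c"]))
print(demo_image.shape)  # (1, height, width, 4), ready for tf.summary.image
###Output
_____no_output_____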
###Markdown
You're now ready to train the classifier and regularly log the confusion matrix along the way.Here's what you'll do:1. Create the [Keras TensorBoard callback](https://www.tensorflow.org/api_docs/python/tf/keras/callbacks/TensorBoard) to log basic metrics2. Create a [Keras LambdaCallback](https://www.tensorflow.org/api_docs/python/tf/keras/callbacks/LambdaCallback) to log the confusion matrix at the end of every epoch3. Train the model using Model.fit(), making sure to pass both callbacksAs training progresses, scroll down to see TensorBoard start up.
###Code
# Clear out prior logging data.
!rm -rf logs/image
logdir = "logs/image/" + datetime.now().strftime("%Y%m%d-%H%M%S")
# Define the basic TensorBoard callback.
tensorboard_callback = keras.callbacks.TensorBoard(log_dir=logdir)
file_writer_cm = tf.summary.create_file_writer(logdir + '/cm')
def log_confusion_matrix(epoch, logs):
# Use the model to predict the values from the validation dataset.
test_pred_raw = model.predict(test_images)
test_pred = np.argmax(test_pred_raw, axis=1)
# Calculate the confusion matrix.
cm = sklearn.metrics.confusion_matrix(test_labels, test_pred)
# Log the confusion matrix as an image summary.
figure = plot_confusion_matrix(cm, class_names=class_names)
cm_image = plot_to_image(figure)
# Log the confusion matrix as an image summary.
with file_writer_cm.as_default():
tf.summary.image("Confusion Matrix", cm_image, step=epoch)
# Define the per-epoch callback.
cm_callback = keras.callbacks.LambdaCallback(on_epoch_end=log_confusion_matrix)
# Start TensorBoard.
%tensorboard --logdir logs/image
# Train the classifier.
model.fit(
train_images,
train_labels,
epochs=5,
verbose=0, # Suppress chatty output
callbacks=[tensorboard_callback, cm_callback],
validation_data=(test_images, test_labels),
)
###Output
_____no_output_____
###Markdown
Copyright 2019 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Displaying image data in TensorBoard View on TensorFlow.org Run in Google Colab View source on GitHub OverviewUsing the **TensorFlow Image Summary API,** you can easily log tensors and arbitrary images and view them in TensorBoard. This can be extremely helpful to sample and examine your input data, or to [visualize layer weights](http://cs231n.github.io/understanding-cnn/) and [generated tensors](https://hub.packtpub.com/generative-adversarial-networks-using-keras/). You can also log diagnostic data as images that can be helpful in the course of your model development.In this tutorial, you will learn how to use the Image Summary API to visualize tensors as images. You will also learn how to take an arbitrary image, convert it to a tensor, and visualize it in TensorBoard. You will work through a simple but real example that uses Image Summaries to help you understand how your model is performing. Setup
###Code
# Ensure TensorFlow 2.0 is installed.
!pip install -q tf-nightly-2.0-preview
# Load the TensorBoard notebook extension.
%load_ext tensorboard
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import io
import itertools
from packaging import version
from six.moves import range
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
import numpy as np
import sklearn.metrics
print("TensorFlow version: ", tf.__version__)
assert version.parse(tf.__version__).release[0] >= 2, \
"This notebook requires TensorFlow 2.0 or above."
###Output
TensorFlow version: 2.0.0-dev20190228
###Markdown
Download the Fashion-MNIST datasetYou're going to construct a simple neural network to classify images in the [Fashion-MNIST](https://research.zalando.com/welcome/mission/research-projects/fashion-mnist/) dataset. This dataset consists of 70,000 28x28 grayscale images of fashion products from 10 categories, with 7,000 images per category.First, download the data:
###Code
# Download the data. The data is already divided into train and test.
# The labels are integers representing classes.
fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = \
fashion_mnist.load_data()
# Names of the integer classes, i.e., 0 -> T-shirt/top, 1 -> Trouser, etc.
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
###Output
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/train-labels-idx1-ubyte.gz
32768/29515 [=================================] - 0s 0us/step
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/train-images-idx3-ubyte.gz
26427392/26421880 [==============================] - 0s 0us/step
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/t10k-labels-idx1-ubyte.gz
8192/5148 [===============================================] - 0s 0us/step
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/t10k-images-idx3-ubyte.gz
4423680/4422102 [==============================] - 0s 0us/step
###Markdown
Visualizing a single imageTo understand how the Image Summary API works, you're now going to simply log the first training image in your training set in TensorBoard.Before you do that, examine the shape of your training data:
###Code
print("Shape: ", train_images[0].shape)
print("Label: ", train_labels[0], "->", class_names[train_labels[0]])
###Output
Shape: (28, 28)
Label: 9 -> Ankle boot
###Markdown
Notice that the shape of each image in the data set is a rank-2 tensor of shape (28, 28), representing the height and the width.However, ```tf.summary.image()``` expects a rank-4 tensor containing ```(batch_size, height, width, channels)```. Therefore, the tensors need to be reshaped. You're logging only one image, so ```batch_size``` is 1. The images are grayscale, so set ```channels``` to 1.
###Code
# Reshape the image for the Summary API.
img = np.reshape(train_images[0], (-1, 28, 28, 1))
###Output
_____no_output_____
###Markdown
You're now ready to log this image and view it in TensorBoard.
###Code
# Clear out any prior log data.
!rm -rf logs
# Sets up a timestamped log directory.
logdir = "logs/train_data/" + datetime.now().strftime("%Y%m%d-%H%M%S")
# Creates a file writer for the log directory.
file_writer = tf.summary.create_file_writer(logdir)
# Using the file writer, log the reshaped image.
with file_writer.as_default():
tf.summary.image("Training data", img, step=0)
###Output
_____no_output_____
###Markdown
Now, use TensorBoard to examine the image. Wait a few seconds for the UI to spin up.
###Code
%tensorboard --logdir logs/train_data
###Output
_____no_output_____
###Markdown
The "Images" tab displays the image you just logged. It's an "ankle boot". The image is scaled to a default size for easier viewing. If you want to view the unscaled original image, check "Show actual image size" at the upper left. Play with the brightness and contrast sliders to see how they affect the image pixels. Visualizing multiple imagesLogging one tensor is great, but what if you wanted to log multiple training examples?Simply specify the number of images you want to log when passing data to ```tf.summary.image()```.
###Code
with file_writer.as_default():
# Don't forget to reshape.
images = np.reshape(train_images[0:25], (-1, 28, 28, 1))
tf.summary.image("25 training data examples", images, max_outputs=25, step=0)
%tensorboard --logdir logs/train_data
###Output
_____no_output_____
###Markdown
Logging arbitrary image dataWhat if you want to visualize an image that's not a tensor, such as an image generated by [matplotlib](https://matplotlib.org/)?You need some boilerplate code to convert the plot to a tensor, but after that, you're good to go.In the code below, you'll log the first 25 images as a nice grid using matplotlib's ```subplot()``` function. You'll then view the grid in TensorBoard:
###Code
# Clear out prior logging data.
!rm -rf logs/plots
logdir = "logs/plots/" + datetime.now().strftime("%Y%m%d-%H%M%S")
file_writer = tf.summary.create_file_writer(logdir)
def plot_to_image(figure):
"""Converts the matplotlib plot specified by 'figure' to a PNG image and
returns it. The supplied figure is closed and inaccessible after this call."""
# Save the plot to a PNG in memory.
buf = io.BytesIO()
plt.savefig(buf, format='png')
# Closing the figure prevents it from being displayed directly inside
# the notebook.
plt.close(figure)
buf.seek(0)
# Convert PNG buffer to TF image
image = tf.image.decode_png(buf.getvalue(), channels=4)
# Add the batch dimension
image = tf.expand_dims(image, 0)
return image
def image_grid():
"""Return a 5x5 grid of the MNIST images as a matplotlib figure."""
# Create a figure to contain the plot.
figure = plt.figure(figsize=(10,10))
for i in range(25):
# Start next subplot.
plt.subplot(5, 5, i + 1, title=class_names[train_labels[i]])
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(train_images[i], cmap=plt.cm.binary)
return figure
# Prepare the plot
figure = image_grid()
# Convert to image and log
with file_writer.as_default():
tf.summary.image("Training data", plot_to_image(figure), step=0)
%tensorboard --logdir logs/plots
###Output
_____no_output_____
###Markdown
Building an image classifierNow put this all together with a real example. After all, you're here to do machine learning and not plot pretty pictures!You're going to use image summaries to understand how well your model is doing while training a simple classifier for the Fashion-MNIST dataset. First, create a very simple model and compile it, setting up the optimizer and loss function. The compile step also specifies that you want to log the accuracy of the classifier along the way.
###Code
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=(28, 28)),
keras.layers.Dense(32, activation='relu'),
keras.layers.Dense(10, activation='softmax')
])
model.compile(
optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy']
)
###Output
_____no_output_____
###Markdown
When training a classifier, it's useful to see the [confusion matrix](https://en.wikipedia.org/wiki/Confusion_matrix). The confusion matrix gives you detailed knowledge of how your classifier is performing on test data.Define a function that calculates the confusion matrix. You'll use a convenient [Scikit-learn](https://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html) function to do this, and then plot it using matplotlib.
###Code
def plot_confusion_matrix(cm, class_names):
"""
Returns a matplotlib figure containing the plotted confusion matrix.
Args:
cm (array, shape = [n, n]): a confusion matrix of integer classes
class_names (array, shape = [n]): String names of the integer classes
"""
figure = plt.figure(figsize=(8, 8))
plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
plt.title("Confusion matrix")
plt.colorbar()
tick_marks = np.arange(len(class_names))
plt.xticks(tick_marks, class_names, rotation=45)
plt.yticks(tick_marks, class_names)
# Normalize the confusion matrix.
cm = np.around(cm.astype('float') / cm.sum(axis=1)[:, np.newaxis], decimals=2)
# Use white text if squares are dark; otherwise black.
threshold = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
color = "white" if cm[i, j] > threshold else "black"
plt.text(j, i, cm[i, j], horizontalalignment="center", color=color)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
return figure
###Output
_____no_output_____
###Markdown
You're now ready to train the classifier and regularly log the confusion matrix along the way.Here's what you'll do:1. Create the [Keras TensorBoard callback](https://www.tensorflow.org/api_docs/python/tf/keras/callbacks/TensorBoard) to log basic metrics2. Create a [Keras LambdaCallback](https://www.tensorflow.org/api_docs/python/tf/keras/callbacks/LambdaCallback) to log the confusion matrix at the end of every epoch3. Train the model using Model.fit(), making sure to pass both callbacksAs training progresses, scroll down to see TensorBoard start up.
###Code
# Clear out prior logging data.
!rm -rf logs/image
logdir = "logs/image/" + datetime.now().strftime("%Y%m%d-%H%M%S")
# Define the basic TensorBoard callback.
tensorboard_callback = keras.callbacks.TensorBoard(log_dir=logdir)
file_writer_cm = tf.summary.create_file_writer(logdir + '/cm')
def log_confusion_matrix(epoch, logs):
# Use the model to predict the values from the validation dataset.
test_pred_raw = model.predict(test_images)
test_pred = np.argmax(test_pred_raw, axis=1)
# Calculate the confusion matrix.
cm = sklearn.metrics.confusion_matrix(test_labels, test_pred)
# Log the confusion matrix as an image summary.
figure = plot_confusion_matrix(cm, class_names=class_names)
cm_image = plot_to_image(figure)
# Log the confusion matrix as an image summary.
with file_writer_cm.as_default():
tf.summary.image("Confusion Matrix", cm_image, step=epoch)
# Define the per-epoch callback.
cm_callback = keras.callbacks.LambdaCallback(on_epoch_end=log_confusion_matrix)
# Start TensorBoard.
%tensorboard --logdir logs/image
# Train the classifier.
model.fit(
train_images,
train_labels,
epochs=5,
verbose=0, # Suppress chatty output
callbacks=[tensorboard_callback, cm_callback],
validation_data=(test_images, test_labels),
)
###Output
_____no_output_____
###Markdown
Copyright 2019 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Displaying image data in TensorBoard View on TensorFlow.org Run in Google Colab View source on GitHub OverviewUsing the **TensorFlow Image Summary API,** you can easily log tensors and arbitrary images and view them in TensorBoard. This can be extremely helpful to sample and examine your input data, or to [visualize layer weights](http://cs231n.github.io/understanding-cnn/) and [generated tensors](https://hub.packtpub.com/generative-adversarial-networks-using-keras/). You can also log diagnostic data as images that can be helpful in the course of your model development.In this tutorial, you will learn how to use the Image Summary API to visualize tensors as images. You will also learn how to take an arbitrary image, convert it to a tensor, and visualize it in TensorBoard. You will work through a simple but real example that uses Image Summaries to help you understand how your model is performing. Setup
###Code
# Ensure TensorFlow 2.0 is installed.
!pip install -q tf-nightly-2.0-preview
# Load the TensorBoard notebook extension.
%load_ext tensorboard.notebook
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import io
import itertools
from packaging import version
from six.moves import range
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
import numpy as np
import sklearn.metrics
print("TensorFlow version: ", tf.__version__)
assert version.parse(tf.__version__).release[0] >= 2, \
"This notebook requires TensorFlow 2.0 or above."
###Output
TensorFlow version: 2.0.0-dev20190228
###Markdown
Download the Fashion-MNIST datasetYou're going to construct a simple neural network to classify images in the [Fashion-MNIST](https://research.zalando.com/welcome/mission/research-projects/fashion-mnist/) dataset. This dataset consists of 70,000 28x28 grayscale images of fashion products from 10 categories, with 7,000 images per category.First, download the data:
###Code
# Download the data. The data is already divided into train and test.
# The labels are integers representing classes.
fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = \
fashion_mnist.load_data()
# Names of the integer classes, i.e., 0 -> T-shirt/top, 1 -> Trouser, etc.
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
###Output
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/train-labels-idx1-ubyte.gz
32768/29515 [=================================] - 0s 0us/step
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/train-images-idx3-ubyte.gz
26427392/26421880 [==============================] - 0s 0us/step
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/t10k-labels-idx1-ubyte.gz
8192/5148 [===============================================] - 0s 0us/step
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/t10k-images-idx3-ubyte.gz
4423680/4422102 [==============================] - 0s 0us/step
###Markdown
Visualizing a single imageTo understand how the Image Summary API works, you're now going to simply log the first training image in your training set in TensorBoard.Before you do that, examine the shape of your training data:
###Code
print("Shape: ", train_images[0].shape)
print("Label: ", train_labels[0], "->", class_names[train_labels[0]])
###Output
Shape: (28, 28)
Label: 9 -> Ankle boot
###Markdown
Notice that the shape of each image in the data set is a rank-2 tensor of shape (28, 28), representing the height and the width.However, ```tf.summary.image()``` expects a rank-4 tensor containing ```(batch_size, height, width, channels)```. Therefore, the tensors need to be reshaped. You're logging only one image, so ```batch_size``` is 1. The images are grayscale, so set ```channels``` to 1.
###Code
# Reshape the image for the Summary API.
img = np.reshape(train_images[0], (-1, 28, 28, 1))
###Output
_____no_output_____
###Markdown
You're now ready to log this image and view it in TensorBoard.
###Code
# Clear out any prior log data.
!rm -rf logs
# Sets up a timestamped log directory.
logdir = "logs/train_data/" + datetime.now().strftime("%Y%m%d-%H%M%S")
# Creates a file writer for the log directory.
file_writer = tf.summary.create_file_writer(logdir)
# Using the file writer, log the reshaped image.
with file_writer.as_default():
tf.summary.image("Training data", img, step=0)
###Output
_____no_output_____
###Markdown
Now, use TensorBoard to examine the image. Wait a few seconds for the UI to spin up.
###Code
%tensorboard --logdir logs/train_data
###Output
_____no_output_____
###Markdown
The "Images" tab displays the image you just logged. It's an "ankle boot". The image is scaled to a default size for easier viewing. If you want to view the unscaled original image, check "Show actual image size" at the upper left. Play with the brightness and contrast sliders to see how they affect the image pixels. Visualizing multiple imagesLogging one tensor is great, but what if you wanted to log multiple training examples?Simply specify the number of images you want to log when passing data to ```tf.summary.image()```.
###Code
with file_writer.as_default():
# Don't forget to reshape.
images = np.reshape(train_images[0:25], (-1, 28, 28, 1))
tf.summary.image("25 training data examples", images, max_outputs=25, step=0)
%tensorboard --logdir logs/train_data
###Output
_____no_output_____
###Markdown
Logging arbitrary image dataWhat if you want to visualize an image that's not a tensor, such as an image generated by [matplotlib](https://matplotlib.org/)?You need some boilerplate code to convert the plot to a tensor, but after that, you're good to go.In the code below, you'll log the first 25 images as a nice grid using matplotlib's ```subplot()``` function. You'll then view the grid in TensorBoard:
###Code
# Clear out prior logging data.
!rm -rf logs/plots
logdir = "logs/plots/" + datetime.now().strftime("%Y%m%d-%H%M%S")
file_writer = tf.summary.create_file_writer(logdir)
def plot_to_image(figure):
"""Converts the matplotlib plot specified by 'figure' to a PNG image and
returns it. The supplied figure is closed and inaccessible after this call."""
# Save the plot to a PNG in memory.
buf = io.BytesIO()
plt.savefig(buf, format='png')
# Closing the figure prevents it from being displayed directly inside
# the notebook.
plt.close(figure)
buf.seek(0)
# Convert PNG buffer to TF image
image = tf.image.decode_png(buf.getvalue(), channels=4)
# Add the batch dimension
image = tf.expand_dims(image, 0)
return image
def image_grid():
"""Return a 5x5 grid of the MNIST images as a matplotlib figure."""
# Create a figure to contain the plot.
figure = plt.figure(figsize=(10,10))
for i in range(25):
# Start next subplot.
plt.subplot(5, 5, i + 1, title=class_names[train_labels[i]])
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(train_images[i], cmap=plt.cm.binary)
return figure
# Prepare the plot
figure = image_grid()
# Convert to image and log
with file_writer.as_default():
tf.summary.image("Training data", plot_to_image(figure), step=0)
%tensorboard --logdir logs/plots
###Output
_____no_output_____
###Markdown
Building an image classifierNow put this all together with a real example. After all, you're here to do machine learning and not plot pretty pictures!You're going to use image summaries to understand how well your model is doing while training a simple classifier for the Fashion-MNIST dataset. First, create a very simple model and compile it, setting up the optimizer and loss function. The compile step also specifies that you want to log the accuracy of the classifier along the way.
###Code
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=(28, 28)),
keras.layers.Dense(32, activation='relu'),
keras.layers.Dense(10, activation='softmax')
])
model.compile(
optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy']
)
###Output
_____no_output_____
###Markdown
When training a classifier, it's useful to see the [confusion matrix](https://en.wikipedia.org/wiki/Confusion_matrix). The confusion matrix gives you detailed knowledge of how your classifier is performing on test data.Define a function that calculates the confusion matrix. You'll use a convenient [Scikit-learn](https://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html) function to do this, and then plot it using matplotlib.
###Code
def plot_confusion_matrix(cm, class_names):
"""
Returns a matplotlib figure containing the plotted confusion matrix.
Args:
cm (array, shape = [n, n]): a confusion matrix of integer classes
class_names (array, shape = [n]): String names of the integer classes
"""
figure = plt.figure(figsize=(8, 8))
plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
plt.title("Confusion matrix")
plt.colorbar()
tick_marks = np.arange(len(class_names))
plt.xticks(tick_marks, class_names, rotation=45)
plt.yticks(tick_marks, class_names)
# Normalize the confusion matrix.
cm = np.around(cm.astype('float') / cm.sum(axis=1)[:, np.newaxis], decimals=2)
# Use white text if squares are dark; otherwise black.
threshold = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
color = "white" if cm[i, j] > threshold else "black"
plt.text(j, i, cm[i, j], horizontalalignment="center", color=color)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
return figure
###Output
_____no_output_____
###Markdown
You're now ready to train the classifier and regularly log the confusion matrix along the way.Here's what you'll do:1. Create the [Keras TensorBoard callback](https://www.tensorflow.org/api_docs/python/tf/keras/callbacks/TensorBoard) to log basic metrics2. Create a [Keras LambdaCallback](https://www.tensorflow.org/api_docs/python/tf/keras/callbacks/LambdaCallback) to log the confusion matrix at the end of every epoch3. Train the model using Model.fit(), making sure to pass both callbacksAs training progresses, scroll down to see TensorBoard start up.
###Code
# Clear out prior logging data.
!rm -rf logs/image
logdir = "logs/image/" + datetime.now().strftime("%Y%m%d-%H%M%S")
# Define the basic TensorBoard callback.
tensorboard_callback = keras.callbacks.TensorBoard(log_dir=logdir)
file_writer_cm = tf.summary.create_file_writer(logdir + '/cm')
def log_confusion_matrix(epoch, logs):
# Use the model to predict the values from the validation dataset.
test_pred_raw = model.predict(test_images)
test_pred = np.argmax(test_pred_raw, axis=1)
# Calculate the confusion matrix.
cm = sklearn.metrics.confusion_matrix(test_labels, test_pred)
# Log the confusion matrix as an image summary.
figure = plot_confusion_matrix(cm, class_names=class_names)
cm_image = plot_to_image(figure)
# Log the confusion matrix as an image summary.
with file_writer_cm.as_default():
tf.summary.image("Confusion Matrix", cm_image, step=epoch)
# Define the per-epoch callback.
cm_callback = keras.callbacks.LambdaCallback(on_epoch_end=log_confusion_matrix)
# Start TensorBoard.
%tensorboard --logdir logs/image
# Train the classifier.
model.fit(
train_images,
train_labels,
epochs=5,
verbose=0, # Suppress chatty output
callbacks=[tensorboard_callback, cm_callback],
validation_data=(test_images, test_labels),
)
###Output
_____no_output_____ |
finalproject/irmiger_006165257/irmiger_006165257.ipynb | ###Markdown
The next few cells give a general picture of my data
###Code
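# NOTE (editor sketch): the cells below use `df`, `np`, `sns`, `plt` and `sm` without showing
# their setup. A minimal sketch of the assumed imports and data load follows; the file name
# 'heart.csv' is an assumption (the Kaggle heart-disease dataset), not taken from this notebook.
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import statsmodels.api as sm
df = pd.read_csv('heart.csv')  # expected to contain a binary 'target' column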
print(df.target.value_counts())
sns.countplot(x="target", data=df, palette="bwr")
plt.show()
#1 = male, 0 = female
print(df.sex.value_counts())
gen = sns.FacetGrid(df)
gen.map(plt.hist,"sex")
print(df.age.describe())
sns.set()
ages = sns.FacetGrid(df)
ages.map(plt.hist,"age", color="orange")
plt.figure(figsize=(16, 8))
plt.scatter(
df['chol'],
df['cp'],
c='black'
)
plt.xlabel("cholesterol")
plt.ylabel("chest pain type")
plt.show()
g = sns.lmplot(x="thal", y="target", data=df, y_jitter=.02, logistic=True)
#g.set(xlim=(0, 80), ylim=(-.05, 1.05))
g = sns.lmplot(x="chol", y="target", data=df, y_jitter=.02, logistic=True)
g = sns.lmplot(x="thalach", y="target", data=df, y_jitter=.02, logistic=True)
g = sns.lmplot(x="exang", y="target", data=df, y_jitter=.02, logistic=True)
g = sns.lmplot(x="slope", y="target", data=df, y_jitter=.02, logistic=True)
g = sns.lmplot(x="ca", y="target", data=df, y_jitter=.02, logistic=True)
g = sns.lmplot(x="trestbps", y="target", data=df, y_jitter=.02, logistic=True)
g = sns.lmplot(x="fbs", y="target", data=df, y_jitter=.02, logistic=True)
g = sns.lmplot(x="oldpeak", y="target", data=df, y_jitter=.02, logistic=True)
g = sns.lmplot(x="restecg", y="target", data=df, y_jitter=.02, logistic=True)
g = sns.lmplot(x="sex", y="target", data=df, y_jitter=.02, logistic=True)
g = sns.lmplot(x="age", y="target", data=df, y_jitter=.02, logistic=True)
g = sns.lmplot(x="restecg", y="target", data=df, y_jitter=.02, logistic=True)
cols_to_keep =['target', 'age','oldpeak','ca','thal','chol', 'thalach', 'trestbps']
data=df[cols_to_keep]
data.head(5)
#set independent variables
xData = data[data.columns[1:]]
yData = data['target']
logit = sm.Logit(yData,xData)
result = logit.fit()
print(result.summary())
# odds ratios
params = result.params
conf = result.conf_int()
conf['Odds Ratios'] = params
conf.columns = ['2.5%','97.5%','Odds Ratios']
print(np.exp(conf))
###Output
2.5% 97.5% Odds Ratios
age 0.952641 1.015873 0.983749
oldpeak 1.433580 2.686392 1.962437
ca 1.559020 2.984194 2.156946
thal 1.940516 5.179187 3.170220
chol 0.998258 1.011059 1.004638
thalach 0.950650 0.975131 0.962813
trestbps 0.994029 1.027458 1.010605
|
python-qutip/opensystem.ipynb | ###Markdown
H: Hamiltonian; W: oscillator frequency; W = 1\begin{equation}H=-\frac{W}{2}\sigma_{z}\end{equation}
###Code
H = -(1/2) * sigmaz()  # Hamiltonian H = -(w/2)*sigmaz, oscillator frequency w = 1
P=0.36
H
rho=P*fock_dm(2,0)+(1-P)*fock_dm(2,1)
psi0=fock(2,0)
rho
times = np.linspace(0.0, 10.0, 100)
result = mesolve(H, psi0, times, [], [])
result.states
result2= mesolve(H, rho, times, [], [])
result2.states
ket=basis(2,0)
ket1=basis(2,1)
jumpOP=ket*ket1.dag()
jumpOP
result3= mesolve(H, rho, times, [jumpOP], [])
result3.states
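# The collapse operators passed to mesolve enter the Lindblad master equation
#   drho/dt = -i[H, rho] + sum_k ( c_k rho c_k^dag - (1/2){c_k^dag c_k, rho} ),
# so a decay channel with rate gamma is supplied as sqrt(gamma) * jumpOP
# (which is why the next cell uses np.sqrt(0.0004)).  A small sketch that tracks
# <sigma_z> over a longer time so the relaxation is visible; gamma = 0.05 is an
# assumed illustrative rate, not a value from the original notebook.
gamma = 0.05
long_times = np.linspace(0.0, 100.0, 500)
decay_run = mesolve(H, rho, long_times, [np.sqrt(gamma) * jumpOP], [sigmaz()])
fig, ax = plt.subplots()
ax.plot(long_times, decay_run.expect[0])
ax.set_xlabel("t")
ax.set_ylabel("<sigma_z>")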
result3= mesolve(H, rho, times, [np.sqrt(0.0004) *jumpOP], [sigmax(),sigmay(),sigmaz()])
fig, ax = plt.subplots()
ax.plot(times, result3.expect[2])
fig, ax = plt.subplots()
ax.plot(times, result3.expect[2])
###Output
_____no_output_____ |
Incremental Clustering Assignment/Incremental Clustering.ipynb | ###Markdown
Imports
###Code
from sklearn.cluster import MiniBatchKMeans
from sklearn.datasets import load_iris
import numpy as np
import cv2
###Output
_____no_output_____
###Markdown
Iris
###Code
data = load_iris()
X = data.data
y = data.target
y_names = data.target_names
print('X =>', X)
print('y =>', y)
print('y_names =>', y_names)
###Output
X => [[5.1 3.5 1.4 0.2]
[4.9 3. 1.4 0.2]
[4.7 3.2 1.3 0.2]
[4.6 3.1 1.5 0.2]
[5. 3.6 1.4 0.2]
[5.4 3.9 1.7 0.4]
[4.6 3.4 1.4 0.3]
[5. 3.4 1.5 0.2]
[4.4 2.9 1.4 0.2]
[4.9 3.1 1.5 0.1]
[5.4 3.7 1.5 0.2]
[4.8 3.4 1.6 0.2]
[4.8 3. 1.4 0.1]
[4.3 3. 1.1 0.1]
[5.8 4. 1.2 0.2]
[5.7 4.4 1.5 0.4]
[5.4 3.9 1.3 0.4]
[5.1 3.5 1.4 0.3]
[5.7 3.8 1.7 0.3]
[5.1 3.8 1.5 0.3]
[5.4 3.4 1.7 0.2]
[5.1 3.7 1.5 0.4]
[4.6 3.6 1. 0.2]
[5.1 3.3 1.7 0.5]
[4.8 3.4 1.9 0.2]
[5. 3. 1.6 0.2]
[5. 3.4 1.6 0.4]
[5.2 3.5 1.5 0.2]
[5.2 3.4 1.4 0.2]
[4.7 3.2 1.6 0.2]
[4.8 3.1 1.6 0.2]
[5.4 3.4 1.5 0.4]
[5.2 4.1 1.5 0.1]
[5.5 4.2 1.4 0.2]
[4.9 3.1 1.5 0.2]
[5. 3.2 1.2 0.2]
[5.5 3.5 1.3 0.2]
[4.9 3.6 1.4 0.1]
[4.4 3. 1.3 0.2]
[5.1 3.4 1.5 0.2]
[5. 3.5 1.3 0.3]
[4.5 2.3 1.3 0.3]
[4.4 3.2 1.3 0.2]
[5. 3.5 1.6 0.6]
[5.1 3.8 1.9 0.4]
[4.8 3. 1.4 0.3]
[5.1 3.8 1.6 0.2]
[4.6 3.2 1.4 0.2]
[5.3 3.7 1.5 0.2]
[5. 3.3 1.4 0.2]
[7. 3.2 4.7 1.4]
[6.4 3.2 4.5 1.5]
[6.9 3.1 4.9 1.5]
[5.5 2.3 4. 1.3]
[6.5 2.8 4.6 1.5]
[5.7 2.8 4.5 1.3]
[6.3 3.3 4.7 1.6]
[4.9 2.4 3.3 1. ]
[6.6 2.9 4.6 1.3]
[5.2 2.7 3.9 1.4]
[5. 2. 3.5 1. ]
[5.9 3. 4.2 1.5]
[6. 2.2 4. 1. ]
[6.1 2.9 4.7 1.4]
[5.6 2.9 3.6 1.3]
[6.7 3.1 4.4 1.4]
[5.6 3. 4.5 1.5]
[5.8 2.7 4.1 1. ]
[6.2 2.2 4.5 1.5]
[5.6 2.5 3.9 1.1]
[5.9 3.2 4.8 1.8]
[6.1 2.8 4. 1.3]
[6.3 2.5 4.9 1.5]
[6.1 2.8 4.7 1.2]
[6.4 2.9 4.3 1.3]
[6.6 3. 4.4 1.4]
[6.8 2.8 4.8 1.4]
[6.7 3. 5. 1.7]
[6. 2.9 4.5 1.5]
[5.7 2.6 3.5 1. ]
[5.5 2.4 3.8 1.1]
[5.5 2.4 3.7 1. ]
[5.8 2.7 3.9 1.2]
[6. 2.7 5.1 1.6]
[5.4 3. 4.5 1.5]
[6. 3.4 4.5 1.6]
[6.7 3.1 4.7 1.5]
[6.3 2.3 4.4 1.3]
[5.6 3. 4.1 1.3]
[5.5 2.5 4. 1.3]
[5.5 2.6 4.4 1.2]
[6.1 3. 4.6 1.4]
[5.8 2.6 4. 1.2]
[5. 2.3 3.3 1. ]
[5.6 2.7 4.2 1.3]
[5.7 3. 4.2 1.2]
[5.7 2.9 4.2 1.3]
[6.2 2.9 4.3 1.3]
[5.1 2.5 3. 1.1]
[5.7 2.8 4.1 1.3]
[6.3 3.3 6. 2.5]
[5.8 2.7 5.1 1.9]
[7.1 3. 5.9 2.1]
[6.3 2.9 5.6 1.8]
[6.5 3. 5.8 2.2]
[7.6 3. 6.6 2.1]
[4.9 2.5 4.5 1.7]
[7.3 2.9 6.3 1.8]
[6.7 2.5 5.8 1.8]
[7.2 3.6 6.1 2.5]
[6.5 3.2 5.1 2. ]
[6.4 2.7 5.3 1.9]
[6.8 3. 5.5 2.1]
[5.7 2.5 5. 2. ]
[5.8 2.8 5.1 2.4]
[6.4 3.2 5.3 2.3]
[6.5 3. 5.5 1.8]
[7.7 3.8 6.7 2.2]
[7.7 2.6 6.9 2.3]
[6. 2.2 5. 1.5]
[6.9 3.2 5.7 2.3]
[5.6 2.8 4.9 2. ]
[7.7 2.8 6.7 2. ]
[6.3 2.7 4.9 1.8]
[6.7 3.3 5.7 2.1]
[7.2 3.2 6. 1.8]
[6.2 2.8 4.8 1.8]
[6.1 3. 4.9 1.8]
[6.4 2.8 5.6 2.1]
[7.2 3. 5.8 1.6]
[7.4 2.8 6.1 1.9]
[7.9 3.8 6.4 2. ]
[6.4 2.8 5.6 2.2]
[6.3 2.8 5.1 1.5]
[6.1 2.6 5.6 1.4]
[7.7 3. 6.1 2.3]
[6.3 3.4 5.6 2.4]
[6.4 3.1 5.5 1.8]
[6. 3. 4.8 1.8]
[6.9 3.1 5.4 2.1]
[6.7 3.1 5.6 2.4]
[6.9 3.1 5.1 2.3]
[5.8 2.7 5.1 1.9]
[6.8 3.2 5.9 2.3]
[6.7 3.3 5.7 2.5]
[6.7 3. 5.2 2.3]
[6.3 2.5 5. 1.9]
[6.5 3. 5.2 2. ]
[6.2 3.4 5.4 2.3]
[5.9 3. 5.1 1.8]]
y => [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 2
2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2
2 2]
y_names => ['setosa' 'versicolor' 'virginica']
###Markdown
Incremental Clustering for IRIS dataset
###Code
minibatchKmeans = MiniBatchKMeans(n_clusters=3, random_state=0, batch_size=6)
minibatchKmeans.fit(X)
y_pred = minibatchKmeans.predict(X)
# Note: k-means cluster IDs are arbitrary, so `y_pred == y` only checks one particular labelling;
# the complement of the raw match rate is reported here as "accuracy".
print("accuracy:", (1-(np.count_nonzero(y_pred == y) / len(y))) * 100)
###Output
accuracy: 91.33333333333333
###Markdown
Intrusion Detection
###Code
def detect():
font = cv2.FONT_HERSHEY_SIMPLEX
cam = cv2.VideoCapture(1)
cam.set(3, 640)
cam.set(4, 480)
image_list = []
count = 0
while(True):
ret, img = cam.read()
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # Keep the model fitted on the previous frame as the reference.
        if count >= 1:
            temp = res_list
        # Re-fit MiniBatchKMeans on the rows of the current grayscale frame.
        minibatchKmeans = MiniBatchKMeans(n_clusters=3,random_state=0)
        res_list = minibatchKmeans.fit(gray)
        if count < 1:
            temp = res_list  # first frame: compare the frame against itself
        count+=1
        # Any mismatch between the reference labels and the current frame's
        # predicted labels is treated as a change in the scene ("intrusion").
        pred = temp.predict(gray)
        if False in (temp.labels_ == pred):
            cv2.putText(img, "Detected", (320, 240), font, 1, (255,255,255), 2)
else:
cv2.putText(img, "Clean", (320, 240), font, 1, (255,255,255), 2)
cv2.imshow('image', img)
k = cv2.waitKey(100) & 0xff
if k == 27:
break
cam.release()
cv2.destroyAllWindows()
detect()
###Output
_____no_output_____
###Markdown
Extras
###Code
minibatchKmeans = MiniBatchKMeans(n_clusters=3,random_state=0)
image1 = cv2.imread('1.jpg')
gray1 = cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY)
image2 = cv2.imread('2.jpg')
gray2 = cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY)
res_list = minibatchKmeans.fit(gray1) #static
pred = minibatchKmeans.predict(gray2) #with person
False in (res_list.labels_ == pred)
res_list.labels_ == pred
###Output
_____no_output_____ |
notebooks/classifier_scikitlearn_BaggingClassifier.ipynb | ###Markdown
Adversarial-Robustness-Toolbox for scikit-learn BaggingClassifier
###Code
from sklearn.ensemble import BaggingClassifier
from sklearn.datasets import load_iris
import numpy as np
from matplotlib import pyplot as plt
from art.classifiers import SklearnClassifier
from art.attacks import ZooAttack
from art.utils import load_mnist
import warnings
warnings.filterwarnings('ignore')
###Output
_____no_output_____
###Markdown
1 Training scikit-learn BaggingClassifier and attacking with ART Zeroth Order Optimization attack
###Code
def get_adversarial_examples(x_train, y_train):
# Fit BaggingClassifier
model = BaggingClassifier()
model.fit(X=x_train, y=y_train)
# Create ART classifier for scikit-learn BaggingClassifier
art_classifier = SklearnClassifier(model=model)
# Create ART Zeroth Order Optimization attack
zoo = ZooAttack(classifier=art_classifier, confidence=0.0, targeted=False, learning_rate=1e-1, max_iter=20,
binary_search_steps=10, initial_const=1e-3, abort_early=True, use_resize=False,
use_importance=False, nb_parallel=1, batch_size=1, variable_h=0.2)
# Generate adversarial samples with ART Zeroth Order Optimization attack
x_train_adv = zoo.generate(x_train)
return x_train_adv, model
###Output
_____no_output_____
###Markdown
1.1 Utility functions
###Code
def get_data(num_classes):
x_train, y_train = load_iris(return_X_y=True)
x_train = x_train[y_train < num_classes][:, [0, 1]]
y_train = y_train[y_train < num_classes]
x_train[:, 0][y_train == 0] *= 2
x_train[:, 1][y_train == 2] *= 2
x_train[:, 0][y_train == 0] -= 3
x_train[:, 1][y_train == 2] -= 2
x_train[:, 0] = (x_train[:, 0] - 4) / (9 - 4)
x_train[:, 1] = (x_train[:, 1] - 1) / (6 - 1)
return x_train, y_train
def plot_results(model, x_train, y_train, x_train_adv, num_classes):
fig, axs = plt.subplots(1, num_classes, figsize=(num_classes * 5, 5))
colors = ['orange', 'blue', 'green']
for i_class in range(num_classes):
# Plot difference vectors
for i in range(y_train[y_train == i_class].shape[0]):
x_1_0 = x_train[y_train == i_class][i, 0]
x_1_1 = x_train[y_train == i_class][i, 1]
x_2_0 = x_train_adv[y_train == i_class][i, 0]
x_2_1 = x_train_adv[y_train == i_class][i, 1]
if x_1_0 != x_2_0 or x_1_1 != x_2_1:
axs[i_class].plot([x_1_0, x_2_0], [x_1_1, x_2_1], c='black', zorder=1)
# Plot benign samples
for i_class_2 in range(num_classes):
axs[i_class].scatter(x_train[y_train == i_class_2][:, 0], x_train[y_train == i_class_2][:, 1], s=20,
zorder=2, c=colors[i_class_2])
axs[i_class].set_aspect('equal', adjustable='box')
# Show predicted probability as contour plot
h = .01
x_min, x_max = 0, 1
y_min, y_max = 0, 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z_proba = model.predict_proba(np.c_[xx.ravel(), yy.ravel()])
Z_proba = Z_proba[:, i_class].reshape(xx.shape)
im = axs[i_class].contourf(xx, yy, Z_proba, levels=[0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0],
vmin=0, vmax=1)
if i_class == num_classes - 1:
cax = fig.add_axes([0.95, 0.2, 0.025, 0.6])
plt.colorbar(im, ax=axs[i_class], cax=cax)
# Plot adversarial samples
for i in range(y_train[y_train == i_class].shape[0]):
x_1_0 = x_train[y_train == i_class][i, 0]
x_1_1 = x_train[y_train == i_class][i, 1]
x_2_0 = x_train_adv[y_train == i_class][i, 0]
x_2_1 = x_train_adv[y_train == i_class][i, 1]
if x_1_0 != x_2_0 or x_1_1 != x_2_1:
axs[i_class].scatter(x_2_0, x_2_1, zorder=2, c='red', marker='X')
axs[i_class].set_xlim((x_min, x_max))
axs[i_class].set_ylim((y_min, y_max))
axs[i_class].set_title('class ' + str(i_class))
axs[i_class].set_xlabel('feature 1')
axs[i_class].set_ylabel('feature 2')
###Output
_____no_output_____
###Markdown
2 Example: Iris dataset legend- colored background: probability of class i- orange circles: class 1- blue circles: class 2- green circles: class 3- red crosses: adversarial samples for class i
###Code
num_classes = 2
x_train, y_train = get_data(num_classes=num_classes)
x_train_adv, model = get_adversarial_examples(x_train, y_train)
plot_results(model, x_train, y_train, x_train_adv, num_classes)
num_classes = 3
x_train, y_train = get_data(num_classes=num_classes)
x_train_adv, model = get_adversarial_examples(x_train, y_train)
plot_results(model, x_train, y_train, x_train_adv, num_classes)
###Output
_____no_output_____
###Markdown
3 Example: MNIST 3.1 Load and transform MNIST dataset
###Code
(x_train, y_train), (x_test, y_test), min_, max_ = load_mnist()
n_samples_train = x_train.shape[0]
n_features_train = x_train.shape[1] * x_train.shape[2] * x_train.shape[3]
n_samples_test = x_test.shape[0]
n_features_test = x_test.shape[1] * x_test.shape[2] * x_test.shape[3]
x_train = x_train.reshape(n_samples_train, n_features_train)
x_test = x_test.reshape(n_samples_test, n_features_test)
y_train = np.argmax(y_train, axis=1)
y_test = np.argmax(y_test, axis=1)
n_samples_max = 200
x_train = x_train[0:n_samples_max]
y_train = y_train[0:n_samples_max]
x_test = x_test[0:n_samples_max]
y_test = y_test[0:n_samples_max]
###Output
_____no_output_____
###Markdown
3.2 Train BaggingClassifier classifier
###Code
model = BaggingClassifier(base_estimator=None, n_estimators=10, max_samples=1.0, max_features=1.0,
bootstrap=True, bootstrap_features=False, oob_score=False, warm_start=False,
n_jobs=None, random_state=None, verbose=0)
model.fit(X=x_train, y=y_train)
###Output
_____no_output_____
###Markdown
3.3 Create and apply Zeroth Order Optimization Attack with ART
###Code
art_classifier = SklearnClassifier(model=model)
zoo = ZooAttack(classifier=art_classifier, confidence=0.0, targeted=False, learning_rate=1e-1, max_iter=100,
binary_search_steps=20, initial_const=1e-3, abort_early=True, use_resize=False,
use_importance=False, nb_parallel=10, batch_size=1, variable_h=0.25)
x_train_adv = zoo.generate(x_train)
x_test_adv = zoo.generate(x_test)
###Output
_____no_output_____
###Markdown
3.4 Evaluate BaggingClassifier on benign and adversarial samples
###Code
score = model.score(x_train, y_train)
print("Benign Training Score: %.4f" % score)
plt.matshow(x_train[0, :].reshape((28, 28)))
plt.clim(0, 1)
prediction = model.predict(x_train[0:1, :])[0]
print("Benign Training Predicted Label: %i" % prediction)
score = model.score(x_train_adv, y_train)
print("Adversarial Training Score: %.4f" % score)
plt.matshow(x_train_adv[0, :].reshape((28, 28)))
plt.clim(0, 1)
prediction = model.predict(x_train_adv[0:1, :])[0]
print("Adversarial Training Predicted Label: %i" % prediction)
score = model.score(x_test, y_test)
print("Benign Test Score: %.4f" % score)
plt.matshow(x_test[0, :].reshape((28, 28)))
plt.clim(0, 1)
prediction = model.predict(x_test[0:1, :])[0]
print("Benign Test Predicted Label: %i" % prediction)
score = model.score(x_test_adv, y_test)
print("Adversarial Test Score: %.4f" % score)
plt.matshow(x_test_adv[0, :].reshape((28, 28)))
plt.clim(0, 1)
prediction = model.predict(x_test_adv[0:1, :])[0]
print("Adversarial Test Predicted Label: %i" % prediction)
###Output
Adversarial Test Predicted Label: 6
###Markdown
Adversarial-Robustness-Toolbox for scikit-learn BaggingClassifier
###Code
from sklearn.ensemble import BaggingClassifier
from sklearn.datasets import load_iris
import numpy as np
from matplotlib import pyplot as plt
from art.estimators.classification import SklearnClassifier
from art.attacks.evasion import ZooAttack
from art.utils import load_mnist
import warnings
warnings.filterwarnings('ignore')
###Output
_____no_output_____
###Markdown
1 Training scikit-learn BaggingClassifier and attacking with ART Zeroth Order Optimization attack
###Code
def get_adversarial_examples(x_train, y_train):
# Fit BaggingClassifier
model = BaggingClassifier()
model.fit(X=x_train, y=y_train)
# Create ART classifier for scikit-learn BaggingClassifier
art_classifier = SklearnClassifier(model=model)
# Create ART Zeroth Order Optimization attack
zoo = ZooAttack(classifier=art_classifier, confidence=0.0, targeted=False, learning_rate=1e-1, max_iter=20,
binary_search_steps=10, initial_const=1e-3, abort_early=True, use_resize=False,
use_importance=False, nb_parallel=1, batch_size=1, variable_h=0.2)
# Generate adversarial samples with ART Zeroth Order Optimization attack
x_train_adv = zoo.generate(x_train)
return x_train_adv, model
###Output
_____no_output_____
###Markdown
1.1 Utility functions
###Code
def get_data(num_classes):
x_train, y_train = load_iris(return_X_y=True)
x_train = x_train[y_train < num_classes][:, [0, 1]]
y_train = y_train[y_train < num_classes]
x_train[:, 0][y_train == 0] *= 2
x_train[:, 1][y_train == 2] *= 2
x_train[:, 0][y_train == 0] -= 3
x_train[:, 1][y_train == 2] -= 2
x_train[:, 0] = (x_train[:, 0] - 4) / (9 - 4)
x_train[:, 1] = (x_train[:, 1] - 1) / (6 - 1)
return x_train, y_train
def plot_results(model, x_train, y_train, x_train_adv, num_classes):
fig, axs = plt.subplots(1, num_classes, figsize=(num_classes * 5, 5))
colors = ['orange', 'blue', 'green']
for i_class in range(num_classes):
# Plot difference vectors
for i in range(y_train[y_train == i_class].shape[0]):
x_1_0 = x_train[y_train == i_class][i, 0]
x_1_1 = x_train[y_train == i_class][i, 1]
x_2_0 = x_train_adv[y_train == i_class][i, 0]
x_2_1 = x_train_adv[y_train == i_class][i, 1]
if x_1_0 != x_2_0 or x_1_1 != x_2_1:
axs[i_class].plot([x_1_0, x_2_0], [x_1_1, x_2_1], c='black', zorder=1)
# Plot benign samples
for i_class_2 in range(num_classes):
axs[i_class].scatter(x_train[y_train == i_class_2][:, 0], x_train[y_train == i_class_2][:, 1], s=20,
zorder=2, c=colors[i_class_2])
axs[i_class].set_aspect('equal', adjustable='box')
# Show predicted probability as contour plot
h = .01
x_min, x_max = 0, 1
y_min, y_max = 0, 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z_proba = model.predict_proba(np.c_[xx.ravel(), yy.ravel()])
Z_proba = Z_proba[:, i_class].reshape(xx.shape)
im = axs[i_class].contourf(xx, yy, Z_proba, levels=[0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0],
vmin=0, vmax=1)
if i_class == num_classes - 1:
cax = fig.add_axes([0.95, 0.2, 0.025, 0.6])
plt.colorbar(im, ax=axs[i_class], cax=cax)
# Plot adversarial samples
for i in range(y_train[y_train == i_class].shape[0]):
x_1_0 = x_train[y_train == i_class][i, 0]
x_1_1 = x_train[y_train == i_class][i, 1]
x_2_0 = x_train_adv[y_train == i_class][i, 0]
x_2_1 = x_train_adv[y_train == i_class][i, 1]
if x_1_0 != x_2_0 or x_1_1 != x_2_1:
axs[i_class].scatter(x_2_0, x_2_1, zorder=2, c='red', marker='X')
axs[i_class].set_xlim((x_min, x_max))
axs[i_class].set_ylim((y_min, y_max))
axs[i_class].set_title('class ' + str(i_class))
axs[i_class].set_xlabel('feature 1')
axs[i_class].set_ylabel('feature 2')
###Output
_____no_output_____
###Markdown
2 Example: Iris dataset legend- colored background: probability of class i- orange circles: class 1- blue circles: class 2- green circles: class 3- red crosses: adversarial samples for class i
###Code
num_classes = 2
x_train, y_train = get_data(num_classes=num_classes)
x_train_adv, model = get_adversarial_examples(x_train, y_train)
plot_results(model, x_train, y_train, x_train_adv, num_classes)
num_classes = 3
x_train, y_train = get_data(num_classes=num_classes)
x_train_adv, model = get_adversarial_examples(x_train, y_train)
plot_results(model, x_train, y_train, x_train_adv, num_classes)
###Output
ZOO: 100%|██████████| 150/150 [00:24<00:00, 6.23it/s]
###Markdown
3 Example: MNIST 3.1 Load and transform MNIST dataset
###Code
(x_train, y_train), (x_test, y_test), min_, max_ = load_mnist()
n_samples_train = x_train.shape[0]
n_features_train = x_train.shape[1] * x_train.shape[2] * x_train.shape[3]
n_samples_test = x_test.shape[0]
n_features_test = x_test.shape[1] * x_test.shape[2] * x_test.shape[3]
x_train = x_train.reshape(n_samples_train, n_features_train)
x_test = x_test.reshape(n_samples_test, n_features_test)
y_train = np.argmax(y_train, axis=1)
y_test = np.argmax(y_test, axis=1)
n_samples_max = 200
x_train = x_train[0:n_samples_max]
y_train = y_train[0:n_samples_max]
x_test = x_test[0:n_samples_max]
y_test = y_test[0:n_samples_max]
###Output
_____no_output_____
###Markdown
3.2 Train BaggingClassifier classifier
###Code
model = BaggingClassifier(base_estimator=None, n_estimators=10, max_samples=1.0, max_features=1.0,
bootstrap=True, bootstrap_features=False, oob_score=False, warm_start=False,
n_jobs=None, random_state=None, verbose=0)
model.fit(X=x_train, y=y_train)
###Output
_____no_output_____
###Markdown
3.3 Create and apply Zeroth Order Optimization Attack with ART
###Code
art_classifier = SklearnClassifier(model=model)
zoo = ZooAttack(classifier=art_classifier, confidence=0.0, targeted=False, learning_rate=1e-1, max_iter=100,
binary_search_steps=20, initial_const=1e-3, abort_early=True, use_resize=False,
use_importance=False, nb_parallel=10, batch_size=1, variable_h=0.25)
x_train_adv = zoo.generate(x_train)
x_test_adv = zoo.generate(x_test)
###Output
ZOO: 100%|██████████| 200/200 [05:53<00:00, 1.77s/it]
###Markdown
3.4 Evaluate BaggingClassifier on benign and adversarial samples
###Code
score = model.score(x_train, y_train)
print("Benign Training Score: %.4f" % score)
plt.matshow(x_train[0, :].reshape((28, 28)))
plt.clim(0, 1)
prediction = model.predict(x_train[0:1, :])[0]
print("Benign Training Predicted Label: %i" % prediction)
score = model.score(x_train_adv, y_train)
print("Adversarial Training Score: %.4f" % score)
plt.matshow(x_train_adv[0, :].reshape((28, 28)))
plt.clim(0, 1)
prediction = model.predict(x_train_adv[0:1, :])[0]
print("Adversarial Training Predicted Label: %i" % prediction)
score = model.score(x_test, y_test)
print("Benign Test Score: %.4f" % score)
plt.matshow(x_test[0, :].reshape((28, 28)))
plt.clim(0, 1)
prediction = model.predict(x_test[0:1, :])[0]
print("Benign Test Predicted Label: %i" % prediction)
score = model.score(x_test_adv, y_test)
print("Adversarial Test Score: %.4f" % score)
plt.matshow(x_test_adv[0, :].reshape((28, 28)))
plt.clim(0, 1)
prediction = model.predict(x_test_adv[0:1, :])[0]
print("Adversarial Test Predicted Label: %i" % prediction)
###Output
Adversarial Test Predicted Label: 6
|
notebooks/1.0_DataCompile.ipynb | ###Markdown
Load the hourly historical data
###Code
%load_ext autoreload
%autoreload 2
%matplotlib inline
import sys
sys.path.append('../')
import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
import os
from dotenv import load_dotenv, find_dotenv
# data directories
load_dotenv(find_dotenv(), verbose=True)
dir_project = os.getenv('PROJECT_ROOT')
dir_data_raw = os.path.join(dir_project, 'data/raw/')
dir_data_interim = os.path.join(dir_project, 'data/interim/')
dir_data_processed = os.path.join(dir_project, 'data/processed/')
dir_data_external = os.path.join(dir_project, 'data/external/')
dir_models = os.path.join(dir_project, 'models/')
all_dfs_hourly = pd.read_excel(os.path.join(dir_data_raw, 'DATA HYDRO IVADO Data VF 2017-2020YTD.xlsx'), sheet_name=None)
###Output
_____no_output_____
###Markdown
The original excel file has several sheets:
###Code
all_dfs_hourly.keys()
for df_key in all_dfs_hourly:
print(df_key, len(all_dfs_hourly[df_key]))
###Output
Demand 30824
Wind 30828
Zonal Demand 30828
Weather 30816
HOEP Price 30820
Intertie Flow 30816
Generator Output 30816
Hourly by Generator Name 30816
###Markdown
Sheet-wise preprocess Some of the sheets have a different time format; we unify them first: - `Weather`
###Code
df_weather = all_dfs_hourly['Weather']
# Merge date and hour into a single timestamp column ('Fusionné')
df_weather['Fusionné'] = df_weather['Date'].dt.strftime('%m/%d/%Y') + ' ' + (df_weather['Hour']-1).astype(str) + ':00'
df_weather['Fusionné'] = pd.to_datetime(df_weather['Fusionné'])
df_weather.head()
###Output
_____no_output_____
###Markdown
- `Intertie Flow` & `Hourly by Generator Name` Similar to the above:
###Code
sheet = 'Intertie Flow'
df_temp = all_dfs_hourly[sheet]
df_temp['Fusionné'] = df_temp['date'].dt.strftime('%m/%d/%Y') + ' ' + (df_temp['hour'].astype(int)-1).astype(str) + ':00'
df_temp['Fusionné'] = pd.to_datetime(df_temp['Fusionné'])
sheet = 'Hourly by Generator Name'
df_temp = all_dfs_hourly[sheet]
df_temp['Fusionné'] = df_temp['Date'].dt.strftime('%m/%d/%Y') + ' ' + (df_temp['Hour']-1).astype(str) + ':00'
df_temp['Fusionné'] = pd.to_datetime(df_temp['Fusionné'])
###Output
_____no_output_____
###Markdown
- Unify the col name for `Wind`
###Code
df_wind = all_dfs_hourly['Wind']
df_wind.rename({'Date': 'Fusionné'}, axis='columns', inplace=True)
# clarify the col name
df_wind.rename({'Réalisé': 'wind_realized', 'Prévisionnel': 'wind_provisional'}, axis='columns', inplace=True)
###Output
_____no_output_____
###Markdown
- Drop duplicated `Ontario Demand` from one of the `Demand` and `Zonal Demand` sheets.
###Code
df_demand = all_dfs_hourly['Demand']
df_demand.drop(columns=['Ontario Demand'], inplace=True)
###Output
_____no_output_____
###Markdown
Concatenate different sheets (variables)
###Code
all_dfs_hourly.pop('key Generator name - fuel type')
from functools import reduce
df_merged = reduce(lambda left,right: pd.merge(left,right,on=['Fusionné'],
how='outer'), all_dfs_hourly.values())
df_merged.rename({'Fusionné': 'date'}, axis='columns', inplace=True)
len(df_merged.columns)
len(df_merged)
###Output
_____no_output_____
###Markdown
Output:
###Code
df_merged.to_csv(os.path.join(dir_data_interim, 'hourly_data_merged_all_variables.csv'), index=False)
###Output
_____no_output_____ |
week05_nlp/part2_pytorch.ipynb | ###Markdown
Natural Language Processing with Deep Learning (7 points)Today we're gonna apply the newly learned DL tools for sequence processing to the task of predicting job salary.Special thanks to [Oleg Vasilev](https://github.com/Omrigan/) for the assignment core (originally written for theano/tensorflow). Quite a good notebook; I need to come back to it later and finish it.
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
###Output
_____no_output_____
###Markdown
About the challengeFor starters, let's download the data from __[here](https://yadi.sk/d/vVEOWPFY3NruT7)__.You can also get it from the competition [page](https://www.kaggle.com/c/job-salary-prediction/data) (in that case, pick `Train_rev1.*`).Our task is to predict one number, __SalaryNormalized__, in the sense of minimizing __Mean Absolute Error__.To do so, our model can access a number of features:* Free text: __`Title`__ and __`FullDescription`__* Categorical: __`Category`__, __`Company`__, __`LocationNormalized`__, __`ContractType`__, and __`ContractTime`__.You can read more [in the official description](https://www.kaggle.com/c/job-salary-predictiondescription).
###Code
data = pd.read_csv("./Train_rev1.csv", index_col=None)
data['Log1pSalary'] = np.log1p(data['SalaryNormalized']).astype('float32')
text_columns = ["Title", "FullDescription"]
categorical_columns = ["Category", "Company", "LocationNormalized", "ContractType", "ContractTime"]
target_column = "Log1pSalary"
data[categorical_columns] = data[categorical_columns].fillna('NaN') # cast nan to string
data.sample(3)
###Output
_____no_output_____
###Markdown
The NLP partTo even begin training our neural network, we're gonna need to preprocess the text features: tokenize them and build the token vocabularies.Since this is not an NLP course, we're gonna use simple built-in NLTK tokenization.
###Code
print("Before")
print(data["Title"][::100000])
import nltk
tokenizer = nltk.tokenize.WordPunctTokenizer()
for col in text_columns:
data[col] = data[col].apply(lambda l: ' '.join(tokenizer.tokenize(str(l).lower())))
###Output
_____no_output_____
###Markdown
Now we can assume that our text is a space-separated list of tokens:
###Code
print("After")
print(data["Title"][::100000])
###Output
_____no_output_____
###Markdown
Not all words are equally useful. Some of them are typos or rare words that are only present a few times. Let's see how many times is each word present in the data so that we can build a "white list" of known words.
###Code
from collections import Counter
token_counts = Counter()
# Count how many times does each token occur in "Title" and "FullDescription"
<YOUR CODE HERE>
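# One possible way to fill the placeholder above (an editor sketch, not the official solution):
for col in text_columns:
    for text in data[col]:
        token_counts.update(text.split())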
print("Total unique tokens :", len(token_counts))
print('\n'.join(map(str, token_counts.most_common(n=5))))
print('...')
print('\n'.join(map(str, token_counts.most_common()[-3:])))
assert token_counts.most_common(1)[0][1] in range(2600000, 2700000)
assert len(token_counts) in range(200000, 210000)
print('Correct!')
# Let's see how many words are there for each count
_=plt.hist(list(token_counts.values()), range=[0, 10**4], bins=50, log=True)
plt.xlabel("Counts")
###Output
_____no_output_____
###Markdown
__Task 1.1__ Get a list of all tokens that occur at least 10 times.
###Code
min_count = 10
# tokens from token_counts keys that had at least min_count occurrences throughout the dataset
tokens = <YOUR CODE HERE>
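# One possible answer (editor sketch, not the official solution):
tokens = sorted(t for t, c in token_counts.items() if c >= min_count)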
# Add a special tokens for unknown and empty words
UNK, PAD = "UNK", "PAD"
tokens = [UNK, PAD] + tokens
print("Tokens left:", len(tokens))
assert type(tokens)==list
assert len(tokens) in range(32000,35000)
assert 'me' in tokens
assert UNK in tokens
print("Correct!")
###Output
_____no_output_____
###Markdown
__Task 1.2__ Build an inverse token index: a dictionary from token (string) to its index in `tokens` (int)
###Code
token_to_id = <your code here>
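# One possible answer (editor sketch, not the official solution):
token_to_id = {tok: i for i, tok in enumerate(tokens)}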
assert isinstance(token_to_id, dict)
assert len(token_to_id) == len(tokens)
for tok in tokens:
assert tokens[token_to_id[tok]] == tok
print("Correct!")
###Output
_____no_output_____
###Markdown
And finally, let's use the vocabulary you've built to map text lines into torch-digestible matrices.
###Code
UNK_IX, PAD_IX = map(token_to_id.get, [UNK, PAD])
def as_matrix(sequences, max_len=None):
""" Convert a list of tokens into a matrix with padding """
if isinstance(sequences[0], str):
sequences = list(map(str.split, sequences))
max_len = min(max(map(len, sequences)), max_len or float('inf'))
matrix = np.full((len(sequences), max_len), np.int32(PAD_IX))
for i,seq in enumerate(sequences):
row_ix = [token_to_id.get(word, UNK_IX) for word in seq[:max_len]]
matrix[i, :len(row_ix)] = row_ix
return matrix
print("Lines:")
print('\n'.join(data["Title"][::100000].values), end='\n\n')
print("Matrix:")
print(as_matrix(data["Title"][::100000]))
###Output
_____no_output_____
###Markdown
Now let's encode the categorical data we have.As usual, we shall use one-hot encoding for simplicity. Kudos if you implement tf-idf, target averaging or pseudo-counter-based encoding.
###Code
from sklearn.feature_extraction import DictVectorizer
# we only consider top-1k most frequent companies to minimize memory usage
top_companies, top_counts = zip(*Counter(data['Company']).most_common(1000))
recognized_companies = set(top_companies)
data["Company"] = data["Company"].apply(lambda comp: comp if comp in recognized_companies else "Other")
categorical_vectorizer = DictVectorizer(dtype=np.float32, sparse=False)
categorical_vectorizer.fit(data[categorical_columns].apply(dict, axis=1))
###Output
_____no_output_____
###Markdown
The data science partOnce we've learned to tokenize the data, let's design a machine learning experiment.As before, we won't focus too much on validation, opting for a simple train-test split.__To be completely rigorous,__ we've committed a small crime here: we used the whole data for tokenization and vocabulary building. A stricter way would be to do that part on the training set only. You may want to do that and measure the magnitude of changes.
###Code
from sklearn.model_selection import train_test_split
data_train, data_val = train_test_split(data, test_size=0.1, random_state=42)
print("Train size = ", len(data_train))
print("Validation size = ", len(data_val))
def generate_batch(data, batch_size=None, replace=True, max_len=None):
"""
Creates a pytorch-friendly dict from the batch data.
:returns: a dict with {'title' : int64[batch, title_max_len]
"""
if batch_size is not None:
data = data.sample(batch_size, replace=replace)
batch = {}
for col in text_columns:
batch[col] = as_matrix(data[col].values, max_len)
batch['Categorical'] = categorical_vectorizer.transform(data[categorical_columns].apply(dict, axis=1))
if target_column in data.columns:
batch[target_column] = data[target_column].values
return batch
generate_batch(data_train, 3, max_len=10)
###Output
_____no_output_____
###Markdown
Finally, let's talk deep learningOur model consists of three branches:* Title encoder* Description encoder* Categorical features encoderWe will then feed all 3 branches into one common network that predicts salary. By default, both text vectorizers shall use 1d convolutions, followed by global pooling over time.
###Code
import torch, torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class GlobalMaxPooling(nn.Module):
def __init__(self, dim=-1):
super(self.__class__, self).__init__()
self.dim = dim
def forward(self, x):
return x.max(dim=self.dim)[0]
class TitleEncoder(nn.Module):
def __init__(self, n_tokens=len(tokens), out_size=64):
"""
A simple sequential encoder for titles.
x -> emb -> conv -> global_max -> relu -> dense
"""
super(self.__class__, self).__init__()
self.emb = nn.Embedding(n_tokens, 64, padding_idx=PAD_IX)
self.conv1 = nn.Conv1d(64, out_size, kernel_size=3, padding=1)
self.pool1 = GlobalMaxPooling()
self.dense = nn.Linear(out_size, out_size)
def forward(self, text_ix):
"""
:param text_ix: int64 Variable of shape [batch_size, max_len]
:returns: float32 Variable of shape [batch_size, out_size]
"""
h = self.emb(text_ix)
# we transpose from [batch, time, units] to [batch, units, time] to fit Conv1d dim order
h = torch.transpose(h, 1, 2)
# Apply the layers as defined above. Add some ReLUs before dense.
<YOUR CODE>
return <YOUR CODE>
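        # One possible completion (editor sketch, not the reference solution):
        #   h = F.relu(self.conv1(h))    # [batch, out_size, time]
        #   h = self.pool1(h)            # [batch, out_size]
        #   return F.relu(self.dense(h))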
title_encoder = TitleEncoder(out_size=64)
dummy_x = Variable(torch.LongTensor(generate_batch(data_train, 3)['Title']))
dummy_v = title_encoder(dummy_x)
assert isinstance(dummy_v, Variable)
assert tuple(dummy_v.shape) == (dummy_x.shape[0], 64)
del title_encoder
print("Seems fine")
###Output
_____no_output_____
###Markdown
__Task 2.1__ Create description encoder
###Code
# Define an encoder for job descriptions.
# Use any means you want so long as it's torch.nn.Module.
<YOUR CODE HERE>
desc_encoder = <Create description encoder>
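# Editor sketch of one possible description encoder; the class name below is illustrative and
# not part of the original assignment. It reuses the TitleEncoder pattern with a wider kernel.
class DescriptionEncoderSketch(nn.Module):
    def __init__(self, n_tokens=len(tokens), out_size=64):
        super(self.__class__, self).__init__()
        self.emb = nn.Embedding(n_tokens, 64, padding_idx=PAD_IX)
        self.conv1 = nn.Conv1d(64, out_size, kernel_size=5, padding=2)
        self.pool1 = GlobalMaxPooling()
        self.dense = nn.Linear(out_size, out_size)
    def forward(self, text_ix):
        h = self.emb(text_ix)
        h = torch.transpose(h, 1, 2)      # [batch, emb, time] for Conv1d
        h = F.relu(self.conv1(h))
        h = self.pool1(h)                 # global max over time -> [batch, out_size]
        return F.relu(self.dense(h))
# e.g.: desc_encoder = DescriptionEncoderSketch(out_size=64)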
dummy_x = Variable(torch.LongTensor(generate_batch(data_train, 3)['FullDescription']))
dummy_v = desc_encoder(dummy_x)
assert isinstance(dummy_v, Variable)
assert tuple(dummy_v.shape) == (dummy_x.shape[0], 64)
del desc_encoder
print("Seems fine too")
###Output
_____no_output_____
###Markdown
__Task 2.2__ Build one network ~~to rule them all~~
###Code
class FullNetwork(nn.Module):
"""
This class does all the steps from (title, desc, categorical) features -> predicted target
It unites title & desc encoders you defined above as long as some layers for head and categorical branch.
"""
def __init__(self, n_tokens=len(tokens), n_cat_features=len(categorical_vectorizer.vocabulary_)):
super(self.__class__, self).__init__()
self.title_encoder = TitleEncoder(out_size=64)
self.desc_encoder = <YOUR CODE>
# define layers for categorical features. A few dense layers would do.
<YOUR CODE>
# define "output" layers that process depend the three encoded vectors into answer
<YOUR CODE>
def forward(self, title_ix, desc_ix, cat_features):
"""
:param title_ix: int32 Variable [batch, title_len], job titles encoded by as_matrix
:param desc_ix: int32 Variable [batch, desc_len] , job descriptions encoded by as_matrix
:param cat_features: float32 Variable [batch, n_cat_features]
:returns: float32 Variable 1d [batch], predicted log1p-salary
"""
# process each data source with it's respective encoder
title_h = self.title_encoder(title_ix)
desc_h = <YOUR CODE>
# apply categorical encoder
cat_h = <YOUR CODE>
# concatenate all vectors together...
joint_h = torch.cat([title_h, desc_h, cat_h], dim=1)
# ... and stack a few more layers at the top
<YOUR CODE>
# Note 1: do not forget to select first columns, [:, 0], to get to 1d outputs
# Note 2: please do not use output nonlinearities.
return <YOUR CODE>
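        # One possible completion (editor sketch with illustrative names, not the reference solution):
        #   in __init__:
        #       self.desc_encoder = DescriptionEncoderSketch(out_size=64)
        #       self.cat_layers = nn.Sequential(nn.Linear(n_cat_features, 64), nn.ReLU(),
        #                                       nn.Linear(64, 64), nn.ReLU())
        #       self.head = nn.Sequential(nn.Linear(64 * 3, 128), nn.ReLU(), nn.Linear(128, 1))
        #   in forward:
        #       desc_h = self.desc_encoder(desc_ix)
        #       cat_h = self.cat_layers(cat_features)
        #       joint_h = torch.cat([title_h, desc_h, cat_h], dim=1)
        #       return self.head(joint_h)[:, 0]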
model = FullNetwork()
opt = torch.optim.Adam(model.parameters(), lr=1e-3)
# test it on one batch
batch = generate_batch(data_train, 32)
title_ix = Variable(torch.LongTensor(batch["Title"]))
desc_ix = Variable(torch.LongTensor(batch["FullDescription"]))
cat_features = Variable(torch.FloatTensor(batch["Categorical"]))
reference = Variable(torch.FloatTensor(batch[target_column]))
prediction = model(title_ix, desc_ix, cat_features)
assert len(prediction.shape) == 1 and prediction.shape[0] == title_ix.shape[0]
def compute_loss(reference, prediction):
"""
Computes objective for minimization.
By deafult we minimize MSE, but you are encouraged to try mix up MSE, MAE, huber loss, etc.
"""
return torch.mean((prediction - reference) ** 2)
def compute_mae(reference, prediction):
    """ Compute MAE on actual salary, assuming your model outputs log1p(salary)"""
    # log1p is inverted with expm1 (exp(x) - 1), not exp(x - 1)
    return torch.abs(torch.expm1(reference) - torch.expm1(prediction)).mean()
loss = compute_loss(reference, prediction)
dummy_grads = torch.autograd.grad(loss, model.parameters(), retain_graph=True)
for grad in dummy_grads:
assert grad is not None and not (grad == 0).all(), "Some model parameters received zero grads. " \
"Double-check that your model uses all it's layers."
###Output
_____no_output_____
###Markdown
Let's train it!
###Code
from tqdm import tnrange
def iterate_minibatches(data, batch_size=32, max_len=None,
max_batches=None, shuffle=True, verbose=True):
indices = np.arange(len(data))
if shuffle:
indices = np.random.permutation(indices)
if max_batches is not None:
indices = indices[: batch_size * max_batches]
irange = tnrange if verbose else range
for start in irange(0, len(indices), batch_size):
yield generate_batch(data.iloc[indices[start : start + batch_size]], max_len=max_len)
num_epochs = 100
max_len = 100
batch_size = 32
batches_per_epoch = 100
for epoch_i in range(num_epochs):
print("Training:")
train_loss = train_mae = train_batches = 0
model.train(True)
for batch in iterate_minibatches(data_train, max_batches=batches_per_epoch):
title_ix = Variable(torch.LongTensor(batch["Title"]))
desc_ix = Variable(torch.LongTensor(batch["FullDescription"]))
cat_features = Variable(torch.FloatTensor(batch["Categorical"]))
reference = Variable(torch.FloatTensor(batch[target_column]))
prediction = model(title_ix, desc_ix, cat_features)
loss = compute_loss(reference, prediction)
loss.backward()
opt.step()
opt.zero_grad()
train_loss += loss.data.numpy()
train_mae += compute_mae(reference, prediction).data.numpy()
train_batches += 1
print("\tLoss:\t%.5f" % (train_loss / train_batches))
print("\tMAE:\t%.5f" % (train_mae / train_batches))
print('\n\n')
print("Validation:")
val_loss = val_mae = val_batches = 0
model.train(False)
with torch.no_grad():
for batch in iterate_minibatches(data_val, shuffle=False):
title_ix = Variable(torch.LongTensor(batch["Title"]))
desc_ix = Variable(torch.LongTensor(batch["FullDescription"]))
cat_features = Variable(torch.FloatTensor(batch["Categorical"]))
reference = Variable(torch.FloatTensor(batch[target_column]))
prediction = model(title_ix, desc_ix, cat_features)
loss = compute_loss(reference, prediction)
val_loss += loss.data.numpy()
val_mae += compute_mae(reference, prediction).data.numpy()
val_batches += 1
print("\tLoss:\t%.5f" % (val_loss / val_batches))
print("\tMAE:\t%.5f" % (val_mae / val_batches))
print('\n\n')
print("Final eval:")
val_loss = val_mae = val_batches = 0
with torch.no_grad():
for batch in iterate_minibatches(data_val, shuffle=False):
title_ix = Variable(torch.LongTensor(batch["Title"]))
desc_ix = Variable(torch.LongTensor(batch["FullDescription"]))
cat_features = Variable(torch.FloatTensor(batch["Categorical"]))
reference = Variable(torch.FloatTensor(batch[target_column]))
prediction = model(title_ix, desc_ix, cat_features)
loss = compute_loss(reference, prediction)
val_loss += loss.data.numpy()
val_mae += compute_mae(reference, prediction).data.numpy()
val_batches += 1
print("\tLoss:\t%.5f" % (val_loss / val_batches))
print("\tMAE:\t%.5f" % (val_mae / val_batches))
print('\n\n')
###Output
_____no_output_____
###Markdown
Natural Language Processing with Deep Learning (7 points) Today we're gonna apply the newly learned DL tools for sequence processing to the task of predicting job salary. Special thanks to [Oleg Vasilev](https://github.com/Omrigan/) for the assignment core (originally written for theano/tensorflow).
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
###Output
_____no_output_____
###Markdown
About the challenge. For starters, let's download the data from __[here](https://yadi.sk/d/vVEOWPFY3NruT7)__. You can also get it from the competition [page](https://www.kaggle.com/c/job-salary-prediction/data) (in that case, pick `Train_rev1.*`). Our task is to predict one number, __SalaryNormalized__, in the sense of minimizing __Mean Absolute Error__. To do so, our model can access a number of features:* Free text: __`Title`__ and __`FullDescription`__* Categorical: __`Category`__, __`Company`__, __`LocationNormalized`__, __`ContractType`__, and __`ContractTime`__. You can read more [in the official description](https://www.kaggle.com/c/job-salary-predictiondescription).
###Code
data = pd.read_csv("./Train_rev1.csv", index_col=None)
data['Log1pSalary'] = np.log1p(data['SalaryNormalized']).astype('float32')
text_columns = ["Title", "FullDescription"]
categorical_columns = ["Category", "Company", "LocationNormalized", "ContractType", "ContractTime"]
target_column = "Log1pSalary"
data[categorical_columns] = data[categorical_columns].fillna('NaN') # cast nan to string
data.sample(3)
###Output
_____no_output_____
###Markdown
The NLP part. To even begin training our neural network, we're gonna need to preprocess the text features: tokenize them and build the token vocabularies. Since this is not an NLP course, we're gonna use simple built-in NLTK tokenization.
###Code
print("Before")
print(data["Title"][::100000])
import nltk
tokenizer = nltk.tokenize.WordPunctTokenizer()
for col in text_columns:
data[col] = data[col].apply(lambda l: ' '.join(tokenizer.tokenize(str(l).lower())))
###Output
_____no_output_____
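###Markdown
To see what `WordPunctTokenizer` actually does, here is a quick check on a made-up raw title (the sample string is illustrative, not taken from the dataset):
###Code
sample = "Senior C++/Java Developer (London) - £45K"
print(tokenizer.tokenize(sample.lower()))
# the tokenizer splits on word/punctuation boundaries, so 'c++/java' becomes ['c', '++/', 'java']
# and '£45k' becomes ['£', '45k'] -- which is why we lowercase and re-join with spaces above
###Output
_____no_output_____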
###Markdown
Now we can assume that our text is a space-separated list of tokens:
###Code
print("After")
print(data["Title"][::100000])
###Output
_____no_output_____
###Markdown
Not all words are equally useful. Some of them are typos or rare words that are only present a few times. Let's see how many times each word is present in the data so that we can build a "white list" of known words.
###Code
from collections import Counter
token_counts = Counter()
# Count how many times does each token occur in "Title" and "FullDescription"
<YOUR CODE HERE>
print("Total unique tokens :", len(token_counts))
print('\n'.join(map(str, token_counts.most_common(n=5))))
print('...')
print('\n'.join(map(str, token_counts.most_common()[-3:])))
assert token_counts.most_common(1)[0][1] in range(2600000, 2700000)
assert len(token_counts) in range(200000, 210000)
print('Correct!')
# Let's see how many words are there for each count
_=plt.hist(list(token_counts.values()), range=[0, 10**4], bins=50, log=True)
plt.xlabel("Counts")
###Output
_____no_output_____
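###Markdown
A sketch of one possible way to fill in the counting cell above (updating the counter with whitespace-split tokens of both text columns; other equivalent approaches work just as well):
###Code
token_counts = Counter()
for col in text_columns:
    for text in data[col]:
        token_counts.update(text.split())
print("Total unique tokens:", len(token_counts))
###Output
_____no_output_____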
###Markdown
__Task 1.1__ Get a list of all tokens that occur at least 10 times.
###Code
min_count = 10
# tokens from token_counts keys that had at least min_count occurrences throughout the dataset
tokens = <YOUR CODE HERE>
# Add special tokens for unknown words and padding
UNK, PAD = "UNK", "PAD"
tokens = [UNK, PAD] + tokens
print("Tokens left:", len(tokens))
assert type(tokens)==list
assert len(tokens) in range(32000,35000)
assert 'me' in tokens
assert UNK in tokens
print("Correct!")
###Output
_____no_output_____
###Markdown
__Task 1.2__ Build an inverse token index: a dictionary from token (string) to its index in `tokens` (int).
###Code
token_to_id = <your code here>
assert isinstance(token_to_id, dict)
assert len(token_to_id) == len(tokens)
for tok in tokens:
assert tokens[token_to_id[tok]] == tok
print("Correct!")
###Output
_____no_output_____
###Markdown
And finally, let's use the vocabulary you've built to map text lines into torch-digestible matrices.
###Code
UNK_IX, PAD_IX = map(token_to_id.get, [UNK, PAD])
def as_matrix(sequences, max_len=None):
""" Convert a list of tokens into a matrix with padding """
if isinstance(sequences[0], str):
sequences = list(map(str.split, sequences))
max_len = min(max(map(len, sequences)), max_len or float('inf'))
matrix = np.full((len(sequences), max_len), np.int32(PAD_IX))
for i,seq in enumerate(sequences):
row_ix = [token_to_id.get(word, UNK_IX) for word in seq[:max_len]]
matrix[i, :len(row_ix)] = row_ix
return matrix
#### print("Lines:")
print('\n'.join(data["Title"][::100000].values), end='\n\n')
print("Matrix:")
print(as_matrix(data["Title"][::100000]))
###Output
_____no_output_____
###Markdown
Now let's encode the categorical data we have. As usual, we shall use one-hot encoding for simplicity. Kudos if you implement tf-idf, target averaging or pseudo-counter-based encoding.
###Code
from sklearn.feature_extraction import DictVectorizer
# we only consider top-1k most frequent companies to minimize memory usage
top_companies, top_counts = zip(*Counter(data['Company']).most_common(1000))
recognized_companies = set(top_companies)
data["Company"] = data["Company"].apply(lambda comp: comp if comp in recognized_companies else "Other")
categorical_vectorizer = DictVectorizer(dtype=np.float32, sparse=False)
categorical_vectorizer.fit(data[categorical_columns].apply(dict, axis=1))
###Output
_____no_output_____
###Markdown
The data science part. Once we've learned to tokenize the data, let's design a machine learning experiment. As before, we won't focus too much on validation, opting for a simple train-test split. __To be completely rigorous,__ we've committed a small crime here: we used the whole data for tokenization and vocabulary building. A stricter way would be to do that part on the training set only. You may want to do that and measure the magnitude of changes.
###Code
from sklearn.model_selection import train_test_split
data_train, data_val = train_test_split(data, test_size=0.1, random_state=42)
print("Train size = ", len(data_train))
print("Validation size = ", len(data_val))
def generate_batch(data, batch_size=None, replace=True, max_len=None):
"""
Creates a pytorch-friendly dict from the batch data.
    :returns: a dict with the text columns as int64 [batch, max_len] matrices, 'Categorical' as float32 [batch, n_cat_features], and the target column values if present
"""
if batch_size is not None:
data = data.sample(batch_size, replace=replace)
batch = {}
for col in text_columns:
batch[col] = as_matrix(data[col].values, max_len)
batch['Categorical'] = categorical_vectorizer.transform(data[categorical_columns].apply(dict, axis=1))
if target_column in data.columns:
batch[target_column] = data[target_column].values
return batch
generate_batch(data_train, 3, max_len=10)
###Output
_____no_output_____
###Markdown
Finally, let's talk deep learning. Our model consists of three branches:* Title encoder* Description encoder* Categorical features encoder. We will then feed all 3 branches into one common network that predicts salary. By default, both text vectorizers shall use 1d convolutions, followed by global pooling over time.
###Code
import torch, torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class GlobalMaxPooling(nn.Module):
def __init__(self, dim=-1):
super(self.__class__, self).__init__()
self.dim = dim
def forward(self, x):
return x.max(dim=self.dim)[0]
class TitleEncoder(nn.Module):
def __init__(self, n_tokens=len(tokens), out_size=64):
"""
A simple sequential encoder for titles.
x -> emb -> conv -> global_max -> relu -> dense
"""
super(self.__class__, self).__init__()
self.emb = nn.Embedding(n_tokens, 64, padding_idx=PAD_IX)
self.conv1 = nn.Conv1d(64, out_size, kernel_size=3, padding=1)
self.pool1 = GlobalMaxPooling()
self.dense = nn.Linear(out_size, out_size)
def forward(self, text_ix):
"""
:param text_ix: int64 Variable of shape [batch_size, max_len]
:returns: float32 Variable of shape [batch_size, out_size]
"""
h = self.emb(text_ix)
# we transpose from [batch, time, units] to [batch, units, time] to fit Conv1d dim order
h = torch.transpose(h, 1, 2)
# Apply the layers as defined above. Add some ReLUs before dense.
<YOUR CODE>
return <YOUR CODE>
title_encoder = TitleEncoder(out_size=64)
dummy_x = Variable(torch.LongTensor(generate_batch(data_train, 3)['Title']))
dummy_v = title_encoder(dummy_x)
assert isinstance(dummy_v, Variable)
assert tuple(dummy_v.shape) == (dummy_x.shape[0], 64)
del title_encoder
print("Seems fine")
###Output
_____no_output_____
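###Markdown
If the transpose / pooling bookkeeping above feels opaque, here is a standalone shape walk-through with random tensors (sizes are illustrative and independent of the encoder above):
###Code
import torch, torch.nn as nn
emb = nn.Embedding(100, 64, padding_idx=1)
conv = nn.Conv1d(64, 32, kernel_size=3, padding=1)
ix = torch.randint(0, 100, (4, 7))   # [batch=4, time=7] token indices
h = emb(ix)                          # [4, 7, 64]  (batch, time, units)
h = h.transpose(1, 2)                # [4, 64, 7]  (batch, units, time) as Conv1d expects
h = conv(h)                          # [4, 32, 7]
pooled = h.max(dim=-1)[0]            # [4, 32]     global max over the time axis
print(h.shape, pooled.shape)
###Output
_____no_output_____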
###Markdown
__Task 2.1__ Create description encoder
###Code
# Define an encoder for job descriptions.
# Use any means you want so long as it's torch.nn.Module.
<YOUR CODE HERE>
desc_encoder = <Create description encoder>
dummy_x = Variable(torch.LongTensor(generate_batch(data_train, 3)['FullDescription']))
dummy_v = desc_encoder(dummy_x)
assert isinstance(dummy_v, Variable)
assert tuple(dummy_v.shape) == (dummy_x.shape[0], 64)
del desc_encoder
print("Seems fine too")
###Output
_____no_output_____
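###Markdown
A minimal sketch of one way to approach Task 2.1, assuming you follow the same emb -> conv -> pool -> relu -> dense recipe as the title encoder (the class name, kernel size and layer widths are illustrative, not the graded solution):
###Code
class SimpleDescEncoder(nn.Module):
    """ Hypothetical example encoder; any nn.Module producing a [batch, 64] output works. """
    def __init__(self, n_tokens=len(tokens), out_size=64):
        super().__init__()
        self.emb = nn.Embedding(n_tokens, 64, padding_idx=PAD_IX)
        self.conv = nn.Conv1d(64, out_size, kernel_size=5, padding=2)
        self.pool = GlobalMaxPooling()
        self.dense = nn.Linear(out_size, out_size)
    def forward(self, text_ix):
        h = self.emb(text_ix)       # [batch, time, 64]
        h = h.transpose(1, 2)       # [batch, 64, time]
        h = F.relu(self.conv(h))    # [batch, out_size, time]
        h = self.pool(h)            # [batch, out_size]
        return self.dense(h)
###Output
_____no_output_____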
###Markdown
__Task 2.2__ Build one network ~~to rule them all~~
###Code
class FullNetwork(nn.Module):
"""
This class does all the steps from (title, desc, categorical) features -> predicted target
    It unites the title & desc encoders you defined above, as well as some layers for the head and the categorical branch.
"""
def __init__(self, n_tokens=len(tokens), n_cat_features=len(categorical_vectorizer.vocabulary_)):
super(self.__class__, self).__init__()
self.title_encoder = TitleEncoder(out_size=64)
self.desc_encoder = <YOUR CODE>
# define layers for categorical features. A few dense layers would do.
<YOUR CODE>
# define "output" layers that process depend the three encoded vectors into answer
<YOUR CODE>
def forward(self, title_ix, desc_ix, cat_features):
"""
:param title_ix: int32 Variable [batch, title_len], job titles encoded by as_matrix
:param desc_ix: int32 Variable [batch, desc_len] , job descriptions encoded by as_matrix
:param cat_features: float32 Variable [batch, n_cat_features]
:returns: float32 Variable 1d [batch], predicted log1p-salary
"""
        # process each data source with its respective encoder
title_h = self.title_encoder(title_ix)
desc_h = <YOUR CODE>
# apply categorical encoder
cat_h = <YOUR CODE>
# concatenate all vectors together...
joint_h = torch.cat([title_h, desc_h, cat_h], dim=1)
# ... and stack a few more layers at the top
<YOUR CODE>
# Note 1: do not forget to select first columns, [:, 0], to get to 1d outputs
# Note 2: please do not use output nonlinearities.
return <YOUR CODE>
model = FullNetwork()
opt = torch.optim.Adam(model.parameters(), lr=1e-3)
# test it on one batch
batch = generate_batch(data_train, 32)
title_ix = Variable(torch.LongTensor(batch["Title"]))
desc_ix = Variable(torch.LongTensor(batch["FullDescription"]))
cat_features = Variable(torch.FloatTensor(batch["Categorical"]))
reference = Variable(torch.FloatTensor(batch[target_column]))
prediction = model(title_ix, desc_ix, cat_features)
assert len(prediction.shape) == 1 and prediction.shape[0] == title_ix.shape[0]
def compute_loss(reference, prediction):
"""
Computes objective for minimization.
    By default we minimize MSE, but you are encouraged to try mixing in MAE, Huber loss, etc.
"""
return torch.mean((prediction - reference) ** 2)
def compute_mae(reference, prediction):
""" Compute MAE on actual salary, assuming your model outputs log1p(salary)"""
    return torch.abs(torch.expm1(reference) - torch.expm1(prediction)).mean()  # expm1 inverts the log1p transform
loss = compute_loss(reference, prediction)
dummy_grads = torch.autograd.grad(loss, model.parameters(), retain_graph=True)
for grad in dummy_grads:
    assert grad is not None and not (grad == 0).all(), "Some model parameters received zero grads. " \
        "Double-check that your model uses all its layers."
###Output
_____no_output_____
###Markdown
Let's train it!
###Code
from tqdm import tnrange
def iterate_minibatches(data, batch_size=32, max_len=None,
max_batches=None, shuffle=True, verbose=True):
indices = np.arange(len(data))
if shuffle:
indices = np.random.permutation(indices)
if max_batches is not None:
indices = indices[: batch_size * max_batches]
irange = tnrange if verbose else range
for start in irange(0, len(indices), batch_size):
yield generate_batch(data.iloc[indices[start : start + batch_size]], max_len=max_len)
num_epochs = 100
max_len = 100
batch_size = 32
batches_per_epoch = 100
for epoch_i in range(num_epochs):
print("Training:")
train_loss = train_mae = train_batches = 0
model.train(True)
for batch in iterate_minibatches(data_train, max_batches=batches_per_epoch):
title_ix = Variable(torch.LongTensor(batch["Title"]))
desc_ix = Variable(torch.LongTensor(batch["FullDescription"]))
cat_features = Variable(torch.FloatTensor(batch["Categorical"]))
reference = Variable(torch.FloatTensor(batch[target_column]))
prediction = model(title_ix, desc_ix, cat_features)
loss = compute_loss(reference, prediction)
loss.backward()
opt.step()
opt.zero_grad()
train_loss += loss.data.numpy()
train_mae += compute_mae(reference, prediction).data.numpy()
train_batches += 1
print("\tLoss:\t%.5f" % (train_loss / train_batches))
print("\tMAE:\t%.5f" % (train_mae / train_batches))
print('\n\n')
print("Validation:")
val_loss = val_mae = val_batches = 0
model.train(False)
with torch.no_grad():
for batch in iterate_minibatches(data_val, shuffle=False):
title_ix = Variable(torch.LongTensor(batch["Title"]))
desc_ix = Variable(torch.LongTensor(batch["FullDescription"]))
cat_features = Variable(torch.FloatTensor(batch["Categorical"]))
reference = Variable(torch.FloatTensor(batch[target_column]))
prediction = model(title_ix, desc_ix, cat_features)
loss = compute_loss(reference, prediction)
val_loss += loss.data.numpy()
val_mae += compute_mae(reference, prediction).data.numpy()
val_batches += 1
print("\tLoss:\t%.5f" % (val_loss / val_batches))
print("\tMAE:\t%.5f" % (val_mae / val_batches))
print('\n\n')
print("Final eval:")
val_loss = val_mae = val_batches = 0
with torch.no_grad():
for batch in iterate_minibatches(data_val, shuffle=False):
title_ix = Variable(torch.LongTensor(batch["Title"]))
desc_ix = Variable(torch.LongTensor(batch["FullDescription"]))
cat_features = Variable(torch.FloatTensor(batch["Categorical"]))
reference = Variable(torch.FloatTensor(batch[target_column]))
prediction = model(title_ix, desc_ix, cat_features)
loss = compute_loss(reference, prediction)
val_loss += loss.data.numpy()
val_mae += compute_mae(reference, prediction).data.numpy()
val_batches += 1
print("\tLoss:\t%.5f" % (val_loss / val_batches))
print("\tMAE:\t%.5f" % (val_mae / val_batches))
print('\n\n')
###Output
_____no_output_____
###Markdown
Natural Language Processing with Deep Learning (7 points) Today we're gonna apply the newly learned DL tools for sequence processing to the task of predicting job salary. Special thanks to [Oleg Vasilev](https://github.com/Omrigan/) for the assignment core (originally written for theano/tensorflow).
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
from google.colab import drive
drive.mount('/content/drive', force_remount=True)
path = '/content/drive/My Drive/PracticalDL/week05_nlp/'
###Output
Mounted at /content/drive
###Markdown
About the challenge. For starters, let's download the data from __[here](https://yadi.sk/d/vVEOWPFY3NruT7)__. You can also get it from the competition [page](https://www.kaggle.com/c/job-salary-prediction/data) (in that case, pick `Train_rev1.*`). Our task is to predict one number, __SalaryNormalized__, in the sense of minimizing __Mean Absolute Error__. To do so, our model can access a number of features:* Free text: __`Title`__ and __`FullDescription`__* Categorical: __`Category`__, __`Company`__, __`LocationNormalized`__, __`ContractType`__, and __`ContractTime`__. You can read more [in the official description](https://www.kaggle.com/c/job-salary-predictiondescription).
###Code
data = pd.read_csv(path + "Train_rev1.csv", index_col=None)
data['Log1pSalary'] = np.log1p(data['SalaryNormalized']).astype('float32')
text_columns = ["Title", "FullDescription"]
categorical_columns = ["Category", "Company", "LocationNormalized", "ContractType", "ContractTime"]
target_column = "Log1pSalary"
data[categorical_columns] = data[categorical_columns].fillna('NaN') # cast nan to string
data.sample(3)
###Output
_____no_output_____
###Markdown
The NLP part. To even begin training our neural network, we're gonna need to preprocess the text features: tokenize them and build the token vocabularies. Since this is not an NLP course, we're gonna use simple built-in NLTK tokenization.
###Code
print("Before")
print(data["Title"][::100000])
import nltk
tokenizer = nltk.tokenize.WordPunctTokenizer()
for col in text_columns:
data[col] = data[col].apply(lambda l: ' '.join(tokenizer.tokenize(str(l).lower())))
###Output
_____no_output_____
###Markdown
Now we can assume that our text is a space-separated list of tokens:
###Code
print("After")
print(data["Title"][::100000])
print(type(data["Title"]))
###Output
After
0 engineering systems analyst
100000 hr assistant
200000 senior ec & i engineer
Name: Title, dtype: object
<class 'pandas.core.series.Series'>
###Markdown
Not all words are equally useful. Some of them are typos or rare words that are only present a few times. Let's see how many times each word is present in the data so that we can build a "white list" of known words.
###Code
from collections import Counter
token_counts = Counter()
# Count how many times does each token occur in "Title" and "FullDescription"
full_list = data["Title"].tolist()
descriptions = data["FullDescription"].tolist()
full_list.extend(descriptions)
token_counts = Counter(' '.join(full_list).split())
print("Total unique tokens :", len(token_counts))
print('\n'.join(map(str, token_counts.most_common(n=5))))
print('...')
print('\n'.join(map(str, token_counts.most_common()[-3:])))
assert token_counts.most_common(1)[0][1] in range(2600000, 2700000)
assert len(token_counts) in range(200000, 210000)
print('Correct!')
# Let's see how many words are there for each count
_=plt.hist(list(token_counts.values()), range=[0, 10**4], bins=50, log=True)
plt.xlabel("Counts")
###Output
_____no_output_____
###Markdown
__Task 1.1__ Get a list of all tokens that occur at least 10 times.
###Code
min_count = 10
# tokens from token_counts keys that had at least min_count occurrences throughout the dataset
tokens = list(map(lambda pair: pair[0], filter(lambda x: x[1] >= min_count, token_counts.items())))
# Add special tokens for unknown words and padding
UNK, PAD = "UNK", "PAD"
tokens = [UNK, PAD] + tokens
print("Tokens left:", len(tokens))
assert type(tokens)==list
assert len(tokens) in range(32000,35000)
assert 'me' in tokens
assert UNK in tokens
print("Correct!")
###Output
Tokens left: 34158
Correct!
###Markdown
__Task 1.2__ Build an inverse token index: a dictionary from token (string) to its index in `tokens` (int).
###Code
token_to_id = {}
for i in range(len(tokens)):
token_to_id[tokens[i]] = i
assert isinstance(token_to_id, dict)
assert len(token_to_id) == len(tokens)
for tok in tokens:
assert tokens[token_to_id[tok]] == tok
print("Correct!")
###Output
Correct!
###Markdown
And finally, let's use the vocabulary you've built to map text lines into torch-digestible matrices.
###Code
UNK_IX, PAD_IX = map(token_to_id.get, [UNK, PAD])
def as_matrix(sequences, max_len=None):
""" Convert a list of tokens into a matrix with padding """
if isinstance(sequences[0], str):
sequences = list(map(str.split, sequences))
max_len = min(max(map(len, sequences)), max_len or float('inf'))
matrix = np.full((len(sequences), max_len), np.int32(PAD_IX))
for i,seq in enumerate(sequences):
row_ix = [token_to_id.get(word, UNK_IX) for word in seq[:max_len]]
matrix[i, :len(row_ix)] = row_ix
return matrix
#### print("Lines:")
print('\n'.join(data["Title"][::100000].values), end='\n\n')
print("Matrix:")
print(as_matrix(data["Title"][::100000]))
###Output
engineering systems analyst
hr assistant
senior ec & i engineer
Matrix:
[[ 2 3 4 1 1]
[ 998 176 1 1 1]
[ 18 3472 242 59 6]]
###Markdown
Now let's encode the categorical data we have. As usual, we shall use one-hot encoding for simplicity. Kudos if you implement tf-idf, target averaging or pseudo-counter-based encoding.
###Code
from sklearn.feature_extraction import DictVectorizer
# we only consider top-1k most frequent companies to minimize memory usage
top_companies, top_counts = zip(*Counter(data['Company']).most_common(1000))
recognized_companies = set(top_companies)
data["Company"] = data["Company"].apply(lambda comp: comp if comp in recognized_companies else "Other")
categorical_vectorizer = DictVectorizer(dtype=np.float32, sparse=False)
categorical_vectorizer.fit(data[categorical_columns].apply(dict, axis=1))
###Output
_____no_output_____
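###Markdown
For reference, the "target averaging" option mentioned above could look roughly like this (a sketch only; in a rigorous setup the per-category means must be computed on the training split defined below and then mapped onto validation):
###Code
# illustrative: mean log1p-salary per job category as an extra categorical feature
category_means = data.groupby('Category')['Log1pSalary'].mean()
category_target_enc = data['Category'].map(category_means)
print(category_target_enc.head())
###Output
_____no_output_____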
###Markdown
The data science part. Once we've learned to tokenize the data, let's design a machine learning experiment. As before, we won't focus too much on validation, opting for a simple train-test split. __To be completely rigorous,__ we've committed a small crime here: we used the whole data for tokenization and vocabulary building. A stricter way would be to do that part on the training set only. You may want to do that and measure the magnitude of changes.
###Code
from sklearn.model_selection import train_test_split
data_train, data_val = train_test_split(data, test_size=0.1, random_state=42)
print("Train size = ", len(data_train))
print("Validation size = ", len(data_val))
def generate_batch(data, batch_size=None, replace=True, max_len=None):
"""
Creates a pytorch-friendly dict from the batch data.
    :returns: a dict with the text columns as int64 [batch, max_len] matrices, 'Categorical' as float32 [batch, n_cat_features], and the target column values if present
"""
if batch_size is not None:
data = data.sample(batch_size, replace=replace)
batch = {}
for col in text_columns:
batch[col] = as_matrix(data[col].values, max_len)
batch['Categorical'] = categorical_vectorizer.transform(data[categorical_columns].apply(dict, axis=1))
if target_column in data.columns:
batch[target_column] = data[target_column].values
return batch
generate_batch(data_train, 3, max_len=10)
###Output
_____no_output_____
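###Markdown
If you want to avoid the "small crime" mentioned above, the vocabulary can be rebuilt from the training split alone. A minimal sketch (it reuses `min_count` from earlier and would replace the vocabulary cells above):
###Code
train_counts = Counter()
for col in text_columns:
    for text in data_train[col]:
        train_counts.update(text.split())
train_tokens = [UNK, PAD] + [tok for tok, cnt in train_counts.items() if cnt >= min_count]
print("Train-only vocabulary size:", len(train_tokens))
###Output
_____no_output_____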
###Markdown
Finally, let's talk deep learning. Our model consists of three branches:* Title encoder* Description encoder* Categorical features encoder. We will then feed all 3 branches into one common network that predicts salary. By default, both text vectorizers shall use 1d convolutions, followed by global pooling over time.
###Code
import torch, torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class GlobalMaxPooling(nn.Module):
def __init__(self, dim=-1):
super(self.__class__, self).__init__()
self.dim = dim
def forward(self, x):
return x.max(dim=self.dim)[0]
class AveragePooling(nn.Module):
def __init__(self, dim=-1):
super(self.__class__, self).__init__()
self.dim = dim
def forward(self, x):
        x[x == 1] = 0  # zero out values that are exactly 1 before averaging (a value-based attempt to down-weight padding, not a positional mask)
return torch.mean(x, dim=self.dim)
class TitleEncoder(nn.Module):
def __init__(self, n_tokens=len(tokens), out_size=48):
"""
A simple sequential encoder for titles.
x -> emb -> conv -> global_max -> relu -> dense
"""
super(self.__class__, self).__init__()
self.emb = nn.Embedding(n_tokens, 48, padding_idx=PAD_IX)
self.conv1 = nn.Conv1d(48, out_size, kernel_size=3, padding=1)
self.batchnorm1 = nn.BatchNorm1d(out_size)
self.conv2 = nn.Conv1d(out_size, out_size, kernel_size=3, padding=1)
self.batchnorm2 = nn.BatchNorm1d(out_size)
self.relu1 = nn.ReLU()
self.pool1 = AveragePooling()
self.dense = nn.Linear(out_size, out_size)
def forward(self, text_ix):
"""
:param text_ix: int64 Variable of shape [batch_size, max_len]
:returns: float32 Variable of shape [batch_size, out_size]
"""
h = self.emb(text_ix)
# we transpose from [batch, time, units] to [batch, units, time] to fit Conv1d dim order
h = torch.transpose(h, 1, 2)
# Apply the layers as defined above. Add some ReLUs before dense.
result = self.dense(self.pool1(self.relu1(self.batchnorm2(self.conv2(self.batchnorm1(self.conv1(h)))))))
return result
title_encoder = TitleEncoder(out_size=64)
dummy_x = Variable(torch.LongTensor(generate_batch(data_train, 3)['Title']))
dummy_v = title_encoder(dummy_x)
assert isinstance(dummy_v, Variable)
assert tuple(dummy_v.shape) == (dummy_x.shape[0], 64)
del title_encoder
print("Seems fine")
###Output
Seems fine
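###Markdown
Note that `AveragePooling` above masks by value (any activation exactly equal to 1 is zeroed), not by position. A position-aware alternative, sketched below, averages only over non-PAD timesteps; it needs the padding mask passed in alongside the activations (the function and argument names are illustrative):
###Code
def masked_mean_over_time(h, text_ix, pad_ix=PAD_IX):
    """ h: [batch, units, time] activations; text_ix: [batch, time] token indices. """
    mask = (text_ix != pad_ix).float().unsqueeze(1)            # [batch, 1, time]
    return (h * mask).sum(dim=-1) / mask.sum(dim=-1).clamp(min=1.0)
###Output
_____no_output_____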
###Markdown
__Task 2.1__ Create description encoder
###Code
# Define an encoder for job descriptions.
# Use any means you want so long as it's torch.nn.Module.
class DescriptionEncoder(nn.Module):
def __init__(self, n_tokens=len(tokens), out_size=48):
"""
        A simple sequential encoder for descriptions.
x -> emb -> conv -> global_max -> relu -> dense
"""
super(self.__class__, self).__init__()
self.emb = nn.Embedding(n_tokens, 48, padding_idx=PAD_IX)
self.conv1 = nn.Conv1d(48, out_size, kernel_size=3, padding=1)
self.batchnorm1 = nn.BatchNorm1d(out_size)
self.conv2 = nn.Conv1d(out_size, out_size, kernel_size=3, padding=1)
self.batchnorm2 = nn.BatchNorm1d(out_size)
self.relu1 = nn.ReLU()
self.pool1 = AveragePooling()
self.dense = nn.Linear(out_size, out_size)
def forward(self, text_ix):
"""
:param text_ix: int64 Variable of shape [batch_size, max_len]
:returns: float32 Variable of shape [batch_size, out_size]
"""
h = self.emb(text_ix)
# we transpose from [batch, time, units] to [batch, units, time] to fit Conv1d dim order
h = torch.transpose(h, 1, 2)
# Apply the layers as defined above. Add some ReLUs before dense.
result = self.dense(self.pool1(self.relu1(self.batchnorm2(self.conv2(self.batchnorm1(self.conv1(h)))))))
return result
desc_encoder = DescriptionEncoder(out_size=64)
dummy_x = Variable(torch.LongTensor(generate_batch(data_train, 3)['FullDescription']))
dummy_v = desc_encoder(dummy_x)
assert isinstance(dummy_v, Variable)
assert tuple(dummy_v.shape) == (dummy_x.shape[0], 64)
del desc_encoder
print("Seems fine too")
###Output
Seems fine too
###Markdown
__Task 2.2__ Build one network ~~to rule them all~~
###Code
class FullNetwork(nn.Module):
"""
This class does all the steps from (title, desc, categorical) features -> predicted target
    It unites the title & desc encoders you defined above, as well as some layers for the head and the categorical branch.
"""
def __init__(self, n_tokens=len(tokens), n_cat_features=len(categorical_vectorizer.vocabulary_)):
super(self.__class__, self).__init__()
self.title_encoder = TitleEncoder(out_size=48)
self.desc_encoder = DescriptionEncoder(out_size=48)
# define layers for categorical features. A few dense layers would do.
self.dense1 = nn.Linear(n_cat_features, 128)
self.dropout1 = nn.Dropout(p=0.2)
self.relu1 = nn.ReLU()
self.dense2 = nn.Linear(128, 64)
self.dropout2 = nn.Dropout(p=0.2)
self.relu2 = nn.ReLU()
self.dense3 = nn.Linear(64, 48)
self.relu3 = nn.ReLU()
# define "output" layers that process depend the three encoded vectors into answer
self.dense4 = nn.Linear(144, 48)
self.relu4 = nn.ReLU()
self.dense5 = nn.Linear(48, 1)
def forward(self, title_ix, desc_ix, cat_features):
"""
:param title_ix: int32 Variable [batch, title_len], job titles encoded by as_matrix
:param desc_ix: int32 Variable [batch, desc_len] , job descriptions encoded by as_matrix
:param cat_features: float32 Variable [batch, n_cat_features]
:returns: float32 Variable 1d [batch], predicted log1p-salary
"""
        # process each data source with its respective encoder
title_h = self.title_encoder(title_ix)
desc_h = self.desc_encoder(desc_ix)
# apply categorical encoder
cat_h = self.relu3(self.dense3(self.relu2(self.dropout2(self.dense2(self.relu1(self.dropout1(self.dense1(cat_features))))))))
# concatenate all vectors together...
joint_h = torch.cat([title_h, desc_h, cat_h], dim=1)
# ... and stack a few more layers at the top
result = self.dense5(self.relu4(self.dense4(joint_h)))[:,0]
# Note 1: do not forget to select first columns, [:, 0], to get to 1d outputs
# Note 2: please do not use output nonlinearities.
return result
model = FullNetwork()
opt = torch.optim.Adam(model.parameters(), lr=1e-3)
# test it on one batch
batch = generate_batch(data_train, 32)
title_ix = Variable(torch.LongTensor(batch["Title"]))
desc_ix = Variable(torch.LongTensor(batch["FullDescription"]))
cat_features = Variable(torch.FloatTensor(batch["Categorical"]))
reference = Variable(torch.FloatTensor(batch[target_column]))
prediction = model(title_ix, desc_ix, cat_features)
assert len(prediction.shape) == 1 and prediction.shape[0] == title_ix.shape[0]
def compute_loss(reference, prediction):
"""
Computes objective for minimization.
    By default we minimize MSE, but you are encouraged to try mixing in MAE, Huber loss, etc.
"""
return torch.mean((prediction - reference) ** 2)
def compute_mae(reference, prediction):
""" Compute MAE on actual salary, assuming your model outputs log1p(salary)"""
    return torch.abs(torch.expm1(reference) - torch.expm1(prediction)).mean()  # expm1 inverts the log1p transform
loss = compute_loss(reference, prediction)
dummy_grads = torch.autograd.grad(loss, model.parameters(), retain_graph=True)
for grad in dummy_grads:
    assert grad is not None and not (grad == 0).all(), "Some model parameters received zero grads. " \
        "Double-check that your model uses all its layers."
###Output
_____no_output_____
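###Markdown
The docstring above suggests mixing objectives; one hedged example of such a blend (the weighting is illustrative, not tuned):
###Code
def compute_mixed_loss(reference, prediction, mse_weight=0.5):
    """ Convex combination of MSE and Huber (smooth L1) on the log1p targets. """
    mse = torch.mean((prediction - reference) ** 2)
    huber = F.smooth_l1_loss(prediction, reference)
    return mse_weight * mse + (1.0 - mse_weight) * huber
###Output
_____no_output_____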
###Markdown
Let's train it!
###Code
from tqdm import tnrange
def iterate_minibatches(data, batch_size=32, max_len=None,
max_batches=None, shuffle=True, verbose=True):
indices = np.arange(len(data))
if shuffle:
indices = np.random.permutation(indices)
if max_batches is not None:
indices = indices[: batch_size * max_batches]
irange = tnrange if verbose else range
for start in irange(0, len(indices), batch_size):
yield generate_batch(data.iloc[indices[start : start + batch_size]], max_len=max_len)
num_epochs = 10
max_len = 100
batch_size = 32
batches_per_epoch = 100
for epoch_i in range(num_epochs):
print("Training:")
train_loss = train_mae = train_batches = 0
model.train(True)
for batch in iterate_minibatches(data_train, max_batches=batches_per_epoch):
title_ix = Variable(torch.LongTensor(batch["Title"]))
desc_ix = Variable(torch.LongTensor(batch["FullDescription"]))
cat_features = Variable(torch.FloatTensor(batch["Categorical"]))
reference = Variable(torch.FloatTensor(batch[target_column]))
prediction = model(title_ix, desc_ix, cat_features)
loss = compute_loss(reference, prediction)
loss.backward()
opt.step()
opt.zero_grad()
train_loss += loss.data.numpy()
train_mae += compute_mae(reference, prediction).data.numpy()
train_batches += 1
print("\tLoss:\t%.5f" % (train_loss / train_batches))
print("\tMAE:\t%.5f" % (train_mae / train_batches))
print('\n\n')
print("Validation:")
val_loss = val_mae = val_batches = 0
model.train(False)
with torch.no_grad():
for batch in iterate_minibatches(data_val, shuffle=False):
title_ix = Variable(torch.LongTensor(batch["Title"]))
desc_ix = Variable(torch.LongTensor(batch["FullDescription"]))
cat_features = Variable(torch.FloatTensor(batch["Categorical"]))
reference = Variable(torch.FloatTensor(batch[target_column]))
prediction = model(title_ix, desc_ix, cat_features)
loss = compute_loss(reference, prediction)
val_loss += loss.data.numpy()
val_mae += compute_mae(reference, prediction).data.numpy()
val_batches += 1
print("\tLoss:\t%.5f" % (val_loss / val_batches))
print("\tMAE:\t%.5f" % (val_mae / val_batches))
print('\n\n')
print("Final eval:")
val_loss = val_mae = val_batches = 0
with torch.no_grad():
for batch in iterate_minibatches(data_val, shuffle=False):
title_ix = Variable(torch.LongTensor(batch["Title"]))
desc_ix = Variable(torch.LongTensor(batch["FullDescription"]))
cat_features = Variable(torch.FloatTensor(batch["Categorical"]))
reference = Variable(torch.FloatTensor(batch[target_column]))
prediction = model(title_ix, desc_ix, cat_features)
loss = compute_loss(reference, prediction)
val_loss += loss.data.numpy()
val_mae += compute_mae(reference, prediction).data.numpy()
val_batches += 1
print("\tLoss:\t%.5f" % (val_loss / val_batches))
print("\tMAE:\t%.5f" % (val_mae / val_batches))
print('\n\n')
###Output
Final eval:
###Markdown
Task 3.2: Actually make it work. Your main task is to use some of the tricks you've learned on the network and analyze whether you can improve __validation MAE__. Try __at least 3 options__ from the list below for a passing grade, and feel free to go further if you're into it. A) CNN architecture. All the tricks you know about dense and convolutional neural networks apply here as well.* Dropout. Nuff said.* Batch Norm. This time it's `nn.BatchNorm1d`.* Parallel convolution layers. The idea is that you apply several nn.Conv1d to the same embeddings and concatenate output channels.* More layers, more neurons, ya know... B) Play with pooling. There's more than one way to do max pooling:* Max over time - our `GlobalMaxPooling`* Average over time (excluding PAD)* Softmax-pooling: $$ out_{i} = \sum_t h_{i,t} \cdot \frac{e^{h_{i,t}}}{\sum_\tau e^{h_{i,\tau}}} $$* Attentive pooling: $$ out_{i} = \sum_t h_{i,t} \cdot Attn(h_t) $$, where $$ Attn(h_t) = \frac{e^{NN_{attn}(h_t)}}{\sum_\tau e^{NN_{attn}(h_\tau)}} $$ and $NN_{attn}$ is a small neural network. The optimal score is usually achieved by concatenating several different poolings, including several attentive poolings with different $NN_{attn}$. C) Fun with embeddings. It's not always a good idea to train embeddings from scratch. Here are a few tricks:* Use a pre-trained word2vec from [here](http://ahogrammer.com/2017/01/20/the-list-of-pretrained-word-embeddings/) or [here](http://mccormickml.com/2016/04/12/googles-pretrained-word2vec-model-in-python/).* Start with pre-trained embeddings, then fine-tune them with gradient descent.* Use the same embedding matrix in the title and desc vectorizers. D) Going recurrent. We've already learned that recurrent networks can do cool stuff in sequence modelling. Turns out, they're not useless for classification either. With some tricks, of course...* Like convolutional layers, the LSTM output should be pooled into a fixed-size vector with one of the poolings.* Please bear in mind that while convolution uses [batch, units, time] dim order, recurrent units are built for [batch, time, units]. You may need to `torch.transpose`.* Since you know all the text in advance, use a bidirectional RNN: run one LSTM from left to right, run another in parallel from right to left, and concatenate their output sequences along the unit axis (dim=-1).* It might be a good idea to mix convolutions and recurrent layers differently for title and description. E) Optimizing seriously.* You don't necessarily need 100 epochs. Use early stopping. If you've never done this before, take a look at [keras](https://github.com/keras-team/keras/blob/master/keras/callbacks.pyL461) for inspiration.* In short, train until you notice that validation error stops improving.* Maintain the best-on-validation snapshot via `model.state_dict`.* Plotting learning curves is usually a good idea. A short report: please tell us what you did and how it worked. Two of these options (attentive pooling and the best-on-validation snapshot) are sketched in a cell below for reference.
###Code
###Output
_____no_output_____
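###Markdown
Two of the options above, sketched for reference (layer sizes and names are illustrative, and this is not the graded solution): an attentive pooling module implementing the $Attn(h_t)$ formula, and the best-on-validation snapshot idea from the early-stopping bullet.
###Code
class AttentivePooling(nn.Module):
    """ out_i = sum_t h_{i,t} * softmax_t(NN_attn(h_t)), with NN_attn a small linear layer. """
    def __init__(self, n_units):
        super().__init__()
        self.attn = nn.Linear(n_units, 1)
    def forward(self, h):
        # h: [batch, units, time] -> attention scores over time -> weighted sum over time
        scores = self.attn(h.transpose(1, 2))             # [batch, time, 1]
        weights = torch.softmax(scores, dim=1)            # softmax over the time axis
        return (h * weights.transpose(1, 2)).sum(dim=-1)  # [batch, units]

# best-on-validation snapshot (hypothetical names, to be woven into the training loop above):
# if val_mae / val_batches < best_val_mae:
#     best_val_mae = val_mae / val_batches
#     best_state = {k: v.clone() for k, v in model.state_dict().items()}
# ... and after training: model.load_state_dict(best_state)
###Output
_____no_output_____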
###Markdown
Natural Language Processing with Deep Learning (7 points)Today we're gonna apply the newly learned DL tools for sequence processing to the task of predicting job salary.Special thanks to [Oleg Vasilev](https://github.com/Omrigan/) for the assignment core (orignally written for theano/tensorflow).
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
###Output
_____no_output_____
###Markdown
About the challengeFor starters, let's download the data from __[here](https://yadi.sk/d/vVEOWPFY3NruT7)__.You can also get it from the competition [page](https://www.kaggle.com/c/job-salary-prediction/data) (in that case, pick `Train_rev1.*`).Our task is to predict one number, __SalaryNormalized__, in the sense of minimizing __Mean Absolute Error__.To do so, our model ca access a number of features:* Free text: __`Title`__ and __`FullDescription`__* Categorical: __`Category`__, __`Company`__, __`LocationNormalized`__, __`ContractType`__, and __`ContractTime`__.You can read more [in the official description](https://www.kaggle.com/c/job-salary-predictiondescription).
###Code
data = pd.read_csv("./Train_rev1.csv", index_col=None)
data['Log1pSalary'] = np.log1p(data['SalaryNormalized']).astype('float32')
text_columns = ["Title", "FullDescription"]
categorical_columns = ["Category", "Company", "LocationNormalized", "ContractType", "ContractTime"]
target_column = "Log1pSalary"
data[categorical_columns] = data[categorical_columns].fillna('NaN') # cast nan to string
data.sample(3)
###Output
_____no_output_____
###Markdown
The NLP partTo even begin training our neural network, we're gonna need to preprocess the text features: tokenize it and build the token vocabularies.Since it is not an NLP course, we're gonna use simple built-in NLTK tokenization.
###Code
print("Before")
print(data["Title"][::100000])
import nltk
tokenizer = nltk.tokenize.WordPunctTokenizer()
for col in text_columns:
data[col] = data[col].apply(lambda l: ' '.join(tokenizer.tokenize(str(l).lower())))
###Output
_____no_output_____
###Markdown
Now we can assume that our text is a space-separated list of tokens:
###Code
print("After")
print(data["Title"][::100000])
###Output
_____no_output_____
###Markdown
Not all words are equally useful. Some of them are typos or rare words that are only present a few times. Let's see how many times is each word present in the data so that we can build a "white list" of known words.
###Code
from collections import Counter
token_counts = Counter()
# Count how many times does each token occur in "Title" and "FullDescription"
<YOUR CODE HERE>
print("Total unique tokens :", len(token_counts))
print('\n'.join(map(str, token_counts.most_common(n=5))))
print('...')
print('\n'.join(map(str, token_counts.most_common()[-3:])))
assert token_counts.most_common(1)[0][1] in range(2600000, 2700000)
assert len(token_counts) in range(200000, 210000)
print('Correct!')
# Let's see how many words are there for each count
_=plt.hist(list(token_counts.values()), range=[0, 10**4], bins=50, log=True)
plt.xlabel("Counts")
###Output
_____no_output_____
###Markdown
__Task 1.1__ Get a list of all tokens that occur at least 10 times.
###Code
min_count = 10
# tokens from token_counts keys that had at least min_count occurrences throughout the dataset
tokens = <YOUR CODE HERE>
# Add a special tokens for unknown and empty words
UNK, PAD = "UNK", "PAD"
tokens = [UNK, PAD] + tokens
print("Tokens left:", len(tokens))
assert type(tokens)==list
assert len(tokens) in range(32000,35000)
assert 'me' in tokens
assert UNK in tokens
print("Correct!")
###Output
_____no_output_____
###Markdown
__Task 1.2__ Build an inverse token index: a dictionary from token(string) to it's index in `tokens` (int)
###Code
token_to_id = <your code here>
assert isinstance(token_to_id, dict)
assert len(token_to_id) == len(tokens)
for tok in tokens:
assert tokens[token_to_id[tok]] == tok
print("Correct!")
###Output
_____no_output_____
###Markdown
And finally, let's use the vocabulary you've built to map text lines into torch-digestible matrices.
###Code
UNK_IX, PAD_IX = map(token_to_id.get, [UNK, PAD])
def as_matrix(sequences, max_len=None):
""" Convert a list of tokens into a matrix with padding """
if isinstance(sequences[0], str):
sequences = list(map(str.split, sequences))
max_len = min(max(map(len, sequences)), max_len or float('inf'))
matrix = np.full((len(sequences), max_len), np.int32(PAD_IX))
for i,seq in enumerate(sequences):
row_ix = [token_to_id.get(word, UNK_IX) for word in seq[:max_len]]
matrix[i, :len(row_ix)] = row_ix
return matrix
#### print("Lines:")
print('\n'.join(data["Title"][::100000].values), end='\n\n')
print("Matrix:")
print(as_matrix(data["Title"][::100000]))
###Output
_____no_output_____
###Markdown
Now let's encode the categirical data we have.As usual, we shall use one-hot encoding for simplicity. Kudos if you implement tf-idf, target averaging or pseudo-counter-based encoding.
###Code
from sklearn.feature_extraction import DictVectorizer
# we only consider top-1k most frequent companies to minimize memory usage
top_companies, top_counts = zip(*Counter(data['Company']).most_common(1000))
recognized_companies = set(top_companies)
data["Company"] = data["Company"].apply(lambda comp: comp if comp in recognized_companies else "Other")
categorical_vectorizer = DictVectorizer(dtype=np.float32, sparse=False)
categorical_vectorizer.fit(data[categorical_columns].apply(dict, axis=1))
###Output
_____no_output_____
###Markdown
The data science partOnce we've learned to tokenize the data, let's design a machine learning experiment.As before, we won't focus too much on validation, opting for a simple train-test split.__To be completely rigorous,__ we've comitted a small crime here: we used the whole data for tokenization and vocabulary building. A more strict way would be to do that part on training set only. You may want to do that and measure the magnitude of changes.
###Code
from sklearn.model_selection import train_test_split
data_train, data_val = train_test_split(data, test_size=0.1, random_state=42)
print("Train size = ", len(data_train))
print("Validation size = ", len(data_val))
def generate_batch(data, batch_size=None, replace=True, max_len=None):
"""
Creates a pytorch-friendly dict from the batch data.
:returns: a dict with {'title' : int64[batch, title_max_len]
"""
if batch_size is not None:
data = data.sample(batch_size, replace=replace)
batch = {}
for col in text_columns:
batch[col] = as_matrix(data[col].values, max_len)
batch['Categorical'] = categorical_vectorizer.transform(data[categorical_columns].apply(dict, axis=1))
if target_column in data.columns:
batch[target_column] = data[target_column].values
return batch
generate_batch(data_train, 3, max_len=10)
###Output
_____no_output_____
###Markdown
Finally, let's talk deep learningOut model consists of three branches:* Title encoder* Description encoder* Categorical features encoderWe will then feed all 3 branches into one common network that predicts salary. By default, both text vectorizers shall use 1d convolutions, followed by global pooling over time.
###Code
import torch, torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class GlobalMaxPooling(nn.Module):
def __init__(self, dim=-1):
super(self.__class__, self).__init__()
self.dim = dim
def forward(self, x):
return x.max(dim=self.dim)[0]
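# Usage note (descriptive only): for an input of shape [batch, units, time],
# GlobalMaxPooling(dim=-1) returns a [batch, units] tensor holding the maximum
# activation of each unit over time.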
class TitleEncoder(nn.Module):
def __init__(self, n_tokens=len(tokens), out_size=64):
"""
A simple sequential encoder for titles.
x -> emb -> conv -> global_max -> relu -> dense
"""
super(self.__class__, self).__init__()
self.emb = nn.Embedding(n_tokens, 64, padding_idx=PAD_IX)
self.conv1 = nn.Conv1d(64, out_size, kernel_size=3, padding=1)
self.pool1 = GlobalMaxPooling()
self.dense = nn.Linear(out_size, out_size)
def forward(self, text_ix):
"""
:param text_ix: int64 Variable of shape [batch_size, max_len]
:returns: float32 Variable of shape [batch_size, out_size]
"""
h = self.emb(text_ix)
# we transpose from [batch, time, units] to [batch, units, time] to fit Conv1d dim order
h = torch.transpose(h, 1, 2)
# Apply the layers as defined above. Add some ReLUs before dense.
        # one possible implementation (a minimal sketch following the docstring above)
        h = self.conv1(h)
        h = self.pool1(h)
        h = F.relu(h)
        return self.dense(h)
title_encoder = TitleEncoder(out_size=64)
dummy_x = Variable(torch.LongTensor(generate_batch(data_train, 3)['Title']))
dummy_v = title_encoder(dummy_x)
assert isinstance(dummy_v, Variable)
assert tuple(dummy_v.shape) == (dummy_x.shape[0], 64)
del title_encoder
print("Seems fine")
###Output
_____no_output_____
###Markdown
__Task 2.1__ Create description encoder
###Code
# Define an encoder for job descriptions.
# Use any means you want so long as it's torch.nn.Module.
# A minimal sketch: job descriptions share the same token vocabulary as titles,
# so we can simply reuse the TitleEncoder architecture here. A deeper conv stack
# or a recurrent encoder over the (longer) descriptions would likely do better.
desc_encoder = TitleEncoder(out_size=64)
dummy_x = Variable(torch.LongTensor(generate_batch(data_train, 3)['FullDescription']))
dummy_v = desc_encoder(dummy_x)
assert isinstance(dummy_v, Variable)
assert tuple(dummy_v.shape) == (dummy_x.shape[0], 64)
del desc_encoder
print("Seems fine too")
###Output
_____no_output_____
###Markdown
__Task 2.2__ Build one network ~~to rule them all~~
###Code
class FullNetwork(nn.Module):
"""
This class does all the steps from (title, desc, categorical) features -> predicted target
    It unites the title & description encoders you defined above, along with some layers for the categorical branch and the output head.
"""
def __init__(self, n_tokens=len(tokens), n_cat_features=len(categorical_vectorizer.vocabulary_)):
super(self.__class__, self).__init__()
self.title_encoder = TitleEncoder(out_size=64)
        self.desc_encoder = TitleEncoder(out_size=64)  # minimal sketch: reuse the title architecture
        # define layers for categorical features. A few dense layers would do.
        self.cat_dense = nn.Linear(n_cat_features, 64)
        # define "output" layers that combine the three encoded vectors into an answer
        self.out_dense = nn.Linear(64 * 3, 64)
        self.out_final = nn.Linear(64, 1)
def forward(self, title_ix, desc_ix, cat_features):
"""
:param title_ix: int32 Variable [batch, title_len], job titles encoded by as_matrix
:param desc_ix: int32 Variable [batch, desc_len] , job descriptions encoded by as_matrix
:param cat_features: float32 Variable [batch, n_cat_features]
:returns: float32 Variable 1d [batch], predicted log1p-salary
"""
        # process each data source with its respective encoder
title_h = self.title_encoder(title_ix)
        desc_h = self.desc_encoder(desc_ix)
        # apply categorical encoder
        cat_h = F.relu(self.cat_dense(cat_features))
# concatenate all vectors together...
joint_h = torch.cat([title_h, desc_h, cat_h], dim=1)
        # ... and stack a few more layers on top (a minimal sketch)
        joint_h = F.relu(self.out_dense(joint_h))
        out = self.out_final(joint_h)
        # Note 1: do not forget to select the first column, [:, 0], to get 1d outputs
        # Note 2: please do not use output nonlinearities.
        return out[:, 0]
model = FullNetwork()
opt = torch.optim.Adam(model.parameters(), lr=1e-3)
# test it on one batch
batch = generate_batch(data_train, 32)
title_ix = Variable(torch.LongTensor(batch["Title"]))
desc_ix = Variable(torch.LongTensor(batch["FullDescription"]))
cat_features = Variable(torch.FloatTensor(batch["Categorical"]))
reference = Variable(torch.FloatTensor(batch[target_column]))
prediction = model(title_ix, desc_ix, cat_features)
assert len(prediction.shape) == 1 and prediction.shape[0] == title_ix.shape[0]
def compute_loss(reference, prediction):
"""
Computes objective for minimization.
    By default we minimize MSE, but you are encouraged to try mixing MSE, MAE, Huber loss, etc.
"""
return torch.mean((prediction - reference) ** 2)
def compute_mae(reference, prediction):
""" Compute MAE on actual salary, assuming your model outputs log1p(salary)"""
    # log1p is inverted with exp(x) - 1; the constant offsets cancel in the difference
    return torch.abs((torch.exp(reference) - 1) - (torch.exp(prediction) - 1)).mean()
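# Optional sketch (not required by the assignment): one way to mix objectives, as the
# compute_loss docstring suggests. The 0.5 weighting is an arbitrary choice worth tuning.
def compute_mixed_loss(reference, prediction, alpha=0.5):
    """ Weighted combination of MSE and MAE, both computed in log1p-salary space """
    mse = torch.mean((prediction - reference) ** 2)
    mae = torch.mean(torch.abs(prediction - reference))
    return alpha * mse + (1 - alpha) * mae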
loss = compute_loss(reference, prediction)
dummy_grads = torch.autograd.grad(loss, model.parameters(), retain_graph=True)
for grad in dummy_grads:
assert grad is not None and not (grad == 0).all(), "Some model parameters received zero grads. " \
"Double-check that your model uses all it's layers."
###Output
_____no_output_____
###Markdown
Let's train it!
###Code
from tqdm import tnrange
def iterate_minibatches(data, batch_size=32, max_len=None,
max_batches=None, shuffle=True, verbose=True):
indices = np.arange(len(data))
if shuffle:
indices = np.random.permutation(indices)
if max_batches is not None:
indices = indices[: batch_size * max_batches]
irange = tnrange if verbose else range
for start in irange(0, len(indices), batch_size):
yield generate_batch(data.iloc[indices[start : start + batch_size]], max_len=max_len)
num_epochs = 100
max_len = 100
batch_size = 32
batches_per_epoch = 100
for epoch_i in range(num_epochs):
print("Training:")
train_loss = train_mae = train_batches = 0
model.train(True)
for batch in iterate_minibatches(data_train, max_batches=batches_per_epoch):
title_ix = Variable(torch.LongTensor(batch["Title"]))
desc_ix = Variable(torch.LongTensor(batch["FullDescription"]))
cat_features = Variable(torch.FloatTensor(batch["Categorical"]))
reference = Variable(torch.FloatTensor(batch[target_column]))
prediction = model(title_ix, desc_ix, cat_features)
loss = compute_loss(reference, prediction)
loss.backward()
opt.step()
opt.zero_grad()
train_loss += loss.data.numpy()[0]
train_mae += compute_mae(reference, prediction).data.numpy()[0]
train_batches += 1
print("\tLoss:\t%.5f" % (train_loss / train_batches))
print("\tMAE:\t%.5f" % (train_mae / train_batches))
print('\n\n')
print("Validation:")
val_loss = val_mae = val_batches = 0
model.train(False)
for batch in iterate_minibatches(data_val, shuffle=False):
title_ix = Variable(torch.LongTensor(batch["Title"]), volatile=True)
desc_ix = Variable(torch.LongTensor(batch["FullDescription"]), volatile=True)
cat_features = Variable(torch.FloatTensor(batch["Categorical"]), volatile=True)
reference = Variable(torch.FloatTensor(batch[target_column]), volatile=True)
prediction = model(title_ix, desc_ix, cat_features)
loss = compute_loss(reference, prediction)
val_loss += loss.data.numpy()[0]
val_mae += compute_mae(reference, prediction).data.numpy()[0]
val_batches += 1
print("\tLoss:\t%.5f" % (val_loss / val_batches))
print("\tMAE:\t%.5f" % (val_mae / val_batches))
print('\n\n')
print("Final eval:")
val_loss = val_mae = val_batches = 0
for batch in iterate_minibatches(data_val, shuffle=False):
title_ix = Variable(torch.LongTensor(batch["Title"]), volatile=True)
desc_ix = Variable(torch.LongTensor(batch["FullDescription"]), volatile=True)
cat_features = Variable(torch.FloatTensor(batch["Categorical"]), volatile=True)
reference = Variable(torch.FloatTensor(batch[target_column]), volatile=True)
prediction = model(title_ix, desc_ix, cat_features)
loss = compute_loss(reference, prediction)
val_loss += loss.data.numpy()[0]
val_mae += compute_mae(reference, prediction).data.numpy()[0]
val_batches += 1
print("\tLoss:\t%.5f" % (val_loss / val_batches))
print("\tMAE:\t%.5f" % (val_mae / val_batches))
print('\n\n')
###Output
_____no_output_____ |
train_fasttext_model_with_biome_text2.ipynb | ###Markdown
Installing *biome.text*
###Code
!pip install -U pip
!pip install -U biome-text
exit(0)
###Output
Collecting pip
[?25l Downloading https://files.pythonhosted.org/packages/fe/ef/60d7ba03b5c442309ef42e7d69959f73aacccd0d86008362a681c4698e83/pip-21.0.1-py3-none-any.whl (1.5MB)
[K |████████████████████████████████| 1.5MB 6.8MB/s
[?25hInstalling collected packages: pip
Found existing installation: pip 19.3.1
Uninstalling pip-19.3.1:
Successfully uninstalled pip-19.3.1
Successfully installed pip-21.0.1
Collecting biome-text
Downloading biome_text-2.0.0-py3-none-any.whl (1.8 MB)
[K |████████████████████████████████| 1.8 MB 6.9 MB/s
[?25hCollecting s3fs~=0.4.0
Downloading s3fs-0.4.2-py3-none-any.whl (19 kB)
Collecting mlflow~=1.9.0
Downloading mlflow-1.9.1-py3-none-any.whl (11.9 MB)
[K |████████████████████████████████| 11.9 MB 13.2 MB/s
[?25hCollecting xlrd~=1.2.0
Downloading xlrd-1.2.0-py2.py3-none-any.whl (103 kB)
[K |████████████████████████████████| 103 kB 53.8 MB/s
[?25hCollecting gevent~=20.9.0
Downloading gevent-20.9.0-cp36-cp36m-manylinux2010_x86_64.whl (5.3 MB)
[K |████████████████████████████████| 5.3 MB 51.0 MB/s
[?25hRequirement already satisfied: flask~=1.1.2 in /usr/local/lib/python3.6/dist-packages (from biome-text) (1.1.2)
Collecting spacy~=2.3.0
Downloading spacy-2.3.5-cp36-cp36m-manylinux2014_x86_64.whl (10.4 MB)
[K |████████████████████████████████| 10.4 MB 50.3 MB/s
[?25hCollecting ray[tune]~=1.0.0
Downloading ray-1.0.1.post1-cp36-cp36m-manylinux1_x86_64.whl (23.1 MB)
[K |████████████████████████████████| 23.1 MB 55.0 MB/s
[?25hCollecting flask-cors~=3.0.8
Downloading Flask_Cors-3.0.10-py2.py3-none-any.whl (14 kB)
Collecting ipywidgets~=7.5.1
Downloading ipywidgets-7.5.1-py2.py3-none-any.whl (121 kB)
[K |████████████████████████████████| 121 kB 34.8 MB/s
[?25hCollecting flatdict~=4.0.0
Downloading flatdict-4.0.1.tar.gz (8.3 kB)
Collecting beautifulsoup4~=4.9.0
Downloading beautifulsoup4-4.9.3-py3-none-any.whl (115 kB)
[K |████████████████████████████████| 115 kB 40.4 MB/s
[?25hCollecting captum~=0.2.0
Downloading captum-0.2.0-py3-none-any.whl (1.4 MB)
[K |████████████████████████████████| 1.4 MB 42.4 MB/s
[?25hRequirement already satisfied: click~=7.1.0 in /usr/local/lib/python3.6/dist-packages (from biome-text) (7.1.2)
Collecting cachey~=0.2.0
Downloading cachey-0.2.1-py3-none-any.whl (6.4 kB)
Collecting distributed~=2.17.0
Downloading distributed-2.17.0-py3-none-any.whl (638 kB)
[K |████████████████████████████████| 638 kB 46.8 MB/s
[?25hCollecting tqdm>=4.49.0
Downloading tqdm-4.56.2-py2.py3-none-any.whl (72 kB)
[K |████████████████████████████████| 72 kB 953 kB/s
[?25hCollecting fastapi~=0.55.0
Downloading fastapi-0.55.1-py3-none-any.whl (48 kB)
[K |████████████████████████████████| 48 kB 4.8 MB/s
[?25hCollecting datasets~=1.1.2
Downloading datasets-1.1.3-py3-none-any.whl (153 kB)
[K |████████████████████████████████| 153 kB 55.2 MB/s
[?25hCollecting allennlp~=1.3.0
Downloading allennlp-1.3.0-py3-none-any.whl (506 kB)
[K |████████████████████████████████| 506 kB 47.8 MB/s
[?25hRequirement already satisfied: pandas~=1.1.0 in /usr/local/lib/python3.6/dist-packages (from biome-text) (1.1.5)
Collecting lxml~=4.5.0
Downloading lxml-4.5.2-cp36-cp36m-manylinux1_x86_64.whl (5.5 MB)
[K |████████████████████████████████| 5.5 MB 44.2 MB/s
[?25hCollecting uvicorn~=0.11.0
Downloading uvicorn-0.11.8-py3-none-any.whl (43 kB)
[K |████████████████████████████████| 43 kB 1.4 MB/s
[?25hCollecting elasticsearch<7.5.0,>=6.8.0
Downloading elasticsearch-7.1.0-py2.py3-none-any.whl (83 kB)
[K |████████████████████████████████| 83 kB 1.2 MB/s
[?25hRequirement already satisfied: scipy in /usr/local/lib/python3.6/dist-packages (from allennlp~=1.3.0->biome-text) (1.4.1)
Collecting overrides==3.1.0
Downloading overrides-3.1.0.tar.gz (11 kB)
Collecting tensorboardX>=1.2
Downloading tensorboardX-2.1-py2.py3-none-any.whl (308 kB)
[K |████████████████████████████████| 308 kB 50.2 MB/s
[?25hRequirement already satisfied: pytest in /usr/local/lib/python3.6/dist-packages (from allennlp~=1.3.0->biome-text) (3.6.4)
Collecting transformers<4.1,>=4.0
Downloading transformers-4.0.1-py3-none-any.whl (1.4 MB)
[K |████████████████████████████████| 1.4 MB 44.8 MB/s
[?25hRequirement already satisfied: torch<1.8.0,>=1.6.0 in /usr/local/lib/python3.6/dist-packages (from allennlp~=1.3.0->biome-text) (1.7.0+cu101)
Collecting jsonpickle
Downloading jsonpickle-2.0.0-py2.py3-none-any.whl (37 kB)
Collecting boto3<2.0,>=1.14
Downloading boto3-1.17.8-py2.py3-none-any.whl (130 kB)
[K |████████████████████████████████| 130 kB 54.0 MB/s
[?25hRequirement already satisfied: filelock<3.1,>=3.0 in /usr/local/lib/python3.6/dist-packages (from allennlp~=1.3.0->biome-text) (3.0.12)
Collecting sentencepiece
Downloading sentencepiece-0.1.95-cp36-cp36m-manylinux2014_x86_64.whl (1.2 MB)
[K |████████████████████████████████| 1.2 MB 31.4 MB/s
[?25hCollecting jsonnet>=0.10.0
Downloading jsonnet-0.17.0.tar.gz (259 kB)
[K |████████████████████████████████| 259 kB 53.2 MB/s
[?25hRequirement already satisfied: dataclasses in /usr/local/lib/python3.6/dist-packages (from allennlp~=1.3.0->biome-text) (0.8)
Requirement already satisfied: nltk in /usr/local/lib/python3.6/dist-packages (from allennlp~=1.3.0->biome-text) (3.2.5)
Requirement already satisfied: h5py in /usr/local/lib/python3.6/dist-packages (from allennlp~=1.3.0->biome-text) (2.10.0)
Requirement already satisfied: requests>=2.18 in /usr/local/lib/python3.6/dist-packages (from allennlp~=1.3.0->biome-text) (2.23.0)
Requirement already satisfied: scikit-learn in /usr/local/lib/python3.6/dist-packages (from allennlp~=1.3.0->biome-text) (0.22.2.post1)
Requirement already satisfied: numpy in /usr/local/lib/python3.6/dist-packages (from allennlp~=1.3.0->biome-text) (1.19.5)
Collecting soupsieve>1.2
Downloading soupsieve-2.2-py3-none-any.whl (33 kB)
Collecting jmespath<1.0.0,>=0.7.1
Downloading jmespath-0.10.0-py2.py3-none-any.whl (24 kB)
Collecting botocore<1.21.0,>=1.20.8
Downloading botocore-1.20.8-py2.py3-none-any.whl (7.2 MB)
[K |████████████████████████████████| 7.2 MB 47.4 MB/s
[?25hCollecting s3transfer<0.4.0,>=0.3.0
Downloading s3transfer-0.3.4-py2.py3-none-any.whl (69 kB)
[K |████████████████████████████████| 69 kB 6.4 MB/s
[?25hCollecting urllib3<1.27,>=1.25.4
Downloading urllib3-1.26.3-py2.py3-none-any.whl (137 kB)
[K |████████████████████████████████| 137 kB 54.0 MB/s
[?25hRequirement already satisfied: python-dateutil<3.0.0,>=2.1 in /usr/local/lib/python3.6/dist-packages (from botocore<1.21.0,>=1.20.8->boto3<2.0,>=1.14->allennlp~=1.3.0->biome-text) (2.8.1)
Requirement already satisfied: heapdict in /usr/local/lib/python3.6/dist-packages (from cachey~=0.2.0->biome-text) (1.0.1)
Requirement already satisfied: matplotlib in /usr/local/lib/python3.6/dist-packages (from captum~=0.2.0->biome-text) (3.2.2)
Requirement already satisfied: multiprocess in /usr/local/lib/python3.6/dist-packages (from datasets~=1.1.2->biome-text) (0.70.11.1)
Collecting pyarrow>=0.17.1
Downloading pyarrow-3.0.0-cp36-cp36m-manylinux2014_x86_64.whl (20.7 MB)
[K |████████████████████████████████| 20.7 MB 1.4 MB/s
[?25hCollecting xxhash
Downloading xxhash-2.0.0-cp36-cp36m-manylinux2010_x86_64.whl (242 kB)
[K |████████████████████████████████| 242 kB 54.5 MB/s
[?25hCollecting tqdm>=4.49.0
Downloading tqdm-4.49.0-py2.py3-none-any.whl (69 kB)
[K |████████████████████████████████| 69 kB 6.6 MB/s
[?25hRequirement already satisfied: dill in /usr/local/lib/python3.6/dist-packages (from datasets~=1.1.2->biome-text) (0.3.3)
Requirement already satisfied: tornado>=5 in /usr/local/lib/python3.6/dist-packages (from distributed~=2.17.0->biome-text) (5.1.1)
Requirement already satisfied: zict>=0.1.3 in /usr/local/lib/python3.6/dist-packages (from distributed~=2.17.0->biome-text) (2.0.0)
Requirement already satisfied: pyyaml in /usr/local/lib/python3.6/dist-packages (from distributed~=2.17.0->biome-text) (3.13)
Requirement already satisfied: sortedcontainers!=2.0.0,!=2.0.1 in /usr/local/lib/python3.6/dist-packages (from distributed~=2.17.0->biome-text) (2.3.0)
Requirement already satisfied: dask>=2.9.0 in /usr/local/lib/python3.6/dist-packages (from distributed~=2.17.0->biome-text) (2.12.0)
Requirement already satisfied: setuptools in /usr/local/lib/python3.6/dist-packages (from distributed~=2.17.0->biome-text) (53.0.0)
Requirement already satisfied: cloudpickle>=1.3.0 in /usr/local/lib/python3.6/dist-packages (from distributed~=2.17.0->biome-text) (1.3.0)
Requirement already satisfied: tblib>=1.6.0 in /usr/local/lib/python3.6/dist-packages (from distributed~=2.17.0->biome-text) (1.7.0)
Requirement already satisfied: psutil>=5.0 in /usr/local/lib/python3.6/dist-packages (from distributed~=2.17.0->biome-text) (5.4.8)
Collecting contextvars
Downloading contextvars-2.4.tar.gz (9.6 kB)
Requirement already satisfied: toolz>=0.8.2 in /usr/local/lib/python3.6/dist-packages (from distributed~=2.17.0->biome-text) (0.11.1)
Requirement already satisfied: msgpack>=0.6.0 in /usr/local/lib/python3.6/dist-packages (from distributed~=2.17.0->biome-text) (1.0.2)
Collecting starlette==0.13.2
Downloading starlette-0.13.2-py3-none-any.whl (59 kB)
[K |████████████████████████████████| 59 kB 5.6 MB/s
[?25hCollecting pydantic<2.0.0,>=0.32.2
Downloading pydantic-1.7.3-cp36-cp36m-manylinux2014_x86_64.whl (9.2 MB)
[K |████████████████████████████████| 9.2 MB 41.4 MB/s
[?25hRequirement already satisfied: itsdangerous>=0.24 in /usr/local/lib/python3.6/dist-packages (from flask~=1.1.2->biome-text) (1.1.0)
Requirement already satisfied: Jinja2>=2.10.1 in /usr/local/lib/python3.6/dist-packages (from flask~=1.1.2->biome-text) (2.11.3)
Requirement already satisfied: Werkzeug>=0.15 in /usr/local/lib/python3.6/dist-packages (from flask~=1.1.2->biome-text) (1.0.1)
Requirement already satisfied: Six in /usr/local/lib/python3.6/dist-packages (from flask-cors~=3.0.8->biome-text) (1.15.0)
Collecting zope.interface
Downloading zope.interface-5.2.0-cp36-cp36m-manylinux2010_x86_64.whl (236 kB)
[K |████████████████████████████████| 236 kB 53.2 MB/s
[?25hCollecting greenlet>=0.4.17
Downloading greenlet-1.0.0-cp36-cp36m-manylinux2010_x86_64.whl (156 kB)
[K |████████████████████████████████| 156 kB 54.4 MB/s
[?25hCollecting zope.event
Downloading zope.event-4.5.0-py2.py3-none-any.whl (6.8 kB)
Requirement already satisfied: traitlets>=4.3.1 in /usr/local/lib/python3.6/dist-packages (from ipywidgets~=7.5.1->biome-text) (4.3.3)
Requirement already satisfied: nbformat>=4.2.0 in /usr/local/lib/python3.6/dist-packages (from ipywidgets~=7.5.1->biome-text) (5.1.2)
Requirement already satisfied: ipykernel>=4.5.1 in /usr/local/lib/python3.6/dist-packages (from ipywidgets~=7.5.1->biome-text) (4.10.1)
Requirement already satisfied: widgetsnbextension~=3.5.0 in /usr/local/lib/python3.6/dist-packages (from ipywidgets~=7.5.1->biome-text) (3.5.1)
Requirement already satisfied: ipython>=4.0.0 in /usr/local/lib/python3.6/dist-packages (from ipywidgets~=7.5.1->biome-text) (5.5.0)
Requirement already satisfied: jupyter-client in /usr/local/lib/python3.6/dist-packages (from ipykernel>=4.5.1->ipywidgets~=7.5.1->biome-text) (5.3.5)
Requirement already satisfied: pygments in /usr/local/lib/python3.6/dist-packages (from ipython>=4.0.0->ipywidgets~=7.5.1->biome-text) (2.6.1)
Requirement already satisfied: decorator in /usr/local/lib/python3.6/dist-packages (from ipython>=4.0.0->ipywidgets~=7.5.1->biome-text) (4.4.2)
Requirement already satisfied: simplegeneric>0.8 in /usr/local/lib/python3.6/dist-packages (from ipython>=4.0.0->ipywidgets~=7.5.1->biome-text) (0.8.1)
Requirement already satisfied: pexpect in /usr/local/lib/python3.6/dist-packages (from ipython>=4.0.0->ipywidgets~=7.5.1->biome-text) (4.8.0)
Requirement already satisfied: prompt-toolkit<2.0.0,>=1.0.4 in /usr/local/lib/python3.6/dist-packages (from ipython>=4.0.0->ipywidgets~=7.5.1->biome-text) (1.0.18)
Requirement already satisfied: pickleshare in /usr/local/lib/python3.6/dist-packages (from ipython>=4.0.0->ipywidgets~=7.5.1->biome-text) (0.7.5)
Requirement already satisfied: MarkupSafe>=0.23 in /usr/local/lib/python3.6/dist-packages (from Jinja2>=2.10.1->flask~=1.1.2->biome-text) (1.1.1)
Collecting gorilla
Downloading gorilla-0.3.0-py2.py3-none-any.whl (11 kB)
Requirement already satisfied: entrypoints in /usr/local/lib/python3.6/dist-packages (from mlflow~=1.9.0->biome-text) (0.3)
Requirement already satisfied: sqlparse in /usr/local/lib/python3.6/dist-packages (from mlflow~=1.9.0->biome-text) (0.4.1)
Collecting azure-storage-blob>=12.0
Downloading azure_storage_blob-12.7.1-py2.py3-none-any.whl (339 kB)
[K |████████████████████████████████| 339 kB 54.5 MB/s
[?25hRequirement already satisfied: protobuf>=3.6.0 in /usr/local/lib/python3.6/dist-packages (from mlflow~=1.9.0->biome-text) (3.12.4)
Collecting alembic
Downloading alembic-1.5.4.tar.gz (1.1 MB)
[K |████████████████████████████████| 1.1 MB 35.9 MB/s
[?25hCollecting gunicorn
Downloading gunicorn-20.0.4-py2.py3-none-any.whl (77 kB)
[K |████████████████████████████████| 77 kB 5.3 MB/s
[?25hCollecting gitpython>=2.1.0
Downloading GitPython-3.1.13-py3-none-any.whl (159 kB)
[K |████████████████████████████████| 159 kB 53.4 MB/s
[?25hCollecting sqlalchemy<=1.3.13
Downloading SQLAlchemy-1.3.13.tar.gz (6.0 MB)
[K |████████████████████████████████| 6.0 MB 43.0 MB/s
[?25hCollecting querystring-parser
Downloading querystring_parser-1.2.4-py2.py3-none-any.whl (7.9 kB)
Collecting prometheus-flask-exporter
Downloading prometheus_flask_exporter-0.18.1.tar.gz (21 kB)
Collecting databricks-cli>=0.8.7
Downloading databricks-cli-0.14.1.tar.gz (54 kB)
[K |████████████████████████████████| 54 kB 2.3 MB/s
[?25hCollecting docker>=4.0.0
Downloading docker-4.4.2-py2.py3-none-any.whl (146 kB)
[K |████████████████████████████████| 146 kB 46.3 MB/s
[?25hCollecting azure-core<2.0.0,>=1.10.0
Downloading azure_core-1.11.0-py2.py3-none-any.whl (127 kB)
[K |████████████████████████████████| 127 kB 53.0 MB/s
[?25hCollecting msrest>=0.6.18
Downloading msrest-0.6.21-py2.py3-none-any.whl (85 kB)
[K |████████████████████████████████| 85 kB 3.6 MB/s
[?25hCollecting cryptography>=2.1.4
Downloading cryptography-3.4.5-cp36-abi3-manylinux2014_x86_64.whl (3.2 MB)
[K |████████████████████████████████| 3.2 MB 42.6 MB/s
[?25hRequirement already satisfied: cffi>=1.12 in /usr/local/lib/python3.6/dist-packages (from cryptography>=2.1.4->azure-storage-blob>=12.0->mlflow~=1.9.0->biome-text) (1.14.4)
Requirement already satisfied: pycparser in /usr/local/lib/python3.6/dist-packages (from cffi>=1.12->cryptography>=2.1.4->azure-storage-blob>=12.0->mlflow~=1.9.0->biome-text) (2.20)
Requirement already satisfied: tabulate>=0.7.7 in /usr/local/lib/python3.6/dist-packages (from databricks-cli>=0.8.7->mlflow~=1.9.0->biome-text) (0.8.7)
Collecting websocket-client>=0.32.0
Downloading websocket_client-0.57.0-py2.py3-none-any.whl (200 kB)
[K |████████████████████████████████| 200 kB 54.9 MB/s
[?25hCollecting gitdb<5,>=4.0.1
Downloading gitdb-4.0.5-py3-none-any.whl (63 kB)
[K |████████████████████████████████| 63 kB 1.6 MB/s
[?25hCollecting smmap<4,>=3.0.1
Downloading smmap-3.0.5-py2.py3-none-any.whl (25 kB)
Requirement already satisfied: requests-oauthlib>=0.5.0 in /usr/local/lib/python3.6/dist-packages (from msrest>=0.6.18->azure-storage-blob>=12.0->mlflow~=1.9.0->biome-text) (1.3.0)
Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.6/dist-packages (from msrest>=0.6.18->azure-storage-blob>=12.0->mlflow~=1.9.0->biome-text) (2020.12.5)
Collecting isodate>=0.6.0
Downloading isodate-0.6.0-py2.py3-none-any.whl (45 kB)
[K |████████████████████████████████| 45 kB 2.5 MB/s
[?25hRequirement already satisfied: ipython-genutils in /usr/local/lib/python3.6/dist-packages (from nbformat>=4.2.0->ipywidgets~=7.5.1->biome-text) (0.2.0)
Requirement already satisfied: jupyter-core in /usr/local/lib/python3.6/dist-packages (from nbformat>=4.2.0->ipywidgets~=7.5.1->biome-text) (4.7.1)
Requirement already satisfied: jsonschema!=2.5.0,>=2.4 in /usr/local/lib/python3.6/dist-packages (from nbformat>=4.2.0->ipywidgets~=7.5.1->biome-text) (2.6.0)
Requirement already satisfied: pytz>=2017.2 in /usr/local/lib/python3.6/dist-packages (from pandas~=1.1.0->biome-text) (2018.9)
Requirement already satisfied: wcwidth in /usr/local/lib/python3.6/dist-packages (from prompt-toolkit<2.0.0,>=1.0.4->ipython>=4.0.0->ipywidgets~=7.5.1->biome-text) (0.2.5)
Requirement already satisfied: prometheus-client>=0.7.1 in /usr/local/lib/python3.6/dist-packages (from ray[tune]~=1.0.0->biome-text) (0.9.0)
Collecting colorful
Downloading colorful-0.5.4-py2.py3-none-any.whl (201 kB)
[K |████████████████████████████████| 201 kB 46.6 MB/s
[?25hCollecting gpustat
Downloading gpustat-0.6.0.tar.gz (78 kB)
[K |████████████████████████████████| 78 kB 4.5 MB/s
[?25hRequirement already satisfied: google in /usr/local/lib/python3.6/dist-packages (from ray[tune]~=1.0.0->biome-text) (2.0.3)
Requirement already satisfied: grpcio>=1.28.1 in /usr/local/lib/python3.6/dist-packages (from ray[tune]~=1.0.0->biome-text) (1.32.0)
Collecting aiohttp-cors
Downloading aiohttp_cors-0.7.0-py3-none-any.whl (27 kB)
Collecting redis<3.5.0,>=3.3.2
Downloading redis-3.4.1-py2.py3-none-any.whl (71 kB)
[K |████████████████████████████████| 71 kB 6.3 MB/s
[?25hCollecting opencensus
Downloading opencensus-0.7.12-py2.py3-none-any.whl (127 kB)
[K |████████████████████████████████| 127 kB 48.0 MB/s
[?25hCollecting colorama
Downloading colorama-0.4.4-py2.py3-none-any.whl (16 kB)
Collecting py-spy>=0.2.0
Downloading py_spy-0.3.4-py2.py3-none-manylinux1_x86_64.whl (3.2 MB)
[K |████████████████████████████████| 3.2 MB 34.7 MB/s
[?25hCollecting aioredis
Downloading aioredis-1.3.1-py3-none-any.whl (65 kB)
[K |████████████████████████████████| 65 kB 2.4 MB/s
[?25hCollecting aiohttp
Downloading aiohttp-3.7.3-cp36-cp36m-manylinux2014_x86_64.whl (1.3 MB)
[K |████████████████████████████████| 1.3 MB 31.5 MB/s
[?25hCollecting urllib3<1.27,>=1.25.4
Downloading urllib3-1.25.11-py2.py3-none-any.whl (127 kB)
[K |████████████████████████████████| 127 kB 50.1 MB/s
[?25hRequirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.6/dist-packages (from requests>=2.18->allennlp~=1.3.0->biome-text) (3.0.4)
Requirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.6/dist-packages (from requests>=2.18->allennlp~=1.3.0->biome-text) (2.10)
Requirement already satisfied: oauthlib>=3.0.0 in /usr/local/lib/python3.6/dist-packages (from requests-oauthlib>=0.5.0->msrest>=0.6.18->azure-storage-blob>=12.0->mlflow~=1.9.0->biome-text) (3.1.0)
Collecting fsspec>=0.6.0
Downloading fsspec-0.8.5-py3-none-any.whl (98 kB)
[K |████████████████████████████████| 98 kB 5.4 MB/s
[?25hRequirement already satisfied: wasabi<1.1.0,>=0.4.0 in /usr/local/lib/python3.6/dist-packages (from spacy~=2.3.0->biome-text) (0.8.2)
Requirement already satisfied: plac<1.2.0,>=0.9.6 in /usr/local/lib/python3.6/dist-packages (from spacy~=2.3.0->biome-text) (1.1.3)
Requirement already satisfied: cymem<2.1.0,>=2.0.2 in /usr/local/lib/python3.6/dist-packages (from spacy~=2.3.0->biome-text) (2.0.5)
Requirement already satisfied: murmurhash<1.1.0,>=0.28.0 in /usr/local/lib/python3.6/dist-packages (from spacy~=2.3.0->biome-text) (1.0.5)
Collecting thinc<7.5.0,>=7.4.1
Downloading thinc-7.4.5-cp36-cp36m-manylinux2014_x86_64.whl (1.1 MB)
[K |████████████████████████████████| 1.1 MB 42.0 MB/s
[?25hRequirement already satisfied: blis<0.8.0,>=0.4.0 in /usr/local/lib/python3.6/dist-packages (from spacy~=2.3.0->biome-text) (0.4.1)
Requirement already satisfied: srsly<1.1.0,>=1.0.2 in /usr/local/lib/python3.6/dist-packages (from spacy~=2.3.0->biome-text) (1.0.5)
Requirement already satisfied: preshed<3.1.0,>=3.0.2 in /usr/local/lib/python3.6/dist-packages (from spacy~=2.3.0->biome-text) (3.0.5)
Requirement already satisfied: catalogue<1.1.0,>=0.0.7 in /usr/local/lib/python3.6/dist-packages (from spacy~=2.3.0->biome-text) (1.0.0)
Requirement already satisfied: importlib-metadata>=0.20 in /usr/local/lib/python3.6/dist-packages (from catalogue<1.1.0,>=0.0.7->spacy~=2.3.0->biome-text) (3.4.0)
Requirement already satisfied: zipp>=0.5 in /usr/local/lib/python3.6/dist-packages (from importlib-metadata>=0.20->catalogue<1.1.0,>=0.0.7->spacy~=2.3.0->biome-text) (3.4.0)
Requirement already satisfied: typing-extensions>=3.6.4 in /usr/local/lib/python3.6/dist-packages (from importlib-metadata>=0.20->catalogue<1.1.0,>=0.0.7->spacy~=2.3.0->biome-text) (3.7.4.3)
Requirement already satisfied: future in /usr/local/lib/python3.6/dist-packages (from torch<1.8.0,>=1.6.0->allennlp~=1.3.0->biome-text) (0.16.0)
Collecting sacremoses
Downloading sacremoses-0.0.43.tar.gz (883 kB)
[K |████████████████████████████████| 883 kB 47.8 MB/s
[?25hRequirement already satisfied: packaging in /usr/local/lib/python3.6/dist-packages (from transformers<4.1,>=4.0->allennlp~=1.3.0->biome-text) (20.9)
Requirement already satisfied: regex!=2019.12.17 in /usr/local/lib/python3.6/dist-packages (from transformers<4.1,>=4.0->allennlp~=1.3.0->biome-text) (2019.12.20)
Collecting tokenizers==0.9.4
Downloading tokenizers-0.9.4-cp36-cp36m-manylinux2010_x86_64.whl (2.9 MB)
[K |████████████████████████████████| 2.9 MB 42.1 MB/s
[?25hCollecting uvloop>=0.14.0
Downloading uvloop-0.15.1.tar.gz (2.1 MB)
[K |████████████████████████████████| 2.1 MB 37.6 MB/s
[33mWARNING: Discarding https://files.pythonhosted.org/packages/94/98/9dc814f391b2293ecc790b9752e005296c69c3694fd9975b6cb77c448135/uvloop-0.15.1.tar.gz#sha256=7846828112bfb49abc5fdfc47d0e4dfd7402115c9fde3c14c31818cfbeeb63dc (from https://pypi.org/simple/uvloop/). Command errored out with exit status 1: python setup.py egg_info Check the logs for full command output.[0m
[?25h Downloading uvloop-0.15.0.tar.gz (2.1 MB)
[K |████████████████████████████████| 2.1 MB 35.2 MB/s
[33mWARNING: Discarding https://files.pythonhosted.org/packages/df/fb/0e1b479ac5502f3d4531a2fc3f046312616f1ad020c686da353c2ff3bbc6/uvloop-0.15.0.tar.gz#sha256=1a503d5b49da6e3dd5607d6e533a5315b1caedbf629901807c65a23a09cad065 (from https://pypi.org/simple/uvloop/). Command errored out with exit status 1: python setup.py egg_info Check the logs for full command output.[0m
[?25h Downloading uvloop-0.14.0-cp36-cp36m-manylinux2010_x86_64.whl (3.9 MB)
[K |████████████████████████████████| 3.9 MB 37.7 MB/s
[?25hCollecting h11<0.10,>=0.8
Downloading h11-0.9.0-py2.py3-none-any.whl (53 kB)
[K |████████████████████████████████| 53 kB 2.0 MB/s
[?25hCollecting httptools==0.1.*
Downloading httptools-0.1.1-cp36-cp36m-manylinux1_x86_64.whl (216 kB)
[K |████████████████████████████████| 216 kB 56.2 MB/s
[?25hCollecting websockets==8.*
Downloading websockets-8.1-cp36-cp36m-manylinux2010_x86_64.whl (78 kB)
[K |████████████████████████████████| 78 kB 6.9 MB/s
[?25hRequirement already satisfied: notebook>=4.4.1 in /usr/local/lib/python3.6/dist-packages (from widgetsnbextension~=3.5.0->ipywidgets~=7.5.1->biome-text) (5.3.1)
Requirement already satisfied: terminado>=0.8.1 in /usr/local/lib/python3.6/dist-packages (from notebook>=4.4.1->widgetsnbextension~=3.5.0->ipywidgets~=7.5.1->biome-text) (0.9.2)
Requirement already satisfied: nbconvert in /usr/local/lib/python3.6/dist-packages (from notebook>=4.4.1->widgetsnbextension~=3.5.0->ipywidgets~=7.5.1->biome-text) (5.6.1)
Requirement already satisfied: Send2Trash in /usr/local/lib/python3.6/dist-packages (from notebook>=4.4.1->widgetsnbextension~=3.5.0->ipywidgets~=7.5.1->biome-text) (1.5.0)
Requirement already satisfied: pyzmq>=13 in /usr/local/lib/python3.6/dist-packages (from jupyter-client->ipykernel>=4.5.1->ipywidgets~=7.5.1->biome-text) (22.0.2)
Requirement already satisfied: ptyprocess in /usr/local/lib/python3.6/dist-packages (from terminado>=0.8.1->notebook>=4.4.1->widgetsnbextension~=3.5.0->ipywidgets~=7.5.1->biome-text) (0.7.0)
Collecting yarl<2.0,>=1.0
Downloading yarl-1.6.3-cp36-cp36m-manylinux2014_x86_64.whl (293 kB)
[K |████████████████████████████████| 293 kB 55.5 MB/s
[?25hCollecting idna-ssl>=1.0
Downloading idna-ssl-1.1.0.tar.gz (3.4 kB)
Collecting multidict<7.0,>=4.5
Downloading multidict-5.1.0-cp36-cp36m-manylinux2014_x86_64.whl (141 kB)
[K |████████████████████████████████| 141 kB 49.6 MB/s
[?25hRequirement already satisfied: attrs>=17.3.0 in /usr/local/lib/python3.6/dist-packages (from aiohttp->ray[tune]~=1.0.0->biome-text) (20.3.0)
Collecting async-timeout<4.0,>=3.0
Downloading async_timeout-3.0.1-py3-none-any.whl (8.2 kB)
Collecting hiredis
Downloading hiredis-1.1.0-cp36-cp36m-manylinux2010_x86_64.whl (61 kB)
[K |████████████████████████████████| 61 kB 7.4 MB/s
[?25hCollecting Mako
Downloading Mako-1.1.4.tar.gz (479 kB)
[K |████████████████████████████████| 479 kB 46.4 MB/s
[?25hCollecting python-editor>=0.3
Downloading python_editor-1.0.4-py3-none-any.whl (4.9 kB)
Collecting immutables>=0.9
Downloading immutables-0.15-cp36-cp36m-manylinux1_x86_64.whl (100 kB)
[K |████████████████████████████████| 100 kB 10.1 MB/s
[?25hRequirement already satisfied: nvidia-ml-py3>=7.352.0 in /usr/local/lib/python3.6/dist-packages (from gpustat->ray[tune]~=1.0.0->biome-text) (7.352.0)
Collecting blessings>=1.6
Downloading blessings-1.7-py3-none-any.whl (18 kB)
Requirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib->captum~=0.2.0->biome-text) (2.4.7)
Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.6/dist-packages (from matplotlib->captum~=0.2.0->biome-text) (0.10.0)
Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib->captum~=0.2.0->biome-text) (1.3.1)
Requirement already satisfied: pandocfilters>=1.4.1 in /usr/local/lib/python3.6/dist-packages (from nbconvert->notebook>=4.4.1->widgetsnbextension~=3.5.0->ipywidgets~=7.5.1->biome-text) (1.4.3)
Requirement already satisfied: defusedxml in /usr/local/lib/python3.6/dist-packages (from nbconvert->notebook>=4.4.1->widgetsnbextension~=3.5.0->ipywidgets~=7.5.1->biome-text) (0.6.0)
Requirement already satisfied: testpath in /usr/local/lib/python3.6/dist-packages (from nbconvert->notebook>=4.4.1->widgetsnbextension~=3.5.0->ipywidgets~=7.5.1->biome-text) (0.4.4)
Requirement already satisfied: bleach in /usr/local/lib/python3.6/dist-packages (from nbconvert->notebook>=4.4.1->widgetsnbextension~=3.5.0->ipywidgets~=7.5.1->biome-text) (3.3.0)
Requirement already satisfied: mistune<2,>=0.8.1 in /usr/local/lib/python3.6/dist-packages (from nbconvert->notebook>=4.4.1->widgetsnbextension~=3.5.0->ipywidgets~=7.5.1->biome-text) (0.8.4)
Requirement already satisfied: webencodings in /usr/local/lib/python3.6/dist-packages (from bleach->nbconvert->notebook>=4.4.1->widgetsnbextension~=3.5.0->ipywidgets~=7.5.1->biome-text) (0.5.1)
Requirement already satisfied: google-api-core<2.0.0,>=1.0.0 in /usr/local/lib/python3.6/dist-packages (from opencensus->ray[tune]~=1.0.0->biome-text) (1.16.0)
Collecting opencensus-context==0.1.2
Downloading opencensus_context-0.1.2-py2.py3-none-any.whl (4.4 kB)
Requirement already satisfied: google-auth<2.0dev,>=0.4.0 in /usr/local/lib/python3.6/dist-packages (from google-api-core<2.0.0,>=1.0.0->opencensus->ray[tune]~=1.0.0->biome-text) (1.25.0)
Requirement already satisfied: googleapis-common-protos<2.0dev,>=1.6.0 in /usr/local/lib/python3.6/dist-packages (from google-api-core<2.0.0,>=1.0.0->opencensus->ray[tune]~=1.0.0->biome-text) (1.52.0)
Requirement already satisfied: cachetools<5.0,>=2.0.0 in /usr/local/lib/python3.6/dist-packages (from google-auth<2.0dev,>=0.4.0->google-api-core<2.0.0,>=1.0.0->opencensus->ray[tune]~=1.0.0->biome-text) (4.2.1)
Requirement already satisfied: pyasn1-modules>=0.2.1 in /usr/local/lib/python3.6/dist-packages (from google-auth<2.0dev,>=0.4.0->google-api-core<2.0.0,>=1.0.0->opencensus->ray[tune]~=1.0.0->biome-text) (0.2.8)
Requirement already satisfied: rsa<5,>=3.1.4 in /usr/local/lib/python3.6/dist-packages (from google-auth<2.0dev,>=0.4.0->google-api-core<2.0.0,>=1.0.0->opencensus->ray[tune]~=1.0.0->biome-text) (4.7)
Requirement already satisfied: pyasn1<0.5.0,>=0.4.6 in /usr/local/lib/python3.6/dist-packages (from pyasn1-modules>=0.2.1->google-auth<2.0dev,>=0.4.0->google-api-core<2.0.0,>=1.0.0->opencensus->ray[tune]~=1.0.0->biome-text) (0.4.8)
Requirement already satisfied: py>=1.5.0 in /usr/local/lib/python3.6/dist-packages (from pytest->allennlp~=1.3.0->biome-text) (1.10.0)
Requirement already satisfied: more-itertools>=4.0.0 in /usr/local/lib/python3.6/dist-packages (from pytest->allennlp~=1.3.0->biome-text) (8.7.0)
Requirement already satisfied: pluggy<0.8,>=0.5 in /usr/local/lib/python3.6/dist-packages (from pytest->allennlp~=1.3.0->biome-text) (0.7.1)
Requirement already satisfied: atomicwrites>=1.0 in /usr/local/lib/python3.6/dist-packages (from pytest->allennlp~=1.3.0->biome-text) (1.4.0)
Requirement already satisfied: joblib in /usr/local/lib/python3.6/dist-packages (from sacremoses->transformers<4.1,>=4.0->allennlp~=1.3.0->biome-text) (1.0.0)
Building wheels for collected packages: overrides, flatdict, jsonnet, databricks-cli, sqlalchemy, idna-ssl, alembic, contextvars, gpustat, Mako, prometheus-flask-exporter, sacremoses
Building wheel for overrides (setup.py) ... [?25l[?25hdone
Created wheel for overrides: filename=overrides-3.1.0-py3-none-any.whl size=10175 sha256=3a7538da9d27180b4bc6ecb7c33268bbe2969dc082d3d0fa1fe72b6a49cd6450
Stored in directory: /root/.cache/pip/wheels/e6/3b/34/ae59fc8d35c37f01099425ab73599e45e9b9b599a7ccc2c45f
Building wheel for flatdict (setup.py) ... [?25l[?25hdone
Created wheel for flatdict: filename=flatdict-4.0.1-py3-none-any.whl size=6931 sha256=0dd28f38f87788a9a9d3ea59ca745c3a235d3f75b47263e6161e7f52262f5d52
Stored in directory: /root/.cache/pip/wheels/8c/0b/50/16de22650bd1c28ef15caa790b1b61847aef6c1b57fcb5fe3a
Building wheel for jsonnet (setup.py) ... [?25l[?25hdone
Created wheel for jsonnet: filename=jsonnet-0.17.0-cp36-cp36m-linux_x86_64.whl size=3387860 sha256=3181e05711275244d03128d7e730c8c5a5ebb33c45a7da5906d650de4e630eda
Stored in directory: /root/.cache/pip/wheels/7e/ad/c9/995f065cc9d62d8a6e39ed458050b2c085429afc651e62bf73
Building wheel for databricks-cli (setup.py) ... [?25l[?25hdone
Created wheel for databricks-cli: filename=databricks_cli-0.14.1-py3-none-any.whl size=100577 sha256=277971e85d745198bc798c03343c24964b27493c823ad055d23214db96c9105c
Stored in directory: /root/.cache/pip/wheels/36/2c/2e/09bcfa0bdb7005b96213ff0967f9ab2697b8d07196d1edeeeb
Building wheel for sqlalchemy (setup.py) ... [?25l[?25hdone
Created wheel for sqlalchemy: filename=SQLAlchemy-1.3.13-cp36-cp36m-linux_x86_64.whl size=1217230 sha256=39fe531ca08cc2387ccf0c02554e9841467ac60bed7744d4cf232fbcaef3933c
Stored in directory: /root/.cache/pip/wheels/28/3e/f9/8eca04781258bb6956ffba37e4e6e6951e5b3a16d4494b91cb
Building wheel for idna-ssl (setup.py) ... [?25l[?25hdone
Created wheel for idna-ssl: filename=idna_ssl-1.1.0-py3-none-any.whl size=3160 sha256=69ae8b8ec4f392f494b6e9a2ef3a0d2c53109ad046bea891edfe99778ae9da05
Stored in directory: /root/.cache/pip/wheels/6a/f5/9c/f8331a854f7a8739cf0e74c13854e4dd7b1af11b04fe1dde13
Building wheel for alembic (setup.py) ... [?25l[?25hdone
Created wheel for alembic: filename=alembic-1.5.4-py2.py3-none-any.whl size=156314 sha256=5aa2c51659c3db05860f1345ac45f4c243c51a3ca2371b1185edcced42551605
Stored in directory: /root/.cache/pip/wheels/97/72/33/933963de9d1c3bb66e4442a9fd0726e0082ea361a87d7ec815
Building wheel for contextvars (setup.py) ... [?25l[?25hdone
Created wheel for contextvars: filename=contextvars-2.4-py3-none-any.whl size=7665 sha256=a82def709b5036d5ca919d2f4e8ea663fd151a60e431b2a4e8c2cc3c18edf576
Stored in directory: /root/.cache/pip/wheels/41/11/53/911724983aa48deb94792432e14e518447212dd6c5477d49d3
Building wheel for gpustat (setup.py) ... [?25l[?25hdone
Created wheel for gpustat: filename=gpustat-0.6.0-py3-none-any.whl size=12617 sha256=7c0953fb444bb64b127ffe14c86aaa870c1e4d9c4939c7a8107e906f03f5176d
Stored in directory: /root/.cache/pip/wheels/50/da/35/fe2cfb3bc47822299f5e124a599d56f00b30ec0b328db16b9f
Building wheel for Mako (setup.py) ... [?25l[?25hdone
Created wheel for Mako: filename=Mako-1.1.4-py2.py3-none-any.whl size=75675 sha256=757f954d5cef850071c2e008ca25bde8bc22dd2772d249b8483f47a20142cb6b
Stored in directory: /root/.cache/pip/wheels/3c/ee/c2/9651c6b977f9d2a1bb766970d190f71213e2ca47b36d8dc488
Building wheel for prometheus-flask-exporter (setup.py) ... [?25l[?25hdone
Created wheel for prometheus-flask-exporter: filename=prometheus_flask_exporter-0.18.1-py3-none-any.whl size=17158 sha256=bb2b7a6eedc2f4ddd48f52c9b46f1b40c4e58ee5ff005ce45266091aaac381a9
Stored in directory: /root/.cache/pip/wheels/fe/70/a9/22af6c68f513e58533fb7fd649f4cc5e2a27c24422a41a1bfa
Building wheel for sacremoses (setup.py) ... [?25l[?25hdone
Created wheel for sacremoses: filename=sacremoses-0.0.43-py3-none-any.whl size=893258 sha256=252cc74ff717686c582999e941ced9c01322b71bf10d4938c9fad68e408c53fe
Stored in directory: /root/.cache/pip/wheels/49/25/98/cdea9c79b2d9a22ccc59540b1784b67f06b633378e97f58da2
Successfully built overrides flatdict jsonnet databricks-cli sqlalchemy idna-ssl alembic contextvars gpustat Mako prometheus-flask-exporter sacremoses
Installing collected packages: urllib3, multidict, immutables, yarl, soupsieve, jmespath, idna-ssl, contextvars, async-timeout, tqdm, smmap, opencensus-context, isodate, hiredis, botocore, blessings, beautifulsoup4, aiohttp, websocket-client, tokenizers, thinc, sqlalchemy, sacremoses, s3transfer, redis, python-editor, py-spy, opencensus, msrest, Mako, gpustat, gitdb, cryptography, colorful, colorama, azure-core, aioredis, aiohttp-cors, zope.interface, zope.event, xxhash, websockets, uvloop, transformers, tensorboardX, starlette, spacy, sentencepiece, ray, querystring-parser, pydantic, pyarrow, prometheus-flask-exporter, overrides, jsonpickle, jsonnet, httptools, h11, gunicorn, greenlet, gorilla, gitpython, fsspec, docker, databricks-cli, boto3, azure-storage-blob, alembic, xlrd, uvicorn, s3fs, mlflow, lxml, ipywidgets, gevent, flatdict, flask-cors, fastapi, elasticsearch, distributed, datasets, captum, cachey, allennlp, biome-text
Attempting uninstall: urllib3
Found existing installation: urllib3 1.24.3
Uninstalling urllib3-1.24.3:
Successfully uninstalled urllib3-1.24.3
Attempting uninstall: tqdm
Found existing installation: tqdm 4.41.1
Uninstalling tqdm-4.41.1:
Successfully uninstalled tqdm-4.41.1
Attempting uninstall: beautifulsoup4
Found existing installation: beautifulsoup4 4.6.3
Uninstalling beautifulsoup4-4.6.3:
Successfully uninstalled beautifulsoup4-4.6.3
Attempting uninstall: thinc
Found existing installation: thinc 7.4.0
Uninstalling thinc-7.4.0:
Successfully uninstalled thinc-7.4.0
Attempting uninstall: sqlalchemy
Found existing installation: SQLAlchemy 1.3.23
Uninstalling SQLAlchemy-1.3.23:
Successfully uninstalled SQLAlchemy-1.3.23
Attempting uninstall: spacy
Found existing installation: spacy 2.2.4
Uninstalling spacy-2.2.4:
Successfully uninstalled spacy-2.2.4
Attempting uninstall: pyarrow
Found existing installation: pyarrow 0.14.1
Uninstalling pyarrow-0.14.1:
Successfully uninstalled pyarrow-0.14.1
Attempting uninstall: xlrd
Found existing installation: xlrd 1.1.0
Uninstalling xlrd-1.1.0:
Successfully uninstalled xlrd-1.1.0
Attempting uninstall: lxml
Found existing installation: lxml 4.2.6
Uninstalling lxml-4.2.6:
Successfully uninstalled lxml-4.2.6
Attempting uninstall: ipywidgets
Found existing installation: ipywidgets 7.6.3
Uninstalling ipywidgets-7.6.3:
Successfully uninstalled ipywidgets-7.6.3
Attempting uninstall: distributed
Found existing installation: distributed 1.25.3
Uninstalling distributed-1.25.3:
Successfully uninstalled distributed-1.25.3
[31mERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.
datascience 0.10.6 requires folium==0.2.1, but you have folium 0.8.3 which is incompatible.[0m
Successfully installed Mako-1.1.4 aiohttp-3.7.3 aiohttp-cors-0.7.0 aioredis-1.3.1 alembic-1.5.4 allennlp-1.3.0 async-timeout-3.0.1 azure-core-1.11.0 azure-storage-blob-12.7.1 beautifulsoup4-4.9.3 biome-text-2.0.0 blessings-1.7 boto3-1.17.8 botocore-1.20.8 cachey-0.2.1 captum-0.2.0 colorama-0.4.4 colorful-0.5.4 contextvars-2.4 cryptography-3.4.5 databricks-cli-0.14.1 datasets-1.1.3 distributed-2.17.0 docker-4.4.2 elasticsearch-7.1.0 fastapi-0.55.1 flask-cors-3.0.10 flatdict-4.0.1 fsspec-0.8.5 gevent-20.9.0 gitdb-4.0.5 gitpython-3.1.13 gorilla-0.3.0 gpustat-0.6.0 greenlet-1.0.0 gunicorn-20.0.4 h11-0.9.0 hiredis-1.1.0 httptools-0.1.1 idna-ssl-1.1.0 immutables-0.15 ipywidgets-7.5.1 isodate-0.6.0 jmespath-0.10.0 jsonnet-0.17.0 jsonpickle-2.0.0 lxml-4.5.2 mlflow-1.9.1 msrest-0.6.21 multidict-5.1.0 opencensus-0.7.12 opencensus-context-0.1.2 overrides-3.1.0 prometheus-flask-exporter-0.18.1 py-spy-0.3.4 pyarrow-3.0.0 pydantic-1.7.3 python-editor-1.0.4 querystring-parser-1.2.4 ray-1.0.1.post1 redis-3.4.1 s3fs-0.4.2 s3transfer-0.3.4 sacremoses-0.0.43 sentencepiece-0.1.95 smmap-3.0.5 soupsieve-2.2 spacy-2.3.5 sqlalchemy-1.3.13 starlette-0.13.2 tensorboardX-2.1 thinc-7.4.5 tokenizers-0.9.4 tqdm-4.49.0 transformers-4.0.1 urllib3-1.25.11 uvicorn-0.11.8 uvloop-0.14.0 websocket-client-0.57.0 websockets-8.1 xlrd-1.2.0 xxhash-2.0.0 yarl-1.6.3 zope.event-4.5.0 zope.interface-5.2.0
###Markdown
Downloading the data
Here we download the preprocessed data and the pre-trained word vectors.
###Code
!git clone https://github.com/recognai/cantemist-ner/
!wget https://dl.fbaipublicfiles.com/fasttext/vectors-crawl/cc.es.300.vec.gz
###Output
--2021-02-16 09:26:27-- https://dl.fbaipublicfiles.com/fasttext/vectors-crawl/cc.es.300.vec.gz
Resolving dl.fbaipublicfiles.com (dl.fbaipublicfiles.com)... 104.22.75.142, 104.22.74.142, 172.67.9.4, ...
Connecting to dl.fbaipublicfiles.com (dl.fbaipublicfiles.com)|104.22.75.142|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 1285580896 (1.2G) [binary/octet-stream]
Saving to: ‘cc.es.300.vec.gz’
cc.es.300.vec.gz 100%[===================>] 1.20G 11.8MB/s in 1m 45s
2021-02-16 09:28:13 (11.7 MB/s) - ‘cc.es.300.vec.gz’ saved [1285580896/1285580896]
###Markdown
Training the system
###Code
from biome.text import Pipeline, Dataset, VocabularyConfiguration, TrainerConfiguration
###Output
_____no_output_____
###Markdown
Loading the datasets
These datasets were created using the *NER_dataprep.ipynb* and *NER_dataprep_test.ipynb* notebooks in our [cantemist-ner](https://github.com/recognai/cantemist-ner) repo.
###Code
train_ds = Dataset.from_json("cantemist-ner/data/NER/train_full.json")
# This test dataset was released after the competition ended:
test_ds = Dataset.from_json("cantemist-ner/data/NER/gold_test.json")
# for biome.text v2 we need to rename the "labels" column containing the NER tags
train_ds.rename_column_("labels", "tags")
test_ds.rename_column_("labels", "tags")
###Output
_____no_output_____
###Markdown
Defining the pipeline
The architecture and hyperparameters were found by means of a random search HPO.
###Code
pipeline_config = {
'name': 'candemist-ner-first-hpo',
'features': {
'word': {
'embedding_dim': 300,
'lowercase_tokens': True,
'trainable': True,
'weights_file': "/content/cc.es.300.vec.gz"
},
'char': {
'embedding_dim': 64,
'lowercase_characters': True,
'encoder': {
'bidirectional': True,
'hidden_size': 128,
'num_layers': 1,
'type': 'gru'
},
'dropout': 0.16517050992687604
},
},
'encoder': {
'bidirectional': True,
'hidden_size': 512,
'input_size': 556,
'num_layers': 1,
'type': 'lstm'
},
'head': {
'dropout': 0.2689579604286324,
'labels': ['MORFOLOGIA_NEOPLASIA'],
'type': 'TokenClassification'
},
}
pl = Pipeline.from_config(pipeline_config)
###Output
[38;5;2m✔ Download and installation successful[0m
You can now load the model via spacy.load('en_core_web_sm')
[38;5;2m✔ Linking successful[0m
/usr/local/lib/python3.6/dist-packages/en_core_web_sm -->
/usr/local/lib/python3.6/dist-packages/spacy/data/en
You can now load the model via spacy.load('en')
###Markdown
Defining the vocabulary
Only include words that appear at least two times in the `train_ds` dataset.
###Code
vocab_config = VocabularyConfiguration(
datasets=[train_ds], min_count={"word": 2}
)
###Output
_____no_output_____
###Markdown
Defining the trainer
The hyperparameters were found by means of a random search HPO.
###Code
trainer_dict={
"optimizer": {
"type": "adamw",
"lr": 0.0038931174186587806,
"weight_decay": 0.01,
},
"learning_rate_scheduler": {
"type": "step",
"step_size":2,
"gamma":0.1
},
"batch_size": 32,
"num_epochs": 4,
"validation_metric": "+f1-measure-overall",
"patience":3
}
trainer_config = TrainerConfiguration(**trainer_dict)
###Output
_____no_output_____
###Markdown
Training the pipeline
###Code
pl.train(
output="output",
training=train_ds,
test=test_ds,
trainer=trainer_config,
vocab_config=vocab_config,
)
###Output
2021-02-16 09:40:03,935 - allennlp.data.vocabulary - INFO - Fitting token dictionary from dataset.
building vocab: 0it [00:00, ?it/s]
###Markdown
Appendix: BETO model with an f1 score of 0.861
In a quick follow-up work, we experimented with the pretrained "*BETO: Spanish Bert*" model, used by the winner (f1: 0.87) and the runner-up (f1: 0.869) of the Cantemist NER competition, and were able to achieve similar results.
###Code
pipeline_dict = {
"name": "",
"features": {
"transformers": {
"model_name": "dccuchile/bert-base-spanish-wwm-cased",
#"mismatched": True, # False for wordpiece tokens, True for word tokens
"trainable": True,
"max_length": 512
},
},
"head": {
"type": "TokenClassification",
"labels": ["MORFOLOGIA_NEOPLASIA"]
}
}
pl = Pipeline.from_config(pipeline_dict)
# The hyperparameters were found by means of a random search HPO
trainer_dict = {
"optimizer": {
"type": "adamw",
"lr": 3e-5
},
"batch_size": 8,
"patience": 2,
"num_epochs": 8,
"validation_metric" : "+f1-measure-overall",
"learning_rate_scheduler": {
"type": "linear_with_warmup",
"num_epochs": 8,
"num_steps_per_epoch": 4485,
"warmup_steps": 100,
}
}
trainer = TrainerConfiguration(**trainer_dict)
pl.train(
output="output_beto",
training=train_ds,
test=test_ds,
trainer=trainer,
)
###Output
_____no_output_____ |
notebooks/MILP_for_experimental_design.ipynb | ###Markdown
Design of Complex Neuroscience Experiments using Mixed Integer Linear Programming
This notebook provides example implementations for the four case studies in the article ["Design of Complex Experiments using Mixed Integer Linear Programming"](https://arxiv.org/abs/2012.02361). Each case study aims to demonstrate how Mixed Integer Linear Programming (MILP) can be used to address real-world experimental design challenges. This notebook reproduces the figures related to each case study. Full details of each case study can be found in the main article. The article also contains an introduction to the mathematical foundations of MILP.
Code Organization
The `milp` Package
For clarity, some code has been omitted from this notebook and placed in the `milp` python package. This includes code related to solving each program and visualizing each program solution. The `milp` package can be found in the root directory of this repository and can be installed using the included `setup.py` file.
The `milp` Workflow
Each example is solved by creating mixed integer linear programs. Each mixed integer linear program is constructed using the same basic workflow:
1. Initialize Program
`milp.program.initialize_program()` initializes a dictionary that represents a mixed integer linear program. This dictionary will be updated to include the program's variables, linear constraints, and cost function terms.
```python
import milp
program = milp.program.initialize_program()
```
2. Add Variables
`milp.program.add_variable()` adds a variable to a program. The name given to each variable will be used to specify its constraints and cost function coefficients. Whether a variable is real, integer, or boolean can be specified by setting `variable_type` to `float`, `int`, or `bool`.
```python
milp.program.add_variable(
    program=program,
    name='a',
    variable_type=bool,
)
milp.program.add_variable(
    program=program,
    name='b',
    variable_type=int,
    lower_bound=float('-inf'),
    upper_bound=float('inf'),
)
milp.program.add_variable(
    program=program,
    name='c',
    variable_type=float,
)
```
3. Add Linear Constraints
`milp.program.add_constraint()` adds a linear constraint to a program. An equality constraint can be specified by using arguments `A_eq` and `b_eq`. The value of `A_eq` should be a dictionary whose keys are variable names and whose values are coefficients of those variables. Inequality constraints can be specified by using `A_lt` and `b_lt`.
```python
milp.program.add_constraint(
    program=program,
    A_eq={'a': 1, 'b': 1},
    b_eq=0,
)
milp.program.add_constraint(
    program=program,
    A_lt={'a': 1, 'b': 2, 'c': 3},
    b_lt=3,
)
```
4. Specify Cost Function
`milp.program.add_cost_terms` is used to specify the program's cost function. The value of `coefficients` should be a dictionary whose keys are variable names and whose values are coefficients of those variables.
```python
milp.program.add_cost_terms(
    program=program,
    coefficients={'a': -1, 'b': 1},
)
```
5. Solve Program
`milp.program.solve_program()` solves the program using an external library (by default uses [Gurobi](https://www.gurobi.com)). It does this by 1) converting the program into the library-compatible representation, 2) running its solver, and 3) returning the solution.
This solution specifies an experimental design that optimally conforms to the design constraints of the program.
```python
solution = milp.program.solve_MILP(program=program)
print(solution['variables'])
```
```
> {'a': True, 'b': -1, 'c': 0.0}
```
Taken together these code snippets have represented and solved the following simple program:
$$
\min{b - a} \\
a + b = 0 \\
a + 2 b + 3 c \leq 3 \\
a \in \mathbb{B}, b \in \mathbb{Z}, c \in \mathbb{R}^+
$$
Some additional functions are used to implement the design patterns discussed in **Section 3.2** of the main article. For further details refer to the source code and docstrings of `milp`.
Software Licensing
- All code in this repository, including the code in the `milp` package and the code in this Jupyter notebook, is licensed under a BSD 2-clause license.
- By default the `milp` package solves programs using the external library [Gurobi](https://www.gurobi.com). Gurobi offers free academic licenses, available [here](https://www.gurobi.com/downloads/end-user-license-agreement-academic/). Instructions for installing Gurobi can be found on the Gurobi website.
- Alternative solvers can be used instead of Gurobi. The `milp` package also contains an adapter for [CPLEX](https://www.ibm.com/analytics/cplex-optimizer), used by specifying `solve_MILP(..., solver='cplex')`.
###Code
import random
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats
import milp
from milp.examples import balanced_grouping
from milp.examples import stimulus_task_pairing
from milp.examples import structured_sampling
from milp.examples import trajectory_design
# setup plot formatting
%matplotlib inline
milp.formatting.setup_plot_formatting()
# initialize software license
milp.program.initialize_license()
###Output
Using license file /home/jlg/storm/bin/gurobi902/gurobi.lic
Academic license - for non-commercial use only
###Markdown
Example Problem 1: Balanced Grouping
A common challenge when designing experiments is to distribute conditions across blocks or sessions in a balanced way. Perhaps a researcher requires that each block of the experiment must have a similar number of exemplars of some stimulus category. Perhaps a researcher must collect data on a large battery of tasks, and the task distribution should be similar across each scanning session. Perhaps a researcher needs to evenly distribute content across trials, blocks, and sessions simultaneously. All of these scenarios can be addressed by MILP in a similar manner.
Design Specifications
In this first example we will cover the simple case of trying to balance the mean luminance of visual stimuli across scanning runs. Suppose a researcher is designing a vision study using 360 short videos. Each video is 20 seconds long and has its own mean luminance. For data collection, the videos must be grouped into 12 separate runs of 10 minutes each, with each video appearing in exactly one run. Importantly, the researcher plans to perform an analysis that requires all of the runs to have the same mean luminance. Although this may seem like a trivial requirement, it can be extremely challenging in practice. There are 10^367 possible groupings that split 360 items into 12 equally sized groups. This space is far too large to test every possible grouping. The solution space is also discrete, so gradient-based methods cannot be used. **Figure 2B** shows a synthetic distribution of luminances that we have created for the videos.
MILP Formulation
It is straightforward to express this problem as a mixed integer linear program. For notation, let $V$ be the number of videos, $R$ be the number of runs, $v$ be the index over videos, and $r$ be the index over runs. The first step is to define the variables of the solution space. Introduce binary variables $X$ to represent possible pairings between runs and videos:
> **VARIABLES**
$$X \in \mathbb{B}^{V \times R}$$
$$\begin{aligned}x_{v,r} &= 1 \rightarrow \text{ video } v \text{ is in run } r \\ &= 0 \rightarrow \text{ video } v \text{ is not in run } r\end{aligned}$$
All of the desired grouping properties in this problem can be expressed as linear equalities over elements of $X$. To specify that each run must contain $V / R = 30$ videos, introduce a constraint for each run:
> **CONSTRAINTS**
$$\sum_v x_{v,r} = 30 \quad \forall \ r \in \{1, ..., R\}$$
To specify that each video appears in exactly one run, introduce a constraint for each video:
> **CONSTRAINTS**
$$\sum_r x_{v,r} = 1 \quad \forall \ v \in \{1, ..., V\}$$
Any value of $X$ that satisfies all of the above constraints is a feasible grouping that produces a valid experiment.
A cost function can now be designed to select the experiment that best balances luminance. Assume that the mean luminance of each video is stored in a vector $f$. This vector is predetermined by the video dataset. Use a cost function that is the sum of absolute deviations between the global mean and the mean of each run.
> **COST FUNCTION**
$$\begin{aligned}\mu &= \frac{1}{V} \sum_{v} f_v \\&= \text{mean luminance across all videos} \\\mu_r &= \frac{R}{V} \sum_v f_v \ x_{v,r} \\&= \text{mean luminance within run } r \\\end{aligned}$$
$$\min \sum_r | \mu - \mu_r |$$
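The absolute values in this cost function stay compatible with a *linear* program thanks to the standard trick of auxiliary bound variables (presumably what the `add_abs_cost_term` helper used in the code below relies on): for each run, introduce a variable $t_r$ that upper-bounds the deviation and minimize the sum of the bounds,
$$t_r \geq \mu - \mu_r, \quad t_r \geq \mu_r - \mu, \quad \min \sum_r t_r$$
so that $t_r = |\mu - \mu_r|$ at the optimum.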
###Code
def create_group_balance_problem(I, G, F):
"""create program to solve group balance problem
## Parameters
- I: int number of items
- G: int number of groups
- F: (n_features, I)-shaped array of features
"""
s = int(I / float(G))
if s * G != I:
raise Exception('G does not evenly divide I')
program = milp.program.initialize_program()
# decision variables
for i in range(I):
for g in range(G):
milp.program.add_variable(
program=program,
name='X_{i},{g}'.format(i=i, g=g),
variable_type=bool,
)
# each item should be placed in one group
for i in range(I):
coefficients = {'X_{i},{g}'.format(i=i, g=g): 1 for g in range(G)}
milp.program.add_constraint(program, A_eq=coefficients, b_eq=1)
# groups should have sizes s1, ..., sg
for g in range(G):
coefficients = {'X_{i},{g}'.format(i=i, g=g): 1 for i in range(I)}
milp.program.add_constraint(program, A_eq=coefficients, b_eq=s)
# mean group features
for f in range(F.shape[0]):
for g in range(G):
m_name = 'm_{f},{g}'.format(f=f, g=g)
milp.program.add_variable(
program=program,
name=m_name,
variable_type=float,
)
coefficients = {
'X_{i},{g}'.format(i=i, g=g): F[f, i]
for i in range(I)
}
coefficients[m_name] = -s
milp.program.add_constraint(
program=program,
A_eq=coefficients,
b_eq=0,
)
# global means
global_means = []
for f in range(F.shape[0]):
global_mean = (1 / float(I)) * sum(F[f, i] for i in range(I))
global_means.append(global_mean)
milp.program.store_constant(
program,
'global_feature_mean_' + str(f), global_means[-1],
)
# balance terms in cost function
for f in range(F.shape[0]):
for g in range(G):
coefficients = {'m_{f},{g}'.format(f=f, g=g): 1}
milp.program.add_abs_cost_term(
program=program,
coefficients=coefficients,
constant=-global_means[f],
)
return program
###Output
_____no_output_____
###Markdown
create program
###Code
np.random.seed(0)
I = 360
G = 12
n_f = 1
# power distribution
F = np.zeros([n_f, I])
F[0, :] = 1 - np.random.power(3, size=(I))
F = (F * 1000)
F_reduce = F.astype(int)
program = create_group_balance_problem(
I=I,
G=G,
F=F_reduce,
)
###Output
_____no_output_____
###Markdown
solve program
###Code
solution = milp.program.solve_MILP(program)
X_solution = milp.program.get_solution_variable(solution, 'X')
m_solution = milp.program.get_solution_variable(solution, 'm')
global_feature_mean = milp.program.get_solution_constant(
solution,
'global_feature_mean',
)
print()
print('X_solution', X_solution.shape)
###Output
program size:
- n_variables: 4344
- n_constraints: 408
- n_cost_function_terms: 12
X_solution (360, 12)
###Markdown
find randomized solutions
###Code
# n = 1e6
n = 1e8
X_solution = X_solution.T.copy().T
balanced_grouping.set_comparison_context(F, X_solution, F.mean(1))
best_randomization = balanced_grouping.compute_randomization_solutions(
F,
n,
processes=20,
)
randomization_ds = best_randomization['randomization_ds']
###Output
_____no_output_____
###Markdown
visualize result
###Code
balanced_grouping.plot_grouping_separate_plots(
F,
X_solution,
randomization_ds,
n=n,
figsize=[6, 6],
randomization_axis=[-50, 650, 0, n / 7.0],
show_title=True,
show_labels=True,
show_legend=True,
)
###Output
Figure 2B:
###Markdown
How much better is MILP?
###Code
X_groups = [np.nonzero(X_solution[:, g])[0] for g in range(G)]
global_feature_means = F.mean(1)
group_evaluation = balanced_grouping.evaluate_groups(
X_groups,
F,
global_feature_means
)
d_milp = group_evaluation['d_total']
ratio = best_randomization['d'] / d_milp
print(
'the MILP solution has',
ratio,
'times smaller error than the best randomized solution',
)
###Output
the MILP solution has 116.23892083863578 times smaller error than the best randomized solution
###Markdown
**Figure 2C** and **Figure 2D** show a solution to this program. As guaranteed by the MILP solver, this solution achieves the lowest cost function value of any $X$ in the feasible set. **Figure 2C** shows the degree to which the mean luminance of each run deviates from the global mean luminance. This value is small compared to the overall range of luminances. **Figure 2D** compares the solution found using MILP to $10^8$ solutions found using randomization. Each randomization solution is generated by simply shuffling the videos and then splitting into 12 sequential groups. The graph shows that the solution found using MILP is substantially more balanced than any solution that can be found by randomization. The best randomized solution is the one with the lowest total balance error. As shown in **Figure 2D**, the best randomized solution still has a total balance error that is 116 times larger than the solution found using MILP. Example Problem 2: Stimulus-Task PairingAnother common neuroimaging challenge is designing top-down attention experiments. In these studies, the subject’s attention state is varied systematically to demonstrate the effect of attention on brain activity. If similar stimuli are presented during each attention condition, then attention is assumed to be the operative factor underlying any differences in brain activity. Visual attention experiments are typically limited to a small number of attention conditions. Each additional condition requires more data to be collected and also adds complexity to the overall design. Each attention condition in an experiment might also have its own idiosyncratic requirements. For example, some visual tasks (e.g. object identification) can only be performed on certain types of images (e.g. images containing objects). These experiments must be carefully balanced to ensure that the only meaningful difference across each condition is the subject's attentional state. In this case study, we demonstrate how MILP can address these challenges to create designs that have large numbers of attention conditions. Suppose a researcher is designing a visual attention experiment in a manner similar to (Clark et al. 1997), (O'Craven et al. 1999), or (Harel et al. 2014). In each of these studies, the subject’s attention state is varied independently from a visual stimulus to demonstrate the effect of attention on brain activity. These studies used 2, 3, and 6 attention states, respectively. For this example, suppose the researcher would like to significantly increase the number of attention conditions in an attempt to build a richer and more complete model of how attention affects brain activity. Design SpecificationsThe researcher would like the experiment to consist of many individual trials. During each trial, the subject will first be cued with a visual search target, such as an object category, a scene category, or a color. Then, an image will briefly flash. Finally, the subject will have a response period to indicate whether they detected the search target in the image. The researcher has allotted time for 2800 trials evenly split across 14 different search conditions, resulting in 200 trials per condition. The researcher has a stimulus dataset of 700 images. Each trial will use one of these images. Each of the images has been labeled along each of the 14 different search dimensions with one of three values. 
A “0” indicates that the image definitely does not contain the search target, a “1” indicates that the image might contain the search target, and a “2” indicates that the image definitely contains the search target. For this example we will create a synthetic dataset of image labels generated from a multinomial distribution where feature values [0, 1, 2] have probabilities [0.5, 0.25, 0.25].The main challenge that the researcher faces is deciding which images to use with each attention condition. There are three different types of balance that the researcher would like to impose. First, to reduce the effect of the subject memorizing the images, the researcher would like each image to appear in an equal number of trials throughout the experiment. This results in 4 trials per image (= 2800 trials / 700 images). Each image should also be paired with each attention condition no more than once. Second, the researcher would like to control for the effects of target detection (Guo et al. 2012; Çukur et al. 2013). To this end, the trials within each task should be evenly balanced across the 3 detection levels, meaning that ⅓ of trials should definitely contain the search target, ⅓ should ambiguously contain the search target, and ⅓ should definitely not contain the search target. Finally, to ensure that the stimulus feature distributions are similar across tasks, the researcher would like the feature distribution of each task to resemble the global feature distribution. More specifically, the mean value of each of the 14 features should be approximately equal across conditions. MILP FormulationThis problem can be seen as a variant of the previous balanced grouping problem in **Section 4.1** where the groups are now task conditions rather than runs. The main differences are that: 1) each group has its own unique constraints, 2) stimuli are allowed to appear in more than 1 group, and 3) multiple features are being balanced across groups.Use $i$ to index images, $t$ to index tasks, and $f$ to index stimulus features. The main variable of interest is the pairing of stimuli with tasks. Introduce binary variables $X$ to represent these pairings:> **VARIABLES**$$X \in \mathbb{B}^{700 \times 14} \\$$$$\begin{aligned}x_{i,t} &= 1 \rightarrow \text{ image } i \text{ is paired with task } t \\ &= 0 \rightarrow \text{ image } i \text{ is not paired with task } t\end{aligned}$$It is simple to constrain each task to have the same number of trials (2800 / 14 = 200)> **CONSTRAINTS**$$\sum_i x_{i,t} = 200 \quad \quad \quad \forall \ t \in \{ 1, ..., 14 \}$$It is also simple to require that all images appear an equal number of times throughout the experiment. For 2800 trials and 700 images, each image should be used 2800 / 700 = 4 times.> **CONSTRAINTS**$$\sum_t x_{i,t} = 4 \quad \quad \quad \forall \ i \in \{ 1, ..., 700 \}$$For each task, there should be an equal number of trials where the search target is present, ambiguous, or absent. Thus each of these three feature levels should have 200 × ⅓ = 66.67 trials. Since this is not an integer, constraints can be constructed using the integral floor and ceiling of this number. 
Let $S_{t,v}$ be the set of stimulus indices that have feature value $v$ for task $t$.> **CONSTRAINTS**$$\begin{aligned}\sum_{i \in S_{t,v}} x_{i,t} \ge 66 \quad \quad \quad & \forall \ t \in \{1, ..., 14\} \text{ and } v \in \{ 1, 2, 3 \} \\\sum_{i \in S_{t,v}} x_{i,t} \le 67 \quad \quad \quad & \forall \ t \in \{1, ..., 14\} \text{ and } v \in \{ 1, 2, 3 \}\end{aligned}$$The final part of the program is a cost function that promotes a similar stimulus feature distribution within each task. Use a matrix $L$ to refer to the feature values of each stimulus, where $L_{i,t}$ is the value of feature $t$ for image $i$. Also, let $μ_f$ be the global mean value of feature $f$ across all images. $L$ and $μ_f$ are constants predetermined by the image dataset. Use a cost function that minimizes the deviations between the global feature means $μ_f$ and the feature means of each task $m_{f,t}$.> **COST FUNCTION**$$\begin{aligned}m_{f,t} &= \frac{1}{200} \sum_i x_{i,t}\ L_{i,f} \\ &= \text{mean value of feature } f \text{ across trials of task } t\end{aligned}$$$$\\$$$$\min \sum_{f,t \atop {f \neq t}} | m_{f,t} - \mu_f |$$
###Code
def create_stimulus_condition_pairing_problem(
n_trials,
L,
feature_probabilities,
):
"""create program to solve stimulus condition pairing problem
## Parameters
- n_trials: int number of items
- L: (n_features, n_images)-shaped array of stimulus tags
    - feature_probabilities: dict mapping each detection level to its target probability
"""
n_features, n_stimuli = L.shape
n_tasks = n_features
program = milp.program.initialize_program()
# decision variables
for i in range(n_stimuli):
for g in range(n_tasks):
milp.program.add_variable(
program,
'X_{i},{g}'.format(i=i, g=g),
variable_type=bool,
)
trials_per_condition = int(n_trials / n_tasks)
assert trials_per_condition * n_tasks == n_trials
# constraint: match feature probabilities
for g in range(n_tasks):
for feature_value, feature_probability in feature_probabilities.items():
feature_count = feature_probability * trials_per_condition
variables_of_feature_value = {}
for i in range(n_stimuli):
if L[g, i] == feature_value:
variables_of_feature_value['X_{i},{g}'.format(i=i, g=g)] = 1
mode = 'ceil_floor'
if mode == 'exact':
assert feature_count == int(feature_count)
milp.program.add_constraint(
program,
A_eq=variables_of_feature_value,
b_eq=feature_count,
)
elif mode == 'ceil_floor':
ceil = int(np.ceil(feature_count))
floor = int(np.floor(feature_count))
milp.program.add_constraint(
program,
A_lt=variables_of_feature_value,
b_lt=ceil,
)
milp.program.add_constraint(
program,
A_lt={k: -v for k, v in variables_of_feature_value.items()},
b_lt=-floor,
)
else:
raise Exception(mode)
# hard constrain equal usages per stimulus
target_stimulus_repeats = (n_trials / float(n_stimuli))
milp.program.store_constant(
program=program,
name='target_stimulus_repeats',
value=target_stimulus_repeats,
)
for i in range(n_stimuli):
coefficients = {}
for g in range(n_tasks):
coefficients['X_{i},{g}'.format(i=i, g=g)] = 1
milp.program.add_constraint(
program,
A_eq=coefficients,
b_eq=target_stimulus_repeats,
)
# for each task, balance the feature distributions to the population mean
for g in range(n_tasks):
for f in range(n_features):
if g == f:
continue
coefficients = {}
for i in range(n_stimuli):
coefficients['X_{i},{g}'.format(i=i, g=g)] = L[f, i]
milp.program.add_abs_cost_term(
program,
coefficients=coefficients,
constant=-trials_per_condition * L[f, :].mean(),
)
return program
np.random.seed(0)
n_tasks = 14
n_stimuli = 700
n_trials = 2800
F_feature_values = [0, 1, 2]
F_feature_probabilities = [0.5, 0.25, 0.25]
L = np.random.choice(
F_feature_values,
p=np.array(F_feature_probabilities),
size=(n_tasks, n_stimuli),
)
n_feature_values = len(F_feature_values)
feature_probabilities = {
F_feature_values[i]: 1.0 / n_feature_values
for i in range(n_feature_values)
}
program = create_stimulus_condition_pairing_problem(
n_trials=n_trials,
L=L,
feature_probabilities=feature_probabilities,
)
###Output
_____no_output_____
###Markdown
solve program
###Code
solution = milp.program.solve_MILP(program)
X_solution = milp.program.get_solution_variable(solution, 'X')
print()
print('X_solution', X_solution.shape)
###Output
program size:
- n_variables: 9982
- n_constraints: 1148
- n_cost_function_terms: 182
X_solution (700, 14)
###Markdown
visualize result
###Code
print('MILP solution')
stimulus_task_pairing.plot_as_separate_figures(
X_solution,
L,
figsize=[6, 6],
show_title=True,
show_labels=True,
)
###Output
MILP solution
###Markdown
Example Problem 3: Structured Hierarchical SamplingAnother common neuroimaging design challenge is sampling stimuli from a highly structured space, such as the space of natural language. Such spaces are difficult to sample because samples must obey strict rules rather than being drawn from a simple probability distribution. Natural language stimuli cannot be generated by simply combining random words. For language to be intelligible, words must be jointly compatible in a meaningful way, obeying rules of grammar, syntax, and semantics. To sample from this type of space, one must be able to efficiently represent and navigate the rules of the space.In this example, suppose a researcher would like to generate natural language stimuli in the form of questions about concrete nouns. These questions will be used in an experiment that asks a large number of questions about a large number of concrete nouns, in a manner similar to (Sudre et al. 2012). In this previous study, the authors asked 20 questions about each of 60 nouns in approximately 1 hour of scanning time. In this example we will use a sparse sampling strategy to increase the scope of this experiment to 5x the number of nouns and 6x the number of questions while only using 2x the trials. We will also allow the researcher to specify rules about which nouns are semantically compatible with each question.Questions are formed by pairing a single concrete noun (e.g. a car, a phone, a flower) with a question template (e.g. How heavy is ? When was invented? What color is ?). Concrete nouns are organized into a semantic hierarchy (**Figure 4B**). Unlike the (Sudre et al. 2012) study, each question template is only applicable to nouns from a particular portion of this hierarchy (**Figure 4C**). For example, questions related to object affordances might only be compatible with nouns from the “Inanimate Object” portion of the tree. Questions related to social behavior might only be compatible with the “People” portion of the tree. The researcher would like to independently model the effects of questions and concrete nouns. To this end they will place constraints on how often each is sampled, and the manner in which they are allowed to combine. Design SpecificationsIn this example the researcher has allotted scanning time for 2400 trials. There are a total of 300 concrete nouns organized into a 15-group semantic hierarchy shown in **Figure 4B**. Each concrete noun exists in exactly one of the 10 leaf groups, and each leaf group contains exactly 30 concrete nouns. For each of the 15 example groups shown, the researcher has 8 question templates, for a total of 120 question templates. Each question template is applicable to some subset of the semantic hierarchy. Question templates related to a non-leaf group are compatible with any nouns for which that non-leaf group is an ancestor.The main challenge that the researcher faces is deciding which concrete nouns to pair with each question template. Each noun should be paired with each question template no more than 1 time, and all pairings should respect the compatibility constraints of the semantic hierarchy. Each of the 120 question templates should be used the same number of times across the 2400 trials. Because each noun is compatible with a different number of question templates, it is not possible to exactly balance the number of times each concrete noun is used. However, noun usage should still be balanced as much as possible. 
Finally, for question templates that are compatible with multiple leaf groups in the semantic hierarchy, the researcher would like to balance the number of times each template is paired with each compatible leaf group. MILP FormulationIndex question templates with $t$, concrete nouns with $n$, and noun leaf groups with $g$. Store information about template-group compatibility in a matrix $C$>$$C \in \mathbb{B} ^ {120 \times 10} \\$$$$\begin{aligned}c_{t, g} &= 1 \rightarrow \text{ question template } t \text{ compatible with group } g \\&= 0 \rightarrow \text{ question template } t \text{ not compatible with group } g\end{aligned}$$Store information about noun-group membership in a matrix M> $$M \in \mathbb{B} ^ {10 \times 300} \\$$$$\begin{aligned}m_{g, n} &= 1 \rightarrow \text{ noun } n \text{ is in group } g \\ &= 0 \rightarrow \text{ noun } n \text{ is not in group } g\end{aligned}$$Matrices $C$ and $M$ are constants that are predetermined by the given semantic tree. The main variables to be decided are pairings between question templates and concrete nouns. Introduce variables $P$ to represent these pairings> **VARIABLES** $$P \in \mathbb{B} ^ {120 \times 300} $$$$\begin{aligned}p_{t,n} &= 1 \rightarrow \text{ question } t \text{ is paired with noun } n \\ &= 0 \rightarrow \text{ question } t \text{ is not paired with noun } n\end{aligned}$$Pairs $(t, n)$ for which $(CM)_{t,n} = 0$ are invalid pairings. The corresponding $p_{t,n}$ variables can be set to $0$ to reduce the size of the program. Each question template should be used 2400 / 120 = 20 times> **CONSTRAINT**$$\sum_n p_{t,n} = 20 \quad \forall \ t \in \{1, ..., 120 \}$$The next step is to balance the number of times that each question template is paired with each of its compatible noun groups. The number of times that template $t$ is paired with group $g$ is given by an element of the matrix product $(PM^T)_{t,g}$. Let $v_t$ be the number of noun groups that are compatible with template $t$. Since each question template is to be used 20 times, each question template $t$ should be used $(20 / v_t)$ times with each of its compatible noun groups. Since this quotient is not necessarily integral, $(PM^T)_{t,g}$ can be constrained to fall within its integral floor and ceiling.> **CONSTRAINT**$$v_t = \sum_g c_{t,g} \\ \\$$$$\forall t \in \{ 1, ..., 120 \} \text{ and } g \in \{ 1, ..., 10 \} \text{ with } c_{t,g} = 1 \\\begin{aligned}\quad \quad \quad \quad \quad \quad (PM^T)_{t,g} &\ge floor \left ( \frac{20}{v_t} \right ) \\\text{ and } \\\quad \quad \quad \quad \quad \quad (PM^T)_{t,g} &\leq ceil \left ( \frac{20}{v_t} \right )\end{aligned}$$Finally, a cost function can be created to balance the number of times each concrete noun is used. There are many ways this can be achieved. The number of times each noun is used is given by $a_n$, defined below. Suppose the researcher would like to avoid outliers, and thus wishes to minimize the range of noun occurrence counts. As explained in **Section 3**, this range can be minimized as:> **COST FUNCTION**$$\begin{aligned}a_n &= \sum_t p_{t,n} \\ &= \text{number of times noun } n \text{ is used}\end{aligned}$$$$ \\ $$$$ \\ \\ \min \left \{ \max_n(a_n) - \min_n(a_n) \right \}$$
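The implementation below relies on `structured_sampling.get_coding_matrices` to turn the nested semantic tree into the $C$ and $M$ matrices. The core bookkeeping is simply "which leaf groups sit below each node of the tree"; here is a minimal sketch of that step (the helper name `leaf_coverage` and the toy tree are ours for illustration, not part of the released code).

```python
# Sketch of the tree bookkeeping behind the compatibility matrix C: question templates
# attached to a group are compatible exactly with nouns in the leaf groups below it.
# (Hypothetical helper; the notebook's structured_sampling module does this internally.)
def leaf_coverage(tree):
    """Return {group_name: [leaf group names]} for every node of a nested-dict tree."""
    coverage = {}
    for name, children in tree.items():
        if children:                              # internal group: union of its children's leaves
            child_coverage = leaf_coverage(children)
            coverage.update(child_coverage)
            leaves = [leaf for child in children for leaf in child_coverage[child]]
        else:                                     # leaf group covers only itself
            leaves = [name]
        coverage[name] = leaves
    return coverage

toy_tree = {'Entities': {'People': {}, 'Animals': {}}, 'Places': {'Countries': {}}}
print(leaf_coverage(toy_tree))
# {'People': ['People'], 'Animals': ['Animals'], 'Entities': ['People', 'Animals'],
#  'Countries': ['Countries'], 'Places': ['Countries']}
```

A row of $C$ for a template attached to group $g$ is then just the indicator vector of `leaf_coverage(tree)[g]` over the 10 leaf groups.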
###Code
def create_structured_hierarchical_sampling_problem(
noun_semantic_tree,
n_total_trials,
n_question_templates,
n_nouns_per_leaf_group,
):
"""create program to solve structured hierarchical sampling problem
## Parameters
- noun_semantic_tree: dict of nested noun semantic tree
- n_total_trials: int number of total trials
- n_question_templates: int number of question templates
- n_nouns_per_leaf_group: int number of nouns per leaf group
"""
n_appearances_per_question_template = int(
n_total_trials / n_question_templates
)
C, M = structured_sampling.get_coding_matrices(
noun_semantic_tree=noun_semantic_tree,
n_total_trials=n_total_trials,
n_question_templates=n_question_templates,
n_nouns_per_leaf_group=n_nouns_per_leaf_group,
)
CM = C.dot(M)
T, G = C.shape
G, N = M.shape
program = milp.program.initialize_program()
# decision variables
for t in range(T):
for n in range(N):
if CM[t, n]:
milp.program.add_variable(
program,
'P_{t},{n}'.format(t=t, n=n),
variable_type=bool,
)
# number of times each question template is used
for t in range(T):
A_eq = {}
for variable in milp.program.get_variables(program, 'P', t, None):
A_eq[variable] = 1
milp.program.add_constraint(
program,
A_eq=A_eq,
b_eq=n_appearances_per_question_template,
)
# minimize max(noun_usage) - min(noun_usage)
milp.program.add_variable(program, 'maximum_noun_usage', int)
milp.program.add_variable(program, 'minimum_noun_usage', int)
for n in range(N):
coefficients = {'maximum_noun_usage': -1}
coefficients_negative = {'minimum_noun_usage': 1}
for variable in milp.program.get_variables(program, 'P', None, n):
coefficients[variable] = 1
coefficients_negative[variable] = -1
milp.program.add_constraint(program, A_lt=coefficients, b_lt=0)
milp.program.add_constraint(program, A_lt=coefficients_negative, b_lt=0)
milp.program.add_cost_terms(
program=program,
coefficients={'maximum_noun_usage': 1},
)
milp.program.add_cost_terms(
program=program,
coefficients={'minimum_noun_usage': -1},
)
# balance number of times each group is paired with each question
for t in range(T):
n_template_groups = C[t, :].sum()
target_pairings_per_group = (
n_appearances_per_question_template / float(n_template_groups)
)
for g in range(G):
if C[t, g]:
coefficients = {}
for n in range(N):
if M[g, n]:
coefficients['P_{t},{n}'.format(t=t, n=n)] = 1
if np.isclose(
target_pairings_per_group,
int(target_pairings_per_group),
):
milp.program.add_constraint(
program,
A_eq=coefficients,
b_eq=target_pairings_per_group,
)
else:
ceil = np.ceil(target_pairings_per_group)
floor = np.floor(target_pairings_per_group)
milp.program.add_constraint(
program,
A_lt=coefficients,
b_lt=ceil,
)
milp.program.add_constraint(
program,
A_lt={k: -v for k, v in coefficients.items()},
b_lt=-floor,
)
return program
###Output
_____no_output_____
###Markdown
define parameters
###Code
noun_semantic_tree = {
'Entities': {
'People': {
'Famous People': {},
'People that the Subject Knows': {},
},
'Animals': {},
'Corporations': {},
},
'Inanimate Objects': {
'Vehicles': {},
'Handheld Tools': {},
'Clothing Items': {},
},
'Places': {
'Countries': {},
'Buildings': {
'Famous Buildings': {},
'Generic Buildings': {},
},
},
}
n_total_trials = 2400
n_question_templates = 120
n_nouns_per_leaf_group = 30
C, M = structured_sampling.get_coding_matrices(
noun_semantic_tree=noun_semantic_tree,
n_total_trials=n_total_trials,
n_question_templates=n_question_templates,
n_nouns_per_leaf_group=n_nouns_per_leaf_group,
)
program = create_structured_hierarchical_sampling_problem(
noun_semantic_tree=noun_semantic_tree,
n_total_trials=n_total_trials,
n_question_templates=n_question_templates,
n_nouns_per_leaf_group=n_nouns_per_leaf_group,
)
###Output
_____no_output_____
###Markdown
solve program
###Code
solution = milp.program.solve_MILP(program)
P_solution = milp.program.get_solution_variable(solution, 'P')
print()
print('P_solution', P_solution.shape)
CM = C.dot(M)
P = milp.program.get_solution_variable(solution=solution, name='P')
PMT = P.astype(int).dot(M.T)
###Output
_____no_output_____
###Markdown
inspect solution
###Code
structured_sampling.plot_solution_summary_as_separate_figures(
C,
M,
P,
figsize=[6, 6],
show_title=True,
show_labels=True,
)
###Output
Figure 5A:
###Markdown
Example Problem 4: Sequence Design For NavigationOur final case study will demonstrate how MILP can address challenges associated with designing navigation experiments. Navigation is a rapidly advancing field of neuroscience featuring studies that are increasingly rich and naturalistic (Spiers and Maguire 2006; Suthana et al. 2011; Nielson et al. 2015). As these studies grow more complex, so do the design constraints that must be integrated into experimental designs. Here we show how MILP is a natural fit for representing and optimizing the structure of such experiments. Suppose a researcher is designing a neuroimaging experiment where subjects must navigate a complex, naturalistic environment. Subjects will perform a “taxi driver” task where they are successively cued to drive to various locations on a map. Each time they reach a destination, a new destination cue will appear. Subjects will perform many of these trials throughout the course of the experiment. An important aspect of this design is the sequence of cued destinations. This sequence will determine the particular distribution of navigational phenomena that the subject encounters throughout the experiment (Hartley et al. 2003; Xu et al. 2010). In the simplest case, this sequence could be generated randomly. However, this misses an opportunity to control the conditions measured by the experiment. Optimal selection of this sequence might require special consideration of the particular map being used and the hypotheses being tested. Design SpecificationsFor this example, suppose a map has 25 possible destinations. The researcher would like to collect 80 trials per subject over the course of 40 minutes, resulting in a mean trial time of 30 seconds. To prevent memory effects related to the lengths of trials, the researcher would like the path lengths of the trials to approximate an exponential distribution. To prevent memory effects related to repeatedly visiting locations, the researcher would like the number of times each location is visited to follow a geometric distribution. MILP formulationWe will formulate this as a graph traversal problem where each location is a node and each route between locations is an edge. This is similar to the classic traveling salesman problem, where the goal is to find a sequence that both visits every node once and minimizes total distance traveled. However, the goal here is instead to find a sequence whose edge length distribution maximally conforms to the target exponential distribution. Another difference is that we would like to allow each destination to be visited more than once. We will use notation $N_i$ to refer to node $i$, and $E_{i,j}$ to refer to the edge that connects $N_i$ to $N_j$. For simplicity, we will first formulate the problem where each node is visited at most once. Also for simplicity, we will randomly choose nodes $N_I$ and $N_F$ to be the initial and final nodes in the sequence. Introduce binary variables to track which edges are used in the sequence> **VARIABLES**$$X \in \mathbb{B}^{25 \times 25} \\$$$$\begin{aligned}X_{i,j} &= 0 \text{ if edge } E_{i,j} \text{ is not used} \\ &= 1 \text{ if edge } E_{i,j} \text{ is used}\end{aligned}$$Assume $X_{i,i} = 0$ for all $i$. 
We can constrain the number of edges in the sequence to equal the number of trials> **CONSTRAINTS**$$\sum_{i,j} X_{i,j} = \text{number of trials}$$The number of times the subject enters and leaves each destination are given by the sums> $$\sum_i X_{i,j} = \text{ number of times entering } N_j$$$$\sum_j X_{i,j} = \text{ number of times leaving } N_i$$For a well formed sequence, $N_I$ should be left once, $N_F$ should be entered once> **CONSTRAINTS**$$\begin{aligned}\sum_i X_{i, F} &= 1 \\\sum_j X_{F, j} &= 0 \\\sum_j X_{I, j} &= 1 \\\sum_i X_{i, I} &= 0 \\\end{aligned}$$Other nodes should be entered and left an equal number of times> **CONSTRAINTS**$$\sum_i X_{i,h} = \sum_j X_{h,j} \quad \quad \quad \quad \forall \ h \notin \{ I, F \}$$To constrain trial length so that it is distributed exponentially, we will discretize the exponential distribution into a 10-bin histogram, as shown in **Figure 6C**. Each bin represents a specific range of trial lengths $R_b$ and has a target number of trials $T_b$ that should fall in that range. Let $L_{i,j}$ be the length of edge $E_{i,j}$. Let $A_b$ be the number of trials that fall within the range of bin $b$. Our cost function is then the deviation between the actual and target number of trials within each bin> **COST FUNCTION**$$B_b = \left \{ (i,j) | L_{i,j} \in R_b \right \} \\$$$$A_b = \sum_{ (i,j) \in B_b} X_{i,j}$$$$\min \sum_b | T_b - A_b |$$These constraints produce a sequence that is formed from $N_I$ to $N_F$. However, they also allow for the inclusion of “subtours”, which are additional unconnected cyclic paths that exist alongside the main sequence. Since we want a single, acyclic sequence, we will utilize a common technique called “subtour elimination”. Eliminating all subtours outright would require an intractably large number of constraints. A much more efficient approach is to iteratively solve a series of MILP programs, successively adding constraints until a solution free of subtours is found. Details of subtour elimination and iterative solving can be found in (Laporte and Nobert 1983). For each subtour detected in the intermediate solutions, we will add a constraint> **CONSTRAINT**$$S = \text{ the set of edges in the subtour} \\$$$$\sum_{(i,j) \in S , i \neq j} X_{i, j} \leq |S| - 1$$Finally, to allow each node to be visited multiple times, we will simply stack multiple copies of the original graph. Each node will be connected to all copies of all nodes other than itself. Any route through this augmented graph can be transformed into a route on the original graph by simply combining all copies of each node into a single node.
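The iterative subtour-elimination printout further below shows this loop in action: solve, inspect the incumbent edge set, add one constraint per detected subtour, and re-solve. Here is a minimal sketch of the detection step (a hypothetical helper, assuming each node has at most one outgoing edge in the current solution, as in the single-visit formulation above).

```python
# Minimal subtour detection for an edge-usage solution of the single-visit formulation.
# Each detected cycle S yields one elimination constraint sum_{(i,j) in S} X_ij <= |S| - 1
# before the program is re-solved. (Sketch only; not the notebook's trajectory_design code.)
def find_subtours(edges, start, end):
    """edges: list of (i, j) pairs used in the solution; returns cycles not on the start->end path."""
    succ = dict(edges)                                  # successor of each node (at most one)
    on_main_path = set()
    node = start
    while node in succ and node not in on_main_path:    # walk the main sequence from start
        on_main_path.add(node)
        node = succ[node]
    on_main_path.add(end)
    subtours, seen = [], set(on_main_path)
    for node in succ:
        if node in seen:
            continue
        cycle = []
        while node not in seen:                         # walk until the cycle closes on itself
            seen.add(node)
            cycle.append(node)
            node = succ[node]
        subtours.append(cycle)
    return subtours

edges = [(0, 1), (1, 2), (3, 4), (4, 5), (5, 3)]        # main path 0->1->2 plus a 3-node cycle
print(find_subtours(edges, start=0, end=2))             # [[3, 4, 5]]
```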
###Code
np.random.seed(0)
n_nodes = 25
x = np.random.rand(n_nodes)
y = np.random.rand(n_nodes)
distances = ((x - x[:, np.newaxis]) ** 2 + (y - y[:, np.newaxis]) ** 2) ** 0.5
t_per_distance = 2.0
t_mean_trial = 0.5
t_per_run = 10.0
n_runs = 4
n_trials = (n_runs * t_per_run) / float(t_mean_trial)
# n_trials = 100
mean_repeats_per_node = n_trials / float(n_nodes)
mean_trial_distance = t_mean_trial / t_per_distance
print('n_trials:', n_trials)
print('mean_repeats_per_node:', mean_repeats_per_node)
print('mean_trial_distance:', mean_trial_distance)
trajectory_design.plot_distance_distribution(distances)
###Output
n_trials: 80.0
mean_repeats_per_node: 3.2
mean_trial_distance: 0.25
Figure 6D:
###Markdown
repeats per node
###Code
print('compute repeats per node')
print()
n_counts = 30
geometric_parameter = 1 / mean_repeats_per_node
n_repeats = np.arange(n_counts)
n_repeats_pmf = scipy.stats.geom.pmf(n_repeats, geometric_parameter)
repeat_counts = trajectory_design.compute_repeats(n_counts, n_nodes, n_trials)
visits_per_location = trajectory_design.compute_visits_per_location(
repeat_counts
)
print('mean', (n_repeats_pmf * n_repeats).sum())
print()
print('count sum:', repeat_counts.sum())
print(
'weighted count sum:',
(repeat_counts * np.arange(repeat_counts.shape[0])).sum(),
)
trajectory_design.plot_node_repeats_pmf(n_repeats, n_repeats_pmf)
plt.show()
trajectory_design.plot_node_repeats_denormalized(
n_repeats,
n_repeats_pmf,
n_nodes,
)
plt.show()
trajectory_design.plot_target_vs_actual_repeats_per_node(
repeat_counts,
n_repeats_pmf,
n_nodes,
)
plt.show()
trajectory_design.plot_locations(
x,
y,
colors=visits_per_location,
title=visits_per_location,
cmap='nipy_spectral',
)
plt.show()
###Output
compute repeats per node
mean 3.199385156983983
count sum: 25
weighted count sum: 80
###Markdown
path length distribution
###Code
n_bins = 10
distance_pdf = trajectory_design.compute_target_path_length_pdf(
distances,
mean_trial_distance,
)
distance_pmf = trajectory_design.compute_target_path_length_pmf(
n_bins,
distances,
distance_pdf,
)
discrete_path_distribution = trajectory_design.compute_target_path_length_discrete(
distance_pmf,
n_trials,
)
print('target mean:', mean_trial_distance)
print('mean:', distance_pdf['scipy_distribution'].mean())
trajectory_design.plot_distance_pdf(distance_pdf)
plt.show()
trajectory_design.plot_distance_pmf(distance_pmf)
plt.show()
trajectory_design.plot_discrete_path_distribution(discrete_path_distribution)
plt.show()
###Output
program size:
- n_variables: 20
- n_constraints: 21
- n_cost_function_terms: 10
target mean: 0.25
mean: 0.25
###Markdown
compute path
###Code
trajectory = trajectory_design.compute_trajectory(
visits_per_location=visits_per_location,
distances=distances,
target_path_length_distribution=discrete_path_distribution,
)
###Output
program size:
- n_variables: 6170
- n_constraints: 182
- n_cost_function_terms: 10
eliminating subtours iteration 0 (sizes = [11, 41, 3, 17, 9, 3, 3])
eliminating subtours iteration 1 (sizes = [18, 60, 3, 3])
eliminating subtours iteration 2 (sizes = [76, 4, 3])
eliminating subtours iteration 3 (sizes = [54, 12, 4, 3, 3, 3, 3, 3, 4])
eliminating subtours iteration 4 (sizes = [71, 7, 5])
eliminating subtours iteration 5 (sizes = [22, 9, 37, 10, 3, 3, 3])
eliminating subtours iteration 6 (sizes = [26, 29, 5, 3, 3, 17, 4])
eliminating subtours iteration 7 (sizes = [48, 12, 21, 3])
eliminating subtours iteration 8 (sizes = [52, 23, 3, 3, 4])
eliminating subtours iteration 9 (sizes = [34, 41, 4, 3, 3])
eliminating subtours iteration 10 (sizes = [45, 5, 25, 3, 5, 3])
eliminating subtours iteration 11 (sizes = [17, 40, 9, 4, 13, 3])
eliminating subtours iteration 12 (sizes = [13, 52, 8, 3, 7, 3])
eliminating subtours iteration 13 (sizes = [42, 4, 14, 18, 7])
eliminating subtours iteration 14 (sizes = [53, 22, 4, 3, 3])
eliminating subtours iteration 15 (sizes = [38, 15, 14, 4, 12, 3])
eliminating subtours iteration 16 (sizes = [34, 24, 23, 3])
eliminating subtours iteration 17 (sizes = [11, 44, 22, 5, 3])
eliminating subtours iteration 18 (sizes = [3, 27, 10, 27, 11, 3, 6])
eliminating subtours iteration 19 (sizes = [43, 14, 14, 13])
eliminating subtours iteration 20 (sizes = [25, 53, 5])
eliminating subtours iteration 21 (sizes = [71, 3, 4, 3, 4])
eliminating subtours iteration 22 (sizes = [75, 5, 3])
eliminating subtours iteration 23 (sizes = [41, 41])
eliminating subtours iteration 24 (sizes = [20, 3, 3, 56, 3])
eliminating subtours iteration 25 (sizes = [15, 42, 26])
eliminating subtours iteration 26 (sizes = [67, 5, 6, 6])
eliminating subtours iteration 27 (sizes = [70, 7, 4, 3])
eliminating subtours iteration 28 (sizes = [74, 3, 6])
eliminating subtours iteration 29 (sizes = [10, 46, 7, 16, 3, 4])
eliminating subtours iteration 30 (sizes = [19, 21, 3, 27, 3, 5, 3, 3, 5])
eliminating subtours iteration 31 (sizes = [44, 21, 5, 3, 4, 9])
eliminating subtours iteration 32 (sizes = [25, 26, 11, 16, 7])
eliminating subtours iteration 33 (sizes = [46, 12, 20, 4, 3])
eliminating subtours iteration 34 (sizes = [52, 24, 3, 5])
eliminating subtours iteration 35 (sizes = [18, 37, 20, 7, 3])
eliminating subtours iteration 36 (sizes = [7, 11, 8, 27, 21, 3, 8, 3])
eliminating subtours iteration 37 (sizes = [66, 7, 4, 7])
eliminating subtours iteration 38 (sizes = [12, 16, 24, 15, 16, 3])
eliminating subtours iteration 39 (sizes = [51, 4, 4, 7, 10, 8, 3])
eliminating subtours iteration 40 (sizes = [22, 30, 15, 11, 3, 3, 3])
eliminating subtours iteration 41 (sizes = [58, 22, 3])
eliminating subtours iteration 42 (sizes = [53, 19, 3, 7, 3])
eliminating subtours iteration 43 (sizes = [40, 28, 9, 7])
eliminating subtours iteration 44 (sizes = [48, 5, 3, 14, 9, 5, 3])
eliminating subtours iteration 45 (sizes = [66, 5, 4, 3, 3, 3, 3])
eliminating subtours iteration 46 (sizes = [46, 12, 3, 6, 9, 3, 4, 5])
eliminating subtours iteration 47 (sizes = [63, 3, 4, 5, 3, 4, 5])
eliminating subtours iteration 48 (sizes = [50, 11, 18, 3, 3])
eliminating subtours iteration 49 (sizes = [54, 6, 21, 3])
eliminating subtours iteration 50 (sizes = [10, 62, 3, 6, 4])
eliminating subtours iteration 51 (sizes = [24, 3, 44, 5, 9])
eliminating subtours iteration 52 (sizes = [74, 4, 3, 3])
eliminating subtours iteration 53 (sizes = [36, 16, 5, 3, 6, 3, 12, 5, 3])
eliminating subtours iteration 54 (sizes = [79, 3])
eliminating subtours iteration 55 (sizes = [73, 3, 3, 5])
eliminating subtours iteration 56 (sizes = [41, 32, 6, 5])
eliminating subtours iteration 57 (sizes = [56, 7, 3, 9, 3, 8])
eliminating subtours iteration 58 (sizes = [28, 34, 4, 6, 11, 3])
eliminating subtours iteration 59 (sizes = [71, 7, 3, 3])
eliminating subtours iteration 60 (sizes = [23, 25, 17, 16, 4])
eliminating subtours iteration 61 (sizes = [76, 3, 4])
eliminating subtours iteration 62 (sizes = [15, 44, 3, 12, 3, 6, 4])
eliminating subtours iteration 63 (sizes = [25, 8, 3, 24, 15, 4, 6, 3])
eliminating subtours iteration 64 (sizes = [7, 62, 8, 5, 3])
eliminating subtours iteration 65 (sizes = [28, 42, 4, 8, 3])
eliminating subtours iteration 66 (sizes = [59, 3, 17, 3, 3])
eliminating subtours iteration 67 (sizes = [34, 7, 7, 13, 5, 10, 4, 5, 4])
eliminating subtours iteration 68 (sizes = [48, 31, 4])
eliminating subtours iteration 69 (sizes = [3, 23, 41, 12, 3, 4])
eliminating subtours iteration 70 (sizes = [73, 3, 5, 3])
eliminating subtours iteration 71 (sizes = [56, 4, 6, 9, 4, 3, 5])
eliminating subtours iteration 72 (sizes = [43, 8, 19, 7, 5, 4])
eliminating subtours iteration 73 (sizes = [8, 38, 23, 15])
eliminating subtours iteration 74 (sizes = [64, 12, 3, 5])
eliminating subtours iteration 75 (sizes = [67, 7, 5, 3, 3])
eliminating subtours iteration 76 (sizes = [73, 5, 5])
eliminating subtours iteration 77 (sizes = [5, 14, 47, 11, 4, 5])
eliminating subtours iteration 78 (sizes = [15, 49, 17, 3])
eliminating subtours iteration 79 (sizes = [24, 26, 5, 23, 3, 5])
eliminating subtours iteration 80 (sizes = [72, 8, 3])
eliminating subtours iteration 81 (sizes = [19, 52, 3, 4, 3, 3, 3])
eliminating subtours iteration 82 (sizes = [3, 70, 8, 3])
eliminating subtours iteration 83 (sizes = [46, 12, 9, 3, 5, 4, 8])
eliminating subtours iteration 84 (sizes = [14, 40, 18, 8, 3, 3])
eliminating subtours iteration 85 (sizes = [14, 11, 15, 20, 14, 3, 6, 3, 3])
eliminating subtours iteration 86 (sizes = [68, 7, 5, 4])
eliminating subtours iteration 87 (sizes = [32, 14, 35, 3])
eliminating subtours iteration 88 (sizes = [68, 4, 4, 8])
eliminating subtours iteration 89 (sizes = [37, 36, 3, 3, 6])
eliminating subtours iteration 90 (sizes = [44, 10, 7, 10, 8, 5, 3])
eliminating subtours iteration 91 (sizes = [50, 4, 13, 4, 10, 5])
eliminating subtours iteration 92 (sizes = [26, 38, 3, 3, 4, 3, 3, 3, 6])
eliminating subtours iteration 93 (sizes = [56, 6, 15, 7])
eliminating subtours iteration 94 (sizes = [34, 31, 4, 6, 3, 8])
eliminating subtours iteration 95 (sizes = [70, 8, 3, 3])
eliminating subtours iteration 96 (sizes = [63, 15, 5])
eliminating subtours iteration 97 (sizes = [73, 5, 5])
eliminating subtours iteration 98 (sizes = [34, 11, 23, 16])
eliminating subtours iteration 99 (sizes = [17, 26, 26, 5, 3, 7, 3])
eliminating subtours iteration 100 (sizes = [17, 30, 13, 16, 9])
eliminating subtours iteration 101 (sizes = [33, 34, 6, 7, 3, 3])
eliminating subtours iteration 102 (sizes = [72, 6, 5])
final trajectory:
[19, 10, 17, 8, 10, 2, 18, 23, 7, 10, 12, 11, 3, 4, 3, 11, 3, 14, 2, 23, 1, 21, 4, 3, 2, 14, 15, 24, 15, 16, 14, 11, 3, 12, 19, 10, 19, 10, 7, 19, 10, 19, 10, 23, 15, 24, 15, 22, 3, 23, 1, 10, 19, 7, 19, 10, 2, 22, 9, 13, 20, 2, 13, 2, 6, 11, 12, 3, 5, 23, 1, 18, 1, 18, 16, 15, 16, 14, 2, 0, 2]
###Markdown
compute random paths
###Code
n_random = 10000000
# n_random = 100000
random_paths = trajectory_design.compute_random_paths(
n_random=n_random,
n_trials=n_trials,
distances=distances,
visits_per_location=visits_per_location,
**discrete_path_distribution
)
for key, value in random_paths.items():
print(key + ':', value)
###Output
best_path: [19 16 15 14 19 15 15 7 14 2 5 1 21 13 19 8 23 24 13 19 7 14 1 18
1 2 2 3 23 7 14 0 23 3 10 16 23 2 9 10 12 11 3 12 22 1 18 15
24 10 3 3 11 2 4 23 10 2 20 10 15 16 19 17 19 10 19 10 10 2 2 6
11 10 3 22 11 4 18 12 3]
best_counts: [21 7 14 8 5 3 7 11 3 1]
best_error: 36
n_random: 10000000
###Markdown
Plot Results
###Code
figsize = [6, 6]
n_shown_repeats = 15
trajectory_design.plot_summary(
distances=distances,
n_repeats=n_repeats,
n_repeats_pmf=n_repeats_pmf,
n_nodes=n_nodes,
repeat_counts=repeat_counts,
n_shown_repeats=n_shown_repeats,
x=x,
y=y,
visits_per_location=visits_per_location,
distance_pdf=distance_pdf,
discrete_path_distribution=discrete_path_distribution,
distance_pmf=distance_pmf,
trajectory=trajectory,
random_paths=random_paths,
figsize=figsize,
show_title=True,
show_labels=True,
show_legend=True,
show_colorbar=True,
)
###Output
Figure 6D:
|
doc/LectureNotes/_build/jupyter_execute/statistics.ipynb | ###Markdown
Elements of Probability Theory and Statistical Data Analysis Domains and probabilitiesConsider the following simple example, namely the tossing of two dice, resulting in the following possible values $$\{2,3,4,5,6,7,8,9,10,11,12\}.$$ These values are called the *domain*. To this domain we have the corresponding *probabilities* $$\{1/36,2/36,3/36,4/36,5/36,6/36,5/36,4/36,3/36,2/36,1/36\}.$$ Tossing the diceThe numbers in the domain are the outcomes of the physical process of tossing say two dice. We cannot tell beforehand whether the outcome is 3 or 5 or any other number in this domain. This defines the randomness of the outcome, or unexpectedness, or any other synonymous word which encompasses the uncertainty of the final outcome. The only thing we can tell beforehand is that say the outcome 2 has a certain probability. If our favorite hobby is to spend an hour every evening throwing dice and registering the sequence of outcomes, we will note that the numbers in the above domain $$\{2,3,4,5,6,7,8,9,10,11,12\},$$ appear in a random order. After 11 throws the results may look like $$\{10,8,6,3,6,9,11,8,12,4,5\}.$$ Stochastic variables**Random variables are characterized by a domain which contains all possible values that the random value may take. This domain has a corresponding probability distribution function (PDF)**. Stochastic variables and the main concepts, the discrete caseThere are two main concepts associated with a stochastic variable. The *domain* is the set $\mathbb D = \{x\}$ of all accessible values the variable can assume, so that $X \in \mathbb D$. An example of a discrete domain is the set of six different numbers that we may get by throwing a die, $x\in\{1,\,2,\,3,\,4,\,5,\,6\}$. The *probability distribution function (PDF)* is a function $p(x)$ on the domain which, in the discrete case, gives us the probability or relative frequency with which these values of $X$ occur $$p(x) = \mathrm{Prob}(X=x).$$ Stochastic variables and the main concepts, the continuous caseIn the continuous case, the PDF does not directly depict the actual probability. Instead we define the probability for the stochastic variable to assume any value on an infinitesimal interval around $x$ to be $p(x)dx$. The continuous function $p(x)$ then gives us the *density* of the probability rather than the probability itself. The probability for a stochastic variable to assume any value on a non-infinitesimal interval $[a,\,b]$ is then just the integral $$\mathrm{Prob}(a\leq X\leq b) = \int_a^b p(x)dx.$$ Qualitatively speaking, a stochastic variable represents the values of numbers chosen as if by chance from some specified PDF so that the selection of a large set of these numbers reproduces this PDF. The cumulative probabilityOf interest to us is the *cumulative probability distribution function* (**CDF**), $P(x)$, which is just the probability for a stochastic variable $X$ to assume any value less than $x$ $$P(x)=\mathrm{Prob}(X\leq x) =\int_{-\infty}^x p(x^{\prime})dx^{\prime}.$$ The relation between a CDF and its corresponding PDF is then $$p(x) = \frac{d}{dx}P(x).$$ Properties of PDFsThere are two properties that all PDFs must satisfy. The first one is positivity (assuming that the PDF is normalized) $$0 \leq p(x) \leq 1.$$ Naturally, it would be nonsensical for any of the values of the domain to occur with a probability greater than $1$ or less than $0$. Also, the PDF must be normalized. That is, all the probabilities must add up to unity. The probability of "anything" to happen is always unity. 
For both discrete and continuous PDFs, this condition is $$\begin{align*}\sum_{x_i\in\mathbb D} p(x_i) & = 1,\\\int_{x\in\mathbb D} p(x)\,dx & = 1.\end{align*}$$ Important distributions, the uniform distributionThe first one is the most basic PDF; namely the uniform distribution $$\begin{equation}p(x) = \frac{1}{b-a}\theta(x-a)\theta(b-x).\label{eq:unifromPDF} \tag{1}\end{equation}$$ For $a=0$ and $b=1$ we have $$\begin{array}{ll}p(x)dx = dx & \in [0,1].\end{array}$$ The latter distribution is used to generate random numbers. For other PDFs, one normally needs a mapping from this distribution to, for example, the exponential distribution. Gaussian distributionThe second one is the Gaussian Distribution $$p(x) = \frac{1}{\sigma\sqrt{2\pi}} \exp{(-\frac{(x-\mu)^2}{2\sigma^2})},$$ with mean value $\mu$ and standard deviation $\sigma$. If $\mu=0$ and $\sigma=1$, it is normally called the **standard normal distribution** $$p(x) = \frac{1}{\sqrt{2\pi}} \exp{(-\frac{x^2}{2})}.$$ The following simple Python code plots the above distribution for different values of $\mu$ and $\sigma$.
###Code
%matplotlib inline
import numpy as np
from math import acos, exp, sqrt
from matplotlib import pyplot as plt
from matplotlib import rc, rcParams
import matplotlib.units as units
import matplotlib.ticker as ticker
rc('text',usetex=True)
rc('font',**{'family':'serif','serif':['Gaussian distribution']})
font = {'family' : 'serif',
'color' : 'darkred',
'weight' : 'normal',
'size' : 16,
}
pi = acos(-1.0)
mu0 = 0.0
sigma0 = 1.0
mu1= 1.0
sigma1 = 2.0
mu2 = 2.0
sigma2 = 4.0
x = np.linspace(-20.0, 20.0)
v0 = np.exp(-(x*x-2*x*mu0+mu0*mu0)/(2*sigma0*sigma0))/sqrt(2*pi*sigma0*sigma0)
v1 = np.exp(-(x*x-2*x*mu1+mu1*mu1)/(2*sigma1*sigma1))/sqrt(2*pi*sigma1*sigma1)
v2 = np.exp(-(x*x-2*x*mu2+mu2*mu2)/(2*sigma2*sigma2))/sqrt(2*pi*sigma2*sigma2)
plt.plot(x, v0, 'b-', x, v1, 'r-', x, v2, 'g-')
plt.title(r'{\bf Gaussian distributions}', fontsize=20)
plt.text(-19, 0.3, r'Parameters: $\mu = 0$, $\sigma = 1$', fontdict=font)
plt.text(-19, 0.18, r'Parameters: $\mu = 1$, $\sigma = 2$', fontdict=font)
plt.text(-19, 0.08, r'Parameters: $\mu = 2$, $\sigma = 4$', fontdict=font)
plt.xlabel(r'$x$',fontsize=20)
plt.ylabel(r'$p(x)$',fontsize=20)
# Tweak spacing to prevent clipping of ylabel
plt.subplots_adjust(left=0.15)
plt.savefig('gaussian.pdf', format='pdf')
plt.show()
###Output
_____no_output_____
###Markdown
Exponential distributionAnother important distribution in science is the exponential distribution $$p(x) = \alpha\exp{(-\alpha x)}.$$ Expectation valuesLet $h(x)$ be an arbitrary continuous function on the domain of the stochastic variable $X$ whose PDF is $p(x)$. We define the *expectation value* of $h$ with respect to $p$ as follows $$\begin{equation}\langle h \rangle_X \equiv \int\! h(x)p(x)\,dx\label{eq:expectation_value_of_h_wrt_p} \tag{2}\end{equation}$$ Whenever the PDF is known implicitly, like in this case, we will drop the index $X$ for clarity. A particularly useful class of special expectation values are the *moments*. The $n$-th moment of the PDF $p$ is defined as follows $$\langle x^n \rangle \equiv \int\! x^n p(x)\,dx$$ Stochastic variables and the main concepts, mean valuesThe zero-th moment $\langle 1\rangle$ is just the normalization condition of $p$. The first moment, $\langle x\rangle$, is called the *mean* of $p$ and often denoted by the letter $\mu$ $$\langle x\rangle = \mu \equiv \int x p(x)dx,$$ for a continuous distribution and $$\langle x\rangle = \mu \equiv \sum_{i=1}^N x_i p(x_i),$$ for a discrete distribution. Qualitatively it represents the centroid or the average value of the PDF and is therefore simply called the expectation value of $p(x)$. Stochastic variables and the main concepts, central moments, the varianceA special version of the moments is the set of *central moments*, the $n$-th central moment defined as $$\langle (x-\langle x\rangle )^n\rangle \equiv \int\! (x-\langle x\rangle)^n p(x)\,dx$$ The zero-th and first central moments are both trivial, equal $1$ and $0$, respectively. But the second central moment, known as the *variance* of $p$, is of particular interest. For the stochastic variable $X$, the variance is denoted as $\sigma^2_X$ or $\mathrm{Var}(X)$ $$\begin{align*}\sigma^2_X &=\mathrm{Var}(X) = \langle (x-\langle x\rangle)^2\rangle =\int (x-\langle x\rangle)^2 p(x)dx\\& = \int\left(x^2 - 2 x \langle x\rangle +\langle x\rangle^2\right)p(x)dx\\& = \langle x^2\rangle - 2 \langle x\rangle\langle x\rangle + \langle x\rangle^2\\& = \langle x^2 \rangle - \langle x\rangle^2\end{align*}$$ The square root of the variance, $\sigma =\sqrt{\langle (x-\langle x\rangle)^2\rangle}$ is called the **standard deviation** of $p$. It is the RMS (root-mean-square) value of the deviation of the PDF from its mean value, interpreted qualitatively as the "spread" of $p$ around its mean. Probability Distribution FunctionsThe following table collects properties of probability distribution functions. In our notation we reserve the label $p(x)$ for the probability of a certain event, while $P(x)$ is the cumulative probability.

| | Discrete PDF | Continuous PDF |
| --- | --- | --- |
| Domain | $\left\{x_1, x_2, x_3, \dots, x_N\right\}$ | $[a,b]$ |
| Probability | $p(x_i)$ | $p(x)dx$ |
| Cumulative | $P_i=\sum_{l=1}^ip(x_l)$ | $P(x)=\int_a^xp(t)dt$ |
| Positivity | $0 \le p(x_i) \le 1$ | $p(x) \ge 0$ |
| Positivity | $0 \le P_i \le 1$ | $0 \le P(x) \le 1$ |
| Monotonic | $P_i \ge P_j$ if $x_i \ge x_j$ | $P(x_i) \ge P(x_j)$ if $x_i \ge x_j$ |
| Normalization | $P_N=1$ | $P(b)=1$ |

Probability Distribution FunctionsWith a PDF we can compute expectation values of selected quantities such as $$\langle x^k\rangle=\sum_{i=1}^{N}x_i^kp(x_i),$$ if we have a discrete PDF or $$\langle x^k\rangle=\int_a^b x^kp(x)dx,$$ in the case of a continuous PDF. We have already defined the mean value $\mu$ and the variance $\sigma^2$. The three famous Probability Distribution FunctionsThere are at least three PDFs which one may encounter. 
These are the **Uniform distribution** $$p(x)=\frac{1}{b-a}\Theta(x-a)\Theta(b-x),$$ yielding probabilities different from zero in the interval $[a,b]$. **The exponential distribution** $$p(x)=\alpha \exp{(-\alpha x)},$$ yielding probabilities different from zero in the interval $[0,\infty)$ and with mean value $$\mu = \int_0^{\infty}xp(x)dx=\int_0^{\infty}x\alpha \exp{(-\alpha x)}dx=\frac{1}{\alpha},$$ with variance $$\sigma^2=\int_0^{\infty}x^2p(x)dx-\mu^2 = \frac{1}{\alpha^2}.$$ Probability Distribution Functions, the normal distributionFinally, we have the so-called univariate normal distribution, or just the **normal distribution** $$p(x)=\frac{1}{b\sqrt{2\pi}}\exp{\left(-\frac{(x-a)^2}{2b^2}\right)}$$ with probabilities different from zero in the interval $(-\infty,\infty)$. The integral $\int_{-\infty}^{\infty}\exp{\left(-x^2\right)}dx$ appears in many calculations, its value is $\sqrt{\pi}$, a result we will need when we compute the mean value and the variance. The mean value is $$\mu = \int_{-\infty}^{\infty}xp(x)dx=\frac{1}{b\sqrt{2\pi}}\int_{-\infty}^{\infty}x \exp{\left(-\frac{(x-a)^2}{2b^2}\right)}dx,$$ which becomes with a suitable change of variables $$\mu =\frac{1}{b\sqrt{2\pi}}\int_{-\infty}^{\infty}b\sqrt{2}(a+b\sqrt{2}y)\exp{\left(-y^2\right)}dy=a.$$ Probability Distribution Functions, the normal distributionSimilarly, the variance becomes $$\sigma^2 = \frac{1}{b\sqrt{2\pi}}\int_{-\infty}^{\infty}(x-\mu)^2 \exp{\left(-\frac{(x-a)^2}{2b^2}\right)}dx,$$ and inserting the mean value and performing a variable change we obtain $$\sigma^2 = \frac{1}{b\sqrt{2\pi}}\int_{-\infty}^{\infty}b\sqrt{2}(b\sqrt{2}y)^2\exp{\left(-y^2\right)}dy=\frac{2b^2}{\sqrt{\pi}}\int_{-\infty}^{\infty}y^2\exp{\left(-y^2\right)}dy,$$ and performing a final integration by parts we obtain the well-known result $\sigma^2=b^2$. It is useful to introduce the standard normal distribution as well, defined by $\mu=a=0$, viz. a distribution centered around zero and with a variance $\sigma^2=1$, leading to $$\begin{equation} p(x)=\frac{1}{\sqrt{2\pi}}\exp{\left(-\frac{x^2}{2}\right)}.\label{_auto1} \tag{3}\end{equation}$$ Probability Distribution Functions, the cumulative distributionThe exponential and uniform distributions have simple cumulative functions, whereas the normal distribution does not, being proportional to the so-called error function $erf(x)$, given by $$P(x) = \frac{1}{\sqrt{2\pi}}\int_{-\infty}^x\exp{\left(-\frac{t^2}{2}\right)}dt,$$ which is difficult to evaluate in a quick way. Probability Distribution Functions, other important distributionSome other PDFs which one encounters often in the natural sciences are the binomial distribution $$p(x) = \left(\begin{array}{c} n \\ x\end{array}\right)y^x(1-y)^{n-x} \hspace{0.5cm}x=0,1,\dots,n,$$ where $y$ is the probability for a specific event, such as the tossing of a coin or moving left or right in the case of a random walker. Note that $x$ is a discrete stochastic variable. The sequence of binomial trials is characterized by the following definitions:
* Every experiment is thought to consist of $N$ independent trials.
* In every independent trial one registers if a specific situation happens or not, such as the jump to the left or right of a random walker.
* The probability for every outcome in a single trial has the same value, for example the outcome of tossing (either heads or tails) a coin is always $1/2$.
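The next section derives the binomial mean $ny$ and variance $ny(1-y)$; a quick simulation check (ours, not part of the original notes) confirms both values numerically:

```python
# Quick simulation check (not part of the notes) of the binomial moments derived below:
# for X ~ Binomial(n, y), the mean is n*y and the variance is n*y*(1 - y).
import numpy as np

rng = np.random.default_rng(42)
n, y = 20, 0.3
samples = rng.binomial(n, y, size=200_000)
print("empirical mean:", samples.mean(), " theory:", n * y)
print("empirical var: ", samples.var(), " theory:", n * y * (1 - y))
```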
Probability Distribution Functions, the binomial distributionIn order to compute the mean and variance we need to recall Newton's binomial formula $$(a+b)^m=\sum_{n=0}^m \left(\begin{array}{c} m \\ n\end{array}\right)a^nb^{m-n},$$ which can be used to show that $$\sum_{x=0}^n\left(\begin{array}{c} n \\ x\end{array}\right)y^x(1-y)^{n-x} = (y+1-y)^n = 1,$$ the PDF is normalized to one. The mean value is $$\mu = \sum_{x=0}^n x\left(\begin{array}{c} n \\ x\end{array}\right)y^x(1-y)^{n-x} =\sum_{x=0}^n x\frac{n!}{x!(n-x)!}y^x(1-y)^{n-x},$$ resulting in $$\mu = ny\sum_{x=1}^n \frac{(n-1)!}{(x-1)!(n-1-(x-1))!}y^{x-1}(1-y)^{n-1-(x-1)},$$ which we rewrite as $$\mu=ny\sum_{\nu=0}^{n-1}\left(\begin{array}{c} n-1 \\ \nu\end{array}\right)y^{\nu}(1-y)^{n-1-\nu} =ny(y+1-y)^{n-1}=ny.$$ The variance is slightly trickier to get. It reads $\sigma^2=ny(1-y)$. Probability Distribution Functions, Poisson's distributionAnother important distribution with discrete stochastic variables $x$ is the Poisson model, which resembles the exponential distribution and reads $$p(x) = \frac{\lambda^x}{x!} e^{-\lambda} \hspace{0.5cm}x=0,1,\dots;\lambda > 0.$$ In this case both the mean value and the variance are easier to calculate, $$\mu = \sum_{x=0}^{\infty} x \frac{\lambda^x}{x!} e^{-\lambda} = \lambda e^{-\lambda}\sum_{x=1}^{\infty}\frac{\lambda^{x-1}}{(x-1)!}=\lambda,$$ and the variance is $\sigma^2=\lambda$. Probability Distribution Functions, Poisson's distributionAn example of applications of the Poisson distribution could be the counting of the number of $\alpha$-particles emitted from a radioactive source in a given time interval. In the limit of $n\rightarrow \infty$ and for small probabilities $y$, the binomial distribution approaches the Poisson distribution. Setting $\lambda = ny$, with $y$ the probability for an event in the binomial distribution we can show that $$\lim_{n\rightarrow \infty}\left(\begin{array}{c} n \\ x\end{array}\right)y^x(1-y)^{n-x} =\frac{\lambda^x}{x!} e^{-\lambda}.$$ Meet the covariance!An important quantity in a statistical analysis is the so-called covariance. Consider the set $\{X_i\}$ of $n$ stochastic variables (not necessarily uncorrelated) with the multivariate PDF $P(x_1,\dots,x_n)$. The *covariance* of two of the stochastic variables, $X_i$ and $X_j$, is defined as follows $$\begin{equation}\mathrm{Cov}(X_i,\,X_j) = \langle (x_i-\langle x_i\rangle)(x_j-\langle x_j\rangle)\rangle \label{_auto2} \tag{4}\end{equation}$$ $$\begin{equation} =\int\cdots\int (x_i-\langle x_i\rangle)(x_j-\langle x_j\rangle)P(x_1,\dots,x_n)\,dx_1\dots dx_n,\label{eq:def_covariance} \tag{5}\end{equation}$$ with $$\langle x_i\rangle =\int\cdots\int x_i P(x_1,\dots,x_n)\,dx_1\dots dx_n.$$ Meet the covariance in matrix disguiseIf we consider the above covariance as a matrix $$C_{ij} =\mathrm{Cov}(X_i,\,X_j),$$ then the diagonal elements are just the familiar variances, $C_{ii} = \mathrm{Cov}(X_i,\,X_i) = \mathrm{Var}(X_i)$. It turns out that all the off-diagonal elements are zero if the stochastic variables are uncorrelated. Covariance
###Code
# Importing various packages
from math import exp, sqrt
from random import random, seed
import numpy as np
import matplotlib.pyplot as plt
def covariance(x, y, n):
sum = 0.0
mean_x = np.mean(x)
mean_y = np.mean(y)
for i in range(0, n):
sum += (x[(i)]-mean_x)*(y[i]-mean_y)
return sum/n
n = 10
x=np.random.normal(size=n)
y = 4+3*x+np.random.normal(size=n)
covxy = covariance(x,y,n)
print(covxy)
z = np.vstack((x, y))
# Note: np.cov treats each row as a variable; passing z.T (shape n x 2) makes
# every sample a variable, so the result below is an n x n matrix rather than
# the 2 x 2 covariance matrix of x and y
c = np.cov(z.T)
print(c)
###Output
4.323291478597321
[[4.06126507e+00 5.22717936e+00 3.58228342e-01 1.09103481e+01
4.42847770e+00 5.02161783e+00 8.06412177e-03 6.67407338e+00
1.12366979e+01 6.04205220e+00]
[5.22717936e+00 6.72780613e+00 4.61069091e-01 1.40425078e+01
5.69981195e+00 6.46323168e+00 1.03791824e-02 8.59007674e+00
1.44625466e+01 7.77661393e+00]
[3.58228342e-01 4.61069091e-01 3.15979239e-02 9.62359224e-01
3.90618734e-01 4.42937310e-01 7.11304709e-04 5.88693966e-01
9.91145266e-01 5.32945844e-01]
[1.09103481e+01 1.40425078e+01 9.62359224e-01 2.93100040e+01
1.18968431e+01 1.34902789e+01 2.16637855e-02 1.79295029e+01
3.01867234e+01 1.62316154e+01]
[4.42847770e+00 5.69981195e+00 3.90618734e-01 1.18968431e+01
4.82889306e+00 5.47566390e+00 8.79326583e-03 7.27753165e+00
1.22527008e+01 6.58836420e+00]
[5.02161783e+00 6.46323168e+00 4.42937310e-01 1.34902789e+01
5.47566390e+00 6.20906175e+00 9.97101567e-03 8.25226753e+00
1.38937995e+01 7.47079457e+00]
[8.06412177e-03 1.03791824e-02 7.11304709e-04 2.16637855e-02
8.79326583e-03 9.97101567e-03 1.60122668e-05 1.32521615e-02
2.23117916e-02 1.19972087e-02]
[6.67407338e+00 8.59007674e+00 5.88693966e-01 1.79295029e+01
7.27753165e+00 8.25226753e+00 1.32521615e-02 1.09678277e+01
1.84658093e+01 9.92919670e+00]
[1.12366979e+01 1.44625466e+01 9.91145266e-01 3.01867234e+01
1.22527008e+01 1.38937995e+01 2.23117916e-02 1.84658093e+01
3.10896672e+01 1.67171347e+01]
[6.04205220e+00 7.77661393e+00 5.32945844e-01 1.62316154e+01
6.58836420e+00 7.47079457e+00 1.19972087e-02 9.92919670e+00
1.67171347e+01 8.98892195e+00]]
###Markdown
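As a small added sketch (with a fresh random sample, so the numbers will not match the printout above), the 2 x 2 covariance matrix of $x$ and $y$ is obtained by passing the stacked array with one row per variable. Note that np.cov normalizes with $1/(n-1)$ by default, while the covariance function above divides by $n$.
###Code
# Sketch: covariance matrix with one row per variable
import numpy as np

n = 10
x = np.random.normal(size=n)
y = 4 + 3*x + np.random.normal(size=n)

# rows of z are the two variables x and y, columns are the n samples
z = np.vstack((x, y))
print(np.cov(z))           # 2 x 2 matrix, 1/(n-1) normalization (default)
print(np.cov(z, ddof=0))   # same matrix with the 1/n normalization used above
###Output
_____no_output_____
###Markdown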
Meet the covariance, uncorrelated eventsConsider the stochastic variables $X_i$ and $X_j$, ($i\neq j$). We have $$\begin{align*}Cov(X_i,\,X_j) &= \langle (x_i-\langle x_i\rangle)(x_j-\langle x_j\rangle)\rangle\\&=\langle x_i x_j - x_i\langle x_j\rangle - \langle x_i\rangle x_j + \langle x_i\rangle\langle x_j\rangle\rangle\\&=\langle x_i x_j\rangle - \langle x_i\langle x_j\rangle\rangle - \langle \langle x_i\rangle x_j \rangle +\langle \langle x_i\rangle\langle x_j\rangle\rangle \\&=\langle x_i x_j\rangle - \langle x_i\rangle\langle x_j\rangle - \langle x_i\rangle\langle x_j\rangle +\langle x_i\rangle\langle x_j\rangle \\&=\langle x_i x_j\rangle - \langle x_i\rangle\langle x_j\rangle\end{align*}$$ If $X_i$ and $X_j$ are independent (assuming $i \neq j$), we have that $$\langle x_i x_j\rangle = \langle x_i\rangle\langle x_j\rangle,$$ leading to $$Cov(X_i, X_j) = 0 \hspace{0.1cm} (i\neq j).$$ Numerical experiments and the covarianceNow that we have constructed an idealized mathematical framework, letus try to apply it to empirical observations. Examples of relevantphysical phenomena may be spontaneous decays of nuclei, or a purelymathematical set of numbers produced by some deterministicmechanism. It is the latter we will deal with, using so-called pseudo-randomnumber generators. In general our observations will contain only a limited set ofobservables. We remind the reader thata *stochastic process* is a process that produces sequentially achain of values $$\{x_1, x_2,\dots\,x_k,\dots\}.$$ Numerical experiments and the covarianceWe will call thesevalues our *measurements* and the entire set as our measured*sample*. The action of measuring all the elements of a samplewe will call a stochastic *experiment* (since, operationally,they are often associated with results of empirical observation ofsome physical or mathematical phenomena; precisely an experiment). Weassume that these values are distributed according to some PDF $p_X^{\phantom X}(x)$, where $X$ is just the formal symbol for thestochastic variable whose PDF is $p_X^{\phantom X}(x)$. Instead oftrying to determine the full distribution $p$ we are often onlyinterested in finding the few lowest moments, like the mean$\mu_X^{\phantom X}$ and the variance $\sigma_X^{\phantom X}$. Numerical experiments and the covariance, actual situationsIn practical situations however, a sample is always of finite size. Let thatsize be $n$. The expectation value of a sample $\alpha$, the **sample mean**, is then defined as follows $$\langle x_{\alpha} \rangle \equiv \frac{1}{n}\sum_{k=1}^n x_{\alpha,k}.$$ The *sample variance* is: $$\mathrm{Var}(x) \equiv \frac{1}{n}\sum_{k=1}^n (x_{\alpha,k} - \langle x_{\alpha} \rangle)^2,$$ with its square root being the *standard deviation of the sample*. Numerical experiments and the covariance, our observablesYou can think of the above observables as a set of quantities which definea given experiment. 
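As a small added sketch, the sample mean and the sample variance defined above can be computed directly for a synthetic sample; the $1/n$ normalization in the definition corresponds to numpy's default `ddof=0`.
###Code
# Sketch: sample mean and sample variance (1/n normalization) of one experiment
import numpy as np

np.random.seed(42)
n = 1000
x = np.random.normal(size=n)     # one "experiment" with n measurements

sample_mean = np.sum(x)/n
sample_var = np.sum((x - sample_mean)**2)/n

print(sample_mean, sample_var, np.sqrt(sample_var))
print(np.mean(x), np.var(x, ddof=0))   # numpy equivalents
###Output
_____no_output_____
###Markdown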
This experiment is then repeated several times, say $m$ times. The total average is then $$\begin{equation}\langle X_m \rangle= \frac{1}{m}\sum_{\alpha=1}^m\langle x_{\alpha}\rangle=\frac{1}{mn}\sum_{\alpha, k} x_{\alpha,k},\label{eq:exptmean} \tag{6}\end{equation}$$ where the last sums end at $m$ and $n$. The total variance is $$\sigma^2_m= \frac{1}{m}\sum_{\alpha=1}^m(\langle x_{\alpha} \rangle-\langle X_m \rangle)^2,$$ which we rewrite as $$\begin{equation}\sigma^2_m=\frac{1}{mn^2}\sum_{\alpha=1}^m\sum_{kl=1}^n (x_{\alpha,k}-\langle X_m \rangle)(x_{\alpha,l}-\langle X_m \rangle).\label{eq:exptvariance} \tag{7}\end{equation}$$

Numerical experiments and the covariance, the sample variance

We define also the sample variance $\sigma^2$ of all $mn$ individual experiments as $$\begin{equation}\sigma^2=\frac{1}{mn}\sum_{\alpha=1}^m\sum_{k=1}^n (x_{\alpha,k}-\langle X_m \rangle)^2.\label{eq:sampleexptvariance} \tag{8}\end{equation}$$ These quantities, being known experimental values or the results from our calculations, may differ, in some cases significantly, from the similarly named exact values for the mean value $\mu_X$, the variance $\mathrm{Var}(X)$ and the covariance $\mathrm{Cov}(X,Y)$.

Numerical experiments and the covariance, central limit theorem

The central limit theorem states that the PDF $\tilde{p}(z)$ of the average of $m$ random values corresponding to a PDF $p(x)$ is a normal distribution whose mean is the mean value of the PDF $p(x)$ and whose variance is the variance of the PDF $p(x)$ divided by $m$, the number of values used to compute $z$. The central limit theorem leads then to the well-known expression for the standard deviation, given by $$\sigma_m=\frac{\sigma}{\sqrt{m}}.$$ In many cases the above estimate for the standard deviation, in particular if correlations are strong, may be too simplistic. We need therefore a more precise definition of the error and the variance in our results.

Definition of Correlation Functions and Standard Deviation

Our estimate of the true average $\mu_{X}$ is the sample mean $\langle X_m \rangle$ $$\mu_{X}^{\phantom X} \approx X_m=\frac{1}{mn}\sum_{\alpha=1}^m\sum_{k=1}^n x_{\alpha,k}.$$ We can then use Eq. ([7](eq:exptvariance)) $$\sigma^2_m=\frac{1}{mn^2}\sum_{\alpha=1}^m\sum_{kl=1}^n (x_{\alpha,k}-\langle X_m \rangle)(x_{\alpha,l}-\langle X_m \rangle),$$ and rewrite it as $$\sigma^2_m=\frac{\sigma^2}{n}+\frac{2}{mn^2}\sum_{\alpha=1}^m\sum_{k<l}^n (x_{\alpha,k}-\langle X_m \rangle)(x_{\alpha,l}-\langle X_m \rangle),$$ where the first term is the sample variance of all $mn$ experiments divided by $n$ and the last term is nothing but the covariance which arises when $k\ne l$.

Definition of Correlation Functions and Standard Deviation

Our estimate of the true average $\mu_{X}$ is the sample mean $\langle X_m \rangle$. If the observables are uncorrelated, then the covariance is zero and we obtain a total variance which agrees with the central limit theorem. Correlations may often be present in our data set, resulting in a non-zero covariance. The first term is normally called the uncorrelated contribution. Computationally the uncorrelated first term is much easier to treat efficiently than the second. We just accumulate separately the values $x^2$ and $x$ for every measurement $x$ we receive. The correlation term, though, has to be calculated at the end of the experiment since we need all the measurements to calculate the cross terms. Therefore, all measurements have to be stored throughout the experiment.
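For uncorrelated measurements the total variance reduces to $\sigma^2_m\approx\sigma^2/n$, in line with the central limit theorem. The following added sketch (with arbitrarily chosen $m$, $n$ and $\sigma$) repeats an experiment of $n$ uncorrelated measurements $m$ times and compares the spread of the $m$ sample means with $\sigma/\sqrt{n}$.
###Code
# Sketch: for uncorrelated samples the standard deviation of the sample mean
# follows sigma/sqrt(n)
import numpy as np

np.random.seed(1)
m = 1000       # number of experiments
n = 100        # measurements per experiment
sigma = 2.0    # standard deviation of the underlying PDF

x = np.random.normal(0.0, sigma, size=(m, n))
means = x.mean(axis=1)              # the m sample means <x_alpha>

print("std of the sample means:", means.std())
print("sigma/sqrt(n):          ", sigma/np.sqrt(n))
###Output
_____no_output_____
###Markdown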
Definition of Correlation Functions and Standard DeviationLet us analyze the problem by splitting up the correlation term intopartial sums of the form $$f_d = \frac{1}{nm}\sum_{\alpha=1}^m\sum_{k=1}^{n-d}(x_{\alpha,k}-\langle X_m \rangle)(x_{\alpha,k+d}-\langle X_m \rangle),$$ The correlation term of the total variance can now be rewritten in terms of$f_d$ $$\frac{2}{mn^2}\sum_{\alpha=1}^m\sum_{k<l}^n (x_{\alpha,k}-\langle X_m \rangle)(x_{\alpha,l}-\langle X_m \rangle)=\frac{2}{n}\sum_{d=1}^{n-1} f_d$$ Definition of Correlation Functions and Standard DeviationThe value of $f_d$ reflects the correlation between measurementsseparated by the distance $d$ in the samples. Notice that for$d=0$, $f$ is just the sample variance, $\sigma^2$. If we divide $f_d$by $\sigma^2$, we arrive at the so called **autocorrelation function** $$\begin{equation}\kappa_d = \frac{f_d}{\sigma^2}\label{eq:autocorrelformal} \tag{9}\end{equation}$$ which gives us a useful measure of the correlation pair correlationstarting always at $1$ for $d=0$. Definition of Correlation Functions and Standard Deviation, sample varianceThe sample variance of the $mn$ experiments can now bewritten in terms of the autocorrelation function $$\begin{equation}\sigma_m^2=\frac{\sigma^2}{n}+\frac{2}{n}\cdot\sigma^2\sum_{d=1}^{n-1}\frac{f_d}{\sigma^2}=\left(1+2\sum_{d=1}^{n-1}\kappa_d\right)\frac{1}{n}\sigma^2=\frac{\tau}{n}\cdot\sigma^2\label{eq:error_estimate_corr_time} \tag{10}\end{equation}$$ and we see that $\sigma_m$ can be expressed in terms of theuncorrelated sample variance times a correction factor $\tau$ whichaccounts for the correlation between measurements. We call thiscorrection factor the *autocorrelation time* $$\begin{equation}\tau = 1+2\sum_{d=1}^{n-1}\kappa_d\label{eq:autocorrelation_time} \tag{11}\end{equation}$$ For a correlation free experiment, $\tau$equals 1. Definition of Correlation Functions and Standard DeviationFrom the point of view ofEq. ([10](eq:error_estimate_corr_time)) we can interpret a sequentialcorrelation as an effective reduction of the number of measurements bya factor $\tau$. The effective number of measurements becomes $$n_\mathrm{eff} = \frac{n}{\tau}$$ To neglect the autocorrelation time $\tau$ will always cause oursimple uncorrelated estimate of $\sigma_m^2\approx \sigma^2/n$ tobe less than the true sample error. The estimate of the error will betoo "good". On the other hand, the calculation of the fullautocorrelation time poses an efficiency problem if the set ofmeasurements is very large. The solution to this problem is given by more practically oriented methods like the blocking technique. Code to compute the Covariance matrix and the Covariance
###Code
# Importing various packages
from math import exp, sqrt
from random import random, seed
import numpy as np
import matplotlib.pyplot as plt
# Sample covariance, note the factor 1/(n-1)
def covariance(x, y, n):
sum = 0.0
mean_x = np.mean(x)
mean_y = np.mean(y)
for i in range(0, n):
sum += (x[(i)]-mean_x)*(y[i]-mean_y)
return sum/(n-1.)
n = 100
x = np.random.normal(size=n)
print(np.mean(x))
y = 4+3*x+np.random.normal(size=n)
print(np.mean(y))
z = x**3+np.random.normal(size=n)
print(np.mean(z))
covxx = covariance(x,x,n)
covyy = covariance(y,y,n)
covzz = covariance(z,z,n)
covxy = covariance(x,y,n)
covxz = covariance(x,z,n)
covyz = covariance(y,z,n)
print(covxx,covyy, covzz)
print(covxy,covxz, covyz)
w = np.vstack((x, y, z))
#print(w)
c = np.cov(w)
print(c)
#eigen = np.zeros(n)
Eigvals, Eigvecs = np.linalg.eig(c)
print(Eigvals)
###Output
0.02730126581656065
4.105137868830763
0.04829457939163212
0.8718475896381779 9.358869339268145 15.339535706819584
2.6818813252071303 2.6510573774179256 7.986091753050161
[[ 0.87184759 2.68188133 2.65105738]
[ 2.68188133 9.35886934 7.98609175]
[ 2.65105738 7.98609175 15.33953571]]
[21.54237024 0.08251898 3.94536341]
###Markdown
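The autocorrelation function and the autocorrelation time defined above can also be estimated directly. The sketch below (an added illustration; the correlated sequence is produced by a simple first-order recursion chosen purely for demonstration) computes $\kappa_d$, the autocorrelation time $\tau$ of Eq. (11) truncated at a maximum distance, and the effective number of measurements $n_\mathrm{eff}=n/\tau$.
###Code
# Sketch: autocorrelation time tau and effective number of measurements
import numpy as np

np.random.seed(7)
n = 10000
phi = 0.9                          # strength of the artificial correlation

# correlated sequence x_k = phi*x_{k-1} + noise (illustration only)
x = np.zeros(n)
noise = np.random.normal(size=n)
for k in range(1, n):
    x[k] = phi*x[k-1] + noise[k]

mean_x = np.mean(x)
var_x = np.var(x)

# autocorrelation function kappa_d, truncated at d_max for efficiency
d_max = 200
kappa = np.array([np.sum((x[:n-d] - mean_x)*(x[d:] - mean_x))/(n*var_x)
                  for d in range(d_max)])

tau = 1.0 + 2.0*np.sum(kappa[1:])      # Eq. (11), truncated at d_max
print("autocorrelation time tau:", tau)
print("effective number of measurements n/tau:", n/tau)
###Output
_____no_output_____
###Markdown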
Random NumbersUniform deviates are just random numbers that lie within a specified range(typically 0 to 1), with any one number in the range just as likely as any other. Theyare, in other words, what you probably think random numbers are. However,we want to distinguish uniform deviates from other sorts of random numbers, forexample numbers drawn from a normal (Gaussian) distribution of specified meanand standard deviation. These other sorts of deviates are almost always generated byperforming appropriate operations on one or more uniform deviates, as we will seein subsequent sections. So, a reliable source of random uniform deviates, the subjectof this section, is an essential building block for any sort of stochastic modelingor Monte Carlo computer work. Random Numbers, better name: pseudo random numbersA disclaimer is however appropriate. It should be fairly obvious that something as deterministic as a computer cannot generate purely random numbers.Numbers generated by any of the standard algorithms are in reality pseudo randomnumbers, hopefully abiding to the following criteria: * they produce a uniform distribution in the interval [0,1]. * correlations between random numbers are negligible * the period before the same sequence of random numbers is repeated is as large as possible and finally * the algorithm should be fast. Random number generator RNG The most common random number generators are based on so-calledLinear congruential relations of the type $$N_i=(aN_{i-1}+c) \mathrm{MOD} (M),$$ which yield a number in the interval [0,1] through $$x_i=N_i/M$$ The number $M$ is called the period and it should be as large as possible and $N_0$ is the starting value, or seed. The function $\mathrm{MOD}$ means the remainder,that is if we were to evaluate $(13)\mathrm{MOD}(9)$, the outcome is the remainderof the division $13/9$, namely $4$. Random number generator RNG and periodic outputsThe problem with such generators is that their outputs are periodic;they will start to repeat themselves with a period that is at most $M$. If howeverthe parameters $a$ and $c$ are badly chosen, the period may be even shorter.Consider the following example $$N_i=(6N_{i-1}+7) \mathrm{MOD} (5),$$ with a seed $N_0=2$. This generator produces the sequence$4,1,3,0,2,4,1,3,0,2,...\dots$, i.e., a sequence with period $5$.However, increasing $M$ may not guarantee a larger period as the followingexample shows $$N_i=(27N_{i-1}+11) \mathrm{MOD} (54),$$ which still, with $N_0=2$, results in $11,38,11,38,11,38,\dots$, a period ofjust $2$. Random number generator RNG and its periodTypical periods for the random generators provided in the program library are of the order of $\sim 10^9$ or larger. 
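The short periods of the two example generators above are easy to reproduce. The following sketch (added as an illustration) implements the linear congruential relation directly and prints the sequences for the two parameter sets discussed above.
###Code
# Sketch: the linear congruential generator and its period
def lcg_sequence(a, c, M, N0, length):
    """Return `length` numbers from N_i = (a*N_{i-1} + c) MOD M."""
    sequence = []
    N = N0
    for _ in range(length):
        N = (a*N + c) % M
        sequence.append(N)
    return sequence

# a = 6, c = 7, M = 5, seed N0 = 2 gives the period-5 sequence 4, 1, 3, 0, 2, ...
print(lcg_sequence(6, 7, 5, 2, 10))

# a = 27, c = 11, M = 54, seed N0 = 2 gives the period-2 sequence 11, 38, 11, 38, ...
print(lcg_sequence(27, 11, 54, 2, 10))
###Output
_____no_output_____
###Markdown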
Other random number generators which havebecome increasingly popular are so-called shift-register generators.In these generators each successive number depends on many precedingvalues (rather than the last values as in the linear congruentialgenerator).For example, you could make a shift register generator whose $l$th number is the sum of the $l-i$th and $l-j$th values with modulo $M$, $$N_l=(aN_{l-i}+cN_{l-j})\mathrm{MOD}(M).$$ Random number generator RNG, other examplesSuch a generator again produces a sequence of pseudorandom numbersbut this time with a period much larger than $M$.It is also possible to construct more elaborate algorithms by includingmore than two past terms in the sum of each iteration.One example is the generator of [Marsaglia and Zaman](http://dl.acm.org/citation.cfm?id=187154)which consists of two congruential relations $$\begin{equation} N_l=(N_{l-3}-N_{l-1})\mathrm{MOD}(2^{31}-69),\label{eq:mz1} \tag{12}\end{equation}$$ followed by $$\begin{equation} N_l=(69069N_{l-1}+1013904243)\mathrm{MOD}(2^{32}),\label{eq:mz2} \tag{13}\end{equation}$$ which according to the authors has a period larger than $2^{94}$. Random number generator RNG, other examplesInstead of using modular addition, we could use the bitwiseexclusive-OR ($\oplus$) operation so that $$N_l=(N_{l-i})\oplus (N_{l-j})$$ where the bitwise action of $\oplus$ means that if $N_{l-i}=N_{l-j}$ the result is$0$ whereas if $N_{l-i}\ne N_{l-j}$ the result is$1$. As an example, consider the case where $N_{l-i}=6$ and $N_{l-j}=11$. The firstone has a bit representation (using 4 bits only) which reads $0110$ whereas the second number is $1011$. Employing the $\oplus$ operator yields $1101$, or $2^3+2^2+2^0=13$.In Fortran90, the bitwise $\oplus$ operation is coded through the intrinsicfunction $\mathrm{IEOR}(m,n)$ where $m$ and $n$ are the input numbers, while in $C$it is given by $m\wedge n$. Random number generator RNG, RAN0We show here how the linear congruential algorithm can be implemented, namely $$N_i=(aN_{i-1}) \mathrm{MOD} (M).$$ However, since $a$ and $N_{i-1}$ are integers and their multiplication could become greater than the standard 32 bit integer, there is a trick via Schrage's algorithm which approximates the multiplicationof large integers through the factorization $$M=aq+r,$$ where we have defined $$q=[M/a],$$ and $$r = M\hspace{0.1cm}\mathrm{MOD} \hspace{0.1cm}a.$$ where the brackets denote integer division. In the code below the numbers $q$ and $r$ are chosen so that $r < q$. Random number generator RNG, RAN0To see how this works we note first that $$\begin{equation}(aN_{i-1}) \mathrm{MOD} (M)= (aN_{i-1}-[N_{i-1}/q]M)\mathrm{MOD} (M),\label{eq:rntrick1} \tag{14}\end{equation}$$ since we can add or subtract any integer multiple of $M$ from $aN_{i-1}$.The last term $[N_{i-1}/q]M\mathrm{MOD}(M)$ is zero since the integer division $[N_{i-1}/q]$ just yields a constant which is multiplied with $M$. Random number generator RNG, RAN0We can now rewrite Eq. 
([14](eq:rntrick1)) as $$\begin{equation}(aN_{i-1}) \mathrm{MOD} (M)= (aN_{i-1}-[N_{i-1}/q](aq+r))\mathrm{MOD} (M),\label{eq:rntrick2} \tag{15}\end{equation}$$ which resultsin $$\begin{equation}(aN_{i-1}) \mathrm{MOD} (M)= \left(a(N_{i-1}-[N_{i-1}/q]q)-[N_{i-1}/q]r)\right)\mathrm{MOD} (M),\label{eq:rntrick3} \tag{16}\end{equation}$$ yielding $$\begin{equation}(aN_{i-1}) \mathrm{MOD} (M)= \left(a(N_{i-1}\mathrm{MOD} (q)) -[N_{i-1}/q]r)\right)\mathrm{MOD} (M).\label{eq:rntrick4} \tag{17}\end{equation}$$ Random number generator RNG, RAN0The term $[N_{i-1}/q]r$ is always smaller or equal $N_{i-1}(r/q)$ and with $r < q$ we obtain always a number smaller than $N_{i-1}$, which is smaller than $M$. And since the number $N_{i-1}\mathrm{MOD} (q)$ is between zero and $q-1$ then$a(N_{i-1}\mathrm{MOD} (q))< aq$. Combined with our definition of $q=[M/a]$ ensures that this term is also smaller than $M$ meaning that both terms fit into a32-bit signed integer. None of these two terms can be negative, but their difference could.The algorithm below adds $M$ if their difference is negative.Note that the program uses the bitwise $\oplus$ operator to generatethe starting point for each generation of a random number. The periodof $ran0$ is $\sim 2.1\times 10^{9}$. A special feature of thisalgorithm is that is should never be called with the initial seed set to $0$. Random number generator RNG, RAN0 code /* ** The function ** ran0() ** is an "Minimal" random number generator of Park and Miller ** Set or reset the input value ** idum to any integer value (except the unlikely value MASK) ** to initialize the sequence; idum must not be altered between ** calls for sucessive deviates in a sequence. ** The function returns a uniform deviate between 0.0 and 1.0. */ double ran0(long &idum) { const int a = 16807, m = 2147483647, q = 127773; const int r = 2836, MASK = 123459876; const double am = 1./m; long k; double ans; idum ^= MASK; k = (*idum)/q; idum = a*(idum - k*q) - r*k; // add m if negative difference if(idum < 0) idum += m; ans=am*(idum); idum ^= MASK; return ans; } // End: function ran0() Properties of Selected Random Number GeneratorsAs mentioned previously, the underlying PDF for the generation ofrandom numbers is the uniform distribution, meaning that the probability for finding a number $x$ in the interval [0,1] is $p(x)=1$.A random number generator should produce numbers which are uniformly distributedin this interval. The table shows the distribution of $N=10000$ randomnumbers generated by the functions in the program library.We note in this table that the number of points in the variousintervals $0.0-0.1$, $0.1-0.2$ etc are fairly close to $1000$, with some minordeviations. Two additional measures are the standard deviation $\sigma$ and the mean$\mu=\langle x\rangle$. Properties of Selected Random Number GeneratorsFor the uniform distribution, the mean value $\mu$ is then $$\mu=\langle x\rangle=\frac{1}{2}$$ while the standard deviation is $$\sigma=\sqrt{\langle x^2\rangle-\mu^2}=\frac{1}{\sqrt{12}}=0.2886.$$ Properties of Selected Random Number GeneratorsThe various random number generators produce results which agree rather well withthese limiting values. 
| $x$-bin | ran0 | ran1 | ran2 | ran3 |
|---------|------|------|------|------|
| 0.0-0.1 | 1013 | 991  | 938  | 1047 |
| 0.1-0.2 | 1002 | 1009 | 1040 | 1030 |
| 0.2-0.3 | 989  | 999  | 1030 | 993  |
| 0.3-0.4 | 939  | 960  | 1023 | 937  |
| 0.4-0.5 | 1038 | 1001 | 1002 | 992  |
| 0.5-0.6 | 1037 | 1047 | 1009 | 1009 |
| 0.6-0.7 | 1005 | 989  | 1003 | 989  |
| 0.7-0.8 | 986  | 962  | 985  | 954  |
| 0.8-0.9 | 1000 | 1027 | 1009 | 1023 |
| 0.9-1.0 | 991  | 1015 | 961  | 1026 |
| $\mu$   | 0.4997 | 0.5018 | 0.4992 | 0.4990 |
| $\sigma$ | 0.2882 | 0.2892 | 0.2861 | 0.2915 |

Simple demonstration of RNGs using python

The following simple Python code plots the distribution of the produced random numbers using the standard RNG provided by Python's random module (a Mersenne Twister). The trend displayed in the previous table is seen rather clearly.
###Code
#!/usr/bin/env python
import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import random
# initialize the rng with a seed
random.seed()
counts = 10000
values = np.zeros(counts)
for i in range(0, counts):  # fill every entry, including index 0
values[i] = random.random()
# the histogram of the data
n, bins, patches = plt.hist(values, 10, facecolor='green')
plt.xlabel('$x$')
plt.ylabel('Number of counts')
plt.title(r'Test of uniform distribution')
plt.axis([0, 1, 0, 1100])
plt.grid(True)
plt.show()
###Output
_____no_output_____
###Markdown
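The limiting values $\mu=1/2$ and $\sigma=1/\sqrt{12}\approx 0.2886$ quoted above are easy to check for the generator used in the previous cell. A small added sketch:
###Code
# Sketch: mean and standard deviation of uniform deviates from Python's RNG
import numpy as np
import random

random.seed()
counts = 10000
values = np.array([random.random() for _ in range(counts)])

print("mean :", values.mean(), "(expected 0.5)")
print("sigma:", values.std(), "(expected 1/sqrt(12) = %.4f)" % (1.0/np.sqrt(12.0)))
###Output
_____no_output_____
###Markdown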
Properties of Selected Random Number GeneratorsSince our random numbers, which are typically generated via a linear congruential algorithm,are never fully independent, we can then define an important test which measures the degree of correlation, namely the so-called auto-correlation function defined previously, see again Eq. ([9](eq:autocorrelformal)).We rewrite it here as $$C_k=\frac{f_d} {\sigma^2},$$ with $C_0=1$. Recall that $\sigma^2=\langle x_i^2\rangle-\langle x_i\rangle^2$ and that $$f_d = \frac{1}{nm}\sum_{\alpha=1}^m\sum_{k=1}^{n-d}(x_{\alpha,k}-\langle X_m \rangle)(x_{\alpha,k+d}-\langle X_m \rangle),$$ The non-vanishing of $C_k$ for $k\ne 0$ means that the randomnumbers are not independent. The independence of the random numbers is crucial in the evaluation of other expectation values. If they are not independent, ourassumption for approximating $\sigma_N$ is no longer valid. Autocorrelation functionThis program computes the autocorrelation function as discussed in the equation on the previous slide for random numbers generated with the normal distribution $N(0,1)$.
###Code
# Importing various packages
from math import exp, sqrt
from random import random, seed
import numpy as np
import matplotlib.pyplot as plt
def autocovariance(x, n, k, mean_x):
sum = 0.0
for i in range(0, n-k):
sum += (x[(i+k)]-mean_x)*(x[i]-mean_x)
return sum/n
n = 1000
x=np.random.normal(size=n)
autocor = np.zeros(n)
figaxis = np.zeros(n)
mean_x=np.mean(x)
var_x = np.var(x)
print(mean_x, var_x)
for i in range (0, n):
figaxis[i] = i
autocor[i]=(autocovariance(x, n, i, mean_x))/var_x
plt.plot(figaxis, autocor, "r-")
plt.axis([0,n,-0.1, 1.0])
plt.xlabel(r'$i$')
plt.ylabel(r'$\gamma_i$')
plt.title(r'Autocorrelation function')
plt.show()
###Output
-0.0032873138755776365 0.9279671770201344
###Markdown
As can be seen from the plot, the first point gives back the variance and a value of one. For the remaining values we notice that there are still non-zero values for the auto-correlation function. Correlation function and which random number generators should I useThe program here computes the correlation function for one of the standard functions included with the c++ compiler. // This function computes the autocorrelation function for // the standard c++ random number generator include include include include using namespace std; // output file as global variable ofstream ofile; // Main function begins here int main(int argc, char* argv[]) { int n; char *outfilename; cin >> n; double MCint = 0.; double MCintsqr2=0.; double invers_period = 1./RAND_MAX; // initialise the random number generator srand(time(NULL)); // This produces the so-called seed in MC jargon // Compute the variance and the mean value of the uniform distribution // Compute also the specific values x for each cycle in order to be able to // the covariance and the correlation function // Read in output file, abort if there are too few command-line arguments if( argc <= 2 ){ cout << "Bad Usage: " << argv[0] << " read also output file and number of cycles on same line" << endl; exit(1); } else{ outfilename=argv[1]; } ofile.open(outfilename); // Get the number of Monte-Carlo samples n = atoi(argv[2]); double *X; X = new double[n]; for (int i = 0; i < n; i++){ double x = double(rand())*invers_period; X[i] = x; MCint += x; MCintsqr2 += x*x; } double Mean = MCint/((double) n ); MCintsqr2 = MCintsqr2/((double) n ); double STDev = sqrt(MCintsqr2-Mean*Mean); double Variance = MCintsqr2-Mean*Mean; // Write mean value and standard deviation cout << " Standard deviation= " << STDev << " Integral = " << Mean << endl; // Now we compute the autocorrelation function double *autocor; autocor = new double[n]; for (int j = 0; j < n; j++){ double sum = 0.0; for (int k = 0; k < (n-j); k++){ sum += (X[k]-Mean)*(X[k+j]-Mean); } autocor[j] = sum/Variance/((double) n ); ofile << setiosflags(ios::showpoint | ios::uppercase); ofile << setw(15) << setprecision(8) << j; ofile << setw(15) << setprecision(8) << autocor[j] << endl; } ofile.close(); // close output file return 0; } // end of main program Which RNG should I use?* C++ has a class called **random**. The [random class](http://www.cplusplus.com/reference/random/) contains a large selection of RNGs and is highly recommended. Some of these RNGs have very large periods making it thereby very safe to use these RNGs in case one is performing large calculations. In particular, the [Mersenne twister random number engine](http://www.cplusplus.com/reference/random/mersenne_twister_engine/) has a period of $2^{19937}$. * Add RNGs in Python How to use the Mersenne generatorThe following part of a c++ code (from project 4) sets up the uniform distribution for $x\in [0,1]$. /* // You need this include // Initialize the seed and call the Mersienne algo std::random_device rd; std::mt19937_64 gen(rd()); // Set up the uniform distribution for x \in [[0, 1] std::uniform_real_distribution RandomNumberGenerator(0.0,1.0); // Now use the RNG int ix = (int) (RandomNumberGenerator(gen)*NSpins); Why blocking?**Statistical analysis.** * Monte Carlo simulations can be treated as *computer experiments* * The results can be analysed with the same statistical tools as we would use analysing experimental data. 
* As in all experiments, we are looking for expectation values and an estimate of how accurate they are, i.e., possible sources for errors.A very good article which explains blocking is H. Flyvbjerg and H. G. Petersen, *Error estimates on averages of correlated data*, [Journal of Chemical Physics 91, 461-466 (1989)](http://scitation.aip.org/content/aip/journal/jcp/91/1/10.1063/1.457480). Why blocking?**Statistical analysis.** * As in other experiments, Monte Carlo experiments have two classes of errors: * Statistical errors * Systematical errors * Statistical errors can be estimated using standard tools from statistics * Systematical errors are method specific and must be treated differently from case to case. (In VMC a common source is the step length or time step in importance sampling) Code to demonstrate the calculation of the autocorrelation functionThe following code computes the autocorrelation function, the covariance and the standard deviationfor standard RNG. The [following file](https://github.com/CompPhysics/ComputationalPhysics2/tree/gh-pages/doc/Programs/LecturePrograms/programs/Blocking/autocorrelation.cpp) gives the code. // This function computes the autocorrelation function for // the Mersenne random number generator with a uniform distribution include include include include include include include include using namespace std; using namespace arma; // output file ofstream ofile; // Main function begins here int main(int argc, char* argv[]) { int MonteCarloCycles; string filename; if (argc > 1) { filename=argv[1]; MonteCarloCycles = atoi(argv[2]); string fileout = filename; string argument = to_string(MonteCarloCycles); fileout.append(argument); ofile.open(fileout); } // Compute the variance and the mean value of the uniform distribution // Compute also the specific values x for each cycle in order to be able to // compute the covariance and the correlation function vec X = zeros(MonteCarloCycles); double MCint = 0.; double MCintsqr2=0.; std::random_device rd; std::mt19937_64 gen(rd()); // Set up the uniform distribution for x \in [[0, 1] std::uniform_real_distribution RandomNumberGenerator(0.0,1.0); for (int i = 0; i < MonteCarloCycles; i++){ double x = RandomNumberGenerator(gen); X(i) = x; MCint += x; MCintsqr2 += x*x; } double Mean = MCint/((double) MonteCarloCycles ); MCintsqr2 = MCintsqr2/((double) MonteCarloCycles ); double STDev = sqrt(MCintsqr2-Mean*Mean); double Variance = MCintsqr2-Mean*Mean; // Write mean value and variance cout << " Sample variance= " << Variance << " Mean value = " << Mean << endl; // Now we compute the autocorrelation function vec autocorrelation = zeros(MonteCarloCycles); for (int j = 0; j < MonteCarloCycles; j++){ double sum = 0.0; for (int k = 0; k < (MonteCarloCycles-j); k++){ sum += (X(k)-Mean)*(X(k+j)-Mean); } autocorrelation(j) = sum/Variance/((double) MonteCarloCycles ); ofile << setiosflags(ios::showpoint | ios::uppercase); ofile << setw(15) << setprecision(8) << j; ofile << setw(15) << setprecision(8) << autocorrelation(j) << endl; } // Now compute the exact covariance using the autocorrelation function double Covariance = 0.0; for (int j = 0; j < MonteCarloCycles; j++){ Covariance += autocorrelation(j); } Covariance *= 2.0/((double) MonteCarloCycles); // Compute now the total variance, including the covariance, and obtain the standard deviation double TotalVariance = (Variance/((double) MonteCarloCycles ))+Covariance; cout << "Covariance =" << Covariance << "Totalvariance= " << TotalVariance << "Sample Variance/n= " << 
(Variance/((double) MonteCarloCycles )) << endl; cout << " STD from sample variance= " << sqrt(Variance/((double) MonteCarloCycles )) << " STD with covariance = " << sqrt(TotalVariance) << endl; ofile.close(); // close output file return 0; } // end of main program What is blocking?**Blocking.** * Say that we have a set of samples from a Monte Carlo experiment * Assuming (wrongly) that our samples are uncorrelated our best estimate of the standard deviation of the mean $\langle \mathbf{M}\rangle$ is given by $$\sigma=\sqrt{\frac{1}{n}\left(\langle \mathbf{M}^2\rangle-\langle \mathbf{M}\rangle^2\right)}$$ * If the samples are correlated we can rewrite our results to show that $$\sigma=\sqrt{\frac{1+2\tau/\Delta t}{n}\left(\langle \mathbf{M}^2\rangle-\langle \mathbf{M}\rangle^2\right)}$$ where $\tau$ is the correlation time (the time between a sample and the next uncorrelated sample) and $\Delta t$ is time between each sample What is blocking?**Blocking.** * If $\Delta t\gg\tau$ our first estimate of $\sigma$ still holds * Much more common that $\Delta t<\tau$ * In the method of data blocking we divide the sequence of samples into blocks * We then take the mean $\langle \mathbf{M}_i\rangle$ of block $i=1\ldots n_{blocks}$ to calculate the total mean and variance * The size of each block must be so large that sample $j$ of block $i$ is not correlated with sample $j$ of block $i+1$ * The correlation time $\tau$ would be a good choice What is blocking?**Blocking.** * Problem: We don't know $\tau$ or it is too expensive to compute * Solution: Make a plot of std. dev. as a function of blocksize * The estimate of std. dev. of correlated data is too low $\to$ the error will increase with increasing block size until the blocks are uncorrelated, where we reach a plateau * When the std. dev. stops increasing the blocks are uncorrelated Implementation * Do a Monte Carlo simulation, storing all samples to file * Do the statistical analysis on this file, independently of your Monte Carlo program * Read the file into an array * Loop over various block sizes * For each block size $n_b$, loop over the array in steps of $n_b$ taking the mean of elements $i n_b,\ldots,(i+1) n_b$ * Take the mean and variance of the resulting array * Write the results for each block size to file for later analysis Actual implementation with code, main functionWhen the file gets large, it can be useful to write your data in binary mode instead of ascii characters.The [following python file](https://github.com/CompPhysics/MachineLearning/blob/master/doc/Programs/Sampling/analysis.py) reads data from file with the output from every Monte Carlo cycle.
###Code
# Blocking
@timeFunction
def blocking(self, blockSizeMax = 500):
blockSizeMin = 1
self.blockSizes = []
self.meanVec = []
self.varVec = []
for i in range(blockSizeMin, blockSizeMax):
if(len(self.data) % i != 0):
pass#continue
blockSize = i
meanTempVec = []
varTempVec = []
startPoint = 0
endPoint = blockSize
while endPoint <= len(self.data):
meanTempVec.append(np.average(self.data[startPoint:endPoint]))
startPoint = endPoint
endPoint += blockSize
mean, var = np.average(meanTempVec), np.var(meanTempVec)/len(meanTempVec)
self.meanVec.append(mean)
self.varVec.append(var)
self.blockSizes.append(blockSize)
self.blockingAvg = np.average(self.meanVec[-200:])
self.blockingVar = (np.average(self.varVec[-200:]))
self.blockingStd = np.sqrt(self.blockingVar)
###Output
_____no_output_____ |
09-object_oriented_programming_part-1.ipynb | ###Markdown
Object Oriented Programming OOP allows users to create their own objects. Methods on an object are called with the `.method_name()` syntax.
###Code
l=[2,3,4,5,6,7,8]
l.append(1)
l.sort()  # These are methods, which act like functions attached to the object
# Here the list l is an object
print(l)
###Output
[1, 2, 3, 4, 5, 6, 7, 8]
###Markdown
Syntax :
###Code
# Hashtag are used to write comment.
# class NameOfClass():
#def __init__(self,para1,para2):
#self.para1=para1
#self.para2=para2
#def any_method(self):
#print(self.para1)
# lets create our own class
class Cricket():
pass
my_class=Cricket() #We created instance of class
type(my_class)
###Output
_____no_output_____
###Markdown
Attributes Attributes are nothing but the characteristics of an object.
###Code
class AdditionOp():
#Class object attribute
input= 'int or float'
def __init__(self,a,b):
#Attributes: Characteristic of object
# We take in parameter a and b
# we assign it using 'self' keyword
# self.attribute_name=parameter
self.num1=a
self.num2=b
print(self.num1+self.num2)
my_num=AdditionOp(b=3,a=4)
type(my_num)
my_num.num1
my_num.input
###Output
_____no_output_____
###Markdown
Methods Methods are nothing but functions that are defined in the body of the class (object).
###Code
class MathOp:
def __init__(self,a=1,b=1):
self.a=a
self.b=b
def addition(self):
print(self.a+self.b)
def subtraction(self):
print(self.a-self.b)
def multiplication(self):
print(self.a*self.b)
def division(self,a,b):
print(self.a/self.b)
print(a/b)
c=MathOp(2,4)
c.multiplication()
#c.a() 'a' is attribute not method so we cant use '()'
c.a
c.subtraction()
c.division(50,10)
help(MathOp)
###Output
Help on class MathOp in module __main__:
class MathOp(builtins.object)
| MathOp(a, b)
|
| Methods defined here:
|
| __init__(self, a, b)
| Initialize self. See help(type(self)) for accurate signature.
|
| addition(self)
|
| division(self, a, b)
|
| multiplication(self)
|
| subtraction(self)
|
| ----------------------------------------------------------------------
| Data descriptors defined here:
|
| __dict__
| dictionary for instance variables (if defined)
|
| __weakref__
| list of weak references to the object (if defined)
###Markdown
Inheritance
###Code
class Father():
def __init__(self):
print('I am your Father')
def height(self,ht):
print('My height is {}'.format(ht))
def weight(self,weight):
print('My weight is {}'.format(weight))
def age(self,age):
print('My age is {}'.format(age))
class Son(Father):
def __init__(self):
Father.__init__(self)
print('This is me!')
def weight(self,wt):
        print('My weight is {}'.format(wt))
me=Son()
me.age(19)
me.height('6.9')
me.weight(65)
###Output
My weight is 65
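###Markdown
A common alternative to calling `Father.__init__(self)` explicitly is the built-in `super()` function, which looks up the parent class for you. The sketch below is an added illustration of the same inheritance idea using `super()`.
###Code
# Sketch: the same parent/child idea using super() instead of Father.__init__(self)
class Parent():
    def __init__(self):
        print('I am the parent')
    def age(self, age):
        print('My age is {}'.format(age))

class Child(Parent):
    def __init__(self):
        super().__init__()      # calls Parent.__init__
        print('This is me!')

kid = Child()
kid.age(19)
###Output
_____no_output_____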
|
training_data.ipynb | ###Markdown
Training DataThis notebook creates a simulation database from an ensemble of atmospheric states. Since the purpose of this data is to verify and evaluate the QRNN and BMCI methods, it is necessary that the prior distribution on the atmospheric states can be expressed analytically. To allow for this, the ensemble is generated from the distributions fitted to the ERA Interim data for northern hemisphere, mid-latititude data from 2016. The corresponding code can be found in the `era_interim_climatology.ipynb` notebook
###Code
%env ARTS_INCLUDE_PATH=/home/simonpf/src/atms_retrievals:/home/simonpf/src/arts_clean/controlfiles/
%env ARTS_DATA_PATH=/home/simonpf/src/arts_xml/
%env ARTS_BUILD_PATH=/home/simonpf/build/arts/
%load_ext autoreload
%autoreload 2
import scipy as sc
import numpy as np
%matplotlib inline
import matplotlib_settings
import matplotlib.pyplot as plt
from typhon.arts.workspace import Workspace
import atms
###Output
env: ARTS_INCLUDE_PATH=/home/simonpf/src/atms_retrievals:/home/simonpf/src/arts_clean/controlfiles/
env: ARTS_DATA_PATH=/home/simonpf/src/arts_xml/
env: ARTS_BUILD_PATH=/home/simonpf/build/arts/
Loading ARTS API from: /home/simonpf/build/arts/src/libarts_api.so
###Markdown
ARTS Setup

For the basic simulation, the following ATMS channels are used:

| Channel Index | Frequency        | Polarization |
|---------------|------------------|--------------|
| 0             | $23$ GHz         | H            |
| 15            | $88$ GHz         | H            |
| 16            | $165$ GHz        | H            |
| 17            | $183 \pm 7$ GHz  | H            |
| 19            | $183 \pm 3$ GHz  | H            |
###Code
dataset = "summer"
suffix = ""
if not dataset == "":
suffix = "_" + dataset
channels = [0, 15, 16, 17, 19]
ws = Workspace()
atms.setup_atmosphere(ws, dataset = dataset)
atms.setup_sensor(ws, channels)
atms.checks(ws)
ws.jacobianOff()
###Output
ARTS[53396448]: Executing /home/simonpf/src/arts_clean/controlfiles/general/general.arts
ARTS[53396448]: {
ARTS[53396448]: - verbosityInit
ARTS[53396448]: - scat_speciesSet
ARTS[53396448]: - MatrixSet
ARTS[53396448]: - Tensor4SetConstant
ARTS[53396448]: - ArrayOfStringSet
ARTS[53396448]: - Touch
ARTS[53396448]: - FlagOff
ARTS[53396448]: - MatrixSet
ARTS[53396448]: - NumericSet
ARTS[53396448]: - ArrayOfStringSet
ARTS[53396448]: - Tensor3SetConstant
ARTS[53396448]: - Tensor3SetConstant
ARTS[53396448]: - Tensor3SetConstant
ARTS[53396448]: - Tensor3SetConstant
ARTS[53396448]: - Tensor3SetConstant
ARTS[53396448]: - Tensor3SetConstant
ARTS[53396448]: - IndexSet
ARTS[53396448]: - IndexSet
ARTS[53396448]: - IndexSet
ARTS[53396448]: - IndexSet
ARTS[53396448]: - FlagOff
ARTS[53396448]: - output_file_formatSetAscii
ARTS[53396448]: - StringSet
ARTS[53396448]: - IndexSet
ARTS[53396448]: - abs_lineshapeDefine
ARTS[53396448]: - NumericSet
ARTS[53396448]: - NumericSet
ARTS[53396448]: - AgendaSet
ARTS[53396448]: - IndexSet
ARTS[53396448]: - IndexSet
ARTS[53396448]: - NumericSet
ARTS[53396448]: - NumericSet
ARTS[53396448]: - nlteOff
ARTS[53396448]: - partition_functionsInitFromBuiltin
ARTS[53396448]: - IndexSet
ARTS[53396448]: }
ARTS[53396448]: Executing /home/simonpf/src/arts_clean/controlfiles/general/continua.arts
ARTS[53396448]: {
ARTS[53396448]: - abs_cont_descriptionInit
ARTS[53396448]: - abs_cont_descriptionAppend
ARTS[53396448]: - abs_cont_descriptionAppend
ARTS[53396448]: - abs_cont_descriptionAppend
ARTS[53396448]: - abs_cont_descriptionAppend
ARTS[53396448]: - abs_cont_descriptionAppend
ARTS[53396448]: - abs_cont_descriptionAppend
ARTS[53396448]: - abs_cont_descriptionAppend
ARTS[53396448]: - abs_cont_descriptionAppend
ARTS[53396448]: - abs_cont_descriptionAppend
ARTS[53396448]: - abs_cont_descriptionAppend
ARTS[53396448]: - abs_cont_descriptionAppend
ARTS[53396448]: - abs_cont_descriptionAppend
ARTS[53396448]: - abs_cont_descriptionAppend
ARTS[53396448]: - abs_cont_descriptionAppend
ARTS[53396448]: - abs_cont_descriptionAppend
ARTS[53396448]: - abs_cont_descriptionAppend
ARTS[53396448]: - abs_cont_descriptionAppend
ARTS[53396448]: - abs_cont_descriptionAppend
ARTS[53396448]: - abs_cont_descriptionAppend
ARTS[53396448]: - abs_cont_descriptionAppend
ARTS[53396448]: - abs_cont_descriptionAppend
ARTS[53396448]: - abs_cont_descriptionAppend
ARTS[53396448]: - abs_cont_descriptionAppend
ARTS[53396448]: - abs_cont_descriptionAppend
ARTS[53396448]: - abs_cont_descriptionAppend
ARTS[53396448]: - abs_cont_descriptionAppend
ARTS[53396448]: - abs_cont_descriptionAppend
ARTS[53396448]: - abs_cont_descriptionAppend
ARTS[53396448]: - abs_cont_descriptionAppend
ARTS[53396448]: - abs_cont_descriptionAppend
ARTS[53396448]: - abs_cont_descriptionAppend
ARTS[53396448]: - abs_cont_descriptionAppend
ARTS[53396448]: - abs_cont_descriptionAppend
ARTS[53396448]: - abs_cont_descriptionAppend
ARTS[53396448]: - abs_cont_descriptionAppend
ARTS[53396448]: - abs_cont_descriptionAppend
ARTS[53396448]: - abs_cont_descriptionAppend
ARTS[53396448]: - abs_cont_descriptionAppend
ARTS[53396448]: - abs_cont_descriptionAppend
ARTS[53396448]: - abs_cont_descriptionAppend
ARTS[53396448]: - abs_cont_descriptionAppend
ARTS[53396448]: - abs_cont_descriptionAppend
ARTS[53396448]: - abs_cont_descriptionAppend
ARTS[53396448]: - abs_cont_descriptionAppend
ARTS[53396448]: - abs_cont_descriptionAppend
ARTS[53396448]: - abs_cont_descriptionAppend
ARTS[53396448]: - abs_cont_descriptionAppend
ARTS[53396448]: - abs_cont_descriptionAppend
ARTS[53396448]: - abs_cont_descriptionAppend
ARTS[53396448]: - abs_cont_descriptionAppend
ARTS[53396448]: - abs_cont_descriptionAppend
ARTS[53396448]: - abs_cont_descriptionAppend
ARTS[53396448]: - abs_cont_descriptionAppend
ARTS[53396448]: - abs_cont_descriptionAppend
ARTS[53396448]: - abs_cont_descriptionAppend
ARTS[53396448]: - abs_cont_descriptionAppend
ARTS[53396448]: }
ARTS[53396448]: Executing /home/simonpf/src/arts_clean/controlfiles/general/agendas.arts
ARTS[53396448]: {
ARTS[53396448]: - AgendaCreate
ARTS[53396448]: - AgendaSet
ARTS[53396448]: - AgendaCreate
ARTS[53396448]: - AgendaSet
ARTS[53396448]: - AgendaCreate
ARTS[53396448]: - AgendaSet
ARTS[53396448]: - AgendaCreate
ARTS[53396448]: - AgendaSet
ARTS[53396448]: - AgendaCreate
ARTS[53396448]: - AgendaSet
ARTS[53396448]: - AgendaCreate
ARTS[53396448]: - AgendaSet
ARTS[53396448]: - AgendaCreate
ARTS[53396448]: - AgendaSet
ARTS[53396448]: - AgendaCreate
ARTS[53396448]: - AgendaSet
ARTS[53396448]: - AgendaCreate
ARTS[53396448]: - AgendaSet
ARTS[53396448]: - AgendaCreate
ARTS[53396448]: - AgendaSet
ARTS[53396448]: - AgendaCreate
ARTS[53396448]: - AgendaSet
ARTS[53396448]: - AgendaCreate
ARTS[53396448]: - AgendaSet
ARTS[53396448]: - AgendaCreate
ARTS[53396448]: - AgendaSet
ARTS[53396448]: - AgendaCreate
ARTS[53396448]: - AgendaSet
ARTS[53396448]: - AgendaCreate
ARTS[53396448]: - AgendaSet
ARTS[53396448]: - AgendaCreate
ARTS[53396448]: - AgendaSet
ARTS[53396448]: - AgendaCreate
ARTS[53396448]: - AgendaSet
ARTS[53396448]: - AgendaCreate
ARTS[53396448]: - AgendaSet
ARTS[53396448]: - AgendaCreate
ARTS[53396448]: - AgendaSet
ARTS[53396448]: - AgendaCreate
ARTS[53396448]: - AgendaSet
ARTS[53396448]: - FlagOff
ARTS[53396448]: - AgendaCreate
ARTS[53396448]: - AgendaSet
ARTS[53396448]: - AgendaCreate
ARTS[53396448]: - AgendaSet
ARTS[53396448]: - AgendaCreate
ARTS[53396448]: - AgendaSet
ARTS[53396448]: - AgendaCreate
ARTS[53396448]: - AgendaSet
ARTS[53396448]: - AgendaCreate
ARTS[53396448]: - AgendaSet
ARTS[53396448]: - AgendaCreate
ARTS[53396448]: - AgendaSet
ARTS[53396448]: - FlagOff
ARTS[53396448]: - AgendaCreate
ARTS[53396448]: - AgendaSet
ARTS[53396448]: - AgendaCreate
ARTS[53396448]: - AgendaSet
ARTS[53396448]: - AgendaCreate
ARTS[53396448]: - AgendaSet
ARTS[53396448]: - AgendaCreate
ARTS[53396448]: - AgendaSet
ARTS[53396448]: - AgendaCreate
ARTS[53396448]: - AgendaSet
ARTS[53396448]: - AgendaCreate
ARTS[53396448]: - AgendaSet
ARTS[53396448]: - AgendaCreate
ARTS[53396448]: - AgendaSet
ARTS[53396448]: - AgendaCreate
ARTS[53396448]: - AgendaSet
ARTS[53396448]: - AgendaCreate
ARTS[53396448]: - AgendaSet
ARTS[53396448]: - AgendaCreate
ARTS[53396448]: - AgendaSet
ARTS[53396448]: - AgendaCreate
ARTS[53396448]: - AgendaSet
ARTS[53396448]: - AgendaCreate
ARTS[53396448]: - AgendaSet
ARTS[53396448]: - AgendaCreate
ARTS[53396448]: - AgendaSet
ARTS[53396448]: - AgendaCreate
ARTS[53396448]: - AgendaSet
ARTS[53396448]: - AgendaCreate
ARTS[53396448]: - AgendaSet
ARTS[53396448]: - AgendaCreate
ARTS[53396448]: - AgendaSet
ARTS[53396448]: - AgendaCreate
ARTS[53396448]: - AgendaSet
ARTS[53396448]: }
ARTS[53396448]: Executing /home/simonpf/src/arts_clean/controlfiles/general/planet_earth.arts
ARTS[53396448]: {
ARTS[53396448]: - isotopologue_ratiosInitFromBuiltin
ARTS[53396448]: - refellipsoidEarth
ARTS[53396448]: - NumericSet
ARTS[53396448]: - AgendaSet
ARTS[53396448]: - NumericSet
ARTS[53396448]: }
ARTS[53396448]: Executing /home/simonpf/src/arts_clean/controlfiles/instruments/metmm/sensor_descriptions/prepare_metmm.arts
ARTS[53396448]: {
ARTS[53396448]: - ArrayOfArrayOfIndexCreate
ARTS[53396448]: - ArrayOfIndexCreate
ARTS[53396448]: - VectorCreate
ARTS[53396448]: - ArrayOfIndexCreate
ARTS[53396448]: - NumericCreate
ARTS[53396448]: - VectorCreate
ARTS[53396448]: - IndexCreate
ARTS[53396448]: }
ARTS[53396448]: Executing /home/simonpf/src/atms_simulations/sensor_atms.arts
ARTS[53396448]: {
ARTS[53396448]: - MatrixSet
ARTS[53396448]: - MatrixSet
ARTS[53396448]: - ArrayOfStringSet
ARTS[53396448]: - VectorSet
ARTS[53396448]: - ArrayOfIndexSet
ARTS[53396448]: - VectorSet
ARTS[53396448]: - Extract
ARTS[53396448]: - nrowsGet
ARTS[53396448]: - VectorSetConstant
ARTS[53396448]: - Delete
ARTS[53396448]: }
ARTS[53396448]: Executing /home/simonpf/src/arts_clean/controlfiles/instruments/metmm/sensor_descriptions/apply_metmm.arts
ARTS[53396448]: {
ARTS[53396448]: - Select
ARTS[53396448]: - Select
ARTS[53396448]: - Select
ARTS[53396448]: - Select
ARTS[53396448]: - Select
ARTS[53396448]: - f_gridMetMM
ARTS[53396448]: - sensor_responseMetMM
ARTS[53396448]: }
###Markdown
Sampling the a priori
###Code
dist = atms.StateDistribution(dataset = "summer")
###Output
_____no_output_____
###Markdown
To generate the training data, we simply sample from the a priori distribution, compute the integrated column water vapor and then simulate the corresponding brightness temperatures.
###Code
def sample_a_priori(ws, n_samples):
cwv = np.zeros(n_samples)
y = np.zeros((n_samples, len(channels)))
q_profiles = np.zeros((n_samples, ws.p_grid.value.shape[0]))
t_profiles = np.zeros((n_samples, ws.p_grid.value.shape[0]))
p = ws.p_grid.value
for i in range(n_samples):
dist.sample(ws)
ws.yCalc()
cwv[i] = atms.vmr2cd(ws)
q_profiles[i,:] = np.copy(ws.vmr_field.value[0, :, 0, 0].ravel())
t_profiles[i, :] = np.copy(ws.t_field.value[:, 0, 0].ravel())
ws.sst = np.maximum(ws.t_field.value[0, 0, 0], 270.0)
y[i] = np.copy(ws.y.value)
if i % 1000 == 0:
print("progress: " + str(i))
return y, cwv, q_profiles, t_profiles
###Output
_____no_output_____
###Markdown
Training DataFor the training data $10^6$ samples from the joint a priori distribution of water vapor and temperature profiles and corresponding brightness temperatures are generated.
###Code
y, cwv, q_profiles, t_profiles = sample_a_priori(ws, 1000000)
np.save("data/x_train" + str(len(channels)) + suffix, y)
np.save("data/y_train" + str(len(channels)) + suffix, cwv)
###Output
progress: 0
progress: 1000
progress: 2000
progress: 3000
progress: 4000
progress: 5000
progress: 6000
progress: 7000
progress: 8000
progress: 9000
progress: 10000
progress: 11000
progress: 12000
progress: 13000
progress: 14000
progress: 15000
progress: 16000
progress: 17000
progress: 18000
progress: 19000
progress: 20000
progress: 21000
progress: 22000
progress: 23000
progress: 24000
progress: 25000
progress: 26000
progress: 27000
progress: 28000
progress: 29000
progress: 30000
progress: 31000
progress: 32000
progress: 33000
progress: 34000
progress: 35000
progress: 36000
progress: 37000
progress: 38000
progress: 39000
progress: 40000
progress: 41000
progress: 42000
progress: 43000
progress: 44000
progress: 45000
progress: 46000
progress: 47000
progress: 48000
progress: 49000
progress: 50000
progress: 51000
progress: 52000
progress: 53000
progress: 54000
progress: 55000
progress: 56000
progress: 57000
progress: 58000
progress: 59000
progress: 60000
progress: 61000
progress: 62000
progress: 63000
progress: 64000
progress: 65000
progress: 66000
progress: 67000
progress: 68000
progress: 69000
progress: 70000
progress: 71000
progress: 72000
progress: 73000
progress: 74000
progress: 75000
progress: 76000
progress: 77000
progress: 78000
progress: 79000
progress: 80000
progress: 81000
progress: 82000
progress: 83000
progress: 84000
progress: 85000
progress: 86000
progress: 87000
progress: 88000
progress: 89000
progress: 90000
progress: 91000
progress: 92000
progress: 93000
progress: 94000
progress: 95000
progress: 96000
progress: 97000
progress: 98000
progress: 99000
progress: 100000
progress: 101000
progress: 102000
progress: 103000
progress: 104000
progress: 105000
progress: 106000
progress: 107000
progress: 108000
progress: 109000
progress: 110000
progress: 111000
progress: 112000
progress: 113000
progress: 114000
progress: 115000
progress: 116000
progress: 117000
progress: 118000
progress: 119000
progress: 120000
progress: 121000
progress: 122000
progress: 123000
progress: 124000
progress: 125000
progress: 126000
progress: 127000
progress: 128000
progress: 129000
progress: 130000
progress: 131000
progress: 132000
progress: 133000
progress: 134000
progress: 135000
progress: 136000
progress: 137000
progress: 138000
progress: 139000
progress: 140000
progress: 141000
progress: 142000
progress: 143000
progress: 144000
progress: 145000
progress: 146000
progress: 147000
progress: 148000
progress: 149000
progress: 150000
progress: 151000
progress: 152000
progress: 153000
progress: 154000
progress: 155000
progress: 156000
progress: 157000
progress: 158000
progress: 159000
progress: 160000
progress: 161000
progress: 162000
progress: 163000
progress: 164000
progress: 165000
progress: 166000
progress: 167000
progress: 168000
progress: 169000
progress: 170000
progress: 171000
progress: 172000
progress: 173000
progress: 174000
progress: 175000
progress: 176000
progress: 177000
progress: 178000
progress: 179000
progress: 180000
progress: 181000
progress: 182000
progress: 183000
progress: 184000
progress: 185000
progress: 186000
progress: 187000
progress: 188000
progress: 189000
progress: 190000
progress: 191000
progress: 192000
progress: 193000
progress: 194000
progress: 195000
progress: 196000
progress: 197000
progress: 198000
progress: 199000
progress: 200000
progress: 201000
progress: 202000
progress: 203000
progress: 204000
progress: 205000
progress: 206000
progress: 207000
progress: 208000
progress: 209000
progress: 210000
progress: 211000
progress: 212000
progress: 213000
progress: 214000
progress: 215000
progress: 216000
progress: 217000
progress: 218000
progress: 219000
progress: 220000
progress: 221000
progress: 222000
progress: 223000
progress: 224000
progress: 225000
progress: 226000
progress: 227000
progress: 228000
progress: 229000
progress: 230000
progress: 231000
progress: 232000
progress: 233000
progress: 234000
progress: 235000
progress: 236000
progress: 237000
progress: 238000
progress: 239000
progress: 240000
progress: 241000
progress: 242000
progress: 243000
progress: 244000
progress: 245000
progress: 246000
progress: 247000
progress: 248000
progress: 249000
progress: 250000
progress: 251000
progress: 252000
progress: 253000
progress: 254000
progress: 255000
progress: 256000
progress: 257000
progress: 258000
progress: 259000
progress: 260000
progress: 261000
progress: 262000
progress: 263000
progress: 264000
progress: 265000
progress: 266000
progress: 267000
progress: 268000
progress: 269000
progress: 270000
progress: 271000
progress: 272000
progress: 273000
progress: 274000
progress: 275000
progress: 276000
progress: 277000
progress: 278000
progress: 279000
progress: 280000
progress: 281000
progress: 282000
progress: 283000
progress: 284000
progress: 285000
progress: 286000
progress: 287000
progress: 288000
progress: 289000
progress: 290000
progress: 291000
progress: 292000
progress: 293000
progress: 294000
progress: 295000
progress: 296000
progress: 297000
progress: 298000
progress: 299000
progress: 300000
progress: 301000
progress: 302000
progress: 303000
progress: 304000
progress: 305000
progress: 306000
progress: 307000
progress: 308000
progress: 309000
progress: 310000
progress: 311000
progress: 312000
progress: 313000
progress: 314000
progress: 315000
progress: 316000
progress: 317000
progress: 318000
progress: 319000
progress: 320000
progress: 321000
progress: 322000
progress: 323000
progress: 324000
progress: 325000
progress: 326000
progress: 327000
progress: 328000
progress: 329000
progress: 330000
progress: 331000
progress: 332000
progress: 333000
progress: 334000
progress: 335000
progress: 336000
progress: 337000
progress: 338000
progress: 339000
progress: 340000
progress: 341000
progress: 342000
progress: 343000
progress: 344000
progress: 345000
progress: 346000
progress: 347000
progress: 348000
progress: 349000
progress: 350000
progress: 351000
progress: 352000
progress: 353000
progress: 354000
progress: 355000
progress: 356000
progress: 357000
progress: 358000
progress: 359000
progress: 360000
progress: 361000
progress: 362000
progress: 363000
progress: 364000
progress: 365000
progress: 366000
progress: 367000
progress: 368000
progress: 369000
progress: 370000
progress: 371000
progress: 372000
progress: 373000
progress: 374000
progress: 375000
progress: 376000
progress: 377000
progress: 378000
progress: 379000
progress: 380000
progress: 381000
progress: 382000
progress: 383000
progress: 384000
progress: 385000
progress: 386000
progress: 387000
progress: 388000
progress: 389000
progress: 390000
progress: 391000
progress: 392000
progress: 393000
progress: 394000
progress: 395000
progress: 396000
progress: 397000
progress: 398000
progress: 399000
progress: 400000
progress: 401000
progress: 402000
progress: 403000
progress: 404000
progress: 405000
progress: 406000
progress: 407000
progress: 408000
progress: 409000
progress: 410000
progress: 411000
progress: 412000
progress: 413000
progress: 414000
progress: 415000
progress: 416000
progress: 417000
progress: 418000
progress: 419000
progress: 420000
progress: 421000
progress: 422000
progress: 423000
progress: 424000
progress: 425000
progress: 426000
progress: 427000
progress: 428000
progress: 429000
progress: 430000
progress: 431000
progress: 432000
progress: 433000
progress: 434000
progress: 435000
progress: 436000
progress: 437000
progress: 438000
progress: 439000
progress: 440000
progress: 441000
progress: 442000
progress: 443000
progress: 444000
progress: 445000
progress: 446000
progress: 447000
progress: 448000
progress: 449000
progress: 450000
progress: 451000
progress: 452000
progress: 453000
progress: 454000
progress: 455000
progress: 456000
progress: 457000
progress: 458000
progress: 459000
progress: 460000
progress: 461000
progress: 462000
progress: 463000
progress: 464000
progress: 465000
progress: 466000
progress: 467000
progress: 468000
progress: 469000
progress: 470000
progress: 471000
progress: 472000
progress: 473000
progress: 474000
progress: 475000
progress: 476000
progress: 477000
progress: 478000
progress: 479000
progress: 480000
progress: 481000
progress: 482000
progress: 483000
progress: 484000
progress: 485000
progress: 486000
progress: 487000
progress: 488000
###Markdown
Test Data For the test data $10^5$ samples from the joint a priori distribution of water vapor and temperature profiles and corresponding brightness temperatures are generated.
###Code
# test set: 10^5 samples, stored separately from the training data
y, cwv, q_profiles, t_profiles = sample_a_priori(ws, 100000)
np.save("data/x_test" + str(len(channels)) + suffix, y)
np.save("data/y_test" + str(len(channels)) + suffix, cwv)
###Output
_____no_output_____
###Markdown
Statistics
###Code
q_mean = np.load("data/q_mean.npy")
t_mean = np.load("data/t_mean.npy")
f, axs = plt.subplots(1, 2)
p = np.arange(1, 28)[::-1]  # pressure level indices, used as the y-axis in the plots below
axs[0].set_ylabel("Pressure Level")
axs[0].set_xlabel("q [kg / kg]")
axs[0].set_title("Specific Humidity")
axs[0].invert_yaxis()
axs[0].plot(q_mean.ravel()[::-1], p, c = 'b')
for i in range(1000):
ind = np.random.randint(0, q_profiles.shape[0])
axs[0].plot(q_profiles[ind, :].ravel() / 28.0 * 18.0, p, c = 'b', alpha = 0.01)
axs[0].set_xlim([-0.001, 0.03])
axs[1].set_ylabel("Pressure Level")
axs[1].set_xlabel("t [K]")
axs[1].set_title("Temperature")
axs[1].invert_yaxis()
axs[1].plot(t_mean.ravel()[::-1], p, c = 'r')
for i in range(1000):
ind = np.random.randint(0, t_profiles.shape[0])
axs[1].plot(t_profiles[ind, :].ravel(), p, c = 'r', alpha = 0.01)
axs[1].set_xlim([180, 320])
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Distribution of Brightness Temperatures
###Code
f, axs = plt.subplots(len(channels) // 2 + 1, 2)
for i, ax in enumerate([ax for l in axs for ax in l]):
if i >= len(channels):
ax.set_visible(False)
else:
bins = np.linspace(y[:,i].min(), y[:,i].max(), 41)
ax.hist(y[:,i], density=True, bins=bins)  # density replaces the deprecated normed argument
ax.set_xlabel("$T_B$")
freq = ws.y_f.value[i] * 1e-9  # channel frequency in GHz (avoid shadowing the figure handle f)
ax.set_title("Channel " + str(i) + ", " + str(freq) + " GHz")
plt.tight_layout()
###Output
_____no_output_____
###Markdown
CWV Distribution
###Code
from netCDF4 import Dataset
rootgrp = Dataset("era_interim_mid_latitudes_2016_sst_cwv.nc")
cwv_grid = rootgrp.variables['tcwv']
bins = np.logspace(-1, 2.5, 51)
plt.hist(np.asarray(cwv_grid).ravel(),
bins = bins,
label = "ERA Interim",
alpha = 0.7,
density = True)
plt.hist(cwv,
bins = bins,
alpha = 0.7,
density = True,
label = "Fitted")
plt.xscale("log")
plt.xlabel("CWV [$kg / m^2$]")
plt.title("CWV Distribution")
###Output
_____no_output_____ |
Character_level_LSTM/Character_Level_RNN_Exercise.ipynb | ###Markdown
Character-Level LSTM in PyTorchIn this notebook, I'll construct a character-level LSTM with PyTorch. The network will train character by character on some text, then generate new text character by character. As an example, I will train on Anna Karenina. **This model will be able to generate new text based on the text from the book!**This network is based off of Andrej Karpathy's [post on RNNs](http://karpathy.github.io/2015/05/21/rnn-effectiveness/) and [implementation in Torch](https://github.com/karpathy/char-rnn). Below is the general architecture of the character-wise RNN.
###Code
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
###Output
_____no_output_____
###Markdown
Load in DataThen, we'll load the Anna Karenina text file and convert it into integers for our network to use.
###Code
# open text file and read in data as `text`
with open('../input/data.txt', 'r') as f:
text = f.read()
###Output
_____no_output_____
###Markdown
Let's check out the first 100 characters, make sure everything is peachy. According to the [American Book Review](http://americanbookreview.org/100bestlines.asp), this is the 6th best first line of a book ever.
###Code
text[:100]
###Output
_____no_output_____
###Markdown
TokenizationIn the cells, below, I'm creating a couple **dictionaries** to convert the characters to and from integers. Encoding the characters as integers makes it easier to use as input in the network.
###Code
# encode the text and map each character to an integer and vice versa
# we create two dictionaries:
# 1. int2char, which maps integers to characters
# 2. char2int, which maps characters to unique integers
chars = tuple(set(text))
int2char = dict(enumerate(chars))
char2int = {ch: ii for ii, ch in int2char.items()}
# encode the text
encoded = np.array([char2int[ch] for ch in text])
###Output
_____no_output_____
###Markdown
And we can see those same characters from above, encoded as integers.
###Code
encoded[:100]
###Output
_____no_output_____
###Markdown
Pre-processing the dataAs you can see in our char-RNN image above, our LSTM expects an input that is **one-hot encoded** meaning that each character is converted into an integer (via our created dictionary) and *then* converted into a column vector where only its corresponding integer index will have the value of 1 and the rest of the vector will be filled with 0's. Since we're one-hot encoding the data, let's make a function to do that!
###Code
def one_hot_encode(arr, n_labels):
# Initialize the encoded array
one_hot = np.zeros((arr.size, n_labels), dtype=np.float32)
# Fill the appropriate elements with ones
one_hot[np.arange(one_hot.shape[0]), arr.flatten()] = 1.
# Finally reshape it to get back to the original array
one_hot = one_hot.reshape((*arr.shape, n_labels))
return one_hot
# check that the function works as expected
test_seq = np.array([[3, 5, 1, 2]])
one_hot = one_hot_encode(test_seq, 8)
print(one_hot)
def get_batches(arr, batch_size, seq_length):
'''Create a generator that returns batches of size
batch_size x seq_length from arr.
Arguments
---------
arr: Array you want to make batches from
batch_size: Batch size, the number of sequences per batch
seq_length: Number of encoded chars in a sequence
'''
batch_size_total = batch_size*seq_length
# Get the number of batches we can make
n_batches = len(arr)//(batch_size_total)
# Keep only enough characters to make full batches
arr = arr[:batch_size_total*n_batches]
#Reshape into batch_size rows
arr = arr.reshape(batch_size,-1)
## Iterate over the batches using a window of size seq_length
for n in range(0, arr.shape[1], seq_length):
# The features
x = arr[:,n:n+seq_length]
# The targets, shifted by one
y = np.zeros_like(x)
try:
y[:,:-1],y[:,-1] = x[:,1:], arr[:,n+seq_length]
except IndexError:
y[:,:-1],y[:,-1] = x[:,1:], arr[:,0]
yield x, y
###Output
_____no_output_____
###Markdown
Test Your ImplementationNow I'll make some data sets and we can check out what's going on as we batch data. Here, as an example, I'm going to use a batch size of 8 and 50 sequence steps.
###Code
batches = get_batches(encoded, 8, 50)
x, y = next(batches)
# printing out the first 10 items in a sequence
print('x\n', x[:10, :10])
print('\ny\n', y[:10, :10])
###Output
x
[[ 20 94 67 96 29 98 72 13 13 27]
[ 94 113 67 49 72 94 110 72 38 29]
[ 72 37 94 25 15 94 113 81 72 74]
[ 15 94 15 113 51 29 98 112 72 94]
[ 99 32 15 72 38 12 52 29 51 29]
[112 12 32 72 94 67 29 72 94 110]
[110 38 12 25 72 94 81 97 72 105]
[ 12 81 72 52 38 29 81 72 38 29]]
y
[[ 94 67 96 29 98 72 13 13 27 67]
[113 67 49 72 94 110 72 38 29 72]
[ 37 94 25 15 94 113 81 72 74 62]
[ 94 15 113 51 29 98 112 72 94 81]
[ 32 15 72 38 12 52 29 51 29 67]
[ 12 32 72 94 67 29 72 94 110 16]
[ 38 12 25 72 94 81 97 72 105 12]
[ 81 72 52 38 29 81 72 38 29 72]]
###Markdown
If you implemented `get_batches` correctly, the above output should look something like ```x [[25 8 60 11 45 27 28 73 1 2] [17 7 20 73 45 8 60 45 73 60] [27 20 80 73 7 28 73 60 73 65] [17 73 45 8 27 73 66 8 46 27] [73 17 60 12 73 8 27 28 73 45] [66 64 17 17 46 7 20 73 60 20] [73 76 20 20 60 73 8 60 80 73] [47 35 43 7 20 17 24 50 37 73]]y [[ 8 60 11 45 27 28 73 1 2 2] [ 7 20 73 45 8 60 45 73 60 45] [20 80 73 7 28 73 60 73 65 7] [73 45 8 27 73 66 8 46 27 65] [17 60 12 73 8 27 28 73 45 27] [64 17 17 46 7 20 73 60 20 80] [76 20 20 60 73 8 60 80 73 17] [35 43 7 20 17 24 50 37 73 36]] ``` although the exact numbers may be different. Check to make sure the data is shifted over one step for `y`. --- Defining the network with PyTorchBelow is where you'll define the network.Next, you'll use PyTorch to define the architecture of the network. We start by defining the layers and operations we want. Then, define a method for the forward pass. You've also been given a method for predicting characters. Model StructureIn `__init__` the suggested structure is as follows:* Create and store the necessary dictionaries (this has been done for you)* Define an LSTM layer that takes as params: an input size (the number of characters), a hidden layer size `n_hidden`, a number of layers `n_layers`, a dropout probability `drop_prob`, and a batch_first boolean (True, since we are batching)* Define a dropout layer with `drop_prob`* Define a fully-connected layer with params: input size `n_hidden` and output size (the number of characters)* Finally, initialize the weights (again, this has been given)Note that some parameters have been named and given in the `__init__` function, and we use them and store them by doing something like `self.drop_prob = drop_prob`.
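A quick programmatic check (a small sketch using the `x`, `y` batch printed above): the targets should simply be the inputs shifted one step to the left.
```
# y is x shifted left by one character; the last column of y comes from the
# next window, so compare everything except that final column.
assert np.array_equal(y[:, :-1], x[:, 1:]), "targets are not shifted by one step"
print("get_batches shift check passed")
```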
###Code
# check if GPU is available
train_on_gpu = torch.cuda.is_available()
if(train_on_gpu):
print('Training on GPU!')
else:
print('No GPU available, training on CPU; consider making n_epochs very small.')
class CharRNN(nn.Module):
def __init__(self, tokens, n_hidden=256, n_layers=2,
drop_prob=0.5, lr=0.001):
super().__init__()
self.drop_prob = drop_prob
self.n_layers = n_layers
self.n_hidden = n_hidden
self.lr = lr
# creating character dictionaries
self.chars = tokens
self.int2char = dict(enumerate(self.chars))
self.char2int = {ch: ii for ii, ch in self.int2char.items()}
# defining the layers of the model
self.lstm = nn.LSTM(len(self.chars),n_hidden,n_layers,dropout=drop_prob,batch_first=True)
self.dropout = nn.Dropout(drop_prob)
self.fc = nn.Linear(n_hidden,len(self.chars))
def forward(self, x, hidden):
''' Forward pass through the network.
These inputs are x, and the hidden/cell state `hidden`. '''
##Getting the outputs and the new hidden state from the lstm
r_output, hidden = self.lstm(x, hidden)
## passing through the dropout layer
out = self.dropout(r_output)
# Stack up LSTM outputs using view
# you may need to use contiguous to reshape the output
out = out.contiguous().view(-1, self.n_hidden)
out = self.fc(out)
# return the final output and the hidden state
return out, hidden
def init_hidden(self, batch_size):
''' Initializes hidden state '''
# Create two new tensors with sizes n_layers x batch_size x n_hidden,
# initialized to zero, for hidden state and cell state of LSTM
weight = next(self.parameters()).data
if (train_on_gpu):
hidden = (weight.new(self.n_layers, batch_size, self.n_hidden).zero_().cuda(),
weight.new(self.n_layers, batch_size, self.n_hidden).zero_().cuda())
else:
hidden = (weight.new(self.n_layers, batch_size, self.n_hidden).zero_(),
weight.new(self.n_layers, batch_size, self.n_hidden).zero_())
return hidden
###Output
_____no_output_____
###Markdown
Time to trainA couple of details about training: >* Within the batch loop, we detach the hidden state from its history; this time setting it equal to a new *tuple* variable because an LSTM has a hidden state that is a tuple of the hidden and cell states.* We use [`clip_grad_norm_`](https://pytorch.org/docs/stable/_modules/torch/nn/utils/clip_grad.html) to help prevent exploding gradients.
###Code
def train(net, data, epochs=10, batch_size=10, seq_length=50, lr=0.001, clip=5, val_frac=0.1, print_every=10):
''' Training a network
Arguments
---------
net: CharRNN network
data: text data to train the network
epochs: Number of epochs to train
batch_size: Number of mini-sequences per mini-batch, aka batch size
seq_length: Number of character steps per mini-batch
lr: learning rate
clip: gradient clipping
val_frac: Fraction of data to hold out for validation
print_every: Number of steps for printing training and validation loss
'''
net.train()
opt = torch.optim.Adam(net.parameters(), lr=lr)
criterion = nn.CrossEntropyLoss()
# create training and validation data
val_idx = int(len(data)*(1-val_frac))
data, val_data = data[:val_idx], data[val_idx:]
if(train_on_gpu):
net.cuda()
counter = 0
n_chars = len(net.chars)
for e in range(epochs):
# initialize hidden state
h = net.init_hidden(batch_size)
for x, y in get_batches(data, batch_size, seq_length):
counter += 1
# One-hot encode our data and make them Torch tensors
x = one_hot_encode(x, n_chars)
inputs, targets = torch.from_numpy(x), torch.from_numpy(y)
if(train_on_gpu):
inputs, targets = inputs.cuda(), targets.cuda()
# Creating new variables for the hidden state, otherwise
# we'd backprop through the entire training history
h = tuple([each.data for each in h])
# zero accumulated gradients
net.zero_grad()
# get the output from the model
output, h = net(inputs, h)
# calculate the loss and perform backprop
loss = criterion(output, targets.view(batch_size*seq_length).long())
loss.backward()
# `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.
nn.utils.clip_grad_norm_(net.parameters(), clip)
opt.step()
# loss stats
if counter % print_every == 0:
# Get validation loss
val_h = net.init_hidden(batch_size)
val_losses = []
net.eval()
for x, y in get_batches(val_data, batch_size, seq_length):
# One-hot encode our data and make them Torch tensors
x = one_hot_encode(x, n_chars)
x, y = torch.from_numpy(x), torch.from_numpy(y)
# Creating new variables for the hidden state, otherwise
# we'd backprop through the entire training history
val_h = tuple([each.data for each in val_h])
inputs, targets = x, y
if(train_on_gpu):
inputs, targets = inputs.cuda(), targets.cuda()
output, val_h = net(inputs, val_h)
val_loss = criterion(output, targets.view(batch_size*seq_length).long())
val_losses.append(val_loss.item())
net.train() # reset to train mode after iterating through validation data
print("Epoch: {}/{}...".format(e+1, epochs),
"Step: {}...".format(counter),
"Loss: {:.4f}...".format(loss.item()),
"Val Loss: {:.4f}".format(np.mean(val_losses)))
###Output
_____no_output_____
###Markdown
Instantiating the modelNow we can actually train the network. First we'll create the network itself, with some given hyperparameters. Then, define the mini-batch sizes, and start training!
###Code
## TODO: set your model hyperparameters
# define and print the net
n_hidden= 512
n_layers= 2
net = CharRNN(chars, n_hidden, n_layers)
print(net)
###Output
CharRNN(
(lstm): LSTM(117, 512, num_layers=2, batch_first=True, dropout=0.5)
(dropout): Dropout(p=0.5, inplace=False)
(fc): Linear(in_features=512, out_features=117, bias=True)
)
###Markdown
Set your training hyperparameters!
###Code
batch_size = 256
seq_length = 120
n_epochs = 25
# train the model
train(net, encoded, epochs=n_epochs, batch_size=batch_size, seq_length=seq_length, lr=0.001, print_every=10)
###Output
Epoch: 1/25... Step: 10... Loss: 2.7027... Val Loss: 2.6824
Epoch: 1/25... Step: 20... Loss: 2.6177... Val Loss: 2.6227
Epoch: 1/25... Step: 30... Loss: 2.5845... Val Loss: 2.6054
Epoch: 1/25... Step: 40... Loss: 2.5659... Val Loss: 2.5750
Epoch: 1/25... Step: 50... Loss: 2.5402... Val Loss: 2.5535
Epoch: 1/25... Step: 60... Loss: 2.5080... Val Loss: 2.5313
Epoch: 1/25... Step: 70... Loss: 2.5019... Val Loss: 2.5111
Epoch: 1/25... Step: 80... Loss: 2.4924... Val Loss: 2.4885
Epoch: 1/25... Step: 90... Loss: 2.4727... Val Loss: 2.4664
Epoch: 1/25... Step: 100... Loss: 2.4444... Val Loss: 2.4445
Epoch: 1/25... Step: 110... Loss: 2.4233... Val Loss: 2.4268
Epoch: 1/25... Step: 120... Loss: 2.3961... Val Loss: 2.4005
Epoch: 1/25... Step: 130... Loss: 2.3715... Val Loss: 2.3815
Epoch: 1/25... Step: 140... Loss: 2.3392... Val Loss: 2.3610
Epoch: 1/25... Step: 150... Loss: 2.3326... Val Loss: 2.3391
Epoch: 1/25... Step: 160... Loss: 2.2963... Val Loss: 2.3131
Epoch: 2/25... Step: 170... Loss: 2.2897... Val Loss: 2.2997
Epoch: 2/25... Step: 180... Loss: 2.2875... Val Loss: 2.2722
Epoch: 2/25... Step: 190... Loss: 2.2474... Val Loss: 2.2482
Epoch: 2/25... Step: 200... Loss: 2.2288... Val Loss: 2.2272
Epoch: 2/25... Step: 210... Loss: 2.2274... Val Loss: 2.2057
Epoch: 2/25... Step: 220... Loss: 2.2042... Val Loss: 2.1907
Epoch: 2/25... Step: 230... Loss: 2.1786... Val Loss: 2.1692
Epoch: 2/25... Step: 240... Loss: 2.1658... Val Loss: 2.1568
Epoch: 2/25... Step: 250... Loss: 2.1488... Val Loss: 2.1422
Epoch: 2/25... Step: 260... Loss: 2.1354... Val Loss: 2.1216
Epoch: 2/25... Step: 270... Loss: 2.1022... Val Loss: 2.0986
Epoch: 2/25... Step: 280... Loss: 2.1048... Val Loss: 2.0813
Epoch: 2/25... Step: 290... Loss: 2.0978... Val Loss: 2.0613
Epoch: 2/25... Step: 300... Loss: 2.0543... Val Loss: 2.0453
Epoch: 2/25... Step: 310... Loss: 2.0531... Val Loss: 2.0322
Epoch: 2/25... Step: 320... Loss: 2.0295... Val Loss: 2.0179
Epoch: 2/25... Step: 330... Loss: 2.0371... Val Loss: 2.0021
Epoch: 3/25... Step: 340... Loss: 2.0115... Val Loss: 1.9830
Epoch: 3/25... Step: 350... Loss: 1.9992... Val Loss: 1.9713
Epoch: 3/25... Step: 360... Loss: 1.9732... Val Loss: 1.9597
Epoch: 3/25... Step: 370... Loss: 1.9483... Val Loss: 1.9503
Epoch: 3/25... Step: 380... Loss: 1.9475... Val Loss: 1.9410
Epoch: 3/25... Step: 390... Loss: 1.9256... Val Loss: 1.9272
Epoch: 3/25... Step: 400... Loss: 1.9217... Val Loss: 1.9188
Epoch: 3/25... Step: 410... Loss: 1.9241... Val Loss: 1.9094
Epoch: 3/25... Step: 420... Loss: 1.8889... Val Loss: 1.8912
Epoch: 3/25... Step: 430... Loss: 1.8968... Val Loss: 1.8813
Epoch: 3/25... Step: 440... Loss: 1.9008... Val Loss: 1.8706
Epoch: 3/25... Step: 450... Loss: 1.8894... Val Loss: 1.8594
Epoch: 3/25... Step: 460... Loss: 1.8675... Val Loss: 1.8482
Epoch: 3/25... Step: 470... Loss: 1.8425... Val Loss: 1.8400
Epoch: 3/25... Step: 480... Loss: 1.8424... Val Loss: 1.8291
Epoch: 3/25... Step: 490... Loss: 1.8323... Val Loss: 1.8236
Epoch: 4/25... Step: 500... Loss: 1.8096... Val Loss: 1.8092
Epoch: 4/25... Step: 510... Loss: 1.8081... Val Loss: 1.7995
Epoch: 4/25... Step: 520... Loss: 1.8032... Val Loss: 1.7935
Epoch: 4/25... Step: 530... Loss: 1.8034... Val Loss: 1.7838
Epoch: 4/25... Step: 540... Loss: 1.8199... Val Loss: 1.7794
Epoch: 4/25... Step: 550... Loss: 1.7959... Val Loss: 1.7711
Epoch: 4/25... Step: 560... Loss: 1.7604... Val Loss: 1.7649
Epoch: 4/25... Step: 570... Loss: 1.7840... Val Loss: 1.7575
Epoch: 4/25... Step: 580... Loss: 1.7640... Val Loss: 1.7480
Epoch: 4/25... Step: 590... Loss: 1.7518... Val Loss: 1.7380
Epoch: 4/25... Step: 600... Loss: 1.7396... Val Loss: 1.7302
Epoch: 4/25... Step: 610... Loss: 1.7432... Val Loss: 1.7241
Epoch: 4/25... Step: 620... Loss: 1.7486... Val Loss: 1.7134
Epoch: 4/25... Step: 630... Loss: 1.7208... Val Loss: 1.7080
Epoch: 4/25... Step: 640... Loss: 1.7269... Val Loss: 1.7046
Epoch: 4/25... Step: 650... Loss: 1.7149... Val Loss: 1.7026
Epoch: 4/25... Step: 660... Loss: 1.7428... Val Loss: 1.6929
Epoch: 5/25... Step: 670... Loss: 1.7061... Val Loss: 1.6801
Epoch: 5/25... Step: 680... Loss: 1.7054... Val Loss: 1.6741
Epoch: 5/25... Step: 690... Loss: 1.6786... Val Loss: 1.6696
Epoch: 5/25... Step: 700... Loss: 1.6813... Val Loss: 1.6688
Epoch: 5/25... Step: 710... Loss: 1.6827... Val Loss: 1.6620
Epoch: 5/25... Step: 720... Loss: 1.6694... Val Loss: 1.6584
Epoch: 5/25... Step: 730... Loss: 1.6741... Val Loss: 1.6535
Epoch: 5/25... Step: 740... Loss: 1.6656... Val Loss: 1.6439
Epoch: 5/25... Step: 750... Loss: 1.6308... Val Loss: 1.6358
Epoch: 5/25... Step: 760... Loss: 1.6405... Val Loss: 1.6241
Epoch: 5/25... Step: 770... Loss: 1.6570... Val Loss: 1.6246
Epoch: 5/25... Step: 780... Loss: 1.6685... Val Loss: 1.6164
Epoch: 5/25... Step: 790... Loss: 1.6337... Val Loss: 1.6096
Epoch: 5/25... Step: 800... Loss: 1.6238... Val Loss: 1.6058
Epoch: 5/25... Step: 810... Loss: 1.6263... Val Loss: 1.6020
Epoch: 5/25... Step: 820... Loss: 1.6188... Val Loss: 1.5990
Epoch: 6/25... Step: 830... Loss: 1.5977... Val Loss: 1.5912
Epoch: 6/25... Step: 840... Loss: 1.5994... Val Loss: 1.5853
Epoch: 6/25... Step: 850... Loss: 1.6057... Val Loss: 1.5810
Epoch: 6/25... Step: 860... Loss: 1.6177... Val Loss: 1.5761
Epoch: 6/25... Step: 870... Loss: 1.6297... Val Loss: 1.5729
Epoch: 6/25... Step: 880... Loss: 1.5974... Val Loss: 1.5833
Epoch: 6/25... Step: 890... Loss: 1.5752... Val Loss: 1.5733
Epoch: 6/25... Step: 900... Loss: 1.6096... Val Loss: 1.5598
Epoch: 6/25... Step: 910... Loss: 1.5782... Val Loss: 1.5509
Epoch: 6/25... Step: 920... Loss: 1.5703... Val Loss: 1.5467
Epoch: 6/25... Step: 930... Loss: 1.5620... Val Loss: 1.5478
Epoch: 6/25... Step: 940... Loss: 1.5694... Val Loss: 1.5377
Epoch: 6/25... Step: 950... Loss: 1.5695... Val Loss: 1.5356
Epoch: 6/25... Step: 960... Loss: 1.5567... Val Loss: 1.5313
Epoch: 6/25... Step: 970... Loss: 1.5632... Val Loss: 1.5305
Epoch: 6/25... Step: 980... Loss: 1.5533... Val Loss: 1.5306
Epoch: 6/25... Step: 990... Loss: 1.5906... Val Loss: 1.5285
Epoch: 7/25... Step: 1000... Loss: 1.5538... Val Loss: 1.5168
Epoch: 7/25... Step: 1010... Loss: 1.5559... Val Loss: 1.5157
Epoch: 7/25... Step: 1020... Loss: 1.5247... Val Loss: 1.5112
Epoch: 7/25... Step: 1030... Loss: 1.5299... Val Loss: 1.5093
Epoch: 7/25... Step: 1040... Loss: 1.5427... Val Loss: 1.5029
Epoch: 7/25... Step: 1050... Loss: 1.5288... Val Loss: 1.5059
Epoch: 7/25... Step: 1060... Loss: 1.5293... Val Loss: 1.5028
Epoch: 7/25... Step: 1070... Loss: 1.5208... Val Loss: 1.5046
Epoch: 7/25... Step: 1080... Loss: 1.4898... Val Loss: 1.4965
Epoch: 7/25... Step: 1090... Loss: 1.5087... Val Loss: 1.4880
Epoch: 7/25... Step: 1100... Loss: 1.5210... Val Loss: 1.4878
Epoch: 7/25... Step: 1110... Loss: 1.5333... Val Loss: 1.4853
Epoch: 7/25... Step: 1120... Loss: 1.5132... Val Loss: 1.4885
Epoch: 7/25... Step: 1130... Loss: 1.4978... Val Loss: 1.4914
Epoch: 7/25... Step: 1140... Loss: 1.5032... Val Loss: 1.4834
Epoch: 7/25... Step: 1150... Loss: 1.4936... Val Loss: 1.4751
Epoch: 8/25... Step: 1160... Loss: 1.4869... Val Loss: 1.4713
Epoch: 8/25... Step: 1170... Loss: 1.4907... Val Loss: 1.4749
Epoch: 8/25... Step: 1180... Loss: 1.4868... Val Loss: 1.4764
Epoch: 8/25... Step: 1190... Loss: 1.5036... Val Loss: 1.4658
Epoch: 8/25... Step: 1200... Loss: 1.5197... Val Loss: 1.4632
Epoch: 8/25... Step: 1210... Loss: 1.4871... Val Loss: 1.4630
Epoch: 8/25... Step: 1220... Loss: 1.4711... Val Loss: 1.4608
Epoch: 8/25... Step: 1230... Loss: 1.5064... Val Loss: 1.4628
Epoch: 8/25... Step: 1240... Loss: 1.4782... Val Loss: 1.4598
Epoch: 8/25... Step: 1250... Loss: 1.4665... Val Loss: 1.4607
Epoch: 8/25... Step: 1260... Loss: 1.4634... Val Loss: 1.4522
Epoch: 8/25... Step: 1270... Loss: 1.4735... Val Loss: 1.4459
Epoch: 8/25... Step: 1280... Loss: 1.4670... Val Loss: 1.4448
Epoch: 8/25... Step: 1290... Loss: 1.4569... Val Loss: 1.4417
Epoch: 8/25... Step: 1300... Loss: 1.4685... Val Loss: 1.4414
Epoch: 8/25... Step: 1310... Loss: 1.4601... Val Loss: 1.4382
Epoch: 8/25... Step: 1320... Loss: 1.5010... Val Loss: 1.4373
Epoch: 9/25... Step: 1330... Loss: 1.4629... Val Loss: 1.4317
Epoch: 9/25... Step: 1340... Loss: 1.4650... Val Loss: 1.4310
###Markdown
Getting the best modelTo set your hyperparameters to get the best performance, you'll want to watch the training and validation losses. If your training loss is much lower than the validation loss, you're overfitting. Increase regularization (more dropout) or use a smaller network. If the training and validation losses are close, you're underfitting so you can increase the size of the network. CheckpointAfter training, we'll save the model so we can load it again later if we need to. Here I'm saving the parameters needed to create the same architecture, the hidden layer hyperparameters and the text characters.
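For example (a hypothetical sketch, not used below, with illustrative values), the `CharRNN` constructor exposes exactly these knobs:
```
# more regularization if training loss << validation loss (hypothetical values)
smaller_net = CharRNN(chars, n_hidden=256, n_layers=2, drop_prob=0.65)
# more capacity if both losses plateau high and close together (hypothetical values)
larger_net = CharRNN(chars, n_hidden=768, n_layers=3, drop_prob=0.5)
```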
###Code
# change the name, for saving multiple files
model_name = 'rnn_x_epoch.net'
checkpoint = {'n_hidden': net.n_hidden,
'n_layers': net.n_layers,
'state_dict': net.state_dict(),
'tokens': net.chars}
with open(model_name, 'wb') as f:
torch.save(checkpoint, f)
def predict(net, char, h=None, top_k=None):
''' Given a character, predict the next character.
Returns the predicted character and the hidden state.
'''
# tensor inputs
x = np.array([[net.char2int[char]]])
x = one_hot_encode(x, len(net.chars))
inputs = torch.from_numpy(x)
if(train_on_gpu):
inputs = inputs.cuda()
# detach hidden state from history
h = tuple([each.data for each in h])
# get the output of the model
out, h = net(inputs, h)
# get the character probabilities
p = F.softmax(out, dim=1).data
if(train_on_gpu):
p = p.cpu() # move to cpu
# get top characters
if top_k is None:
top_ch = np.arange(len(net.chars))
else:
p, top_ch = p.topk(top_k)
top_ch = top_ch.numpy().squeeze()
# select the likely next character with some element of randomness
p = p.numpy().squeeze()
char = np.random.choice(top_ch, p=p/p.sum())
# return the encoded value of the predicted char and the hidden state
return net.int2char[char], h
###Output
_____no_output_____
###Markdown
Priming and generating text Typically you'll want to prime the network so you can build up a hidden state. Otherwise the network will start out generating characters at random. In general the first bunch of characters will be a little rough since it hasn't built up a long history of characters to predict from.
###Code
def sample(net, size, prime='The', top_k=None):
if(train_on_gpu):
net.cuda()
else:
net.cpu()
net.eval() # eval mode
# First off, run through the prime characters
chars = [ch for ch in prime]
h = net.init_hidden(1)
for ch in prime:
char, h = predict(net, ch, h, top_k=top_k)
chars.append(char)
# Now pass in the previous character and get a new one
for ii in range(size):
char, h = predict(net, chars[-1], h, top_k=top_k)
chars.append(char)
return ''.join(chars)
print(sample(net, 1000, prime='I am ', top_k=5))
###Output
I am often as if his
marked her activity of the distant mind, and see that she was the salary
and that it was to bring the stop in the window. He did not have
been those that is traced in the soldiers, the memery there seemed to him to
that through his father.
And as shoot on the cruelty, when her house stopped him with the
sense of a certain serious and stoppings and watching her feet with
a sentence where their hands and the position was now, and to
them hardly. But the price of the son of harrows, at the son of
whom he was continually forgetting there, and was in which he was
stringed into the dream.
"You can shall be already as able to mon horse, there's not," she
said. "All you have been at a latter, but I won't be in the sense
of the sound."
"That's not always."
"This most still have the children."
"Why do you know, they have as an intention of society," he said
angrily about her, and was an excitement so much stringling.
"Oh, you'll be to do at her? You've been in the position o
###Markdown
Loading a checkpoint
###Code
# Load the checkpoint saved above (`rnn_x_epoch.net`) and rebuild the model from it
with open('rnn_x_epoch.net', 'rb') as f:
checkpoint = torch.load(f)
loaded = CharRNN(checkpoint['tokens'], n_hidden=checkpoint['n_hidden'], n_layers=checkpoint['n_layers'])
loaded.load_state_dict(checkpoint['state_dict'])
# Sample using a loaded model
print(sample(loaded, 400, top_k=5, prime="Once he "))
###Output
Once he came to the
manner of her bedroom of her son. He was the same thing in the
classing of the party, as
|
02_deep_learning/transfer-learning/transfer_learning_tutorial.ipynb | ###Markdown
Transfer Learning Tutorial==========================**Author**: `Sasank Chilamkurthy `_In this tutorial, you will learn how to train your network using transfer learning. You can read more about transfer learning in the `cs231n notes `__. Quoting these notes, In practice, very few people train an entire Convolutional Network from scratch (with random initialization), because it is relatively rare to have a dataset of sufficient size. Instead, it is common to pretrain a ConvNet on a very large dataset (e.g. ImageNet, which contains 1.2 million images with 1000 categories), and then use the ConvNet either as an initialization or a fixed feature extractor for the task of interest. These two major transfer learning scenarios look as follows:- **Finetuning the convnet**: Instead of random initialization, we initialize the network with a pretrained network, like the one that is trained on the imagenet 1000 dataset. Rest of the training looks as usual.- **ConvNet as fixed feature extractor**: Here, we will freeze the weights for all of the network except that of the final fully connected layer. This last fully connected layer is replaced with a new one with random weights and only this layer is trained.
###Code
# License: BSD
# Author: Sasank Chilamkurthy
from __future__ import print_function, division
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import os
import copy
plt.ion() # interactive mode
###Output
_____no_output_____
###Markdown
Load Data---------We will use torchvision and torch.utils.data packages for loading the data. The problem we're going to solve today is to train a model to classify **ants** and **bees**. We have about 120 training images each for ants and bees. There are 75 validation images for each class. Usually, this is a very small dataset to generalize upon, if trained from scratch. Since we are using transfer learning, we should be able to generalize reasonably well. This dataset is a very small subset of imagenet... Note :: Download the data from `here `_ and extract it to the current directory.
###Code
# Data augmentation and normalization for training
# Just normalization for validation
data_transforms = {
'train': transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
'val': transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
}
data_dir = 'hymenoptera_data'
image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
data_transforms[x])
for x in ['train', 'val']}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=4,
shuffle=True, num_workers=4)
for x in ['train', 'val']}
dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}
class_names = image_datasets['train'].classes
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
###Output
_____no_output_____
###Markdown
Visualize a few images^^^^^^^^^^^^^^^^^^^^^^Let's visualize a few training images so as to understand the data augmentations.
###Code
def imshow(inp, title=None):
"""Imshow for Tensor."""
inp = inp.numpy().transpose((1, 2, 0))
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
inp = std * inp + mean
inp = np.clip(inp, 0, 1)
plt.imshow(inp)
if title is not None:
plt.title(title)
plt.pause(0.001) # pause a bit so that plots are updated
# Get a batch of training data
inputs, classes = next(iter(dataloaders['train']))
# Make a grid from batch
out = torchvision.utils.make_grid(inputs)
imshow(out, title=[class_names[x] for x in classes])
###Output
_____no_output_____
###Markdown
Training the model------------------Now, let's write a general function to train a model. Here, we will illustrate:- Scheduling the learning rate- Saving the best modelIn the following, parameter ``scheduler`` is an LR scheduler object from ``torch.optim.lr_scheduler``.
###Code
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
since = time.time()
best_model_wts = copy.deepcopy(model.state_dict())
best_acc = 0.0
for epoch in range(num_epochs):
print('Epoch {}/{}'.format(epoch, num_epochs - 1))
print('-' * 10)
# Each epoch has a training and validation phase
for phase in ['train', 'val']:
if phase == 'train':
model.train() # Set model to training mode
else:
model.eval() # Set model to evaluate mode
running_loss = 0.0
running_corrects = 0
# Iterate over data.
for inputs, labels in dataloaders[phase]:
inputs = inputs.to(device)
labels = labels.to(device)
# zero the parameter gradients
optimizer.zero_grad()
# forward
# track history if only in train
with torch.set_grad_enabled(phase == 'train'):
outputs = model(inputs)
_, preds = torch.max(outputs, 1)
loss = criterion(outputs, labels)
# backward + optimize only if in training phase
if phase == 'train':
loss.backward()
optimizer.step()
scheduler.step()
# statistics
running_loss += loss.item() * inputs.size(0)
running_corrects += torch.sum(preds == labels.data)
epoch_loss = running_loss / dataset_sizes[phase]
epoch_acc = running_corrects.double() / dataset_sizes[phase]
print('{} Loss: {:.4f} Acc: {:.4f}'.format(
phase, epoch_loss, epoch_acc))
# deep copy the model
if phase == 'val' and epoch_acc > best_acc:
best_acc = epoch_acc
best_model_wts = copy.deepcopy(model.state_dict())
print()
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(
time_elapsed // 60, time_elapsed % 60))
print('Best val Acc: {:4f}'.format(best_acc))
# load best model weights
model.load_state_dict(best_model_wts)
return model
###Output
_____no_output_____
###Markdown
Visualizing the model predictions^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^Generic function to display predictions for a few images
###Code
def visualize_model(model, num_images=6):
was_training = model.training
model.eval()
images_so_far = 0
fig = plt.figure()
with torch.no_grad():
for i, (inputs, labels) in enumerate(dataloaders['val']):
inputs = inputs.to(device)
labels = labels.to(device)
outputs = model(inputs)
_, preds = torch.max(outputs, 1)
for j in range(inputs.size()[0]):
images_so_far += 1
ax = plt.subplot(num_images//2, 2, images_so_far)
ax.axis('off')
ax.set_title('predicted: {}'.format(class_names[preds[j]]))
imshow(inputs.cpu().data[j])
if images_so_far == num_images:
model.train(mode=was_training)
return
model.train(mode=was_training)
###Output
_____no_output_____
###Markdown
Finetuning the convnet----------------------Load a pretrained model and reset final fully connected layer.
###Code
model_ft = models.resnet18(pretrained=True)
num_ftrs = model_ft.fc.in_features
model_ft.fc = nn.Linear(num_ftrs, 2)
model_ft = model_ft.to(device)
criterion = nn.CrossEntropyLoss()
# Observe that all parameters are being optimized
optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)
# Decay LR by a factor of 0.1 every 7 epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)
###Output
Downloading: "https://download.pytorch.org/models/resnet18-5c106cde.pth" to C:\Users\tgoral/.torch\models\resnet18-5c106cde.pth
46827520it [00:42, 1095686.14it/s]
###Markdown
Train and evaluate^^^^^^^^^^^^^^^^^^It should take around 15-25 min on CPU. On GPU though, it takes less than a minute.
###Code
model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler,
num_epochs=25)
visualize_model(model_ft)
###Output
_____no_output_____
###Markdown
ConvNet as fixed feature extractor----------------------------------Here, we need to freeze all the network except the final layer. We need to set ``requires_grad == False`` to freeze the parameters so that the gradients are not computed in ``backward()``. You can read more about this in the documentation `here `__.
###Code
model_conv = torchvision.models.resnet18(pretrained=True)
for param in model_conv.parameters():
param.requires_grad = False
# Parameters of newly constructed modules have requires_grad=True by default
num_ftrs = model_conv.fc.in_features
model_conv.fc = nn.Linear(num_ftrs, 2)
model_conv = model_conv.to(device)
criterion = nn.CrossEntropyLoss()
# Observe that only parameters of final layer are being optimized as
# opposed to before.
optimizer_conv = optim.SGD(model_conv.fc.parameters(), lr=0.001, momentum=0.9)
# Decay LR by a factor of 0.1 every 7 epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_conv, step_size=7, gamma=0.1)
###Output
_____no_output_____
###Markdown
Train and evaluate^^^^^^^^^^^^^^^^^^On CPU this will take about half the time compared to the previous scenario. This is expected as gradients don't need to be computed for most of the network. However, forward does need to be computed.
###Code
model_conv = train_model(model_conv, criterion, optimizer_conv,
exp_lr_scheduler, num_epochs=25)
visualize_model(model_conv)
plt.ioff()
plt.show()
###Output
_____no_output_____ |
quant_finance_lectures/Lecture34-Factor-Risk-Exposure.ipynb | ###Markdown
© Copyright Quantopian Inc.© Modifications Copyright QuantRocket LLCLicensed under the [Creative Commons Attribution 4.0](https://creativecommons.org/licenses/by/4.0/legalcode).Disclaimer Factor Risk ExposureBy Evgenia "Jenny" Nitishinskaya, Delaney Granizo-Mackenzie, and Maxwell Margenot. DISCLAIMERAs always, this analysis is based on historical data, and risk exposures estimated on historical data may or may not affect the exposures going forward. As such, computing the risk exposure of a factor is not enough. You must put confidence bounds on that risk exposure, and determine whether the risk exposure can even be modeled reasonably. For more information on this, please see our other lectures, especially Instability of Parameter Estimates. Using Factor Models to Determine Risk ExposureWe can use factor models to analyze the sources of risks and returns in portfolios. Recall that a factor model expresses the returns as$$R_i = a_i + b_{i1} F_1 + b_{i2} F_2 + \ldots + b_{iK} F_K + \epsilon_i$$By modelling the historical returns, we can see how much of them is due to speculation on different factors and how much to asset-specific fluctuations ($\epsilon_p$). We can also examine what sources of risk the portfolio is exposed to. In risk analysis, we often model active returns (returns relative to a benchmark) and active risk (standard deviation of active returns, also known as tracking error or tracking risk).For instance, we can find a factor's marginal contribution to active risk squared (FMCAR). For factor $j$, this is$$ \text{FMCAR}_j = \frac{b_j^a \sum_{i=1}^K b_i^a Cov(F_j, F_i)}{(\text{Active risk})^2} $$where $b_i^a$ is the portfolio's active exposure to factor $i$. This tells us how much risk we incur by being exposed to factor $j$, given all the other factors we're already exposed to.Fundamental factor models are often used to evaluate portfolios because they correspond directly to investment choices (e.g. whether we invest in small-cap or large-cap stocks, etc.). Below, we construct a model to evaluate a single asset; for more information on the model construction, check out the fundamental factor models notebook.We'll use the canonical Fama-French factors for this example, which are the returns of portfolios constructred based on fundamental factors. How many factors do you want?In the Arbitrage Pricing Theory lecture we mention that for predictive models you want fewer parameters. However, this doesn't quite hold for risk exposure. Instead of trying to not overfit a predictive model, you are looking for any possible risk factor that could be influencing your returns. Therefore it's actually safer to estimate exposure to many many risk factors to see if any stick. Anything left over in our $\alpha$ is risk exposure that is currently unexplained by the selected factors. You want your strategy's return stream to be all alpha, and to be unexplained by as many parameters as possible. If you can show that your historical returns have little to no dependence on many factors, this is very positive. Certainly some unrelated risk factors might have spurious relationships over time in a large dataset, but those are not likely to be consistent. SetupThe first thing we do is compute a year's worth of factor returns. NOTEThe process for doing this is described in the Fundamental Factor Models lecture and uses pipeline. For more information please see that lecture.
###Code
import numpy as np
import statsmodels.api as sm
import scipy.stats as stats
from statsmodels import regression
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from zipline.pipeline import Pipeline
from zipline.pipeline.data import sharadar, EquityPricing
from zipline.pipeline.factors import CustomFactor, Returns
def make_pipeline():
"""
Create and return our pipeline.
We break this piece of logic out into its own function to make it easier to
test and modify in isolation.
In particular, this function can be copy/pasted into research and run by itself.
"""
pipe = Pipeline()
Fundamentals = sharadar.Fundamentals.slice(dimension='ARQ', period_offset=0)
# Add our factors to the pipeline
market_cap = Fundamentals.MARKETCAP.latest
# Raw market cap and book to price data gets fed in here
pipe.add(market_cap, "market_cap")
book_to_price = 1/Fundamentals.PB.latest
pipe.add(book_to_price, "book_to_price")
# We also get daily returns
returns = Returns(inputs=[EquityPricing.close], window_length=2)
pipe.add(returns, "returns")
# We compute a daily rank of both factors, this is used in the next step,
# which is computing portfolio membership.
market_cap_rank = market_cap.rank()
pipe.add(market_cap_rank, 'market_cap_rank')
book_to_price_rank = book_to_price.rank()
pipe.add(book_to_price_rank, 'book_to_price_rank')
# Build Filters representing the top and bottom 1000 stocks by our combined ranking system.
biggest = market_cap_rank.top(1000)
smallest = market_cap_rank.bottom(1000)
highpb = book_to_price_rank.top(1000)
lowpb = book_to_price_rank.bottom(1000)
# Don't return anything not in this set, as we don't need it.
pipe.set_screen(biggest | smallest | highpb | lowpb)
# Add the boolean flags we computed to the output data
pipe.add(biggest, 'biggest')
pipe.add(smallest, 'smallest')
pipe.add(highpb, 'highpb')
pipe.add(lowpb, 'lowpb')
return pipe
pipe = make_pipeline()
from zipline.research import run_pipeline
start_date = '2014-01-01'
end_date = '2015-01-01'
results = run_pipeline(pipe, start_date=start_date, end_date=end_date, bundle='usstock-1d-bundle')
R_biggest = results[results.biggest]['returns'].groupby(level=0).mean()
R_smallest = results[results.smallest]['returns'].groupby(level=0).mean()
R_highpb = results[results.highpb]['returns'].groupby(level=0).mean()
R_lowpb = results[results.lowpb]['returns'].groupby(level=0).mean()
SMB = R_smallest - R_biggest
HML = R_highpb - R_lowpb
###Output
_____no_output_____
###Markdown
How did each factor do over 2014?
###Code
SMB_CUM = np.cumprod(SMB+1)
HML_CUM = np.cumprod(HML+1)
plt.plot(SMB_CUM.index, SMB_CUM.values)
plt.plot(HML_CUM.index, HML_CUM.values)
plt.ylabel('Cumulative Return')
plt.legend(['SMB Portfolio Returns', 'HML Portfolio Returns']);
###Output
_____no_output_____
###Markdown
Computing Risk ExposureNow we can determine how exposed another return stream is to each of these factors. We can do this by running static or rolling linear regressions between our return stream and the factor portfolio returns. First we'll compute the active returns (returns - benchmark) of some random asset and then model that asset as a linear combination of our two factors. The more a factor contributes to the active returns, the more exposed the active returns are to that factor.
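A rolling version is a straightforward extension of the static regression shown next. Here is a minimal sketch, assuming the `df` of active and factor returns assembled in the cell below and a hypothetical 60-day window:
```
# Rolling factor exposures: re-fit the OLS on a sliding 60-day window
window = 60  # hypothetical window length in trading days
rolling_betas = pd.DataFrame(
    {t: regression.linear_model.OLS(
            df['R'].iloc[i - window:i],
            df[['F1', 'F2']].iloc[i - window:i]
        ).fit().params
     for i, t in enumerate(df.index) if i >= window}
).T
rolling_betas.columns = ['SMB beta', 'HML beta']
rolling_betas.plot(title='Rolling factor exposures');
```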
###Code
from quantrocket.master import get_securities
from quantrocket import get_prices
securities = get_securities(symbols=['MSFT', 'AAPL', 'YHOO', 'FB', 'TSLA'], vendors='usstock')
# Get returns data for our portfolio
portfolio = get_prices(
'usstock-1d-bundle',
data_frequency='daily',
sids=securities.index.tolist(),
fields='Close',
start_date=start_date,
end_date=end_date).loc['Close'].pct_change()[1:]
R = np.mean(portfolio, axis=1)
SPY = get_securities(symbols='SPY', vendors='usstock').index[0]
bench = get_prices(
'usstock-1d-bundle',
data_frequency='daily',
sids=SPY,
fields='Close',
start_date=start_date,
end_date=end_date).loc['Close'][SPY].pct_change()[1:]
# The excess returns of our active management, in this case just holding a portfolio of our one asset
active = R - bench
# Define a constant to compute intercept
constant = pd.Series(np.ones(len(active.index)), index=active.index)
df = pd.DataFrame({'R': active,
'F1': SMB.tz_localize(None),
'F2': HML.tz_localize(None),
'Constant': constant})
df = df.dropna()
# Perform linear regression to get the coefficients in the model
b1, b2 = regression.linear_model.OLS(df['R'], df[['F1', 'F2']]).fit().params
# Print the coefficients from the linear regression
print('Sensitivities of active returns to factors:\nSMB: %f\nHML: %f' % (b1, b2))
###Output
Sensitivities of active returns to factors:
SMB: -0.031226
HML: -0.068289
###Markdown
Using the formula from the start of the notebook, we can compute the factors' marginal contributions to active risk squared:
###Code
F1 = df['F1']
F2 = df['F2']
cov = np.cov(F1, F2)
ar_squared = (active.std())**2
fmcar1 = (b1*(b2*cov[0,1] + b1*cov[0,0]))/ar_squared
fmcar2 = (b2*(b1*cov[0,1] + b2*cov[1,1]))/ar_squared
print('SMB Risk Contribution:', fmcar1)
print('HML Risk Contribution:', fmcar2)
###Output
SMB Risk Contribution: 0.000310877026642
HML Risk Contribution: 0.00130904951642
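Summing the marginal contributions shows how much of the active variance the two factors explain jointly; under the usual assumption that factor and specific returns are uncorrelated, the remainder is asset-specific risk. A quick check:
```
# fraction of active risk squared explained by SMB and HML together
print('SMB + HML risk contribution:', fmcar1 + fmcar2)
```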
###Markdown
© Copyright Quantopian Inc.© Modifications Copyright QuantRocket LLCLicensed under the [Creative Commons Attribution 4.0](https://creativecommons.org/licenses/by/4.0/legalcode).Disclaimer Factor Risk ExposureBy Evgenia "Jenny" Nitishinskaya, Delaney Granizo-Mackenzie, and Maxwell Margenot. DISCLAIMERAs always, this analysis is based on historical data, and risk exposures estimated on historical data may or may not affect the exposures going forward. As such, computing the risk exposure of a factor is not enough. You must put confidence bounds on that risk exposure, and determine whether the risk exposure can even be modeled reasonably. For more information on this, please see our other lectures, especially Instability of Parameter Estimates. Using Factor Models to Determine Risk ExposureWe can use factor models to analyze the sources of risks and returns in portfolios. Recall that a factor model expresses the returns as$$R_i = a_i + b_{i1} F_1 + b_{i2} F_2 + \ldots + b_{iK} F_K + \epsilon_i$$By modelling the historical returns, we can see how much of them is due to speculation on different factors and how much to asset-specific fluctuations ($\epsilon_p$). We can also examine what sources of risk the portfolio is exposed to. In risk analysis, we often model active returns (returns relative to a benchmark) and active risk (standard deviation of active returns, also known as tracking error or tracking risk).For instance, we can find a factor's marginal contribution to active risk squared (FMCAR). For factor $j$, this is$$ \text{FMCAR}_j = \frac{b_j^a \sum_{i=1}^K b_i^a Cov(F_j, F_i)}{(\text{Active risk})^2} $$where $b_i^a$ is the portfolio's active exposure to factor $i$. This tells us how much risk we incur by being exposed to factor $j$, given all the other factors we're already exposed to.Fundamental factor models are often used to evaluate portfolios because they correspond directly to investment choices (e.g. whether we invest in small-cap or large-cap stocks, etc.). Below, we construct a model to evaluate a single asset; for more information on the model construction, check out the fundamental factor models notebook.We'll use the canonical Fama-French factors for this example, which are the returns of portfolios constructred based on fundamental factors. How many factors do you want?In the Arbitrage Pricing Theory lecture we mention that for predictive models you want fewer parameters. However, this doesn't quite hold for risk exposure. Instead of trying to not overfit a predictive model, you are looking for any possible risk factor that could be influencing your returns. Therefore it's actually safer to estimate exposure to many many risk factors to see if any stick. Anything left over in our $\alpha$ is risk exposure that is currently unexplained by the selected factors. You want your strategy's return stream to be all alpha, and to be unexplained by as many parameters as possible. If you can show that your historical returns have little to no dependence on many factors, this is very positive. Certainly some unrelated risk factors might have spurious relationships over time in a large dataset, but those are not likely to be consistent. SetupThe first thing we do is compute a year's worth of factor returns. **NOTE**The process for doing this is described in the Fundamental Factor Models lecture and uses pipeline. For more information please see that lecture.
###Code
import numpy as np
import statsmodels.api as sm
import scipy.stats as stats
from statsmodels import regression
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from zipline.pipeline import Pipeline
from zipline.pipeline.data import sharadar, EquityPricing
from zipline.pipeline.factors import CustomFactor, Returns
def make_pipeline():
"""
Create and return our pipeline.
We break this piece of logic out into its own function to make it easier to
test and modify in isolation.
In particular, this function can be copy/pasted into research and run by itself.
"""
pipe = Pipeline()
Fundamentals = sharadar.Fundamentals.slice(dimension='ARQ', period_offset=0)
# Add our factors to the pipeline
market_cap = Fundamentals.MARKETCAP.latest
# Raw market cap and book to price data gets fed in here
pipe.add(market_cap, "market_cap")
book_to_price = 1/Fundamentals.PB.latest
pipe.add(book_to_price, "book_to_price")
# We also get daily returns
returns = Returns(inputs=[EquityPricing.close], window_length=2)
pipe.add(returns, "returns")
# We compute a daily rank of both factors, this is used in the next step,
# which is computing portfolio membership.
market_cap_rank = market_cap.rank()
pipe.add(market_cap_rank, 'market_cap_rank')
book_to_price_rank = book_to_price.rank()
pipe.add(book_to_price_rank, 'book_to_price_rank')
# Build Filters representing the top and bottom 1000 stocks by our combined ranking system.
biggest = market_cap_rank.top(1000)
smallest = market_cap_rank.bottom(1000)
highpb = book_to_price_rank.top(1000)
lowpb = book_to_price_rank.bottom(1000)
# Don't return anything not in this set, as we don't need it.
pipe.set_screen(biggest | smallest | highpb | lowpb)
# Add the boolean flags we computed to the output data
pipe.add(biggest, 'biggest')
pipe.add(smallest, 'smallest')
pipe.add(highpb, 'highpb')
pipe.add(lowpb, 'lowpb')
return pipe
pipe = make_pipeline()
from zipline.research import run_pipeline
start_date = '2014-01-01'
end_date = '2015-01-01'
results = run_pipeline(pipe, start_date=start_date, end_date=end_date, bundle='usstock-1d-bundle')
R_biggest = results[results.biggest]['returns'].groupby(level=0).mean()
R_smallest = results[results.smallest]['returns'].groupby(level=0).mean()
R_highpb = results[results.highpb]['returns'].groupby(level=0).mean()
R_lowpb = results[results.lowpb]['returns'].groupby(level=0).mean()
SMB = R_smallest - R_biggest
HML = R_highpb - R_lowpb
###Output
_____no_output_____
###Markdown
How did each factor do over 2014?
###Code
SMB_CUM = np.cumprod(SMB+1)
HML_CUM = np.cumprod(HML+1)
plt.plot(SMB_CUM.index, SMB_CUM.values)
plt.plot(HML_CUM.index, HML_CUM.values)
plt.ylabel('Cumulative Return')
plt.legend(['SMB Portfolio Returns', 'HML Portfolio Returns']);
###Output
_____no_output_____
###Markdown
Computing Risk ExposureNow we can determine how exposed another return stream is to each of these factors. We can do this by running static or rolling linear regressions between our return stream and the factor portfolio returns. First we'll compute the active returns (returns - benchmark) of some random asset and then model that asset as a linear combination of our two factors. The more a factor contributes to the active returns, the more exposed the active returns are to that factor.
###Code
from quantrocket.master import get_securities
from quantrocket import get_prices
securities = get_securities(symbols=['MSFT', 'AAPL', 'YHOO', 'FB', 'TSLA'], vendors='usstock')
# Get returns data for our portfolio
portfolio = get_prices(
'usstock-1d-bundle',
data_frequency='daily',
sids=securities.index.tolist(),
fields='Close',
start_date=start_date,
end_date=end_date).loc['Close'].pct_change()[1:]
R = np.mean(portfolio, axis=1)
SPY = get_securities(symbols='SPY', vendors='usstock').index[0]
bench = get_prices(
'usstock-1d-bundle',
data_frequency='daily',
sids=SPY,
fields='Close',
start_date=start_date,
end_date=end_date).loc['Close'][SPY].pct_change()[1:]
# The excess returns of our active management, in this case just holding a portfolio of our one asset
active = R - bench
# Define a constant to compute intercept
constant = pd.Series(np.ones(len(active.index)), index=active.index)
df = pd.DataFrame({'R': active,
'F1': SMB.tz_localize(None),
'F2': HML.tz_localize(None),
'Constant': constant})
df = df.dropna()
# Perform linear regression to get the coefficients in the model
b1, b2 = regression.linear_model.OLS(df['R'], df[['F1', 'F2']]).fit().params
# Print the coefficients from the linear regression
print('Sensitivities of active returns to factors:\nSMB: %f\nHML: %f' % (b1, b2))
###Output
Sensitivities of active returns to factors:
SMB: -0.027129
HML: -0.047605
###Markdown
Using the formula from the start of the notebook, we can compute the factors' marginal contributions to active risk squared:
###Code
F1 = df['F1']
F2 = df['F2']
cov = np.cov(F1, F2)
ar_squared = (active.std())**2
fmcar1 = (b1*(b2*cov[0,1] + b1*cov[0,0]))/ar_squared
fmcar2 = (b2*(b1*cov[0,1] + b2*cov[1,1]))/ar_squared
print('SMB Risk Contribution:', fmcar1)
print('HML Risk Contribution:', fmcar2)
###Output
SMB Risk Contribution: 0.00022347420398894437
HML Risk Contribution: 0.000645267708587981
|
notebooks/Function_Testing.ipynb | ###Markdown
Testing Notebook
###Code
# test View
import torch
from torch import nn
# Torch uses NCHW
image_tensor = torch.randn(64, 3, 200, 200)
# define some layers
# In channel, out channel, kernel, stride
conv2d_depth = nn.Conv2d(3, 3*10, 3, 1, 1)
image_tensor.size()
###Output
_____no_output_____
###Markdown
depthwise conv increases the num channels
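Note that `conv2d_depth` above is a standard convolution that happens to have ten times as many output channels; a true depthwise convolution in PyTorch also needs `groups` set to the number of input channels. A minimal sketch:
```
# depthwise conv: with groups=3 each input channel gets its own 10 filters,
# so the channel count still grows from 3 to 30
depthwise = nn.Conv2d(3, 3 * 10, kernel_size=3, stride=1, padding=1, groups=3)
depthwise(image_tensor).size()  # torch.Size([64, 30, 200, 200])
```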
###Code
conv2d_depth(image_tensor).size()
conv2d_2 = nn.Conv2d(3, 3*10, 7, 1, 1)
###Output
_____no_output_____
###Markdown
due to the kernel / stride and padding the image dimensions can change
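The spatial size follows the usual convolution formula, out = (in + 2 * padding - kernel) // stride + 1; a quick sketch to verify it against `conv2d_2` (kernel 7, stride 1, padding 1):
```
def conv_out_size(in_size, kernel, stride=1, padding=0):
    # standard output-size formula for a 2d convolution
    return (in_size + 2 * padding - kernel) // stride + 1

conv_out_size(200, kernel=7, stride=1, padding=1)  # 196, matching the size check below
```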
###Code
conv2d_2(image_tensor).size()
# Testing the view function
image_tensor.view(image_tensor.size(0), -1).size()
# Quick Resnet print
import torchvision.models as models
resnet18 = models.resnet18()
print(resnet18)
import torch
import torchvision
import torchprof
model = torchvision.models.alexnet(pretrained=False).cuda()
x = torch.rand([1, 3, 224, 224]).cuda()
with torchprof.Profile(model, use_cuda=True) as prof:
model(x)
print(prof.display(show_events=False)) # equivalent to `print(prof)` and `print(prof.display())`
###Output
Module | Self CPU total | CPU total | CUDA total | Occurrences
---------------|----------------|-----------|------------|------------
AlexNet | | | |
├── features | | | |
│├── 0 | 359.165ms | 1.437s | 1.437s | 1
│├── 1 | 959.043us | 1.714ms | 1.875ms | 1
│├── 2 | 231.398us | 504.635us | 574.112us | 1
│├── 3 | 785.574us | 3.403ms | 3.620ms | 1
│├── 4 | 80.059us | 99.289us | 100.064us | 1
│├── 5 | 195.049us | 433.918us | 436.928us | 1
│├── 6 | 881.273us | 3.541ms | 3.526ms | 1
│├── 7 | 63.520us | 78.270us | 78.144us | 1
│├── 8 | 732.715us | 2.950ms | 3.202ms | 1
│├── 9 | 74.500us | 93.040us | 94.048us | 1
│├── 10 | 446.876us | 1.797ms | 1.834ms | 1
│├── 11 | 67.679us | 84.399us | 84.704us | 1
│└── 12 | 171.409us | 380.277us | 376.192us | 1
├── avgpool | 165.409us | 368.668us | 367.328us | 1
└── classifier | | | |
├── 0 | 634.294us | 1.349ms | 780.512us | 1
├── 1 | 1.960ms | 2.021ms | 2.289ms | 1
├── 2 | 79.449us | 100.449us | 100.000us | 1
├── 3 | 200.019us | 471.578us | 467.008us | 1
├── 4 | 209.079us | 265.389us | 380.192us | 1
├── 5 | 63.530us | 78.220us | 79.392us | 1
└── 6 | 200.908us | 255.868us | 278.592us | 1
|
plotting/MNIST_DBM.ipynb | ###Markdown
Load results Load or create evaluation results of initial model
###Code
n_iter = 10
n_checkpoint = 2
perc = 10
res_path = os.path.join('..', 'models', 'MNIST')
initial_path = os.path.join(res_path,'initial')
assert os.path.exists(initial_path), "Model does not exist yet. Train initial DBM first by running pruning/MNIST_Baselines.py - requires GPU"
dbm = get_initial_DBM(initial_path) # loads initial DBM.
params = dbm.get_params()
n_vis = params['n_visible_']
nh1 = initial_units_l1 = params['n_hiddens_'][0]
nh2 = initial_units_l2 = params['n_hiddens_'][1]
rec_fields = dbm.get_tf_params(scope='masks')['rf_mask']
initial_n_weights_l1 = len(rec_fields.flatten()[rec_fields.flatten()==1])
initial_n_weights_l2 = int(nh1 * nh2)
try:
    initial_probs_win_digits = np.load(os.path.join(initial_path,'ProbsWinDig_Initial_Samples.npy'))[1,:]
    initial_count_digits = np.load(os.path.join(initial_path,'ProbsWinDig_Initial_Samples.npy'))[2,:]
    initial_logreg = np.load(os.path.join(initial_path, 'Accuracy_hidden_layer_reps.npy'))
except IOError:
    evaluate_initial_DBM(dbm, initial_path)
    initial_probs_win_digits = np.load(os.path.join(initial_path,'ProbsWinDig_Initial_Samples.npy'))[1,:]
    initial_count_digits = np.load(os.path.join(initial_path,'ProbsWinDig_Initial_Samples.npy'))[2,:]
    initial_logreg = np.load(os.path.join(initial_path, 'Accuracy_hidden_layer_reps.npy'))
del dbm  # release the model only after the fallback branch no longer needs it
###Output
Loading RBM #1 ...
No pruning, array of ones is initialized
Loading RBM #2 ...
No pruning, array of ones is initialized
Loading DBM ...
INFO:tensorflow:Restoring parameters from ../models/MNIST/initial/MNIST_DBM_Layer1/model
INFO:tensorflow:Restoring parameters from ../models/MNIST/initial/MNIST_DBM_Layer2/model
INFO:tensorflow:Restoring parameters from ../models/MNIST/initial/MNIST_DBM_Layer1/model
INFO:tensorflow:Restoring parameters from ../models/MNIST/initial/MNIST_DBM_Layer2/model
INFO:tensorflow:Restoring parameters from ../models/MNIST/initial/MNIST_InitialDBM/model
###Markdown
Load or create MNIST baselines
###Code
if not os.path.exists(os.path.join(res_path, "logreg_MNIST.pkl")):
_ = create_baseline_classifier(res_path)
test_probs_digits = np.load(os.path.join(res_path,'ProbsWinDig_TestDigits.npy'))[1,:]
test_count_digits = np.load(os.path.join(res_path,'ProbsWinDig_TestDigits.npy'))[2,:]
mean_test_prob = np.mean(test_probs_digits)
random_probs_digits = np.load(os.path.join(res_path,'ProbsWinDig_Random.npy'))[1,:]
random_count_digits = np.load(os.path.join(res_path,'ProbsWinDig_Random.npy'))[2,:]
mean_random_prob = np.mean(random_probs_digits)
acc_rawdigits_logreg = np.load(os.path.join(res_path, 'Accuracy_TestDigits.npy'))
###Output
_____no_output_____
###Markdown
Load results of pruned models
###Code
exp_labels=['Variance FI', 'Heuristic FI', r'|w|', 'Random', 'Anti-FI']
exp_names = [f'varianceFI_{perc}perc_{n_iter}sessions',
f'heuristicFI_{perc}perc_{n_iter}sessions',
f'w_{perc}perc_{n_iter}sessions',
f'random_{perc}perc_{n_iter}sessions',
f'antiFI_{perc}perc_{n_iter}sessions']
exp_colors = GATHER
for i, exp in enumerate(exp_names):
assert os.path.exists(os.path.join(res_path, exp)), f"Pruning experiment does not exist yet. Run {exp_labels[i]} pruning script"
###Output
_____no_output_____
###Markdown
Load encoding performance results of pruned models
###Code
acc_logreg = []
n_act_weights_l1 = []
n_act_weights_l2 = []
n_hid_units_L1 = []
n_hid_units_L2 = []
for exp in range(len(exp_names)):
temp_link = res_path+'/{}/res'.format(exp_names[exp])
acc_logreg.append(np.load(os.path.join(temp_link,'AccLogReg.npy')).flatten())
n_act_weights_l1.append(np.load(os.path.join(temp_link, 'n_active_weights_L1.npy')).flatten())
n_act_weights_l2.append(np.load(os.path.join(temp_link, 'n_active_weights_L2.npy')).flatten())
n_hid_units_L1.append(np.load(os.path.join(temp_link, 'n_hid_units_L1.npy')).flatten())
n_hid_units_L2.append(np.load(os.path.join(temp_link, 'n_hid_units_L2.npy')).flatten())
# add the initial performance
acc_logreg[exp] = np.insert(acc_logreg[exp], 0, initial_logreg)
n_hid_units_L1[exp] = np.insert(n_hid_units_L1[exp],0, initial_units_l1)
n_hid_units_L2[exp] = np.insert(n_hid_units_L2[exp], 0, initial_units_l2)
n_act_weights_l1[exp] = np.insert(n_act_weights_l1[exp], 0, initial_n_weights_l1)
n_act_weights_l2[exp] = np.insert(n_act_weights_l2[exp], 0, initial_n_weights_l2)
# convert lists to arrays
acc_logreg = np.asarray(acc_logreg)[:,:n_iter*2+1]
n_act_weights_l1 = np.asarray(n_act_weights_l1)[:,:n_iter*2+1]
n_act_weights_l2 = np.asarray(n_act_weights_l2)[:,:n_iter*2+1]
n_hid_units_L1 = np.asarray(n_hid_units_L1)[:,:n_iter*2+1]
n_hid_units_L2 = np.asarray(n_hid_units_L2)[:,:n_iter*2+1]
###Output
_____no_output_____
###Markdown
Load generative performance results of pruned models
###Code
diversity_digits_all = [[] for i in range(len(exp_names))]
mean_prob_all = [[] for i in range(len(exp_names))]
min_prob_all = [[] for i in range(len(exp_names))]
max_prob_all = [[] for i in range(len(exp_names))]
entropy_all = [[] for i in range(len(exp_names))]
probs_win_digits_all = [[] for i in range(len(exp_names))]
count_digits_all = [[] for i in range(len(exp_names))]
which_digit_all = [[] for i in range(len(exp_names))]
for exp in range(len(exp_names)):
temp_link = res_path+'/{}/res'.format(exp_names[exp])
probs_win_digits = []
count_digits = []
probs_win_digits.append(initial_probs_win_digits)
count_digits.append(initial_count_digits)
entropy_all[exp].append(entropy(initial_count_digits))
count_digits_all[exp].append(initial_count_digits)
which_digit_all[exp].append(range(10))
probs_win_digits_all[exp].append(initial_probs_win_digits)
for sess in np.arange(1,n_iter+1):
for checkpoint in np.arange(1,n_checkpoint+1):
which_digit_all[exp].append(np.load(os.path.join(temp_link, 'ProbsWinDig_sess{}_checkpoint{}.npy'.format(sess,checkpoint)))[0,:])
temp = np.load(os.path.join(temp_link, 'ProbsWinDig_sess{}_checkpoint{}.npy'.format(sess,checkpoint)))[1,:]
count = np.load(os.path.join(temp_link, 'ProbsWinDig_sess{}_checkpoint{}.npy'.format(sess,checkpoint)))[2,:]
entropy_all[exp].append(entropy(count))
probs_win_digits.append(temp)
count_digits.append(count)
count_digits_all[exp].append(count)
probs_win_digits_all[exp].append(temp)
probs_win_digits = np.asarray(probs_win_digits)
count_digits = np.asarray(count_digits)
mean_prob = np.zeros(len(probs_win_digits))
max_prob = np.zeros(len(probs_win_digits))
min_prob = np.zeros(len(probs_win_digits))
for i in range(len(probs_win_digits)):
mean_prob[i] = np.mean(probs_win_digits[i], axis=0)
max_prob[i] = probs_win_digits[i].max()
min_prob[i] = probs_win_digits[i].min()
# we only saved these two times
diversity_digits = np.zeros(len(count_digits))
for i in range(len(count_digits)):
if len(count_digits[i]) < 10: # if one category has a zero count
min_counts = 0
else:
min_counts = count_digits[i].min()
max_counts = count_digits[i].max()
diversity_digits[i] = min_counts/max_counts
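        # e.g. counts [550, 620, ..., 480] give diversity 480/620 = 0.77; a missing digit class gives 0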
diversity_digits_all[exp].append(diversity_digits)
mean_prob_all[exp].append(mean_prob)
min_prob_all[exp].append(min_prob)
max_prob_all[exp].append(max_prob)
entropy_all = np.asarray(entropy_all)
diversity_digits_all = np.asarray(diversity_digits_all)[:,0,:]
mean_prob_all = np.asarray(mean_prob_all)[:,0,:]
min_prob_all = np.asarray(min_prob_all)[:,0,:]
max_prob_all = np.asarray(max_prob_all)[:,0,:]
###Output
_____no_output_____
###Markdown
Figure: Final visible layers. Retrieve the final visible layers
###Code
indices_unconnected_v = []
for exp in range(len(exp_names)):
temp_path = os.path.join(res_path, f'{exp_names[exp]}', 'res')
if not os.path.exists(os.path.join(temp_path, 'final_indices_of_lost_visibles.npy')):
model_path = os.path.join(os.path.join(res_path, f'{exp_names[exp]}', f'MNIST_PrunedDBM_both_Sess{n_iter}'))
dbm = DBM.load_model(model_path+'/')
prune_mask = dbm.get_tf_params(scope='masks')['prune_mask']
        out_synapses = np.sum(prune_mask, axis=1) # sum of outgoing synapses from the visible layer
current_unconnected_v = sum(out_synapses==0)
ind = np.argwhere(out_synapses == 0)
if len(ind)==0:
indices_unconnected_v.append(None)
else:
indices_unconnected_v.append(ind)
np.save(os.path.join(temp_path, 'final_indices_of_lost_visibles.npy'), ind)
del dbm
else:
indices_unconnected_v.append(np.load(os.path.join(temp_path, 'final_indices_of_lost_visibles.npy')))
fig = plt.figure(figsize=(12,6))#constrained_layout=True
gs = gridspec.GridSpec(1, len(exp_names))
for i in range(len(exp_names)):
ax = fig.add_subplot(gs[0, i])
visible = np.ones(400)
    if indices_unconnected_v[i] is not None:  # None means no visible unit lost all of its connections
        visible[indices_unconnected_v[i]] = 0
im = ax.imshow(visible.reshape(20,20), cmap=plt.cm.binary_r)
ax.set_xticks(np.arange(0.5,20), [])
ax.set_yticks(np.arange(0.5,20), [])
ax.xaxis.set_ticks_position('none')
ax.yaxis.set_ticks_position('none')
ax.set_yticklabels([])
ax.set_xticklabels([])
title= exp_labels[i]
ax.set_title(title, fontsize=fs+2)
ax.grid()
plt.tight_layout()
plt.show()
###Output
_____no_output_____
###Markdown
Figure: Exemplary visible samples. Retrieve the final visible samples
###Code
samples_v = []
n_samples = 25
for exp in range(len(exp_names)):
temp_path = os.path.join(res_path, f'{exp_names[exp]}', 'res')
if not os.path.exists(os.path.join(temp_path, f'final_visible_samples_n{n_samples}.npy')):
model_path = os.path.join(os.path.join(res_path, f'{exp_names[exp]}', f'MNIST_PrunedDBM_both_Sess{n_iter}'))
dbm = DBM.load_model(model_path+'/')
sample = dbm.sample_gibbs(n_gibbs_steps=10, save_model=True, n_runs=np.max([n_samples, 1000]))
sample_v = sample[:,:n_vis] # extract visible samples
random_indices = random.sample(range(sample_v.shape[0]), n_samples)
random_sample_v = sample_v[random_indices, :].astype('bool') # randomly select some visible samples
samples_v.append(random_sample_v)
np.save(os.path.join(temp_path, f'final_visible_samples_n{n_samples}.npy'), random_sample_v)
del dbm
else:
samples_v.append(np.load(os.path.join(temp_path, f'final_visible_samples_n{n_samples}.npy')))
fig = plt.figure(figsize=(14, 3))
outer = gridspec.GridSpec(1, len(exp_names))
for j in range(len(exp_names)):
inner = gridspec.GridSpecFromSubplotSpec(int(math.sqrt(n_samples)), int(math.sqrt(n_samples)),
subplot_spec=outer[j], wspace=0.1, hspace=0.1)
for i in range(int(math.sqrt(n_samples))*int(math.sqrt(n_samples))):
if i < len(samples_v[j].T):
img = samples_v[j][i].reshape((int(math.sqrt(n_vis)),int(math.sqrt(n_vis))))
ax = plt.Subplot(fig, inner[i])
for d in ('bottom', 'top', 'left', 'right'):
ax.spines[d].set_linewidth(1.)
if i == 2:
ax.set_title(exp_labels[j], fontsize=16)
ax.axis('off')
ax.imshow(img, cmap =plt.cm.binary)
fig.add_subplot(ax)
###Output
_____no_output_____
###Markdown
Figure: Encoding performance
###Code
PostPrune = False # whether to show performance immediately after pruning
show_every = 2# set to 1 if you want to display pruned & retrained in the same plot
fig = plt.figure(figsize=(12,6))#constrained_layout=True
gs = gridspec.GridSpec(2, len(exp_names))
ax1 = fig.add_subplot(gs[0:2, 0:3])
ax1.axhline(1-acc_rawdigits_logreg, color='black', linestyle='--', label='Performance on raw digits')
ax1.text(-0.1, 1.1, string.ascii_uppercase[0], transform=ax1.transAxes,
size=20, weight='bold')
for exp in range(len(exp_names)):
ax1.plot(n_act_weights_l2[exp][0::show_every]+n_act_weights_l1[exp][0::show_every], 1-acc_logreg[exp][0::show_every], c=exp_colors[exp],marker=".")
if PostPrune:
ax1.plot(n_act_weights_l2[exp][1::show_every]+n_act_weights_l1[exp][1::show_every], 1-acc_logreg[exp][1::show_every], c=exp_colors[exp],marker="v", linestyle='None')
ax1.set_xscale('log')
ax1.set_yscale('log')
ax1.yaxis.set_major_formatter(ScalarFormatter())
ax1.set_ylim(None,1)
ax1.set_xlabel(r'$n_w$', fontsize=fs+2)
ax1.invert_xaxis()
ax1.set_ylabel('Classification Error', fontsize=fs+2)
for exp in range(len(exp_names)):
ax1.plot([-5], [0], marker='o', markersize=10, color=exp_colors[exp], label=exp_labels[exp], linestyle='None')
ax1.legend(loc='best', fontsize=fs)
ax1.grid(True, which='both')
ax2 = fig.add_subplot(gs[0, 3:])
ax2.text(-0.1, 1.25, string.ascii_uppercase[1], transform=ax2.transAxes,
size=20, weight='bold')
for exp in range(len(exp_names)):
ax2.plot(n_act_weights_l1[exp], n_hid_units_L1[exp], c=exp_colors[exp], marker=".")
ax2.invert_xaxis()
ax2.set_xscale('log')
ax2.set_xlabel(r'$n_{w_{\mathbf{h}^1}}$', fontsize=fs+2)
ax2.set_ylabel(r'$n_{\mathbf{h}^1}$', fontsize=fs+2)
ax2.grid(True, which='both')
ax3 = fig.add_subplot(gs[1, 3:])
for exp in reversed(range(len(exp_names))):
ax3.plot(n_act_weights_l2[exp], n_hid_units_L2[exp], c=exp_colors[exp], marker=".")
ax3.invert_xaxis()
ax3.set_xscale('log')
ax3.set_xlabel(r'$n_{w_{\mathbf{h}^2}}$', fontsize=fs+2)
ax3.set_ylabel(r'$n_{\mathbf{h}^2}$', fontsize=fs+2)
ax3.grid(True, which='both')
ax3.set_yticks([0,250,500,676])
plt.tight_layout()
plt.show()
###Output
_____no_output_____
###Markdown
Figure: digit quality during pruning
###Code
fig = plt.figure(figsize=(12,4))#constrained_layout=True
plt.rcParams.update({'font.size': 14})
gs = gridspec.GridSpec(2, 3)
ax1 = fig.add_subplot(gs[0:2, 0:2])
for exp in range(len(exp_names)):
ax1.plot(n_act_weights_l2[exp][0::show_every]+n_act_weights_l1[exp][0::show_every], mean_prob_all[exp][0::show_every], c=exp_colors[exp], marker='.')
if PostPrune:
ax1.plot(n_act_weights_l2[exp][1::show_every]+n_act_weights_l1[exp][1::show_every], mean_prob_all[exp][1::show_every], c=exp_colors[exp], marker='v', linestyle='None')
for exp in range(len(exp_names)):
ax1.plot([-5], [0], marker='o', markersize=10, color=exp_colors[exp], label=exp_labels[exp], linestyle='None')
ax1.set_xscale('log')
ax1.text(-0.1, 1.1, string.ascii_uppercase[0], transform=ax1.transAxes,
size=15, weight='bold')
ax1.invert_xaxis()
ax1.set_title('Digit quality during pruning')
ax1.set_ylabel('Probability of winning class', fontsize=fs-2)
ax1.axhline(mean_test_prob, label='Test digits', color='black', linestyle='--')
ax1.axhline(mean_random_prob, label='Random patterns', color='grey', linestyle='--')
ax1.set_ylim(0.45,1.)
ax1.set_xlabel(r'$n_w$', fontsize=fs)
ax1.set_yticks(np.arange(0.5, 1.01, step=0.1))
ax1.grid(True, which='both')
ax1.legend(loc='upper right', fontsize=fs-2, bbox_to_anchor=(1.45, 1.05))
plt.tight_layout()
plt.show()
###Output
_____no_output_____
###Markdown
Figure: digit diversity during pruning
###Code
fig = plt.figure(figsize=(5,8))#constrained_layout=True
gs = gridspec.GridSpec(len(exp_names), 1)
for exp in range(len(exp_names)):
ax = fig.add_subplot(gs[exp, 0])
ax.set_ylabel('% of samples', fontsize=fs-2)
for number in range(10):
ax.plot(n_act_weights_l2[exp][0::show_every]+n_act_weights_l1[exp][0::show_every], count_digit_complete[exp,0::show_every,number].astype(float)/600, marker='${}$'.format(int(number)), c=plt.cm.Set3.colors[number])
if PostPrune:
ax.plot(n_act_weights_l2[exp][1::show_every]+n_act_weights_l1[exp][1::show_every], count_digit_complete[exp,1::show_every,number].astype(float)/600, marker='${}$'.format(int(number)), linestyle='None', c=plt.cm.Set3.colors[number])
ax.axhline(10.0, color='black', linestyle='--')
ax.invert_xaxis()
ax.grid(which='both')
ax.set_xscale('log')
ax.set_yscale('symlog')
ax.yaxis.set_major_formatter(ScalarFormatter())
ax.set_ylim(0,100)
ax.set_title(exp_labels[exp], fontsize=fs-2)
if exp==len(exp_names)-1:
ax.set_xlabel(r'$n_w$', fontsize=fs-2)
plt.tight_layout()
plt.show()
###Output
_____no_output_____ |
LibroDeClasesDigital_ValidarJSON_V02.ipynb | ###Markdown
Section: LOAD WORKING LIBRARIES---
###Code
!sudo apt install sqlcipher libsqlcipher0 libsqlcipher-dev -q -y
!sudo -H pip3 install pysqlcipher3
!pip install pycryptodome
!pip install validate_email
!pip install pyDNS
!apt-get install python3-dns
import re
from datetime import datetime
import pytz
from time import time # import the time function to measure elapsed time
import os
import pandas as pd
from pandas.io.json import json_normalize
import numpy as np
import json
from zipfile import ZipFile
import requests, io
from sqlalchemy import create_engine
import csv
import string, random
import Crypto
from Crypto.PublicKey import RSA
from Crypto.Cipher import PKCS1_OAEP
from Crypto import Random
import ast
#https://www.novixys.com/blog/using-aes-encryption-decryption-python-pycrypto/
#----------------------------------------------------------------------------
# Encrypts the SQLCipher database key using the public key of the
# Superintendencia de Educación.
#----------------------------------------------------------------------------
def encryptTextUsingSiePublicKey(txt):
url_to_pem_file = "https://static.superintendencia-educacion.cl/KP/clave.pub.txt"
r = requests.get(url_to_pem_file, verify=False, stream=True)
path_to_public_pem_file = io.BytesIO(r.content).read()
publickey = RSA.importKey(path_to_public_pem_file)
encryptor = PKCS1_OAEP.new(publickey)
encrypted = encryptor.encrypt(bytes(txt,"utf-8"))
return encrypted
encryptTextUsingSiePublicKey('El Mundo')
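# For reference only: decrypting requires the matching RSA private key, which is held by the
# Superintendencia de Educación. The commented-out function below is a hypothetical sketch
# (the key path is an assumption and is not available from this notebook).
# def decryptTextUsingSiePrivateKey(encrypted, path_to_private_pem='sie_private.pem'):
#     privatekey = RSA.importKey(open(path_to_private_pem, 'rb').read())
#     decryptor = PKCS1_OAEP.new(privatekey)
#     return decryptor.decrypt(encrypted).decode('utf-8')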
from itertools import cycle
def validarRut(rut):
if(rut is not None):
dv = ''.join([c for c in list(rut.upper()) if c.isalpha()])
aux = ''.join([c for c in list(rut) if c.isdigit()])
if(dv == ''):
dv = aux[-1:]
aux = aux[:-1]
revertido = map(int, reversed(str(aux)))
factors = cycle(range(2,8))
s = sum(d * f for d, f in zip(revertido,factors))
res = (-s)%11
if ((str(res) == dv) or (dv=="K" and res==10)):
return True
return False
validarRut('22171685-K')
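# Quick sanity checks (synthetic RUTs chosen to match the modulo-11 rule, not real identities):
# validarRut('12345678-5')  # True  -> check digit agrees
# validarRut('12345678-9')  # False -> check digit does not agree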
import requests
from lxml import html
def nameFromRUN(r,d):
root = 'https://zeus.sii.cl/cvc_cgi/nar/nar_consulta'
url = f'{root}?ACEPTAR=consulta&RUT={r}&DV={d}'
pageContent=requests.get(url)
tree = html.fromstring(pageContent.content)
return tree.xpath('//*/tr[1]/td/*/text()')[1]
nameFromRUN('1','9')
from validate_email import validate_email
is_valid = validate_email('[email protected]')
print(is_valid)
test = """
def openConnection(DB_NAME,secPhrase):
global dfLog,_sep,_encode
_r = True
try:
params = 'cipher=aes-256-cfb&kdf_iter=256000&cipher_page_size=4096'
engine = create_engine(f"sqlite+pysqlcipher://:{secPhrase}@/{DB_NAME}?{params}")
engine.execute("PRAGMA cipher_compatibility = 4;")
conn = engine.connect()
rows = conn.execute("SELECT * FROM Person;")
if(not rows.returns_rows):
raise Exception("Error al leer los datos de la BD")
else:
print(rows)
except Exception as e:
_t = "ERROR COMMIT: "+str(e)
print(_t)
_r = False
finally:
conn.close()
return _r
openConnection('/content/ceds-nds-v7_1_encryptedD4.db','test')
"""
###Output
_____no_output_____
###Markdown
Section: FUNCTION DEFINITIONS---
###Code
#----------------------------------------------------------------------------
# STEP 20 - Transform the JSON file into CSV files, one per table.
#----------------------------------------------------------------------------
#----------------------------------------------------------------------------
# Transforms the JSON file into a pandas DataFrame with all of its columns.
# Adds any missing columns.
#----------------------------------------------------------------------------
def jsonToDataframe(elem, jsonData):
global dfLog
data=json_normalize(jsonData[elem['JSONGroupName']],elem['TableName'])
df = pd.DataFrame(data, columns=elem['ColumnList'])
df.drop_duplicates(inplace=True)
if(elem['TableName']=='Person'):
print(df)
_t = f"Tabla: {elem['TableName']} cargada como DataFrame exitosamente"
print(_t); dfLog = dfLog.append(pd.Series({'result': _t}), ignore_index=True)
return df
def validaLosTiposDeDatos(df, elem):
global dfLog
    # Mapping of SQL data types -> Python
_dTypes = {
"bit": [pd.api.types.is_bool_dtype,np.int_,"bool"],
"char": [pd.api.types.is_string_dtype,np.unicode_,"str"],
"nchar": [pd.api.types.is_string_dtype,np.unicode_,"str"],
"nvarchar": [pd.api.types.is_string_dtype,np.unicode_,"str"],
"nvarcharmax": [pd.api.types.is_string_dtype,np.unicode_,"str"],
"varchar": [pd.api.types.is_string_dtype,np.unicode_,"str"],
"bigint": [pd.api.types.is_integer_dtype,np.int_,"int64"],
"int": [pd.api.types.is_integer_dtype,np.int_,"int32"],
"smallint": [pd.api.types.is_integer_dtype,np.int_,"int64"],
"tinyint": [pd.api.types.is_integer_dtype,np.int_,"int64"],
"float": [pd.api.types.is_float_dtype,np.float_,"float64"],
"real": [pd.api.types.is_float_dtype,np.float_,"float64"],
"decimal": [pd.api.types.is_float_dtype,np.float_,"float64"],
"numeric": [pd.api.types.is_float_dtype,np.float_,"float64"],
"varbinary": ['bytes'],
"binary": ['raw'],
"date": [pd.api.types.is_string_dtype,np.unicode_,"str"],
"time": [pd.api.types.is_string_dtype,np.unicode_,"str"],
"datetime": [pd.api.types.is_string_dtype,np.unicode_,"str"]}
_columnNames = elem['ColumnList']
_dataTypes=elem['DataType']
for idx,dt in enumerate(_dataTypes):
_tipo = ''.join([s for s in list(dt) if s.isalpha()])
field = _columnNames[idx]
fn = _dTypes[_tipo][1]
if(_tipo=='bit'):
df[field] = df[field].astype(fn, errors='ignore')
elif(_tipo=='date'):
df[field].replace('0000-00-00','',inplace=True)
_t = f"Tipos de datos de la tabla {elem['TableName']} verificados con éxito";
print(_t); dfLog = dfLog.append(pd.Series({'result': _t}), ignore_index=True)
return df
#----------------------------------------------------------------------------
# Converts the JSON file into several CSV files, one per table of the model.
# One element is generated per table in the JSON, e.g.:
# {'Column': ['OrganizationCalendarId','OrganizationId','CalendarCode','CalendarDescription','CalendarYear'],
#  'DataType': ['int', 'int', 'nvarchar(30)', 'nvarchar(60)', 'nchar(4)'],
#  'JSONGroup': '_Calendarios',
#  'SIERequired': ['YES', 'YES', 'NO', 'YES', 'NO'],
#  'Table': 'OrganizationCalendar'
# }
#----------------------------------------------------------------------------
def parseJsonToCSVs(path_to_zip_file,path_to_dir_csv_file):
global dfLog, _encode, _sep
xd = cargarPlanillaConDatosDelModelo()
jsonData,jsonFileName = readJsonData(path_to_zip_file)
for row in list(xd[xd["JSONGroup"].notnull()].groupby(["JSONGroup","Table"])):
elem = {
"JSONGroupName":row[0][0],
"TableName":row[0][1],
"ColumnList":list(row[1]["Column"]),
"DataType": list(row[1]["Data Type"]),
"SIERequired": list(row[1]["SIERequired"])
}
df = jsonToDataframe(elem,jsonData)
df = validaLosTiposDeDatos(df, elem)
_fileName = path_to_dir_csv_file+elem['TableName']+'.csv'
_c = str(df.count()[0])
print('Guardando : '+_fileName+' -> '+_c+' registros procesados.\n')
df.to_csv(_fileName,sep=_sep,encoding=_encode,index=False)
dfLog = dfLog.append(pd.Series({'json':jsonFileName,
'csv': elem['TableName']+'.csv',
'#savingRows':_c,
'resultSaving':'OK'}), ignore_index=True)
_t = 'Archivo JSON completamente transformado.'
print(_t);dfLog = dfLog.append(pd.Series({'result': _t}), ignore_index=True)
return True
def eliminarDuplicados(mylist):
seen = set()
newlist = []
for item in mylist:
t = tuple(item)
if t not in seen:
newlist.append(item)
seen.add(t)
return newlist
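# Example of the order-preserving de-duplication above (rows become tuples, so they must be hashable):
# eliminarDuplicados([[1, 'a'], [1, 'a'], [2, 'b']])  # -> [[1, 'a'], [2, 'b']]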
def crearCSV(jsonFileName, fileName,TableName,columnList,unique_records):
#https://pymotw.com/2/csv/
global dfLog,_sep
try:
csv.register_dialect('escaped', delimiter=_sep, lineterminator ='\n',
skipinitialspace=0, escapechar=None, doublequote=True,
quoting=csv.QUOTE_MINIMAL, quotechar='"')
_c = len(unique_records)
_f = open(fileName, 'w', encoding=_encode)
dialect = csv.get_dialect("escaped")
writer = csv.writer(_f, dialect=dialect)
writer.writerow(columnList)
writer.writerows(unique_records)
_t = f"Table {TableName} -> {_c} registros procesados."
except Exception as e:
_t = f"ERROR:'{str(e)}'. Tabla:'{TableName}'. {_c} registros perdidos."
finally:
_f.close()
print(_t);
dfLog = dfLog.append(pd.Series({'json':jsonFileName,
'csv': fileName,
'#savingRows':_c,
'resultSaving':_t}),
ignore_index=True)
return True
def readJsonData(path_to_zip_file):
global dfLog
    # Decompress the ZIP file contents and load them into memory
if(path_to_zip_file):
with ZipFile(path_to_zip_file, 'r') as zip_ref:
zip_ref.extractall('./')
_t=f'Archivo ZIP "{path_to_zip_file}" descomprimido con éxito'; print(_t)
dfLog = dfLog.append(pd.Series({'result': _t}), ignore_index=True)
for file in zip_ref.namelist():
_t=f"Trabajando sobre archivo: '{file}'"; print(_t)
dfLog = dfLog.append(pd.Series({'result': _t}), ignore_index=True)
with open(file, mode='r', encoding="utf-8") as jsonfile:
jsonData = json.load(jsonfile)
_t=f"Archivo '{jsonfile}' leído sin inconvenientes\n"; print(_t)
dfLog = dfLog.append(pd.Series({'result': _t}), ignore_index=True)
jsonfile.close()
os.remove(file)
return jsonData,file
def cargarPlanillaConDatosDelModelo():
global dfLog
    # Load the spreadsheet with every table and column of the https://ceds.ed.gov model
idFile = '1R8iEWpa2-buQijoI9NzniCbyZm5-zZcN'
url = f'http://drive.google.com/uc?export=download&id={idFile}'
xd = pd.read_excel(url,'NDS Columns')
_t=f'Planilla {url} cargada satisfactoriamente'; print(_t)
dfLog = dfLog.append(pd.Series({'result': _t}), ignore_index=True)
return xd;
def leerTodosLosRegistrosDeLaTalaDesdeArchivoJson(jsonData,elem):
    # Mapping of SQL data types -> Python
records = []
for grupo in jsonData[elem['JSONGroupName']]:
for tbl in grupo[elem['TableName']]:
record = []
for indice,col in enumerate(elem['ColumnList']):
dt = elem['DataType'][indice]
_tipo = ''.join([s for s in list(dt) if s.isalpha()])
value = tbl.get(col) if (tbl.get(col) is not None) else ''
if(_tipo in {'bit', 'bigint', 'int', 'smallint', 'tinyint'} and value!=''):
value = int(value)
elif(_tipo=='date'):
value = str(value).replace('0000-00-00','')
record.append(value)
records.append(record)
return eliminarDuplicados(records)
def readJsonSaveCSV(path_to_zip_file,path_to_dir_csv_file):
global dfLog, _encode, _sep
xd = cargarPlanillaConDatosDelModelo()
jsonData,jsonFileName = readJsonData(path_to_zip_file)
for row in list(xd[xd["JSONGroup"].notnull()].groupby(["JSONGroup","Table"])):
elem = {
"JSONGroupName":row[0][0],
"TableName":row[0][1],
"ColumnList":list(row[1]["Column"]),
"DataType": list(row[1]["Data Type"]),
"SIERequired": list(row[1]["SIERequired"])
}
records = leerTodosLosRegistrosDeLaTalaDesdeArchivoJson(jsonData,elem)
crearCSV(jsonFileName,path_to_dir_csv_file+elem['TableName']+'.csv',
elem['TableName'],
elem['ColumnList'],
records)
_t = 'Archivo JSON completamente transformado.'
print(_t);dfLog = dfLog.append(pd.Series({'result': _t}), ignore_index=True)
return True
#----------------------------------------------------------------------------
# STEP 30 - Save the elements as CSV files
#----------------------------------------------------------------------------
#----------------------------------------------------------------------------
# Zip the files from the given directory that match the filter
#----------------------------------------------------------------------------
def zipFilesInDir(dirName, zipFileName, filter):
global dfLog
# create a ZipFile object
with ZipFile(zipFileName, 'w') as zipObj:
# Iterate over all the files in directory
for folderName, subfolders, filenames in os.walk(dirName):
for filename in filenames:
if filter(filename):
# create complete filepath of file in directory
filePath = os.path.join(folderName, filename)
# Add file to zip
zipObj.write(filePath)
_t = "Archivo ZIP con todos los CSV's creado con éxito"; print(_t)
dfLog = dfLog.append(pd.Series({'result': _t}), ignore_index=True)
return True
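# Example call (paths are hypothetical; main() below contains a similar, commented-out invocation):
# zipFilesInDir('./csv/', './csv_bundle.zip', lambda name: name.endswith('.csv'))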
def transferCSVToSQL_withPandas(path_to_dir_csv_file,DB_NAME,timestamp):
global dfLog,_sep,_encode
_r = True
secPhase = 'BD en blanco solo con parámetros definidos por Enlaces-Mineduc'
engine = create_engine(f"sqlite+pysqlcipher://:{secPhase}@/{DB_NAME}?cipher=aes-256-cfb&kdf_iter=64000")
conn = engine.connect()
try:
for root, dirs, files in os.walk(path_to_dir_csv_file, topdown=False):
for name in files:
_fileName = os.path.join(root, name)
df = pd.read_csv(_fileName,sep=_sep,encoding=_encode)
_c = str(df.count()[0])
tbl = name[:-4]
print(f'\nLeyendo: {_fileName} -> {_c} registros procesados.');
try:
df.to_sql(tbl, con = conn, index=False, if_exists='append')
_result = "OK"
except Exception as e:
print(f'RollBack')
_result='ERROR: '+str(e)
_r = False
pass
finally:
print("name:",name)
dfLog.loc[dfLog['csv']==_fileName,dfLog.columns=='#readingRows']=_c
dfLog.loc[dfLog['csv']==_fileName,dfLog.columns=='resultReading']=_result
print("Table:",tbl,"#Rows:",len(df.index),_result)
#trans.commit()
        # -------------------- BUSINESS RULE CHECKS --------------------------
        # CHECK THE REFERENTIAL INTEGRITY OF THE DATA
print("# VERIFICA LA INTEGRIDAD REFERENCIAL DE LOS DATOS")
rows = conn.execute("PRAGMA foreign_key_check;")
if(rows.returns_rows):
pd.DataFrame(rows
,columns=['Table', 'rowId', 'Parent', 'FkId']
).to_csv('ForenKeyErrors.csv'
,sep=_sep
,encoding=_encode
,index=False)
raise Exception("BD con errores de Integridad Referencial. Revise ForenKeyErrors.csv para más detalle")
        # CHECK THAT THE DB CONTAINS THE SCHOOL'S RBD (establishment code)
print("# VERIFICA QUE LA BD CONTENGA EL RBD DEL ESTABLECIMIENTO")
RBD = conn.execute("""
SELECT
i.Identifier as RBD
,Organization.Name as 'NombreEstablecimiento'
,i.OrganizationId
FROM OrganizationIdentifier i
INNER JOIN Organization USING(OrganizationId)
INNER JOIN RefOrganizationIdentificationSystem rbd
ON i.RefOrganizationIdentificationSystemId = rbd.RefOrganizationIdentificationSystemId
AND i.RefOrganizationIdentificationSystemId = (
SELECT RefOrganizationIdentificationSystemId
FROM RefOrganizationIdentificationSystem
WHERE Code = 'RBD')
INNER JOIN RefOrganizationIdentifierType Mineduc
ON i.RefOrganizationIdentifierTypeId = Mineduc.RefOrganizationIdentifierTypeId
AND i.RefOrganizationIdentifierTypeId = (
SELECT RefOrganizationIdentifierTypeId
FROM RefOrganizationIdentifierType
WHERE Code = 'Mineduc')
""")
if(not RBD.returns_rows):
raise Exception("RBD del establecimiento no fue encontrado en la Base de datos")
else:
row = RBD.fetchall();
r = re.compile('^RBD[0-9]{5}$')
print(row[0][0])
if r.match(row[0][0]) is not None:
print('RBD con formato correcto',row[0][0])
else:
raise Exception("RBD con formato incorrecto", row[0][0])
        # CHECK THAT THE DB CONTAINS DATA FOR A SINGLE YEAR ONLY
print("# VERIFICA QUE LA BD CONTENGA LA INFORMACIÓN DE SOLO UN AÑO")
Year = conn.execute("""
SELECT CalendarYear as 'AñoEscolar'
FROM OrganizationCalendar
GROUP BY CalendarYear
""");
if(Year.returns_rows):
rows = Year.fetchall();
print("Año: ",rows[0])
if(len(rows)!=1):
raise Exception("La BD contiene más de un año de referencia en los datos")
else:
raise Exception("La BD no contiene el año de referencia de los datos")
        # CHECK THE DATA HIERARCHY
        # the hierarchy is:
        # RBD -> Modality -> Shift -> Levels -> Branch ->
        #   Economic Sector (shortName) + Specialty (Name) ->
        #   Course Type -> COD_ENSE (shortName) + Grade (Name) -> Class -> Subject
print("# VERIFICA JERARQUIA DE LOS DATOS")
Jerarquias = conn.execute("""
SELECT
ee.RBD
, ee.nombreEstablecimiento
, modalidad.Name as modalidad
, jornada.Name as jornada
, nivel.Name as nivel
, rama.Name as rama
, sector.Name as sector
, especialidad.Name as especialidad
, tipoCurso.Name as tipoCurso
, codEnse.Name as codigoEnseñanza
, grado.Name as grado
, curso.Name as letraCurso
, curso.OrganizationId as OrganizationIdDelCurso
, profesorJefe.apellidoPaternoDocenteLiderCurso
, profesorJefe.apellidoMaternoDocenteLiderCurso
, profesorJefe.primerNombreDocenteLiderCurso
, profesorJefe.otrosNombresDocenteLiderCurso
, profesorJefe.runDocenteLiderCurso
FROM Organization as curso
INNER JOIN OrganizationRelationship as rsCurso on curso.OrganizationId=rsCurso.OrganizationId
INNER JOIN Organization as grado on grado.OrganizationId=rsCurso.Parent_OrganizationId
INNER JOIN OrganizationRelationship as rsGrado on grado.OrganizationId=rsGrado.OrganizationId
INNER JOIN Organization as codEnse on codEnse.OrganizationId=rsGrado.Parent_OrganizationId
INNER JOIN OrganizationRelationship as rsCodEnse on codEnse.OrganizationId=rsCodEnse.OrganizationId
INNER JOIN Organization as tipoCurso on tipoCurso.OrganizationId=rsCodEnse.Parent_OrganizationId
INNER JOIN OrganizationRelationship as rsTipoCurso on tipoCurso.OrganizationId=rsTipoCurso.OrganizationId
INNER JOIN Organization as especialidad on especialidad.OrganizationId=rsTipoCurso.Parent_OrganizationId
INNER JOIN OrganizationRelationship as rsEspecialidad on especialidad.OrganizationId=rsEspecialidad.OrganizationId
INNER JOIN Organization as sector on sector.OrganizationId=rsEspecialidad.Parent_OrganizationId
INNER JOIN OrganizationRelationship as rsSector on sector.OrganizationId=rsSector.OrganizationId
INNER JOIN Organization as rama on rama.OrganizationId=rsSector.Parent_OrganizationId
INNER JOIN OrganizationRelationship as rsRama on rama.OrganizationId=rsRama.OrganizationId
INNER JOIN Organization as nivel on nivel.OrganizationId=rsRama.Parent_OrganizationId
INNER JOIN OrganizationRelationship as rsNivel on nivel.OrganizationId=rsNivel.OrganizationId
INNER JOIN Organization as jornada on jornada.OrganizationId=rsNivel.Parent_OrganizationId
INNER JOIN OrganizationRelationship as rsJornada on jornada.OrganizationId=rsJornada.OrganizationId
INNER JOIN Organization as modalidad on modalidad.OrganizationId=rsJornada.Parent_OrganizationId
INNER JOIN OrganizationRelationship as rsModalidad on modalidad.OrganizationId=rsModalidad.OrganizationId
INNER JOIN (
SELECT
i.Identifier as RBD
,Organization.Name as 'nombreEstablecimiento'
,i.OrganizationId as OrganizationId
FROM OrganizationIdentifier i
INNER JOIN Organization USING(OrganizationId)
INNER JOIN RefOrganizationIdentificationSystem rbd
ON i.RefOrganizationIdentificationSystemId = rbd.RefOrganizationIdentificationSystemId
AND i.RefOrganizationIdentificationSystemId = (
SELECT RefOrganizationIdentificationSystemId
FROM RefOrganizationIdentificationSystem
WHERE Code = 'RBD' )
INNER JOIN RefOrganizationIdentifierType Mineduc
ON i.RefOrganizationIdentifierTypeId = Mineduc.RefOrganizationIdentifierTypeId
AND i.RefOrganizationIdentifierTypeId = (
SELECT RefOrganizationIdentifierTypeId
FROM RefOrganizationIdentifierType
WHERE Code = 'Mineduc' )) as ee on ee.OrganizationId=rsModalidad.Parent_OrganizationId
INNER JOIN (
SELECT
OrganizationPersonRoleId
, OrganizationId
, PersonId
, LastName as 'apellidoPaternoDocenteLiderCurso'
, SecondLastName as 'apellidoMaternoDocenteLiderCurso'
, FirstName as 'primerNombreDocenteLiderCurso'
, MiddleName as 'otrosNombresDocenteLiderCurso'
, runDocenteLiderCurso
FROM K12StaffAssignment
INNER JOIN OrganizationPersonRole USING(OrganizationPersonRoleId)
INNER JOIN (
SELECT DISTINCT
Person.PersonId
,Person.LastName
,Person.SecondLastName
,Person.FirstName
,Person.MiddleName
,rut.Identifier as RunDocenteLiderCurso
FROM Person
INNER JOIN PersonIdentifier rut ON rut.PersonId = Person.PersonId
AND rut.RefPersonIdentificationSystemId = 51
) USING(PersonId)
WHERE RefTeachingAssignmentRoleId = 1
) profesorJefe ON OrganizationIdDelCurso = profesorJefe.OrganizationId
WHERE curso.RefOrganizationTypeId = 21
""");
print("Jerarquias.returns_rows->",Jerarquias.returns_rows)
if(Jerarquias.returns_rows):
rows = Jerarquias.fetchall()
if(len(rows)==0):
raise Exception("No se encuentra ningún dato de jerarquía")
else:
raise Exception("No se encuentra ningún dato de jerarquía")
modalidades = list(set([m[2] for m in rows]))
madalidadesList = ['Regular','Especial','Adulto']
if(False in [m in madalidadesList for m in modalidades]):
raise Exception("La modalidad de enseñanza no corresponde")
jornadas = list(set([m[3] for m in rows]))
jornadasList = ['Mañana','Tarde','Mañana y Tarde','Vespertina/Nocturna']
if(False in [m in jornadasList for m in jornadas]):
raise Exception("La jornada de enseñanza no corresponde")
nivel = list(set([m[4] for m in rows]))
nivelList = [
'01:Educación Parvularia',
'02:Enseñanza Básica Niños',
'03:Educación Básica Adultos',
'04:Educación Especial',
'05:Enseñanza Media Humanístico Científica Jóvenes',
'06:Educación Media Humanístico Científica Adultos',
'07:Enseñanza Media Técnico Profesional y Artística, Jóvenes',
'08:Educación Media Técnico Profesional y Artística, Adultos']
if(False in [m in nivelList for m in nivel]):
raise Exception("El nivel de enseñanza agrupado no corresponde")
rama = list(set([m[5] for m in rows]))
ramaList = ['000:Ciclo General',
'000:Sin Información',
'400:Comercial',
'500:Industrial',
'600:Técnica',
'700:Agrícola',
'800:Marítima',
'900:Artística',]
if(False in [m in ramaList for m in rama]):
raise Exception("La rama de enseñanza no corresponde")
sector = list(set([m[6] for m in rows]))
        # "Ciclo General" corresponds to 1st and 2nd year students of technical-professional
        # and artistic secondary education (children and youth) and to the first level of
        # adult technical-professional and artistic secondary education.
        # In every other case use "Sin Información".
sectorList = ['000:Ciclo General',
'000:Sin Información',
'410:Administración y Comercio',
'510:Construcción',
'520:Metalmecánico',
'530:Electricidad',
'540:Minero',
'550:Gráfica',
'560:Químico',
'570:Confección',
'580:Tecnología y Telecomunicaciones',
'610:Alimentación',
'620:Programas y Proyectos Sociales',
'630:Hotelería y Turismo',
'640:Salud y Educación',
'710:Maderero',
'720:Agropecuario',
'810:Marítimo',
'910:Artes Visuales',
'920:Artes Escénicas Teatro',
'930:Artes Escénicas Danza']
if(False in [m in sectorList for m in sector]):
raise Exception("El sector de enseñanza no corresponde")
especialidad = list(set([m[7] for m in rows]))
especialidadList = ['000:Ciclo General',
'000:Sin Información',
'410.41001:Administración',
'410.41002:Contabilidad',
'410.41003:Secretariado',
'410.41004:Ventas',
'410.41005:Administración (con mención)',
'510.51001:Edificación',
'510.51002:Terminaciones de Construcción',
'510.51003:Montaje Industrial',
'510.51004:Obras viales y de infraestructura',
'510.51005:Instalaciones sanitarias',
'510.51006:Refrigeración y climatización',
'510.51009:Construcción (con mención)',
'520.52008:Mecánica Industrial',
'520.52009:Construcciones Metálicas',
'520.52010:Mecánica Automotriz',
'520.52011:Matricería',
'520.52012:Mecánica de mantención de aeronaves',
'520.52013:Mecánica Industrial (con mención)',
'530.53014:Electricidad',
'530.53015:Electrónica',
'530.53016:Telecomunicaciones hasta el año 2015',
'540.54018:Explotación minera',
'540.54019:Metalurgia Extractiva',
'540.54020:Asistencia de geología',
'550.55022:Gráfica',
'550.55023:Dibujo Técnico',
'560.56025:Operación de planta química',
'560.56026:Laboratorio químico',
'560.56027:Química Industrial (con mención)',
'570.57028:Tejido',
'570.57029:Textil',
'570.57030:Vestuario y Confección Textil',
'570.57031:Productos del cuero',
'580.58033:Conectividad y Redes',
'580.58034:Programación',
'580.58035:Telecomunicaciones',
'610.61001:Elaboración Industrial de Alimentos',
'610.61002:Servicio de Alimentación Colectiva',
'610.61003:Gastronomía (con mención)',
'620.62004:Atención de párvulos hasta año 2015',
'620.62005:Atención de adultos mayores',
'620.62006:Atención de Enfermería',
'620.62007:Atención Social y Recreativa',
'620.62008:Atención de Enfermería (con mención) hasta año 2015',
'630.63009:Servicio de turismo',
'630.63010:Servicios Hoteleros',
'630.63011:Servicio de hotelería',
'640.64001:Atención de párvulos',
'640.64008:Atención de Enfermería (con mención)',
'710.71001:Forestal',
'710.71002:Procesamiento de la madera',
'710.71003:Productos de la madera',
'710.71004:Celulosa y Papel',
'710.71005:Muebles y Terminaciones de la madera',
'720.72006:Agropecuaria',
'720.72007:Agropecuaria (con mención)',
'810.81001:Naves mercantes y especiales',
'810.81002:Pesquería',
'810.81003:Acuicultura',
'810.81004:Operación portuaria',
'810.81005:Tripulación naves mercantes y especiales',
'910.91001:Artes Visuales',
'910.91002:Artes Audiovisuales',
'910.91003:Diseño',
'920.92004:Interpretación Teatral',
'920.92005:Diseño Escénico',
'930.93006:Interpretación en Danza de Nivel Intermedio',
'930.93007:Monitoría de Danza']
if(False in [m in especialidadList for m in especialidad]):
raise Exception("La especialidad de enseñanza no corresponde")
tipoCurso = list(set([m[8] for m in rows]))
tipoCursoList = ['01:Simple','02:Combinado']
if(False in [m in tipoCursoList for m in tipoCurso]):
raise Exception("El codigo de nivel agrupado no corresponde")
codigoEnse = list(set([m[9] for m in rows]))
codigoEnseList = ['010:Educación Parvularia',
'110:Enseñanza Básica',
'160:Educación Básica Común Adultos (Decreto 584/2007)',
'161:Educación Básica Especial Adultos',
'163:Escuelas Cárceles (Básica Adultos)',
'165:Educación Básica Adultos Sin Oficios (Decreto 584/2007)'
'167:Educación Básica Adultos Con Oficios (Decreto 584/2007 y 999/2009)',
'211:Educación Especial Discapacidad Auditiva',
'212:Educación Especial Discapacidad Intelectual',
'213:Educación Especial Discapacidad Visual',
'214:Educación Especial Trastornos Específicos del Lenguaje',
'215:Educación Especial Trastornos Motores',
'216:Educación Especial Autismo',
'217:Educación Especial Discapacidad Graves Alteraciones en la Capacidad de Relación y Comunicación',
'299:Opción 4 Programa Integración Escolar',
'310:Enseñanza Media H-C niños y jóvenes',
'360:Educación Media H-C adultos vespertino y nocturno (Decreto N° 190/1975)',
'361:Educación Media H-C adultos (Decreto N° 12/1987)',
'362:Escuelas Cárceles (Media Adultos)',
'363:Educación Media H-C Adultos (Decreto N°1000/2009)',
'410:Enseñanza Media T-P Comercial Niños y Jóvenes',
'460:Educación Media T-P Comercial Adultos (Decreto N° 152/1989)',
'461:Educación Media T-P Comercial Adultos (Decreto N° 152/1989)',
'463:Educación Media T-P Comercial Adultos (Decreto N° 1000/2009)',
'510:Enseñanza Media T-P Industrial Niños y Jóvenes',
'560:Educación Media T-P Industrial Adultos (Decreto N° 152/1989)',
'561:Educación Media T-P Industrial Adultos (Decreto N° 152/1989)',
'563:Educación Media T-P Industrial Adultos (Decreto N° 1000/2009)',
'610:Enseñanza Media T-P Técnica Niños y Jóvenes',
'660:Educación Media T-P Técnica Adultos (Decreto N° 152/1989)',
'661:Educación Media T-P Técnica Adultos (Decreto N° 152/1989)',
'663:Educación Media T-P Técnica Adultos (Decreto N° 1000/2009)',
'710:Enseñanza Media T-P Agrícola Niños y Jóvenes',
'760:Educación Media T-P Agrícola Adultos (Decreto N° 152/1989)',
'761:Educación Media T-P Agrícola Adultos (Decreto N° 152/1989)',
'763:Educación Media T-P Agrícola Adultos (Decreto N° 1000/2009)',
'810:Enseñanza Media T-P Marítima Niños y Jóvenes',
'860:Enseñanza Media T-P Marítima Adultos (Decreto N° 152/1989)',
'863:Enseñanza Media T-P Marítima Adultos (Decreto N° 1000/2009)',
'910:Enseñanza Media Artística Niños y Jóvenes',
'963:Enseñanza Media Artística Adultos',]
if(False in [m in codigoEnseList for m in codigoEnse]):
raise Exception("El código de enseñanza no corresponde")
grado = list(set([m[10] for m in rows]))
gradoList = ['010.01:Sala Cuna',
'010.02:Nivel Medio Menor',
'010.03:Nivel Medio Mayor',
'010.04:Primer Nivel de Transición (Pre-kinder)',
'010.05:Segundo Nivel de Transición (Kinder)',
'110.01:1º Básico',
'110.02:2º Básico',
'110.03:3º Básico',
'110.04:4º Básico',
'110.05:5º Básico',
'110.06:6º Básico',
'110.07:7º Básico',
'110.08:8º Básico',
'165.01:Nivel Básico 1 (1º a 4º básico)',
'165.02:Nivel Básico 2 (5º a 6º básico)',
'165.03:Nivel Básico 3 (7º a 8º básico)',
'167.02:Nivel Básico 2 (5º a 6º básico)',
'167.03:Nivel Básico 3 (7º a 8º básico)',
'211.01:Prebásico materno 1º',
'211.02:Prebásico 1º - 1',
'211.03:Prebásico 1º - 2',
'211.04:Prebásico 1º - 3',
'211.05:Prebásico 2º - 4',
'211.06:Prebásico 2º - 5',
'211.07:Básico 1º - 1',
'211.08:Básico 1º - 2',
'211.09:Básico 1º - 3',
'211.10:Básico 1º - 4',
'211.11:Básico 2º - 5',
'211.12:Básico 2º - 6',
'211.13:Básico 2º - 7',
'211.14:Básico 2º - 8',
'211.15:Laboral 1',
'211.16:Laboral 2',
'211.17:Laboral 3',
'212.01:Prebásico 1º - 1',
'212.02:Prebásico 1º - 2',
'212.03:Prebásico 1º - 3',
'212.04:Prebásico 2º - 4',
'212.05:Básico 1º - 5',
'212.06:Básico 1º - 6',
'212.07:Básico 1º - 7',
'212.08:Básico 2º - 8',
'212.09:Básico 2º - 9',
'212.10:Básico 2º - 10',
'212.11:Laboral 1',
'212.12:Laboral 2',
'212.13:Laboral 3',
'212.14:Prebásico Materno 1° (Estimulación Temprana)',
'213.01:Estimulación temprana 1º - 1',
'213.02:Estimulación temprana 1º - 2',
'213.03:Prebásico 1º - 1',
'213.04:Prebásico 1º - 2',
'213.05:Prebásico 2º - 3',
'213.06:Prebásico 2º - 4',
'213.07:Básico 1º - 1',
'213.08:Básico 1º - 2',
'213.09:Básico 1º - 3',
'213.10:Básico 1º - 4',
'213.11:Básico 2º - 5',
'213.12:Básico 2º - 6',
'213.13:Básico 2º - 7',
'213.14:Básico 2º - 8',
'213.15:Laboral 1º - 1',
'213.16:Laboral 1º - 2',
'213.17:Laboral 2º - 3',
'213.18:Laboral 2º - 4',
'214.01:Medio Menor',
'214.02:Medio Mayor',
'214.03:Primer Nivel de Transición (Pre-kinder)',
'214.04:Segundo Nivel de Transición (Kinder)',
'215.01:Estimulación temprana 1º - 1',
'215.02:Estimulación temprana 1º - 2',
'215.03:Prebásico 1º - 1',
'215.04:Prebásico 1º - 2',
'215.05:Prebásico 1º - 3',
'215.06:Prebásico 2º - 4',
'215.07:Prebásico 2º - 5',
'215.08:Básico 1º - 1',
'215.09:Básico 1º - 2',
'215.10:Básico 1º - 3',
'215.11:Básico 1º - 4',
'215.12:Básico 2º - 5',
'215.13:Básico 2º - 6',
'215.14:Básico 2º - 7',
'215.15:Básico 2º - 8',
'215.16:Laboral 1º - 1',
'215.17:Laboral 1º - 2',
'215.18:Laboral 2º - 3',
'215.19:Laboral 2º - 4',
'216.05:Básico 1° - 5',
'216.06:Básico 1° - 6',
'216.07:Básico 1° - 7',
'216.08:Básico 2° - 8',
'216.09:Básico 2° - 9',
'216.10:Básico 2° - 10',
'216.11:Laboral 1',
'216.12:Laboral 2',
'216.13:Laboral 3',
'216.14:Prebásico Materno 1° (Estimulación Temprana)',
'216.15:Prebásico 1° - 1',
'216.16:Prebásico 1° - 2',
'216.17:Prebásico 2° - 3',
'216.18:Prebásico 2° - 4',
'217.01:Prebásico 1º - 1',
'217.02:Prebásico 1º - 2',
'217.03:Prebásico 2º - 3',
'217.04:Prebásico 2º - 4',
'217.05:Básico 1º - 5',
'217.06:Básico 1º - 6',
'217.07:Básico 1º - 75',
'217.08:Básico 2º - 8',
'217.09:Básico 2º - 9',
'217.10:Básico 2º - 10',
'217.11:Laboral 1',
'217.12:Laboral 2',
'217.13:Laboral 3',
'217.14:Prebásico Materno 1º (Estimulación temprana)',
'299.01:Prebásico 1',
'299.02:Prebásico 2',
'299.03:Prebásico 3',
'299.04:Prebásico 4',
'299.05:Prebásico 5',
'299.06:Básico 1',
'299.07:Básico 2',
'299.08:Básico 3',
'299.09:Básico 4',
'299.10:Básico 5',
'299.11:Básico 6',
'299.12:Básico 7',
'299.13:Básico 8',
'299.14:Básico 9',
'299.15:Básico 10',
'299.16:Laboral 1',
'299.17:Laboral 2',
'299.18:Laboral 3',
'299.19:Laboral 4',
'310.01:1º medio',
'310.02:2º medio',
'310.03:3º medio',
'310.04:4º medio',
'363.01:Primer nivel (1º y 2º medio)',
'363.03:Segundo nivel (3º y 4º medio)',
'410.01:1º medio',
'410.02:2º medio',
'410.03:3º medio',
'410.04:4º medio',
'463.01:Primer nivel (1º y 2º medio)',
'463.03:Segundo nivel (3º medio)',
'463.04:Tercero nivel (4º medio)',
'510.01:1º medio',
'510.02:2º medio',
'510.03:3º medio',
'510.04:4º medio',
'563.01:Primer nivel (1º y 2º medio)',
'563.03:Segundo nivel (3º medio)',
'563.04:Tercero nivel (4º medio)',
'610.01:1º medio',
'610.02:2º medio',
'610.03:3º medio',
'610.04:4º medio',
'663.01:Primer nivel (1º y 2º medio)',
'663.03:Segundo nivel (3º medio)',
'663.04:Tercero nivel (4º medio)',
'710.01:1º medio',
'710.02:2º medio',
'710.03:3º medio',
'710.04:4º medio',
'763.01:Primer nivel (1º y 2º medio)',
'763.03:Segundo nivel (3º medio)',
'763.04:Tercero nivel (4º medio)',
'810.01:1º medio',
'810.02:2º medio',
'810.03:3º medio',
'810.04:4º medio',
'863.01:Primer nivel (1º y 2º medio)',
'863.03:Segundo nivel (3º medio)',
'863.04:Tercero nivel (4º medio)',
'910.01:1º medio',
'910.02:2º medio',
'910.03:3º medio',
'910.04:4º medio',
'963.01:Primer nivel (1º y 2º medio)',
'963.03:Segundo nivel (3º medio)',
'963.04:Tercero nivel (4º medio)']
if(False in [m in gradoList for m in grado]):
raise Exception("El grado no corresponde")
letraCurso = list(set([m[11] for m in rows]))
r = re.compile('^[A-Z]{1,2}$')
if(None in [r.match(letra) for letra in letraCurso]):
raise Exception("La letra de curso inválida")
runDocenteLider = list(set([m[17] for m in rows]))
if(False in [validarRut(run) for run in runDocenteLider]):
raise Exception("Existe RUN de docentes inválidos")
        # CHECK THE STUDENT LIST
print("# VERIFICA LISTA DE ESTUDIANTES")
estudiantes = conn.execute("""
SELECT DISTINCT
person.PersonId as 'personIdEstudiante'
,numLista.StudentListNumber as 'númeroListaEstudiante'
,mat.Identifier as 'númeroMatriculaEstudiante'
,Person.LastName as apellidoPaternoEstudiante, Person.SecondLastName as apellidoMaternoEstudiante, Person.FirstName as primerNombreEstudiante, Person.MiddleName as otrosNombresEstudiante
,rut.Identifier as runEstudiante
,CASE RefSex.Code WHEN 'Male' THEN 'M' WHEN 'Female' THEN 'F' ELSE 'Sin Registro' END as sexoEstudiante
,Birthdate as fechaNacimientoEstudiante
,address.StreetNumberAndName as DirecciónEstudiante
,address.comuna as ComunaEstudiante
,padre.ApellidoPaternoPadre, padre.ApellidoMaternoPadre, padre.PrimerNombrePadre, padre.OtrosNombresPadre, padre.RunPadre
,madre.ApellidoPaternoMadre, madre.ApellidoMaternoMadre, madre.PrimerNombreMadre, madre.OtrosNombresMadre, madre.RunMadre
,tutor.ApellidoPaternoTutor, tutor.ApellidoMaternoTutor, tutor.PrimerNombreTutor, tutor.OtrosNombresTutor, tutor.RunTutor, tutor.FonoTutor, tutor.EmailTutor
,curso.RBD
,curso.nombreEstablecimiento
,curso.modalidad
,curso.jornada
,curso.nivel
,curso.grado
,curso.letraCurso
,curso.OrganizationIdDelCurso
,curso.apellidoPaternoDocenteLiderCurso, curso.apellidoMaternoDocenteLiderCurso, curso.primerNombreDocenteLiderCurso, curso.otrosNombresDocenteLiderCurso, curso.runDocenteLiderCurso
,oc.AñoCalendario
,Opr.EntryDate as 'fechaIncorporaciónEstudiante'
,Opr.ExitDate as 'fechaRetiroEstudiante'
FROM Person
INNER JOIN PersonIdentifier mat
ON mat.PersonId = Person.PersonId
AND mat.RefPersonIdentificationSystemId = 6
LEFT OUTER JOIN RefSex
USING(RefSexId)
LEFT OUTER JOIN PersonIdentifier rut
ON rut.PersonId = Person.PersonId
AND rut.RefPersonIdentificationSystemId = 51
LEFT OUTER JOIN (
SELECT
PersonId
,StreetNumberAndName
,RefCounty.Description as Comuna
FROM PersonAddress
INNER JOIN RefCounty ON PersonAddress.RefCountyId = RefCounty.RefCountyId
) address USING(PersonId)
LEFT OUTER JOIN (
SELECT DISTINCT
Person.PersonId
,Person.LastName as ApellidoPaternoPadre
,Person.SecondLastName as ApellidoMaternoPadre
,Person.FirstName as PrimerNombrePadre
,Person.MiddleName as OtrosNombresPadre
,rut.Identifier as RunPadre
FROM Person
INNER JOIN PersonIdentifier rut ON rut.PersonId = Person.PersonId
AND rut.RefPersonIdentificationSystemId = 51
INNER JOIN PersonRelationship padre ON padre.PersonId = Person.PersonId
AND padre.RefPersonRelationshipId IN (8,9,10,11)
) padre USING(PersonId)
LEFT OUTER JOIN (
SELECT DISTINCT
Person.PersonId
,Person.LastName as ApellidoPaternoMadre
,Person.SecondLastName as ApellidoMaternoMadre
,Person.FirstName as PrimerNombreMadre
,Person.MiddleName as OtrosNombresMadre
,rut.Identifier as RunMadre
FROM Person
INNER JOIN PersonIdentifier rut ON rut.PersonId = Person.PersonId
AND rut.RefPersonIdentificationSystemId = 51
INNER JOIN PersonRelationship ON PersonRelationship.PersonId = Person.PersonId
AND PersonRelationship.RefPersonRelationshipId IN (18,19,20)
) madre USING(PersonId)
LEFT OUTER JOIN (
SELECT DISTINCT
Person.PersonId
,Person.LastName as ApellidoPaternoTutor
,Person.SecondLastName as ApellidoMaternoTutor
,Person.FirstName as PrimerNombreTutor
,Person.MiddleName as OtrosNombresTutor
,rut.Identifier as RunTutor
,p.TelephoneNumber as FonoTutor
,PersonEmailAddress.EmailAddress as EmailTutor
FROM Person
INNER JOIN PersonIdentifier rut
ON rut.PersonId = Person.PersonId
AND rut.RefPersonIdentificationSystemId = 51
INNER JOIN PersonRelationship
ON PersonRelationship.PersonId = Person.PersonId
AND PersonRelationship.PrimaryContactIndicator = 1
LEFT OUTER JOIN (
SELECT *
FROM PersonTelephone
ORDER BY PrimaryTelephoneNumberIndicator DESC
LIMIT 1
) p USING(PersonId)
LEFT OUTER JOIN PersonEmailAddress
USING(PersonId)
) tutor USING(PersonId)
LEFT OUTER JOIN (
SELECT DISTINCT
StudentListNumber
,OrganizationPersonRole.PersonId
FROM K12StudentEnrollment
INNER JOIN OrganizationPersonRole
USING(OrganizationPersonRoleId)
WHERE StudentListNumber NOT NULL AND StudentListNumber != 0
) numLista USING(PersonId)
LEFT OUTER JOIN OrganizationPersonRole as Opr
USING(PersonId)
INNER JOIN (
SELECT
ee.RBD
, ee.nombreEstablecimiento
, modalidad.Name as modalidad
, jornada.Name as jornada
, nivel.Name as nivel
, rama.Name as rama
, sector.Name as sector
, especialidad.Name as especialidad
, tipoCurso.Name as tipoCurso
, codEnse.Name as codigoEnseñanza
, grado.Name as grado
, curso.Name as letraCurso
, curso.OrganizationId as OrganizationIdDelCurso
, profesorJefe.apellidoPaternoDocenteLiderCurso
, profesorJefe.apellidoMaternoDocenteLiderCurso
, profesorJefe.primerNombreDocenteLiderCurso
, profesorJefe.otrosNombresDocenteLiderCurso
, profesorJefe.runDocenteLiderCurso
FROM Organization as curso
INNER JOIN OrganizationRelationship as rsCurso on curso.OrganizationId=rsCurso.OrganizationId
INNER JOIN Organization as grado on grado.OrganizationId=rsCurso.Parent_OrganizationId
INNER JOIN OrganizationRelationship as rsGrado on grado.OrganizationId=rsGrado.OrganizationId
INNER JOIN Organization as codEnse on codEnse.OrganizationId=rsGrado.Parent_OrganizationId
INNER JOIN OrganizationRelationship as rsCodEnse on codEnse.OrganizationId=rsCodEnse.OrganizationId
INNER JOIN Organization as tipoCurso on tipoCurso.OrganizationId=rsCodEnse.Parent_OrganizationId
INNER JOIN OrganizationRelationship as rsTipoCurso on tipoCurso.OrganizationId=rsTipoCurso.OrganizationId
INNER JOIN Organization as especialidad on especialidad.OrganizationId=rsTipoCurso.Parent_OrganizationId
INNER JOIN OrganizationRelationship as rsEspecialidad on especialidad.OrganizationId=rsEspecialidad.OrganizationId
INNER JOIN Organization as sector on sector.OrganizationId=rsEspecialidad.Parent_OrganizationId
INNER JOIN OrganizationRelationship as rsSector on sector.OrganizationId=rsSector.OrganizationId
INNER JOIN Organization as rama on rama.OrganizationId=rsSector.Parent_OrganizationId
INNER JOIN OrganizationRelationship as rsRama on rama.OrganizationId=rsRama.OrganizationId
INNER JOIN Organization as nivel on nivel.OrganizationId=rsRama.Parent_OrganizationId
INNER JOIN OrganizationRelationship as rsNivel on nivel.OrganizationId=rsNivel.OrganizationId
INNER JOIN Organization as jornada on jornada.OrganizationId=rsNivel.Parent_OrganizationId
INNER JOIN OrganizationRelationship as rsJornada on jornada.OrganizationId=rsJornada.OrganizationId
INNER JOIN Organization as modalidad on modalidad.OrganizationId=rsJornada.Parent_OrganizationId
INNER JOIN OrganizationRelationship as rsModalidad on modalidad.OrganizationId=rsModalidad.OrganizationId
INNER JOIN (
SELECT
i.Identifier as RBD
,Organization.Name as 'nombreEstablecimiento'
,i.OrganizationId as OrganizationId
FROM OrganizationIdentifier i
INNER JOIN Organization USING(OrganizationId)
INNER JOIN RefOrganizationIdentificationSystem rbd
ON i.RefOrganizationIdentificationSystemId = rbd.RefOrganizationIdentificationSystemId
AND i.RefOrganizationIdentificationSystemId = (
SELECT RefOrganizationIdentificationSystemId
FROM RefOrganizationIdentificationSystem
WHERE Code = 'RBD' )
INNER JOIN RefOrganizationIdentifierType Mineduc
ON i.RefOrganizationIdentifierTypeId = Mineduc.RefOrganizationIdentifierTypeId
AND i.RefOrganizationIdentifierTypeId = (
SELECT RefOrganizationIdentifierTypeId
FROM RefOrganizationIdentifierType
WHERE Code = 'Mineduc' )) as ee on ee.OrganizationId=rsModalidad.Parent_OrganizationId
INNER JOIN (
SELECT
OrganizationPersonRoleId
, OrganizationId
, PersonId
, LastName as 'apellidoPaternoDocenteLiderCurso'
, SecondLastName as 'apellidoMaternoDocenteLiderCurso'
, FirstName as 'primerNombreDocenteLiderCurso'
, MiddleName as 'otrosNombresDocenteLiderCurso'
, runDocenteLiderCurso
FROM K12StaffAssignment
INNER JOIN OrganizationPersonRole USING(OrganizationPersonRoleId)
INNER JOIN (
SELECT DISTINCT
Person.PersonId
,Person.LastName
,Person.SecondLastName
,Person.FirstName
,Person.MiddleName
,rut.Identifier as RunDocenteLiderCurso
FROM Person
INNER JOIN PersonIdentifier rut ON rut.PersonId = Person.PersonId
AND rut.RefPersonIdentificationSystemId = 51
) USING(PersonId)
WHERE RefTeachingAssignmentRoleId = 1
) profesorJefe ON OrganizationIdDelCurso = profesorJefe.OrganizationId
WHERE curso.RefOrganizationTypeId = 21
) curso ON Opr.OrganizationId = curso.OrganizationIdDelCurso
LEFT OUTER JOIN (
Select MAX(CalendarYear) as 'AñoCalendario', OrganizationId
FROM OrganizationCalendar
) oc ON oc.OrganizationId=curso.OrganizationIdDelCurso
ORDER BY nivel, grado, letraCurso, StudentListNumber
""")
print('Estudiantes.returns_rows->',estudiantes.returns_rows)
if(estudiantes.returns_rows):
rows = estudiantes.fetchall()
if(len(rows)==0):
raise Exception("No se encuentra ningún dato de los studiante")
else:
raise Exception("No se encuentra ningún dato de los estudiantes")
runEstudiante = list(set([m[7] for m in rows]))
if(False in [validarRut(run) for run in runEstudiante]):
print([run for run in runEstudiante if not validarRut(run)])
raise Exception("Existe RUN inválidos (runEstudiante)")
runPadres = list(set([m[16] for m in rows if m[16] is not None]))
runPadresErroneos = [run for run in runPadres if not validarRut(run)]
if(len(runPadresErroneos)>0):
print(runPadresErroneos)
raise Exception("Existe RUN inválidos (runPadres)")
runMadres = list(set([m[21] for m in rows if m[21] is not None]))
runMadresErroneos = [run for run in runMadres if not validarRut(run)]
if(len(runMadresErroneos)>0):
print(runMadresErroneos)
raise Exception("Existe RUN inválidos (runMadres)")
runTutores = list(set([m[26] for m in rows if m[26] is not None]))
runTutoresErroneos = [run for run in runTutores if not validarRut(run)]
if(len(runTutoresErroneos)>0):
print(runTutoresErroneos)
raise Exception("Existe RUN inválidos (runTutores)")
        # Phone numbers must follow the E.164 format
phoneTutor = list(set([m[27] for m in rows if m[27] is not None]))
r = re.compile('^\+56\d{9,15}$')
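        # e.g. '+56912345678' passes the pattern; '912345678' (missing country code) does not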
phoneTutorErroneos = [phone for phone in phoneTutor if not r.match(phone)]
if(len(phoneTutorErroneos)>0):
print(phoneTutorErroneos)
raise Exception("El teléfono del tutor no tiene el formato correcto")
emailTutor = list(set([m[28] for m in rows if m[28] is not None]))
        emailTutoresErroneos = [email for email in emailTutor if not validate_email(email)]
        if(len(emailTutoresErroneos)>0):
            print(emailTutoresErroneos)
            raise Exception("Existe un email inválido (emailTutor)")
# CAMBIA CLAVE A LA BD Y CREA ARCHIVO CON CLAVE PARA LA SIE
print("# CAMBIA CLAVE A LA BD Y CREA ARCHIVO CON CLAVE PARA LA SIE")
conn.execute(f"PRAGMA key = '{secPhase}';")
psw = ''.join(random.choice(string.ascii_uppercase + string.ascii_lowercase + string.digits) for _ in range(50))
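            # Rough brute-force estimate in seconds: 62^50 possible 50-character passwords
            # divided by an assumed rate of 4e9 guesses per second (illustrative figure only).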
tiempoPromedioDesifrado = pow(26+26+10,50)/4000000000
print(psw,tiempoPromedioDesifrado)
text_file = open("key.txt", "w");text_file.write(psw);text_file.close()
psw2 = encryptTextUsingSiePublicKey(psw)
text_file = open("key.encrypted", "wb");text_file.write(psw2);text_file.close()
conn.execute(f"PRAGMA rekey = '{psw}';")
except Exception as e:
_t = "ERROR COMMIT: "+str(e)
print(_t);dfLog=dfLog.append(pd.Series({'result': _t}),ignore_index=True);
_r = False
finally:
            #closing database connection
conn.close()
return _r
def cargarBaseDeDatos():
global dfLog
idFile = '1hqAjAknc6dY720X5zO_ZU2FqI_mZa3nB'
url_to_zipDB_file = f'http://drive.google.com/uc?export=download&id={idFile}'
r = requests.get(url_to_zipDB_file, stream=True)
fileName = 'ceds-nds-v7_1_encryptedD3.db'
with open(fileName,'wb') as out:
        out.write(r.content) ## write the downloaded bytes to the file
path_to_DB_file = os.path.join(os.path.dirname(fileName), fileName)
_t=f"Base de datos: '{path_to_DB_file}' descomprimida exitosamente "; print(_t)
dfLog = dfLog.append(pd.Series({'result': _t}), ignore_index=True)
return path_to_DB_file
#----------------------------------------------------------------------------
#PASO N° 10 - Preparar ambiente de trabajo
#----------------------------------------------------------------------------
# Clean o Create CSV directory
#----------------------------------------------------------------------------
def cleanDirectory(d):
global dfLog
if(not os.path.exists(d)):
os.mkdir(d)
else:
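        # walk bottom-up (topdown=False) so files and subdirectories are removed before their parent folders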
for root, dirs, files in os.walk(d, topdown=False):
for name in files:
os.remove(os.path.join(root, name))
for name in dirs:
os.rmdir(os.path.join(root, name))
_t = f'Directorio : {d} limpio y preparado con éxito!!!'; print(_t);
dfLog = dfLog.append(pd.Series({'result': _t}), ignore_index=True)
return True
###Output
_____no_output_____
###Markdown
Section: MAIN CODE---
###Code
dfLog = pd.DataFrame(columns=['json', 'csv', '#savingRows', '#readingRows',
'resultSaving', 'resultReading', 'result'])
_encode = 'utf8' #Opciones Windows:'cp1252', Google Colab: 'utf8'
_sep = ';' #Opciones Windows:';', Google Colab: ','
def main():
global dfLog, _encode, _sep
tiempo_inicial = time()
now = datetime.now(pytz.timezone('Chile/Continental'))
t_stamp = datetime.timestamp(now)
path_to_dir_csv_file = './csv/'
path_to_DB_file = cargarBaseDeDatos()
path_to_zip_file = '_tmp_json_librodigital_mineduc_8833_02enero2020.zip' #Ingresar solo nombre del archivo
if(cleanDirectory(path_to_dir_csv_file)):
if(readJsonSaveCSV(path_to_zip_file,path_to_dir_csv_file)):
if(transferCSVToSQL_withPandas(path_to_dir_csv_file,path_to_DB_file,t_stamp)):
#zipFilesInDir (path_to_dir_csv_file, './'+str(int(t_stamp))+'_Data.zip',lambda name : 'csv' in name);
cleanDirectory(path_to_dir_csv_file)
_nameErrorFile = str(int(t_stamp))+'_ERRORES.csv'
dfLog.to_csv(_nameErrorFile, sep=_sep, encoding=_encode, index=False)
            zipf = ZipFile('./'+str(int(t_stamp))+'_Data.zip','a')
            zipf.write('./ceds-nds-v7_1_encryptedD3.db')
            zipf.write('./key.txt');zipf.write('./key.encrypted')
            zipf.write('./'+str(int(t_stamp))+'_ERRORES.csv')
            zipf.close()
_t = "Proceso finalizado correctamente!!!"
print(_t);dfLog = dfLog.append(pd.Series({'result': _t}), ignore_index=True)
else:
cleanDirectory(path_to_dir_csv_file)
_nameErrorFile = str(int(t_stamp))+'_ERRORES.csv'
dfLog.to_csv(_nameErrorFile, sep=_sep, encoding=_encode, index=False)
_t = "Proceso finalizado con ERRORES!!!"
print(_t);dfLog = dfLog.append(pd.Series({'result': _t}), ignore_index=True)
if os.path.exists('./ceds-nds-v7_1_encryptedD3.db'):
os.remove('./ceds-nds-v7_1_encryptedD3.db')
if os.path.exists('./key.txt'):
os.remove('./key.txt')
if os.path.exists('./key.encrypted'):
os.remove('./key.encrypted')
_t = f'El tiempo de ejecucion fue: {str(time() - tiempo_inicial)}'
print(_t);dfLog = dfLog.append(pd.Series({'result': _t}), ignore_index=True)
del dfLog, tiempo_inicial, now, path_to_dir_csv_file
del path_to_zip_file, _t
if __name__== "__main__":
main()
###Output
_____no_output_____ |
resnet50rs_cifar100.ipynb | ###Markdown
###Code
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optimizers
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
import torchvision
import torchvision.transforms as transforms
from sklearn.metrics import accuracy_score
###Output
_____no_output_____
###Markdown
**Network model**
###Code
class ResNet50rs(nn.Module):
def __init__(self, output_dim):
super(ResNet50rs, self).__init__()
self.stem = StemBlock(channel_in=3, channel_out=64)
# Block 1
self.id1 = Block(channel_in=64, channel_out=256, stride=2, identity=True)
self.block1 = nn.ModuleList([Block(channel_in=256, channel_out=256, stride=1, identity=False) for _ in range(2)])
# Block 2
self.id2 = Block(channel_in=256, channel_out=512, stride=2, identity=True)
self.block2 = nn.ModuleList([Block(channel_in=512, channel_out=512, stride=1, identity=False) for _ in range(3)])
# Block 3
self.id3 = Block(channel_in=512, channel_out=1024, stride=2, identity=True)
self.block3 = nn.ModuleList([Block(channel_in=1024, channel_out=1024, stride=1, identity=False) for _ in range(5)])
# Block 4
self.id4 = Block(channel_in=1024, channel_out=2048, stride=2, identity=True)
self.block4 = nn.ModuleList([Block(channel_in=2048, channel_out=2048, stride=1, identity=False) for _ in range(2)])
self.avg_pool = GlobalAvgPool2d()
self.dropout = nn.Dropout(p=0.25)
self.fc = nn.Linear(2048, output_dim, bias=False)
def forward(self, x):
h = self.stem(x)
h = self.id1(h)
for block in self.block1:
h = block(h)
h = self.id2(h)
for block in self.block2:
h = block(h)
h = self.id3(h)
for block in self.block3:
h = block(h)
h = self.id4(h)
for block in self.block4:
h = block(h)
h = self.avg_pool(h)
h = self.dropout(h)
h = torch.relu(h)
h = self.fc(h)
y = torch.log_softmax(h, dim=-1)
return y
class StemBlock(nn.Module):
def __init__(self, channel_in, channel_out):
super(StemBlock, self).__init__()
        channel = int(channel_out / 2)
self.stem = nn.Sequential(nn.Conv2d(channel_in, channel, kernel_size=(3, 3), stride=2, padding=1, bias=False),
nn.BatchNorm2d(channel),
nn.ReLU(inplace=True),
nn.Conv2d(channel, channel, kernel_size=(3, 3), stride=1, padding=1, bias=False),
nn.BatchNorm2d(channel),
nn.ReLU(inplace=True),
nn.Conv2d(channel, channel_out, kernel_size=(3, 3), stride=1, padding=1, bias=False))
self.init_weights()
def init_weights(self):
for _, module in self.named_modules():
if isinstance(module, nn.Conv2d):
nn.init.kaiming_normal_(module.weight, mode='fan_in', nonlinearity='relu')
if isinstance(module, nn.BatchNorm2d):
nn.init.ones_(module.weight)
nn.init.zeros_(module.bias)
def forward(self, x):
return self.stem(x)
class Block(nn.Module):
def __init__(self, channel_in, channel_out, stride, identity):
super(Block, self).__init__()
channel = int(channel_out / 4)
self.se = SEBlock(channel_in)
# 1x1 conv
self.bn1 = nn.BatchNorm2d(channel_in)
self.relu1 = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(channel_in, channel, kernel_size=(1, 1), bias=False)
# 3x3 conv
self.bn2 = nn.BatchNorm2d(channel)
self.relu2 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(channel, channel, kernel_size=(3, 3), stride=stride, padding=1, bias=False)
# 1x1 conv
self.bn3 = nn.BatchNorm2d(channel)
self.drop_out = DropPath(drop_prob=0.)
self.conv3 = nn.Conv2d(channel, channel_out, kernel_size=(1, 1), bias=False)
# skip connection
self.identity = identity
self.downsample = DownSample(channel_in, channel_out, stride)
self.init_weights()
def init_weights(self):
for _, module in self.named_modules():
if isinstance(module, nn.Conv2d):
nn.init.kaiming_normal_(module.weight, mode='fan_in', nonlinearity='relu')
if isinstance(module, nn.BatchNorm2d):
nn.init.ones_(module.weight)
nn.init.zeros_(module.bias)
def forward(self, x):
h = self.se(x)
h = self.bn1(h)
h = self.relu1(h)
h = self.conv1(h)
h = self.bn2(h)
h = self.relu2(h)
h = self.conv2(h)
h = self.bn3(h)
h = self.drop_out(h)
h = self.conv3(h)
shortcut = self.downsample(x) if self.identity else x
y = h + shortcut
return y
class SEBlock(nn.Module):
def __init__(self, channel, ratio=0.25):
super(SEBlock, self).__init__()
reduced_channel = int(channel * ratio)
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Sequential(nn.Linear(channel, reduced_channel, bias=False),
nn.ReLU(inplace=True),
nn.Linear(reduced_channel, channel, bias=False),
nn.Sigmoid())
def forward(self, x):
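        # Squeeze-and-Excitation: global-average-pool to a per-channel descriptor, pass it through
        # a small bottleneck MLP ending in a sigmoid, and rescale each input channel by that weight.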
b, c, _, _ = x.size()
y = self.avg_pool(x).view(b, c)
y = self.fc(y).view(b, c, 1, 1)
return x * y.expand_as(x)
class DropPath(nn.Module):
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
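        # Stochastic depth: during training, zero this residual branch for a random subset of samples
        # (probability drop_prob) and scale the kept samples by 1/keep_prob to preserve the expectation.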
if self.drop_prob is None or self.drop_prob == 0 or not self.training:
return x
keep_prob = 1 - self.drop_prob
shape = (x.shape[0], ) + (1, ) * (x.ndim - 1)
rand_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
rand_tensor = rand_tensor.floor_()
out = x.div(keep_prob) * rand_tensor
return out
class DownSample(nn.Module):
def __init__(self, channel_in, channel_out, stride):
super(DownSample, self).__init__()
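        # ResNet-D style shortcut: average pooling handles the spatial downsampling, followed by a
        # 1x1 conv to match channels, instead of a single strided 1x1 convolution.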
if stride == 1:
avg_pool = nn.Identity()
else:
avg_pool = nn.AvgPool2d(kernel_size=2, stride=stride)
self.downsample = nn.Sequential(avg_pool, nn.Conv2d(channel_in, channel_out, kernel_size=(1, 1), bias=False))
def forward(self, x):
return self.downsample(x)
class GlobalAvgPool2d(nn.Module):
def __init__(self, device='cuda'):
super(GlobalAvgPool2d, self).__init__()
def forward(self, x):
return F.avg_pool2d(x, kernel_size=x.size()[2:]).view(-1, x.size(1))
###Output
_____no_output_____
###Markdown
Tensor size: CNNs adapt to the size of the input image. Using the PyTorch library, you can show that an intermediate tensor $h$ has shape $$h = \begin{bmatrix} \text{batch size} & \text{channel} & \text{height} & \text{width} \end{bmatrix}$$ where batch size is the minibatch size, channel is the number of feature channels, and height and width are the spatial dimensions of the source image. **CIFAR-100**
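As a quick illustration (an added sketch, not part of the original notebook), the next cell feeds a dummy batch through the stem and the first downsampling block defined above and prints the resulting [batch, channel, height, width] shapes; the temporary names `_x`, `_stem`, `_block`, `_h` exist only for this check, and the sizes in the comments are expectations based on the strides used in the model.
###Code
# Hypothetical shape check (added sketch): a dummy batch of 2 RGB images at 160x160.
_x = torch.randn(2, 3, 160, 160)
_stem = StemBlock(channel_in=3, channel_out=64)
_block = Block(channel_in=64, channel_out=256, stride=2, identity=True)
_h = _stem(_x)
print(_h.shape)          # expected: torch.Size([2, 64, 80, 80])  (stride-2 stem)
print(_block(_h).shape)  # expected: torch.Size([2, 256, 40, 40]) (stride-2 block)
###Output
_____no_output_____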
###Code
if __name__ == '__main__':
np.random.seed(1234)
torch.manual_seed(1234)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
scaler = torch.cuda.amp.GradScaler()
cifar_classes = ['apple', 'aquarium_fish', 'baby', 'bear', 'beaver', 'bed', 'bee', 'beetle',
'bicycle', 'bottle', 'bowl', 'boy', 'bridge', 'bus', 'butterfly', 'camel',
'can', 'castle', 'caterpillar', 'cattle', 'chair', 'chimpanzee', 'clock',
'cloud', 'cockroach', 'couch', 'crab', 'crocodile', 'cup', 'dinosaur',
'dolphin', 'elephant', 'flatfish', 'forest', 'fox', 'girl', 'hamster',
'house', 'kangaroo', 'keyboard', 'lamp', 'lawn_mower', 'leopard', 'lion',
'lizard', 'lobster', 'man', 'maple_tree', 'motorcycle', 'mountain', 'mouse',
'mushroom', 'oak_tree', 'orange', 'orchid', 'otter', 'palm_tree', 'pear',
'pickup_truck', 'pine_tree', 'plain', 'plate', 'poppy', 'porcupine',
'possum', 'rabbit', 'raccoon', 'ray', 'road', 'rocket', 'rose',
'sea', 'seal', 'shark', 'shrew', 'skunk', 'skyscraper', 'snail', 'snake',
'spider', 'squirrel', 'streetcar', 'sunflower', 'sweet_pepper', 'table',
'tank', 'telephone', 'television', 'tiger', 'tractor', 'train', 'trout',
'tulip', 'turtle', 'wardrobe', 'whale', 'willow_tree', 'wolf', 'woman',
'worm']
transform_train = transforms.Compose([transforms.Resize((160, 160)), transforms.RandAugment(magnitude=10), transforms.ToTensor()])
transform_val = transforms.Compose([transforms.Resize((160, 160)), transforms.ToTensor()])
cifar_train = torchvision.datasets.CIFAR100(root="CIFAR100", download=True, train=True, transform=transform_train)
cifar_val = torchvision.datasets.CIFAR100(root="CIFAR100", download=True, train=False, transform=transform_val)
train_dataloader = DataLoader(cifar_train, batch_size=128, shuffle=True, num_workers=2, pin_memory=True, drop_last=True)
val_dataloader = DataLoader(cifar_val, batch_size=128, shuffle=False, num_workers=2, pin_memory=True, drop_last=True)
model = ResNet50rs(len(cifar_classes)).to(device)
def compute_loss(label, pred):
return criterion(pred, label)
def train_step(x, t):
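        # Mixed-precision training: autocast runs the forward pass in float16 where it is safe,
        # and GradScaler scales the loss so small gradients do not underflow in fp16.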
model.train()
with torch.cuda.amp.autocast():
preds = model(x)
loss = compute_loss(t, preds)
del preds, x
optimizer.zero_grad()
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()
return loss
def val_step(x, t):
model.eval()
with torch.cuda.amp.autocast():
preds = model(x)
loss = compute_loss(t, preds)
del x
return loss, preds
criterion = nn.NLLLoss()
log_prob = nn.LogSoftmax(dim=1)
optimizer = optimizers.SGD(model.parameters(),lr=0.00078125, momentum=0.9, weight_decay=4e-5)
epochs = 50
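    # Let cuDNN benchmark and pick the fastest convolution algorithms for the fixed input size.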
torch.backends.cudnn.benchmark = True
result_list = np.zeros(4)
for epoch in range(epochs):
train_loss = 0.
val_loss = 0.
val_acc = 0.
for(x, t) in train_dataloader:
x, t = x.to(device), t.to(device)
loss = train_step(x, t)
train_loss += loss.item()
del t, loss
torch.cuda.empty_cache()
train_loss /= len(train_dataloader)
print("Epoch: {}, Train Cost: {:.3f}".format(epoch+1, train_loss))
with torch.inference_mode():
for (x, t) in val_dataloader:
x, t = x.to(device), t.to(device)
loss, preds = val_step(x, t)
val_loss += loss.item()
val_acc += accuracy_score(t.tolist(), preds.argmax(dim=-1).tolist())
del t, loss, preds
torch.cuda.empty_cache()
val_loss /= len(val_dataloader)
val_acc /= len(val_dataloader)
result_list = np.vstack((result_list, np.array([epoch+1, train_loss, val_loss, val_acc])))
print("Epoch: {}, Valid Cost: {:.3f}, Acc: {:.3f}".format(epoch+1, val_loss, val_acc))
###Output
Downloading https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz to CIFAR100/cifar-100-python.tar.gz
###Markdown
**Plot**
###Code
import matplotlib.pyplot as plt
%matplotlib inline
result = result_list.T
epoch = result[0]
train_loss = result[1]
val_loss = result[2]
accuracy = result[3]
fig, ax1 = plt.subplots(figsize=(10,7))
ax1.set_xlim(1, 50)
ax1.set_ylim(0, 4)
ax1.tick_params(labelsize=16)
ax1.set_xlabel("Epoch", fontsize=22)
ax1.set_ylabel("Cost", fontsize=22)
ax1.plot(epoch, train_loss, color="#ce1021", label="Train")
ax1.plot(epoch, val_loss, color="#5ab639", label="Val")
ax2 = ax1.twinx()
ax2.set_ylim(0, 1)
ax2.tick_params(labelsize=16)
ax2.set_ylabel("Accuracy", fontsize=22)
ax2.plot(epoch, accuracy, color="#0086ce", label="Accuracy")
h1, l1 = ax1.get_legend_handles_labels()
h2, l2 = ax2.get_legend_handles_labels()
ax1.legend(h1+h2, l1+l2, fontsize=18, loc='upper right')
plt.show()
###Output
_____no_output_____
###Markdown
**Image classification on validation data**
###Code
from PIL import Image
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
cifar_val = torchvision.datasets.CIFAR100(root="CIFAR100", download=False, train=False, transform=None)
images, labels=[], []
for sample in cifar_val:
image, label = sample
images.append(image)
labels.append(label)
idx = 51
img_src = images[idx]
plt.imshow(img_src)
label = labels[idx]
trans = transforms.Compose([transforms.Resize((160, 160)), transforms.ToTensor()])
img = trans(img_src)
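# add a leading batch dimension: [3, 160, 160] -> [1, 3, 160, 160]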
img_batch = img[None]
model.eval()
torch.backends.cudnn.benchmark = True
with torch.inference_mode(), torch.cuda.amp.autocast():
prediction = model(img_batch.to(device))
idx = torch.argmax(prediction[0])
print("Pred: ", cifar_classes[idx])
print("Correct: ", cifar_classes[label])
###Output
Pred: turtle
Correct: bee
|