path: string, lengths 7 to 265
concatenated_notebook: string, lengths 46 to 17M
optimization/rosenbrock/optimize_rosenbrock.ipynb
###Markdown Testing out Rosenbrock function optimization with gradient descent. Inspiration: https://www.indusmic.com/post/rosenbrock-function ###Code %matplotlib inline import numpy as np import matplotlib.pyplot as plt from mpl_toolkits import mplot3d from matplotlib import cm # 2 dim rosenbrock function and its partial derivatives b = 10 f = lambda w1, w2: b * (w2 - w1**2)**2 + (w1 - 1)**2 f_w1 = lambda w1, w2: -4 * b * w1 * (w2 - w1**2) + 2 * (w1 - 1) f_w2 = lambda w1, w2: 2 * b * (w2 - w1**2) # hyperparameters eta = .01 # learning rate iterations = 1000 sc = 1.5 # scale of plot plot_function = 1 # plot surface of function # initial values w1, w2 = np.random.uniform(-1, 1, 2) # the history of minimizing (for plotting) hw1, hw2, hf = [w1], [w2], [f(w1, w2)] # gradient descent for i in range(iterations): w1b = w1 w1 -= eta * f_w1(w1, w2) w2 -= eta * f_w2(w1b, w2) hw1.append(w1) hw2.append(w2) hf.append(f(w1, w2)) # Initialize figure fig = plt.figure(figsize=(36, 21)) ax = plt.axes(projection='3d') # Evaluate function X1 = np.arange(sc*-2, sc*2, sc*0.15) X2 = np.arange(sc*-1, sc*3, sc*0.15) X1, X2 = np.meshgrid(X1, X2) Z = f(X1, X2) # Plot the surface if plot_function: # ax.plot_surface(X1, X2, Z, cmap=cm.gist_heat_r, linewidth=0, antialiased=True) ax.scatter(X1, X2, Z, c='g') points = [hw1, hw2, hf] ax.scatter(*points, c='r') plt.show() # create a 'movie' of the graph ax.elev = 10 for i in range(0, 360): ax.azim = i fig.savefig("./optimize_rosenbrock/%d.png" % i) # make images to movie !ffmpeg -r 60 -f image2 -s 1920x1080 -i ./optimize_rosenbrock/%d.png -vcodec libx264 -crf 25 -pix_fmt yuv420p out.mp4 ###Output ffmpeg version 4.4-6ubuntu5 Copyright (c) 2000-2021 the FFmpeg developers built with gcc 11 (Ubuntu 11.2.0-7ubuntu1) configuration: --prefix=/usr --extra-version=6ubuntu5 --toolchain=hardened --libdir=/usr/lib/x86_64-linux-gnu --incdir=/usr/include/x86_64-linux-gnu --arch=amd64 --enable-gpl --disable-stripping --enable-gnutls --enable-ladspa --enable-libaom --enable-libass --enable-libbluray --enable-libbs2b --enable-libcaca --enable-libcdio --enable-libcodec2 --enable-libdav1d --enable-libflite --enable-libfontconfig --enable-libfreetype --enable-libfribidi --enable-libgme --enable-libgsm --enable-libjack --enable-libmp3lame --enable-libmysofa --enable-libopenjpeg --enable-libopenmpt --enable-libopus --enable-libpulse --enable-librabbitmq --enable-librubberband --enable-libshine --enable-libsnappy --enable-libsoxr --enable-libspeex --enable-libsrt --enable-libssh --enable-libtheora --enable-libtwolame --enable-libvidstab --enable-libvorbis --enable-libvpx --enable-libwebp --enable-libx265 --enable-libxml2 --enable-libxvid --enable-libzimg --enable-libzmq --enable-libzvbi --enable-lv2 --enable-omx --enable-openal --enable-opencl --enable-opengl --enable-sdl2 --enable-pocketsphinx --enable-librsvg --enable-libmfx --enable-libdc1394 --enable-libdrm --enable-libiec61883 --enable-nvenc --enable-chromaprint --enable-frei0r --enable-libx264 --enable-shared libavutil 56. 70.100 / 56. 70.100 libavcodec 58.134.100 / 58.134.100 libavformat 58. 76.100 / 58. 76.100 libavdevice 58. 13.100 / 58. 13.100 libavfilter 7.110.100 / 7.110.100 libswscale 5. 9.100 / 5. 9.100 libswresample 3. 9.100 / 3. 9.100 libpostproc 55. 9.100 / 55. 
9.100 Input #0, image2, from './optimize_rosenbrock/%d.png': Duration: 00:00:06.00, start: 0.000000, bitrate: N/A Stream #0:0: Video: png, rgba(pc), 2592x1512 [SAR 2835:2835 DAR 12:7], 60 fps, 60 tbr, 60 tbn, 60 tbc Stream mapping: Stream #0:0 -> #0:0 (png (native) -> h264 (libx264)) Press [q] to stop, [?] for help [libx264 @ 0x558574067000] using SAR=1/1 [libx264 @ 0x558574067000] using cpu capabilities: MMX2 SSE2Fast SSSE3 SSE4.2 AVX FMA3 BMI2 AVX2 [libx264 @ 0x558574067000] profile High, level 5.1, 4:2:0, 8-bit [libx264 @ 0x558574067000] 264 - core 160 r3011 cde9a93 - H.264/MPEG-4 AVC codec - Copyleft 2003-2020 - http://www.videolan.org/x264.html - options: cabac=1 ref=3 deblock=1:0:0 analyse=0x3:0x113 me=hex subme=7 psy=1 psy_rd=1.00:0.00 mixed_ref=1 me_range=16 chroma_me=1 trellis=1 8x8dct=1 cqm=0 deadzone=21,11 fast_pskip=1 chroma_qp_offset=-2 threads=12 lookahead_threads=2 sliced_threads=0 nr=0 decimate=1 interlaced=0 bluray_compat=0 constrained_intra=0 bframes=3 b_pyramid=2 b_adapt=1 b_bias=0 direct=1 weightb=1 open_gop=0 weightp=2 keyint=250 keyint_min=25 scenecut=40 intra_refresh=0 rc_lookahead=40 rc=crf mbtree=1 crf=25.0 qcomp=0.60 qpmin=0 qpmax=69 qpstep=4 ip_ratio=1.40 aq=1:1.00 Output #0, mp4, to 'out.mp4': Metadata: encoder : Lavf58.76.100 Stream #0:0: Video: h264 (avc1 / 0x31637661), yuv420p(tv, progressive), 2592x1512 [SAR 1:1 DAR 12:7], q=2-31, 60 fps, 15360 tbn Metadata: encoder : Lavc58.134.100 libx264 Side data: cpb: bitrate max/min/avg: 0/0/0 buffer size: 0 vbv_delay: N/A frame= 360 fps= 46 q=-1.0 Lsize= 2910kB time=00:00:05.95 bitrate=4006.4kbits/s speed=0.756x video:2905kB audio:0kB subtitle:0kB other streams:0kB global headers:0kB muxing overhead: 0.172691% [libx264 @ 0x558574067000] frame I:2 Avg QP:19.38 size: 27712 [libx264 @ 0x558574067000] frame P:102 Avg QP:26.12 size: 13303 [libx264 @ 0x558574067000] frame B:256 Avg QP:30.17 size: 6100 [libx264 @ 0x558574067000] consecutive B-frames: 1.4% 8.3% 9.2% 81.1% [libx264 @ 0x558574067000] mb I I16..4: 32.4% 61.0% 6.6% [libx264 @ 0x558574067000] mb P I16..4: 1.6% 1.8% 1.0% P16..4: 4.0% 2.9% 1.5% 0.0% 0.0% skip:87.1% [libx264 @ 0x558574067000] mb B I16..4: 0.7% 0.2% 0.1% B16..8: 8.0% 2.7% 0.6% direct: 0.2% skip:87.6% L0:47.2% L1:46.3% BI: 6.5% [libx264 @ 0x558574067000] 8x8 transform intra:40.7% inter:35.5% [libx264 @ 0x558574067000] coded y,uvDC,uvAC intra: 12.3% 4.8% 4.0% inter: 1.2% 0.6% 0.3% [libx264 @ 0x558574067000] i16 v,h,dc,p: 90% 8% 2% 0% [libx264 @ 0x558574067000] i8 v,h,dc,ddl,ddr,vr,hd,vl,hu: 28% 5% 65% 0% 0% 0% 0% 0% 0% [libx264 @ 0x558574067000] i4 v,h,dc,ddl,ddr,vr,hd,vl,hu: 43% 18% 28% 2% 2% 2% 2% 2% 2% [libx264 @ 0x558574067000] i8c dc,h,v,p: 93% 3% 4% 0% [libx264 @ 0x558574067000] Weighted P-Frames: Y:0.0% UV:0.0% [libx264 @ 0x558574067000] ref P L0: 68.1% 4.3% 19.0% 8.6% [libx264 @ 0x558574067000] ref B L0: 84.8% 12.5% 2.7% [libx264 @ 0x558574067000] ref B L1: 95.1% 4.9% [libx264 @ 0x558574067000] kb/s:3965.31
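###Markdown A quick sanity check on the analytic gradients used above (an optional aside; it redefines the same f, f_w1 and f_w2 so it runs on its own): the partial derivatives can be compared against central finite differences at a few random points. ###Code
import numpy as np

b = 10
f = lambda w1, w2: b * (w2 - w1**2)**2 + (w1 - 1)**2
f_w1 = lambda w1, w2: -4 * b * w1 * (w2 - w1**2) + 2 * (w1 - 1)
f_w2 = lambda w1, w2: 2 * b * (w2 - w1**2)

eps = 1e-6
rng = np.random.default_rng(0)
for _ in range(3):
    w1, w2 = rng.uniform(-1, 1, 2)
    num_w1 = (f(w1 + eps, w2) - f(w1 - eps, w2)) / (2 * eps)  # central difference in w1
    num_w2 = (f(w1, w2 + eps) - f(w1, w2 - eps)) / (2 * eps)  # central difference in w2
    # both analytic partials should agree with the numerical estimates
    print(np.allclose([num_w1, num_w2], [f_w1(w1, w2), f_w2(w1, w2)], atol=1e-4))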
02_MetaLearning/02_B_Eval_Meta_Model_per_Drift_Class.ipynb
###Markdown Evaluate Meta-Model per Drift Class Before fine-tune and after fine-tune! ###Code import arrow import learn2learn as l2l import numpy as np import os import pickle import torch from torch.nn import Module, Linear, Sequential, ReLU from torch.nn.functional import mse_loss from torch.optim import Adam, SGD from torch.utils.data import TensorDataset from sklearn.model_selection import train_test_split import pandas as pd import seaborn as sns import matplotlib.pyplot as plt from utils.evalUtils import print_confusion_matrix from sklearn.preprocessing import MinMaxScaler %run -i ./scripts/setConfigs.py ###Output Set configs.. ###Markdown Read Meta-Model ###Code %run -i ./scripts/ReadSimpleAE_MetaModel.py ###Output Load Meta Model AE.. /home/torge/dev/masterthesis_code/02_Experimente/MetaLearning/models/model_bib/20200319_firstMetaModel.pt SimpleAutoEncoder( (encoder): Sequential( (0): Linear(in_features=17, out_features=12, bias=True) (1): ReLU(inplace=True) (2): Linear(in_features=12, out_features=8, bias=True) (3): Tanh() ) (decoder): Sequential( (0): Linear(in_features=8, out_features=12, bias=True) (1): ReLU(inplace=True) (2): Linear(in_features=12, out_features=17, bias=True) (3): Tanh() ) ) ###Markdown Read LogReg ###Code %run -i ./scripts/ReadLogReg_Meta.py ###Output Load trained LogReg.. LogisticRegression(C=1.0, class_weight={1: 2.0}, dual=False, fit_intercept=True, intercept_scaling=1, l1_ratio=None, max_iter=100, multi_class='auto', n_jobs=None, penalty='l2', random_state=42, solver='liblinear', tol=0.0001, verbose=0, warm_start=False) ###Markdown Prepare the data ###Code data_fn = os.path.join(data_path, 'simulation_data_y_2020_2021_reduced.h5') df_data_train = pd.read_hdf(data_fn, key='df') print('Shape of X_train data: {}'.format(df_data_train.shape)) data_fn = os.path.join(data_path, 'anomalous_drifted_data_y_2023_reduced_more_cos_phi.h5') df_data_drifted_ano = pd.read_hdf(data_fn, key='df') print('Shape of X_drifted,ano data: {}'.format(df_data_drifted_ano.shape)) s_drift_labels = df_data_drifted_ano['drift_labels'] s_drift_labels.reset_index(inplace=True, drop=True) s_ano_labels = df_data_drifted_ano['anomaly_labels'] s_ano_labels.reset_index(inplace=True, drop=True) df_data_drifted_ano.drop('drift_labels', axis=1, inplace=True) df_data_drifted_ano.drop('anomaly_labels', axis=1, inplace=True) print('Shape of X_drifted,ano data: {}'.format(df_data_drifted_ano.shape)) print('Scale data..') scaler_train = MinMaxScaler((-1,1)) scaler_train = scaler_train.fit(df_data_train) scaled_drifted_ano = scaler_train.transform(df_data_drifted_ano.to_numpy()) del(df_data_train) # build tensor from numpy anormal_drifted_torch_tensor = torch.from_numpy(scaled_drifted_ano).type(torch.FloatTensor) ###Output _____no_output_____ ###Markdown Make Predictions vor evaluation ###Code re_drifted_ano = [] for val in anormal_drifted_torch_tensor: loss = meta_model.calc_reconstruction_error(val) re_drifted_ano.append(loss.item()) s_re_drifted_ano = pd.Series(re_drifted_ano) s_re_drifted_ano = s_re_drifted_ano.values.reshape(-1,1) predictions_drifted_ano = [] for val in s_re_drifted_ano: val = val.reshape(1,-1) pred = clf_meta.predict(val) predictions_drifted_ano.append(pred[0]) ###Output _____no_output_____ ###Markdown Build dataset for analysis ###Code df_analyze = pd.DataFrame() df_analyze['anomaly_labels'] = s_ano_labels df_analyze['drift_labels'] = s_drift_labels df_analyze['reconstruction_error'] = s_re_drifted_ano df_analyze['ano_prediction'] = predictions_drifted_ano df_analyze.head() 
###Output _____no_output_____ ###Markdown Split Dataset per Drift Class ###Code df_drift_class_0 = df_analyze[df_analyze['drift_labels'] == 0] df_drift_class_1 = df_analyze[df_analyze['drift_labels'] == 1] df_drift_class_2 = df_analyze[df_analyze['drift_labels'] == 2] df_drift_class_3 = df_analyze[df_analyze['drift_labels'] == 3] df_drift_class_0['reduced_ano_labels'] = [1 if x > 0 else 0 for x in df_drift_class_0['anomaly_labels']] df_drift_class_1['reduced_ano_labels'] = [1 if x > 0 else 0 for x in df_drift_class_1['anomaly_labels']] df_drift_class_2['reduced_ano_labels'] = [1 if x > 0 else 0 for x in df_drift_class_2['anomaly_labels']] df_drift_class_3['reduced_ano_labels'] = [1 if x > 0 else 0 for x in df_drift_class_3['anomaly_labels']] df_drift_class_0.describe() df_drift_class_1.describe() df_drift_class_2.describe() df_drift_class_3.describe() ###Output _____no_output_____ ###Markdown KPIs per Drift Class ###Code from sklearn.metrics import confusion_matrix from sklearn.metrics import accuracy_score from sklearn.metrics import precision_score from mlxtend.plotting import plot_confusion_matrix from utils.evalUtils import calc_cm_metrics cm_dc_0 = confusion_matrix(df_drift_class_0['reduced_ano_labels'], df_drift_class_0['ano_prediction']) tn, fp, fn, tp = cm_dc_0.ravel() accuracy, precision, specifity, sensitivity, f1_score = calc_cm_metrics(tp, tn, fp, fn) print('Accuracy: {}'.format(accuracy)) print('Precision: {}'.format(precision)) print('Specifity: {}'.format(specifity)) print('Sensitivity: {}'.format(sensitivity)) print('F1-Score: {}'.format(f1_score)) fig = print_confusion_matrix(cm_dc_0, class_names=['k. Anomalie', 'Anomalie']) fig.suptitle('Confusion Matrix Drift Class 0 (Keine Manipulation)', fontsize=20) file_n = os.path.join(fig_path, 'cm_drift_class_0.pdf') fig.savefig(file_n, bbox_inches = 'tight', pad_inches = 0 ) cm_dc_1 = confusion_matrix(df_drift_class_1['reduced_ano_labels'], df_drift_class_1['ano_prediction']) tn, fp, fn, tp = cm_dc_1.ravel() accuracy, precision, specifity, sensitivity, f1_score = calc_cm_metrics(tp, tn, fp, fn) print('Accuracy: {}'.format(accuracy)) print('Precision: {}'.format(precision)) print('Specifity: {}'.format(specifity)) print('Sensitivity: {}'.format(sensitivity)) print('F1-Score: {}'.format(f1_score)) fig = print_confusion_matrix(cm_dc_1, class_names=['k. Anomalie', 'Anomalie']) fig.suptitle('Confusion Matrix Drift Class 1 (Switch)', fontsize=20) file_n = os.path.join(fig_path, 'meta_modell_cm_drift_class_1.pdf') fig.savefig(file_n, bbox_inches = 'tight', pad_inches = 0 ) cm_dc_2 = confusion_matrix(df_drift_class_2['reduced_ano_labels'], df_drift_class_2['ano_prediction']) tn, fp, fn, tp = cm_dc_2.ravel() accuracy, precision, specifity, sensitivity, f1_score = calc_cm_metrics(tp, tn, fp, fn) print('Accuracy: {}'.format(accuracy)) print('Precision: {}'.format(precision)) print('Specifity: {}'.format(specifity)) print('Sensitivity: {}'.format(sensitivity)) print('F1-Score: {}'.format(f1_score)) fig = print_confusion_matrix(cm_dc_2, class_names=['k. 
Anomalie', 'Anomalie']) fig.suptitle('Confusion Matrix Drift Class 2 (Load Mapping)', fontsize=20) file_n = os.path.join(fig_path, 'meta_modell_cm_drift_class_2.pdf') fig.savefig(file_n, bbox_inches = 'tight', pad_inches = 0 ) cm_dc_3 = confusion_matrix(df_drift_class_3['reduced_ano_labels'], df_drift_class_3['ano_prediction']) tn, fp, fn, tp = cm_dc_3.ravel() accuracy, precision, specifity, sensitivity, f1_score = calc_cm_metrics(tp, tn, fp, fn) print('Accuracy: {}'.format(accuracy)) print('Precision: {}'.format(precision)) print('Specifity: {}'.format(specifity)) print('Sensitivity: {}'.format(sensitivity)) print('F1-Score: {}'.format(f1_score)) fig = print_confusion_matrix(cm_dc_3, class_names=['k. Anomalie', 'Anomalie']) fig.suptitle('Confusion Matrix Drift Class 3 (Cos Phi)', fontsize=20) file_n = os.path.join(fig_path, 'meta_modell_cm_drift_class_3.pdf') fig.savefig(file_n, bbox_inches = 'tight', pad_inches = 0 ) ###Output _____no_output_____
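###Markdown The metrics above come from calc_cm_metrics in utils.evalUtils, which is not shown in this notebook. As a reference, here is a minimal sketch of what such a helper typically computes from the four confusion-matrix counts; this is an assumed re-implementation for illustration, not the project's actual code (the name specifity mirrors the notebook's variable and stands for specificity). ###Code
def calc_cm_metrics_sketch(tp, tn, fp, fn):
    # standard confusion-matrix metrics, with guards against division by zero
    accuracy = (tp + tn) / (tp + tn + fp + fn)
    precision = tp / (tp + fp) if (tp + fp) else 0.0
    specifity = tn / (tn + fp) if (tn + fp) else 0.0      # true negative rate
    sensitivity = tp / (tp + fn) if (tp + fn) else 0.0    # recall / true positive rate
    f1_score = (2 * precision * sensitivity / (precision + sensitivity)
                if (precision + sensitivity) else 0.0)
    return accuracy, precision, specifity, sensitivity, f1_score

# example with made-up counts
print(calc_cm_metrics_sketch(tp=80, tn=900, fp=20, fn=10))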
scratch work/Yuqing-Data-Merge/df4-Scenario2.ipynb
###Markdown Gradient descent algorithm for Scenario 2In this part, we implement a gradient descent algorithm to optimize the objective loss function in Scenario 2:$$\min F := \min \frac{1}{2(n-1000)} \sum_{i=1000}^n \big(fbpredic(i) + a \cdot tby(i) + b \cdot ffr(i) + c \cdot fta(i) - asp(i)\big)^2$$Gradient descent: $$ \beta_k = \beta_{k-1} - \delta \, \nabla F, $$where the step size $\delta$ controls how far each iteration goes. Detailed planFirst, split the data into train and test sets (roughly 90% and 10% in the code below). For the training part we need the prophet() predicted price, and there are a couple of issues. One is that prophet() cannot predict too far into the future. The other is that we cannot call prophet() too many times, since this takes a lot of time. So we will use a sliding window strategy:1. Split the train data into train_1 and train_2, where train_1 is used as a sliding window to fit prophet() and produce predictions on train_2. Train_2 is used to train the model we proposed above.2. After we have full-size (size of train_2) predictions from prophet(), we use gradient descent to fit the above model, extracting the coefficients of the features to make predictions on the testing data. ###Code import pandas as pd import numpy as np from sklearn.preprocessing import PolynomialFeatures from sklearn.linear_model import LinearRegression from sklearn.preprocessing import FunctionTransformer from numpy import meshgrid ## For plotting import matplotlib.pyplot as plt from matplotlib import style import datetime as dt import seaborn as sns sns.set_style("whitegrid") df = pd.read_csv('df4.csv', parse_dates=['Date']) df = df.rename(columns = {"Date":"ds","Close":"y"}) df # len(df) from datetime import datetime p = 0.95 # Train around 90% of dataset cutoff = int((p*len(df)//100)*100) df_train = df[:cutoff].copy() df_test = df.drop(df_train.index).copy() print(df_train, df_test) ###Output ds y tby ffr fta eps div 0 2003-01-02 909.03 4.07 1.24 732202.0 40.40 1.79 1 2003-01-03 908.59 4.05 1.24 732202.0 40.40 1.79 2 2003-01-06 929.01 4.09 1.24 732202.0 40.40 1.79 3 2003-01-07 922.93 4.04 1.24 732202.0 40.40 1.79 4 2003-01-08 909.93 4.00 1.24 724902.0 40.40 1.79 ... ... ... ... ... ... ... ... 3995 2018-12-24 2351.10 2.74 2.27 4084274.0 138.43 1.96 3996 2018-12-26 2467.70 2.81 2.27 4075636.0 138.43 1.96 3997 2018-12-27 2488.83 2.77 2.27 4075636.0 138.43 1.96 3998 2018-12-28 2485.74 2.72 2.27 4075636.0 138.43 1.96 3999 2018-12-31 2506.85 2.69 2.27 4075636.0 139.58 2.09 [4000 rows x 7 columns] ds y tby ffr fta eps div 4000 2019-01-02 2510.03 2.66 2.40 4058378.0 139.58 2.09 4001 2019-01-03 2447.89 2.56 2.40 4058378.0 139.58 2.09 4002 2019-01-04 2531.94 2.67 2.40 4058378.0 139.58 2.09 4003 2019-01-07 2549.69 2.70 2.40 4058378.0 139.58 2.09 4004 2019-01-08 2574.41 2.73 2.40 4058378.0 139.58 2.09 ... ... ... ... ... ... ... ... 4515 2021-01-25 3855.36 1.05 0.09 7414942.0 95.72 1.58 4516 2021-01-26 3849.62 1.05 0.09 7414942.0 95.72 1.58 4517 2021-01-27 3750.77 1.04 0.09 7404926.0 95.72 1.58 4518 2021-01-28 3787.38 1.07 0.09 7404926.0 95.72 1.58 4519 2021-01-29 3714.24 1.11 0.09 7404926.0 95.72 1.58 [520 rows x 7 columns] ###Markdown Use prophet() to make predictions: we split the training data into train_1 and train_2 (here the first 1000 of the 4000 training rows seed train_1), fit prophet() on train_1 as a growing window, then predict on train_2. Once we have the predictions, we feed the data into the Scenario 2 model and train it again to get the parameters a, b, c, ... 
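###Markdown Before the sliding-window prophet code below, here is a minimal sketch of the gradient descent update described by the formula above. All arrays here are synthetic placeholders standing in for fbpredic(i), tby(i), ffr(i), fta(i) and asp(i), not the notebook's real columns; the notebook itself ends up fitting the same coefficients with LinearRegression, which solves this least-squares problem in closed form. ###Code
import numpy as np

rng = np.random.default_rng(42)
m = 500                                  # number of training rows (placeholder)
X = rng.normal(size=(m, 3))              # columns stand in for tby, ffr, fta
fb = rng.normal(size=m)                  # stand-in for the prophet prediction fbpredic(i)
true_beta = np.array([0.5, -1.0, 2.0])
asp = fb + X @ true_beta + 0.1 * rng.normal(size=m)   # stand-in for the actual price asp(i)

beta = np.zeros(3)                       # coefficients a, b, c
delta = 0.05                             # step size
for _ in range(2000):
    residual = fb + X @ beta - asp       # fbpredic + a*tby + b*ffr + c*fta - asp
    grad = X.T @ residual / m            # gradient of F
    beta -= delta * grad                 # beta_k = beta_{k-1} - delta * grad F
print(beta)                              # should end up close to [0.5, -1.0, 2.0]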
###Code #prophet part from fbprophet import Prophet start = 1000 # the number of initial data for training pred_size =100 # predicted periods num_winds = int((df_train.shape[0]-start)/pred_size) #(4000-3000)/100 =30 pro_pred = [] # use accumulated data to predict the next pred_size data for i in range(num_winds): tmp_train = df_train.iloc[: start+ i*pred_size].copy() fbp = Prophet(daily_seasonality=True) # fit close price using fbprophet model fbp.fit(tmp_train[['ds','y']]) # predict pred_size futures and get the forecast price fut = fbp.make_future_dataframe(periods = pred_size,) tmp_forecast = fbp.predict(fut) # only require the forcast on test data of temporary training data pred = tmp_forecast[start+ i*pred_size:].yhat pro_pred.append(pred) pro_pred flat_pro_pred = [item for l1 in pro_pred for item in l1] df.columns def powerset_no_empty(s): power_set = [] x = len(s) for i in range(1 << x): power_set.append([s[j] for j in range(x) if (i & (1 << j))]) return power_set[1:] possible_features = powerset_no_empty(['tby', 'ffr', 'fta', 'eps', 'div']) print(len(possible_features)) possible_features from sklearn.linear_model import LinearRegression reg = LinearRegression(fit_intercept=False, normalize=True, copy_X = True) reg.fit(df_train[start:cutoff][possible_features[30]], df_train[start:cutoff]['y'] - flat_pro_pred) coef = [] t=30 for i in range(len(possible_features[t])): coef.append(np.round(reg.coef_[i],5)) print(coef) # Forecast the Test Data from fbprophet import Prophet test_time = int((1-p)* len(df)) fbp = Prophet(daily_seasonality=True) fbp.fit(df_train[['ds','y']]) fut = fbp.make_future_dataframe(periods = test_time,) forecast = fbp.predict(fut) pred_test = forecast[cutoff:cutoff+test_time].yhat pred_test = pred_test.ravel() len(pred_test) pp_test = pred_test.copy() # predicted price on testing data pp_train = flat_pro_pred.copy() # predicted price on training data for i in range(len(possible_features[t])): pp_test += coef[i] * df_test[df_test.columns[i+2]][:test_time].ravel() pp_train += coef[i] * df_train[df_train.columns[i+2]][start:].ravel() from sklearn.metrics import mean_squared_error as MSE # MSE for test data # Actual close price: df_test[:test_time].y # Predicted price by prophet: pred_test # Predicted price by tuning mse1 = MSE(df_test[:test_time].y,pred_test) # mse2 = MSE(df_test[:test_time].y, pp_test) print(mse1,mse2) # MSE for train data mse3 = MSE(df_train[start:].y, flat_pro_pred) mse4 = MSE(df_train[start:].y, pp_train) print(mse3,mse4) flat_pro_pred # df_train['pp']=pd.Series(np.append([np.nan for i in range(1000)], pp_train)) # plt.figure(figsize=(11,6)) # # plt.plot(range(1000,4000),df[1000:4000].fbsp,label='fb predicted price on test_data') # plt.plot(range(1000,4000),df_train[1000:].pp,label="fitted values by our model") # plt.plot(range(1000,4000), df_train[1000:].y ,label='ture price value') # plt.legend(fontsize=13) # plt.title("Fitting on the training data",fontsize=18) # plt.figure(figsize=(11,6)) # plt.plot(range(0,test_time),pd.Series(pred_test),label='fb predicted price on test_data') # plt.plot(range(0,test_time),pd.Series(pp_test),label='fitted value on test_data') # plt.plot(range(0,test_time), df_test[:test_time].y,label='true price value on test') # plt.legend(fontsize=13) # plt.title("Prediction on the testing data",fontsize=18) from sklearn.linear_model import LinearRegression reg = LinearRegression(normalize=True, copy_X = True) def get_X_y(df,features,target): # Returns X then y return np.array(df[features]), np.array(df[target]) from 
sklearn.metrics import mean_squared_error as MSE from sklearn.base import clone # clone() is needed below to copy the LinearRegression estimator def get_mse(model, X, y): # get the prediction pred = model.predict(X) # Returns the mse return MSE(pred,y) # note: still scratch work; this assumes a 'diff' target column has been added to df_train, and get_X_y expects one feature list (e.g. possible_features[30]) rather than the whole powerset X, y = get_X_y(df_train, possible_features[30], 'diff') clone_reg = clone(reg) clone_reg.fit(X,y) pred_ = clone_reg.predict(X) mse = get_mse(clone_reg, X, y) ###Output _____no_output_____
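###Markdown The cells above fit only the hard-coded subset possible_features[30]. A natural extension, sketched here under the assumption that the residual target df_train['y'] - flat_pro_pred is used for every subset, is to loop over all subsets and keep the one with the lowest training MSE; selecting on training error favours larger subsets, so in practice a validation split would be preferable. ###Code
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error as MSE

# what the linear correction has to explain on top of the prophet forecast
residual_target = df_train[start:cutoff]['y'] - flat_pro_pred

best_mse, best_subset = float('inf'), None
for subset in possible_features:
    model = LinearRegression(fit_intercept=False)
    model.fit(df_train[start:cutoff][subset], residual_target)
    mse = MSE(residual_target, model.predict(df_train[start:cutoff][subset]))
    if mse < best_mse:
        best_mse, best_subset = mse, subset

print(best_subset, best_mse)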
Notebooks/Time_Series.ipynb
###Markdown Time SeriesTimestamps, periods, intervals ###Code import numpy as np import pandas as pd np.random.seed(12345) import matplotlib.pyplot as plt plt.rc('figure', figsize=(10, 6)) PREVIOUS_MAX_ROWS = pd.options.display.max_rows pd.options.display.max_rows = 20 np.set_printoptions(precision=4, suppress=True) ###Output _____no_output_____ ###Markdown Operations with dates ###Code from datetime import datetime agora = datetime.now() utc = datetime.utcnow() agora.year agora.month agora.day agora # change the time agora.replace(minute=10, hour=10, second=10) # define a date evento = datetime(2021, 8, 1) evento # the difference between dates returns a timedelta object delta = agora - evento print(delta) print(f"Desde o evento já se passaram {delta.days} dias e {delta.seconds // 3600} horas.") # addition returns a new date and subtraction returns a delta evento + delta # day of the week # use a tuple because this list is immutable dias = ("Segunda", "Terça", "Quarta", "Quinta", "Sexta", "Sabado", "Domingo") print(dias[agora.weekday()]) print(f"Dia que nasci: {dias[datetime(day=14, month=10, year=1985).weekday()]}") ###Output Quarta Dia que nasci: Segunda ###Markdown Converting between string and datetime strftime and strptime ###Code # date to > string print(f"Hoje (sem formatação): {agora}") hoje_formatado = agora.strftime("%d/%m/%Y") print(f"Hoje (formatado dd/mm/aaaa): {hoje_formatado}") hoje_formatado = agora.strftime("%m/%d/%y") print(f"Hoje (formatado mm/dd/aa): {hoje_formatado}") hoje_formatado = agora.strftime("%d/%B/%y") print(f"Hoje (formatado): {hoje_formatado}") # String to > date. It is different from strftime(): strftime is a method of the object and strptime is a method of the datetime class nascimento = datetime.strptime("10/31/2021", "%m/%d/%Y") print(nascimento) print(type(nascimento)) # converts a list of dates in str format datestrs = ['7/6/2011', '8/6/2011', '18/10/2021'] datestrs = [datetime.strptime(dt, "%d/%m/%Y") for dt in datestrs] datestrs ###Output _____no_output_____ ###Markdown parse ###Code # Parse: an alternative that simplifies mapping the strs to dates without passing the format from dateutil.parser import parse # performs the conversion even with different kinds of formats datesparse = ['7/06/2011', '8/16/2011', '18/10/2021', 'Jan 31, 1997 10:45 PM', None] # dayfirst sets the recognition pattern # even when the day is in the month position it recognizes that it is greater than 12 and treats it as a day # the None values must be handled, otherwise it raises an error datesparse = [parse(dt, dayfirst=True) for dt in datesparse if dt != None] datesparse ###Output _____no_output_____ ###Markdown to_datetime ###Code # parsing with to_datetime works the same way, but it handles errors, inserting NaT (Not a Time) # returns a DatetimeIndex object with some additional properties dates_todatetime = ['2/01/2011', '7/06/2011', '8/16/2011', '8/16/2011', '18/10/2021', 'Jan 31, 1997 10:45 PM', None] dates_todatetime = pd.to_datetime(dates_todatetime) dates_todatetime.name = "Indice_DateTime" # additional properties of an index print(dates_todatetime) print("---------------------") print(type(dates_todatetime)) print(dates_todatetime.is_unique) print(dates_todatetime.name) print("---------------------") print(pd.isnull(dates_todatetime)) print("---------------------") # handling the last value, which is NaT, with fillna dates_todatetime = dates_todatetime.fillna(datetime.now()) dates_todatetime ###Output DatetimeIndex(['2011-02-01 00:00:00', '2011-07-06 00:00:00', '2011-08-16 00:00:00', '2011-08-16 00:00:00', '2021-10-18 00:00:00', '1997-01-31 22:45:00', 'NaT'], dtype='datetime64[ns]', name='Indice_DateTime', freq=None) --------------------- <class 'pandas.core.indexes.datetimes.DatetimeIndex'> False Indice_DateTime --------------------- [False False False False False False True] --------------------- ###Markdown Time SeriesTime series are series indexed by dates ###Code dates_list = [datetime(2011, 2, 1), datetime(2011, 1, 5), datetime(2011, 1, 7), datetime(2011, 1, 8), datetime(2011, 1, 10), datetime(2011, 1, 12)] # series indexed by dates time_series = pd.Series(np.random.randn(6), index=dates_list, name="time_series") time_series.index # creating a new ts using the list of dates created earlier. time_series2 = pd.Series(np.random.randn(7), index=dates_todatetime, name="time_series2") time_series2.index ###Output _____no_output_____ ###Markdown operations between series indexed by dates ###Code # operations between series indexed by dates # when aligning the series by their index # the keys are paired and the sum is computed ts_soma = time_series + time_series2 # on 2011-02-01 there is a match between the keys ts_soma.dropna() #ts_soma[ts_soma.notnull()] ###Output _____no_output_____ ###Markdown Filtering time series (slice) ###Code time_series # We can use an interpretable string # Filter only the dates greater than 2011-01-07 time_series['2011-01-07':] # Filter two specific days time_series[['2011-01-07', '2011-01-12']] # Filter using an implicit sequential index time_series[2:4] # in longer time series a year or a month can be used as a period filter # create a long series idx_datarange = pd.date_range(start="2020-01-01", end="2021-12-31") time_series_grande = pd.Series(np.arange(len(idx_datarange)), index=idx_datarange) time_series_grande # filter all the days of the year 2021 time_series_grande["2021"] # filter one month of the year time_series_grande["2021-12"] # filter a period time_series_grande["2021-12-01": "2021-12-05"] # removes all rows "after" 2020-12-31 time_series_grande.truncate(after="2020-12-31") ###Output _____no_output_____ ###Markdown Duplicate date indices ###Code dates = pd.DatetimeIndex(['1/1/2000', '1/2/2000', '1/2/2000', '1/2/2000', '1/3/2000']) dup_ts = pd.Series(np.arange(5), index=dates) dup_ts # has duplicate index values dup_ts.index.is_unique # aggregation to identify the degree of duplication, in order to handle or aggregate it dup_ts.groupby(level=0).count() # updates the series with only the aggregated values. Aggregates the duplicates using the mean dup_ts = dup_ts.groupby(level=0).mean() print(f"O índice é unico? {dup_ts.is_unique}\n") dup_ts ###Output O índice é unico? True ###Markdown Date Ranges ###Code # creating a range from a start date. The dates move forward pd.date_range(start='2012-04-01', periods=20) # creating a date range from the end date. The dates go backward pd.date_range(end='2012-06-01', periods=20) # Creating a range using a period. freq="D" gives one date per day in the period date_index = pd.date_range(start="2020-01-01", end="2021-12-31", freq="D") date_index # changing to freq="BM", the last business day of each month in the period date_index = pd.date_range(start="2020-01-01", end="2021-12-31", freq="BM") date_index # normalize=True removes the time component from dates that have one date_index = pd.date_range(start=datetime.now(), periods=20, normalize=True) date_index # every 1h30min date_index = pd.date_range('2000-01-01', periods=10, freq='1h30min') date_index ###Output _____no_output_____ ###Markdown Time Zone ###Code import pytz # pytz.common_timezones # list of timezones, last 5 print("US/Eastern" in pytz.common_timezones[-5:]) print(pytz.common_timezones[-5:]) # Brazil [tz for tz in pytz.common_timezones if tz.startswith("America/Sao_Paulo")] # the timezone is None by default print(date_index.tz) # the UTC timezone is the central +0; any change adds to or subtracts from it date_index = pd.date_range('2000-01-01', periods=10, freq='1h30min', tz="UTC") date_index # the Brazil timezone is offset by 2 hours from the central one here date_index = pd.date_range('2000-01-01', periods=10, freq='1h30min', tz="America/Sao_Paulo") date_index # converts from one timezone to another region (Brazil > New York) # -5 hours timezone date_index.tz_convert("America/New_York") ###Output _____no_output_____ ###Markdown Timestamp ###Code # creating, localizing and converting to another utc stamp = pd.Timestamp('2011-03-12 04:00') stamp_utc = stamp.tz_localize('utc') stamp_utc.tz_convert('America/New_York') ###Output _____no_output_____ ###Markdown Resampling and frequency conversionConverting a time series from one frequency to another- downsampling: higher frequencies to lower ones- upsampling: lower frequencies to higher ones (a short sketch follows at the end of this notebook) Downsampling: resample() ###Code # daily frequency data_range_resample = pd.date_range('2000-01-01', periods=100, freq='D') ts = pd.Series(np.random.randn(len(data_range_resample)), index=data_range_resample) # similar to groupby. Groups at the month level and applies a mean aggregation ts.resample('M').mean() # ts.resample('M', kind='period').mean() ###Output _____no_output_____
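###Markdown As referenced in the resampling section above, a small upsampling sketch: going from a lower to a higher frequency, resample() only creates the new slots, so you have to decide how to fill them, for example leaving them as NaN with asfreq() or propagating the last value with ffill(). ###Code
# monthly series upsampled to daily frequency
monthly = pd.Series([1.0, 2.0, 3.0],
                    index=pd.date_range('2021-01-31', periods=3, freq='M'))
daily_nan = monthly.resample('D').asfreq()    # new daily slots left as NaN
daily_ffill = monthly.resample('D').ffill()   # propagate the last monthly value forward
print(daily_nan.head())
print(daily_ffill.head())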
app/notebooks/metal/shot_detection_weak_labels_downsampled.ipynb
###Markdown Train Metal LabelModel ###Code L_train_path = '/lfs/1/danfu/esper/app/data/shot_detection_weak_labels/L_train_100_windows_downsampled.npz' L_dev_path = '/lfs/1/danfu/esper/app/data/shot_detection_weak_labels/L_val_windows_downsampled.npz' Y_dev_path = '/lfs/1/danfu/esper/app/data/shot_detection_weak_labels/Y_val_windows_downsampled.npy' L_test_path = '/lfs/1/danfu/esper/app/data/shot_detection_weak_labels/L_test_windows_downsampled.npz' Y_test_path = '/lfs/1/danfu/esper/app/data/shot_detection_weak_labels/Y_test_windows_downsampled.npy' stride = 1 L_train = sp.sparse.load_npz(L_train_path).todense()[::stride] L_dev = sp.sparse.load_npz(L_dev_path).todense() Y_dev = np.load(Y_dev_path) L_test = sp.sparse.load_npz(L_test_path).todense() Y_test = np.load(Y_test_path) label_model = LabelModel(k=2, seed=123) label_model.train_model(L_train, class_balance=(0.15, 0.85), n_epochs=500, log_train_every=50) label_model.score((L_dev, Y_dev), metric=['accuracy','precision', 'recall', 'f1']) ###Output Computing O... Estimating \mu... [50 epo]: TRAIN:[loss=0.059] [100 epo]: TRAIN:[loss=0.043] [150 epo]: TRAIN:[loss=0.040] [200 epo]: TRAIN:[loss=0.039] [250 epo]: TRAIN:[loss=0.039] [300 epo]: TRAIN:[loss=0.038] [350 epo]: TRAIN:[loss=0.038] [400 epo]: TRAIN:[loss=0.038] [450 epo]: TRAIN:[loss=0.038] [500 epo]: TRAIN:[loss=0.038] Finished Training Accuracy: 0.954 Precision: 0.943 Recall: 0.726 F1: 0.821 y=1 y=2 l=1 199 12 l=2 75 1600 ###Markdown Tune Metal LabelModel ###Code from metal.tuners.random_tuner import RandomSearchTuner #label_model_everything_windows_tuned = LabelModel(k=2, seed=123) random_tuner = RandomSearchTuner(LabelModel, seed=123, validation_metric='f1') search_space = { 'seed' : [123], 'n_epochs': list(range(500, 2000, 100)), 'lr': {'range': [1e-5, .1], 'scale': 'log'}, 'l2': {'range': [1e-5, .1], 'scale': 'log'}, 'log_train_every': [100], 'class_balance': [ (i * .1, 1 - i * .1) for i in range(1, 10) ] # 'Y_dev': [Y_test_windows] } best_random_model = random_tuner.search(search_space, (L_dev, Y_dev), train_args= [L_train], train_kwargs = { # 'Y_dev': Y_test_windows # 'class_balance': (0.2, 0.8) }, init_kwargs={ 'k': 2 }, verbose=True) best_random_model.score((L_dev, Y_dev), metric=['accuracy','precision', 'recall', 'f1']) best_random_model.score((L_test, Y_test), metric=['accuracy','precision', 'recall', 'f1']) ###Output Accuracy: 0.908 Precision: 0.941 Recall: 0.597 F1: 0.730 y=1 y=2 l=1 222 14 l=2 150 1392 ###Markdown Save/Load Best Model ###Code torch.save(best_random_model, 'models/metal_labelmodel_downsampled.pth') model = torch.load('models/metal_labelmodel_downsampled.pth') ###Output _____no_output_____ ###Markdown Make Predictions for Everything and Save to Disk ###Code import numpy as np from scipy.sparse import csr_matrix import scipy.sparse as sparse import pickle import rekall from rekall.video_interval_collection import VideoIntervalCollection from rekall.interval_list import IntervalList from rekall.temporal_predicates import * from metal.label_model.baselines import MajorityLabelVoter ###Output _____no_output_____ ###Markdown Load Manually Annotated Data ###Code with open('../../data/manually_annotated_shots.pkl', 'rb') as f: shots = VideoIntervalCollection(pickle.load(f)) with open('../../data/shot_detection_folds.pkl', 'rb') as f: shot_detection_folds = pickle.load(f) clips = shots.dilate(1).coalesce().dilate(-1) shot_boundaries = shots.map( lambda intrvl: (intrvl.start, intrvl.start, intrvl.payload) ).set_union( shots.map(lambda intrvl: (intrvl.end + 1, 
intrvl.end + 1, intrvl.payload)) ).coalesce() boundary_frames = { video_id: [ intrvl.start for intrvl in shot_boundaries.get_intervallist(video_id).get_intervals() ] for video_id in shot_boundaries.get_allintervals() } video_ids = sorted(list(clips.get_allintervals().keys())) frames_per_video = { video_id: sorted([ f for interval in clips.get_intervallist(video_id).get_intervals() for f in range(interval.start, interval.end + 2) ]) for video_id in video_ids } ground_truth = { video_id: [ 1 if f in boundary_frames[video_id] else 2 for f in frames_per_video[video_id] ] for video_id in video_ids } ###Output 100%|██████████| 28/28 [00:00<00:00, 10820.02it/s] 100%|██████████| 28/28 [00:00<00:00, 33776.39it/s] ###Markdown Load Label Matrix with All Frames in it ###Code with open('../../data/shot_detection_weak_labels/all_labels.pkl', 'rb') as f: weak_labels_all_movies = pickle.load(f) ###Output _____no_output_____ ###Markdown Load Videos and Number of Frames Per Video ###Code with open('../../data/frame_counts.pkl', 'rb') as f: frame_counts = pickle.load(f) video_ids_all = sorted(list(frame_counts.keys())) video_ids_train = sorted(list(set(video_ids_all).difference(set(video_ids)))) ###Output _____no_output_____ ###Markdown Construct windows for each video ###Code # First, construct windows of 16 frames for each video windows = VideoIntervalCollection({ video_id: [ (f, f + 16, video_id) for f in range(0, frame_counts[video_id] - 16, 16) ] for video_id in video_ids_all }) ###Output _____no_output_____ ###Markdown Get weak labels for all windows ###Code # Label windows with the weak labels in our labeling functions def label_window(per_frame_weak_labels): if 1 in per_frame_weak_labels: return 1 if len([l for l in per_frame_weak_labels if l == 2]) >= len(per_frame_weak_labels) / 2: return 2 return 0 windows_with_weak_labels = windows.map( lambda window: ( window.start, window.end, [ label_window([ lf[window.payload][f-1] for f in range(window.start, window.end) ]) for lf in weak_labels_all_movies ] ) ) ###Output _____no_output_____ ###Markdown L matrix ###Code L_everything_windows = csr_matrix([ intrvl.payload for video_id in sorted(list(video_ids_all)) for intrvl in windows_with_weak_labels.get_intervallist(video_id).get_intervals() ]).todense() with open('../../data/shot_detection_weak_labels/L_everything_windows_downsampled.npy', 'wb') as f: np.save(f, L_everything_windows) with open('../../data/shot_detection_weak_labels/L_everything_windows_downsampled.npy', 'rb') as f: L_everything_windows = np.load(f) ###Output _____no_output_____ ###Markdown Predict Everything ###Code L_everything_windows.shape window_predictions_everything = model.predict_proba(L_everything_windows) window_predictions_everything.shape fig, ax = plt.subplots() ax.hist([ pred[0] for pred in window_predictions_everything ], bins=20) ax.set_xlim(0, 1) plt.show() window_nums = [ (video_id, intrvl.start, intrvl.end) for video_id in sorted(list(video_ids_all)) for intrvl in windows_with_weak_labels.get_intervallist(video_id).get_intervals() ] predictions_to_save_windows = [ (window_info, prediction) for window_info, prediction in zip(window_nums, window_predictions_everything) ] preds_np_windows = np.array(predictions_to_save_windows) preds_np_windows.shape # save predictions to disk with open('../../data/shot_detection_weak_labels/noisy_labels_all_windows_downsampled.npy', 'wb') as f: np.save(f, preds_np_windows) ###Output _____no_output_____
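###Markdown One detail worth noting about the file saved above (a usage sketch, assuming the same path): preds_np_windows is an object array, since each row pairs a (video_id, start, end) tuple with a probability array, so reading it back requires allow_pickle=True. ###Code
with open('../../data/shot_detection_weak_labels/noisy_labels_all_windows_downsampled.npy', 'rb') as f:
    loaded = np.load(f, allow_pickle=True)   # object array => allow_pickle is required

# each entry is ((video_id, start_frame, end_frame), class_probabilities)
(video_id, start_frame, end_frame), probs = loaded[0]
print(video_id, start_frame, end_frame, probs)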
Copia di l08c03_moving_average.ipynb
###Markdown Copyright 2018 The TensorFlow Authors. ###Code #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ###Output _____no_output_____ ###Markdown Moving average Run in Google Colab View source on GitHub Setup ###Code import numpy as np import matplotlib.pyplot as plt import tensorflow as tf keras = tf.keras def plot_series(time, series, format="-", start=0, end=None, label=None): plt.plot(time[start:end], series[start:end], format, label=label) plt.xlabel("Time") plt.ylabel("Value") if label: plt.legend(fontsize=14) plt.grid(True) def trend(time, slope=0): return slope * time def seasonal_pattern(season_time): """Just an arbitrary pattern, you can change it if you wish""" return np.where(season_time < 0.4, np.cos(season_time * 2 * np.pi), 1 / np.exp(3 * season_time)) def seasonality(time, period, amplitude=1, phase=0): """Repeats the same pattern at each period""" season_time = ((time + phase) % period) / period return amplitude * seasonal_pattern(season_time) def white_noise(time, noise_level=1, seed=None): rnd = np.random.RandomState(seed) return rnd.randn(len(time)) * noise_level ###Output _____no_output_____ ###Markdown Trend and Seasonality ###Code time = np.arange(4 * 365 + 1) slope = 0.05 baseline = 10 amplitude = 40 series = baseline + trend(time, slope) + seasonality(time, period=365, amplitude=amplitude) noise_level = 5 noise = white_noise(time, noise_level, seed=42) series += noise plt.figure(figsize=(10, 6)) plot_series(time, series) plt.show() ###Output _____no_output_____ ###Markdown Naive Forecast ###Code split_time = 1000 time_train = time[:split_time] x_train = series[:split_time] time_valid = time[split_time:] x_valid = series[split_time:] naive_forecast = series[split_time - 1:-1] plt.figure(figsize=(10, 6)) plot_series(time_valid, x_valid, start=0, end=150, label="Series") plot_series(time_valid, naive_forecast, start=1, end=151, label="Forecast") ###Output _____no_output_____ ###Markdown Now let's compute the mean absolute error between the forecasts and the predictions in the validation period: ###Code keras.metrics.mean_absolute_error(x_valid, naive_forecast).numpy() ###Output _____no_output_____ ###Markdown That's our baseline, now let's try a moving average. Moving Average ###Code def moving_average_forecast(series, window_size): """Forecasts the mean of the last few values. If window_size=1, then this is equivalent to naive forecast""" forecast = [] for time in range(len(series) - window_size): forecast.append(series[time:time + window_size].mean()) return np.array(forecast) def moving_average_forecast(series, window_size): """Forecasts the mean of the last few values. 
If window_size=1, then this is equivalent to naive forecast This implementation is *much* faster than the previous one""" mov = np.cumsum(series) mov[window_size:] = mov[window_size:] - mov[:-window_size] return mov[window_size - 1:-1] / window_size moving_avg = moving_average_forecast(series, 30)[split_time - 30:] plt.figure(figsize=(10, 6)) plot_series(time_valid, x_valid, label="Series") plot_series(time_valid, moving_avg, label="Moving average (30 days)") keras.metrics.mean_absolute_error(x_valid, moving_avg).numpy() ###Output _____no_output_____ ###Markdown That's worse than naive forecast! The moving average does not anticipate trend or seasonality, so let's try to remove them by using differencing. Since the seasonality period is 365 days, we will subtract the value at time *t* – 365 from the value at time *t*. ###Code diff_series = (series[365:] - series[:-365]) diff_time = time[365:] plt.figure(figsize=(10, 6)) plot_series(diff_time, diff_series, label="Series(t) – Series(t–365)") plt.show() ###Output _____no_output_____ ###Markdown Focusing on the validation period: ###Code plt.figure(figsize=(10, 6)) plot_series(time_valid, diff_series[split_time - 365:], label="Series(t) – Series(t–365)") plt.show() ###Output _____no_output_____ ###Markdown Great, the trend and seasonality seem to be gone, so now we can use the moving average: ###Code diff_moving_avg = moving_average_forecast(diff_series, 50)[split_time - 365 - 50:] plt.figure(figsize=(10, 6)) plot_series(time_valid, diff_series[split_time - 365:], label="Series(t) – Series(t–365)") plot_series(time_valid, diff_moving_avg, label="Moving Average of Diff") plt.show() ###Output _____no_output_____ ###Markdown Now let's bring back the trend and seasonality by adding the past values from t – 365: ###Code diff_moving_avg_plus_past = series[split_time - 365:-365] + diff_moving_avg plt.figure(figsize=(10, 6)) plot_series(time_valid, x_valid, label="Series") plot_series(time_valid, diff_moving_avg_plus_past, label="Forecasts") plt.show() keras.metrics.mean_absolute_error(x_valid, diff_moving_avg_plus_past).numpy() ###Output _____no_output_____ ###Markdown Better than naive forecast, good. However the forecasts look a bit too random, because we're just adding past values, which were noisy. Let's use a moving averaging on past values to remove some of the noise: ###Code diff_moving_avg_plus_smooth_past = moving_average_forecast(series[split_time - 370:-359], 11) + diff_moving_avg plt.figure(figsize=(10, 6)) plot_series(time_valid, x_valid, label="Series") plot_series(time_valid, diff_moving_avg_plus_smooth_past, label="Forecasts") plt.show() keras.metrics.mean_absolute_error(x_valid, diff_moving_avg_plus_smooth_past).numpy() ###Output _____no_output_____
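###Markdown As an optional cross-check of the cumulative-sum implementation above (this aside assumes pandas is available, which the rest of the notebook does not need): each forecast should equal the plain rolling mean of the previous window_size values. ###Code
import pandas as pd

window_size = 30
fast = moving_average_forecast(series, window_size)
# rolling mean of the previous window_size values, aligned to the same forecast positions
slow = pd.Series(series).rolling(window_size).mean().to_numpy()[window_size - 1:-1]
print(np.allclose(fast, slow))   # expected: True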
notebook/PixieDust 4 - Add External Spark Packages.ipynb
###Markdown Add Spark packages and run inside your notebookPixieDust PackageManager lets you install spark packages inside your notebook. This is especailly useful when you're working in a hosted cloud environment without access to configuration files. Use PixieDust Package Manager to install:- a spark package from spark-packages.org- from maven search repository- a jar file directly from URL> **Note:** After you install a package, you must restart the kernel. View list of packagesTo see the packages installed on your system, run the following command: ###Code import pixiedust pixiedust.printAllPackages() ###Output _____no_output_____ ###Markdown Add a package from spark-packages.orgRun the following cell to install GraphFrames. ###Code pixiedust.installPackage("graphframes:graphframes:0") ###Output _____no_output_____ ###Markdown Restart your kernelFrom the menu at the top of this notebook, choose **Kernel > Restart**, then run the next cell. View updated list of packagesRun printAllPackages again to see that GraphFrames is now in your list: ###Code pixiedust.printAllPackages() ###Output _____no_output_____ ###Markdown Display a GraphFrames data sampleGraphGrames comes with sample data sets. Even if GraphFrames is already installed, running the install command loads the Python that comes along with the package and enables features like the one you're about to see. Run the following cell and PixieDust displays a sample graph data set called **friends**. On the upper left of the display, click the table dropdown and switch between views of nodes and edges. ###Code #import the Graphs example from graphframes.examples import Graphs #create the friends example graph g=Graphs(sqlContext).friends() #use the pixiedust display display(g) ###Output _____no_output_____ ###Markdown Install from mavenTo install a package from [Maven](https://maven.apache.org/), visist the project and find its `groupId` and `artifactId`, then enter it in the following install command. [Read more](https://pixiedust.github.io/pixiedust/packagemanager.htmlinstall-from-maven-search-repository). For example, the following cell installs Apache Commons: ###Code pixiedust.installPackage("org.apache.commons:commons-csv:0") ###Output _____no_output_____ ###Markdown Install a jar file directly from a URL To install a jar file that is not packaged in a maven repository, provide its URL. ###Code pixiedust.installPackage("https://github.com/ibm-watson-data-lab/spark.samples/raw/master/dist/streaming-twitter-assembly-1.6.jar") ###Output _____no_output_____ ###Markdown Follow the tutorialTo understand what you can do with this jar file, read David Taieb's latest [Realtime Sentiment Analysis of Twitter Hashtags with Spark](https://medium.com/ibm-watson-data-lab/real-time-sentiment-analysis-of-twitter-hashtags-with-spark-7ee6ca5c1585.2iblfu58c) tutorial. Uninstall a packageIt's just as easy to get rid of a package you installed. Just run the command `pixiedust.uninstallPackage(">")`. For example, you can uninstall Apache Commons: ###Code pixiedust.uninstallPackage("org.apache.commons:commons-csv:0") ###Output _____no_output_____ ###Markdown Add Spark packages and run inside your notebookPixieDust PackageManager lets you install spark packages inside your notebook. This is especailly useful when you're working in a hosted cloud environment without access to configuration files. 
Use PixieDust Package Manager to install:- a spark package from spark-packages.org- from maven search repository- a jar file directly from URL> **Note:** After you install a package, you must restart the kernel. View list of packagesTo see the packages installed on your system, run the following command: ###Code import pixiedust pixiedust.printAllPackages() ###Output _____no_output_____ ###Markdown Add a package from spark-packages.orgRun the following cell to install GraphFrames. ###Code pixiedust.installPackage("graphframes:graphframes:0") ###Output _____no_output_____ ###Markdown Restart your kernelFrom the menu at the top of this notebook, choose **Kernel > Restart**, then run the next cell. View updated list of packagesRun printAllPackages again to see that GraphFrames is now in your list: ###Code pixiedust.printAllPackages() ###Output _____no_output_____ ###Markdown Display a GraphFrames data sampleGraphGrames comes with sample data sets. Even if GraphFrames is already installed, running the install command loads the Python that comes along with the package and enables features like the one you're about to see. Run the following cell and PixieDust displays a sample graph data set called **friends**. On the upper left of the display, click the table dropdown and switch between views of nodes and edges. ###Code #import the Graphs example from graphframes.examples import Graphs #create the friends example graph g=Graphs(sqlContext).friends() #use the pixiedust display display(g) ###Output _____no_output_____ ###Markdown Install from mavenTo install a package from [Maven](https://maven.apache.org/), visist the project and find its `groupId` and `artifactId`, then enter it in the following install command. [Read more](https://ibm-watson-data-lab.github.io/pixiedust/packagemanager.htmlinstall-from-maven-search-repository). For example, the following cell installs Apache Commons: ###Code pixiedust.installPackage("org.apache.commons:commons-csv:0") ###Output _____no_output_____ ###Markdown Install a jar file directly from a URL To install a jar file that is not packaged in a maven repository, provide its URL. ###Code pixiedust.installPackage("https://github.com/ibm-watson-data-lab/spark.samples/raw/master/dist/streaming-twitter-assembly-1.6.jar") ###Output _____no_output_____ ###Markdown Follow the tutorialTo understand what you can do with this jar file, read David Taieb's latest [Realtime Sentiment Analysis of Twitter Hashtags with Spark](https://medium.com/ibm-watson-data-lab/real-time-sentiment-analysis-of-twitter-hashtags-with-spark-7ee6ca5c1585.2iblfu58c) tutorial. Uninstall a packageIt's just as easy to get rid of a package you installed. Just run the command `pixiedust.uninstallPackage(">")`. For example, you can uninstall Apache Commons: ###Code pixiedust.uninstallPackage("org.apache.commons:commons-csv:0") ###Output _____no_output_____
fairness_indicators/examples/Fairness_Indicators_Example_Colab.ipynb
###Markdown Fairness Indicators Example Colab OverviewIn this activity, you'll use Fairness Indicators to explore the Civil Comments dataset. Fairness Indicators is a suite of tools built on top of [TensorFlow Model Analysis](https://www.tensorflow.org/tfx/model_analysis/get_started) that enable regular evaluation of fairness metrics in product pipelines. This [Introductory Video](https://www.youtube.com/watch?v=pHT-ImFXPQo) provides more details and context on the real-world scenario we are presenting here, one of primary motivations for creating Fairness Indicators.About the DatasetIn this exercise, you'll work with the [Civil Comments dataset](https://www.kaggle.com/c/jigsaw-unintended-bias-in-toxicity-classification), approximately 2 million public comments made public by the [Civil Comments platform](https://medium.com/@aja_15265/saying-goodbye-to-civil-comments-41859d3a2b1d) in 2017 for ongoing research. This effort was sponsored by Jigsaw, who have hosted competitions on Kaggle to help classify toxic comments as well as minimize unintended model bias. Each individual text comment in the dataset has a toxicity label. Within the data, a subset of comments are labeled with a variety of identity attributes, including categories for gender, sexual orientation, religion, and race or ethnicity.About the Tools[TensorFlow Model Analysis](https://www.tensorflow.org/tfx/model_analysis/get_started) is a library for evaluating both TensorFlow and non-TensorFlow machine learning models. It allows users to evaluate their models on large amounts of data in a distributed manner, computing in-graph and other metrics over different slices of data and visualized in notebooks. Fairness Indicators is built on top of TFMA. With Fairness Indicators, users will be able to: * Evaluate model performance, sliced across defined groups of users* Feel confident about results with confidence intervals and evaluations at multiple thresholdsFairness Indicators is packaged with [TensorFlow Data Validation](https://www.tensorflow.org/tfx/data_validation/get_started) and [What-If Tool](https://pair-code.github.io/what-if-tool/) to allow users to:* Evaluate the distribution of datasets* Dive deep into individual slices to explore root causes and opportunities for improvement with the What-If Tool ImportingRun the following code to install the fairness_indicators library. This package contains the tools we'll be using in this exercise. Restart Runtime may be requested but is not necessary. ###Code !pip install fairness-indicators %tensorflow_version 2.x import os import tempfile import apache_beam as beam import numpy as np import pandas as pd from datetime import datetime import tensorflow_hub as hub import tensorflow as tf import tensorflow_model_analysis as tfma import tensorflow_data_validation as tfdv from tensorflow_model_analysis.addons.fairness.post_export_metrics import fairness_indicators from tensorflow_model_analysis.addons.fairness.view import widget_view from fairness_indicators.examples import util from witwidget.notebook.visualization import WitConfigBuilder from witwidget.notebook.visualization import WitWidget ###Output _____no_output_____ ###Markdown Download and Understand the Data In this exercise, you'll work with the Civil Comments dataset, approximately 2 million public comments made public by the Civil Comments platform in 2017. 
Additionally, a subset of comments have been labelled with a variety of identity attributes, representing the identities that are mentioned in the comment.We've hosted the dataset on Google Cloud Platform for convenience. Run the following code to download the data from GCP, the data will take about a minute to download and analyze.TensorFlow Data Validation is one tool you can use to analyze your data. You can use it to find potential problems in your data, such as missing values and data imbalances, that can lead to Fairness disparities. ###Code #@title Options for Downloading data #@markdown You can choose to download the original and process the data in #@markdown the colab, which may take minutes. By default, we will download the #@markdown data that we have already prepocessed for you. In the original #@markdown dataset, for each indentity annotation columns, the value represents #@markdown the percent of raters who thought the comment references the identity. #@markdown When processing the raw data, the threshold 0.5 is chosen and the #@markdown identities are grouped together by their categories. For example #@markdown if one comment has { male: 0.3, female: 1.0, transgender: 0.0, #@markdown heterosexual: 0.8, homosexual_gay_or_lesbian: 1.0 }, after the #@markdown processing, the data will be { gender: [female], #@markdown sexual_orientation: [heterosexual, homosexual_gay_or_lesbian] }. download_original_data = True #@param {type:"boolean"} if download_original_data: train_tf_file = tf.keras.utils.get_file('train_tf.tfrecord', 'https://storage.googleapis.com/civil_comments_dataset/train_tf.tfrecord') validate_tf_file = tf.keras.utils.get_file('validate_tf.tfrecord', 'https://storage.googleapis.com/civil_comments_dataset/validate_tf.tfrecord') # The identity terms list will be grouped together by their categories # (see 'IDENTITY_COLUMNS') on threshould 0.5. Only the identity term column, # text column and label column will be kept after processing. train_tf_file = util.convert_comments_data(train_tf_file) validate_tf_file = util.convert_comments_data(validate_tf_file) else: train_tf_file = tf.keras.utils.get_file('train_tf_processed.tfrecord', 'https://storage.googleapis.com/civil_comments_dataset/train_tf_processed.tfrecord') validate_tf_file = tf.keras.utils.get_file('validate_tf_processed.tfrecord', 'https://storage.googleapis.com/civil_comments_dataset/validate_tf_processed.tfrecord') stats = tfdv.generate_statistics_from_tfrecord(data_location=train_tf_file) tfdv.visualize_statistics(stats) ###Output _____no_output_____ ###Markdown There are several interesting things that we may want to note in this data. The first is that the toxicity label, which is what we are predicting, is unbalanced. Only 8% of examples in the training set are toxic, which means that a classifier could get 92% accuracy by predicting that all comments are non-toxic.For the fields relating to identity terms note that out of 1.08 million training examples, only around 6.6k examples deal with homosexuality, and those related to bisexuality are even more rare. This might indicate that performance on these slices may suffer due to lack of training data. Defining Constants Here, we define the feature map that will be used to parse the data. Each example will have a label, comment text, and identity features `sexual orientation`, `gender`, `religion`, `race`, and `disability` that are associated with the text. 
###Code BASE_DIR = tempfile.gettempdir() TEXT_FEATURE = 'comment_text' LABEL = 'toxicity' FEATURE_MAP = { # Label: LABEL: tf.io.FixedLenFeature([], tf.float32), # Text: TEXT_FEATURE: tf.io.FixedLenFeature([], tf.string), # Identities: 'sexual_orientation':tf.io.VarLenFeature(tf.string), 'gender':tf.io.VarLenFeature(tf.string), 'religion':tf.io.VarLenFeature(tf.string), 'race':tf.io.VarLenFeature(tf.string), 'disability':tf.io.VarLenFeature(tf.string), } ###Output _____no_output_____ ###Markdown Train the Model First, set up the input function to feed data into the model. Note that since we identified a class imbalance by our earlier TensorFlow Data Validation run, we will add a weight column to each example and upweight the toxic examples to account for this. We only use identity features during the evaluation phase, as only the comments are fed into the model at training time. ###Code def train_input_fn(): def parse_function(serialized): parsed_example = tf.io.parse_single_example( serialized=serialized, features=FEATURE_MAP) # Adds a weight column to deal with unbalanced classes. parsed_example['weight'] = tf.add(parsed_example[LABEL], 0.1) return (parsed_example, parsed_example[LABEL]) train_dataset = tf.data.TFRecordDataset( filenames=[train_tf_file]).map(parse_function).batch(512) return train_dataset ###Output _____no_output_____ ###Markdown Next, create a deep neural network model, and train it on the data: ###Code model_dir = os.path.join(BASE_DIR, 'train', datetime.now().strftime( "%Y%m%d-%H%M%S")) embedded_text_feature_column = hub.text_embedding_column( key=TEXT_FEATURE, module_spec='https://tfhub.dev/google/nnlm-en-dim128/1') classifier = tf.estimator.DNNClassifier( hidden_units=[500, 100], weight_column='weight', feature_columns=[embedded_text_feature_column], optimizer=tf.keras.optimizers.Adagrad(learning_rate=0.003), loss_reduction=tf.losses.Reduction.SUM, n_classes=2, model_dir=model_dir) classifier.train(input_fn=train_input_fn, steps=1000) ###Output _____no_output_____ ###Markdown Run TensorFlow Model Analysis with Fairness Indicators Export Saved Model ###Code def eval_input_receiver_fn(): serialized_tf_example = tf.compat.v1.placeholder( dtype=tf.string, shape=[None], name='input_example_placeholder') # This *must* be a dictionary containing a single key 'examples', which # points to the input placeholder. receiver_tensors = {'examples': serialized_tf_example} features = tf.io.parse_example(serialized_tf_example, FEATURE_MAP) features['weight'] = tf.ones_like(features[LABEL]) return tfma.export.EvalInputReceiver( features=features, receiver_tensors=receiver_tensors, labels=features[LABEL]) tfma_export_dir = tfma.export.export_eval_savedmodel( estimator=classifier, export_dir_base=os.path.join(BASE_DIR, 'tfma_eval_model'), eval_input_receiver_fn=eval_input_receiver_fn) ###Output _____no_output_____ ###Markdown Compute Fairness Metrics Select the identity to compute metrics for and whether to run with confidence intervals in the panel on the right-hand side. Depending on your configurations, this step will take 2-10 minutes to run. ###Code #@title Fairness Indicators Computation Options tfma_eval_result_path = os.path.join(BASE_DIR, 'tfma_eval_result') #@markdown Modify the slice_selection for experiments on other identities. 
slice_selection = 'sexual_orientation' #@param ["sexual_orientation", "gender", "religion", "race", "disability"] #@markdown Confidence Intervals can help you make better decisions regarding your data, but as it requires computing multiple resamples, is slower particularly in the colab environment that cannot take advantage of parallelization. compute_confidence_intervals = False #@param {type:"boolean"} # Define slices that you want the evaluation to run on. slice_spec = [ tfma.slicer.SingleSliceSpec(), # Overall slice tfma.slicer.SingleSliceSpec(columns=[slice_selection]), ] # Add the fairness metrics. add_metrics_callbacks = [ tfma.post_export_metrics.fairness_indicators( thresholds=[0.1, 0.3, 0.5, 0.7, 0.9], labels_key=LABEL ) ] eval_shared_model = tfma.default_eval_shared_model( eval_saved_model_path=tfma_export_dir, add_metrics_callbacks=add_metrics_callbacks) # Run the fairness evaluation. with beam.Pipeline() as pipeline: _ = ( pipeline | 'ReadData' >> beam.io.ReadFromTFRecord(validate_tf_file) | 'ExtractEvaluateAndWriteResults' >> tfma.ExtractEvaluateAndWriteResults( eval_shared_model=eval_shared_model, slice_spec=slice_spec, compute_confidence_intervals=compute_confidence_intervals, output_path=tfma_eval_result_path) ) eval_result = tfma.load_eval_result(output_path=tfma_eval_result_path) ###Output _____no_output_____ ###Markdown Render What-if Tool In this section, you'll use the [What-If Tool's ](https://pair-code.github.io/what-if-tool/)interactive visual interface to explore and manipulate data at a micro-level.On the right-hand panel in the visualization, you will see a scatter plot where each point represents one of the examples in the subset loaded into the tool. Click on one of the points. In the left-hand panel, you should now see details about this particular example. The comment text, ground truth toxicity, and applicable identities are shown. At the bottom of this left-hand panel, you see the inference results from the model you just trained.Modify the text of the example. You can then click the "Run inference" button to view how your changes caused the perceived toxicity prediction to change. ###Code DEFAULT_MAX_EXAMPLES = 1000 # Load 100000 examples in memory. When first rendered, # What-If Tool should only display 1000 of these due to browser constraints. def wit_dataset(file, num_examples=100000): dataset = tf.data.TFRecordDataset( filenames=[file]).take(num_examples) return [tf.train.Example.FromString(d.numpy()) for d in dataset] wit_data = wit_dataset(train_tf_file) config_builder = WitConfigBuilder(wit_data[:DEFAULT_MAX_EXAMPLES]).set_estimator_and_feature_spec( classifier, FEATURE_MAP).set_label_vocab(['non-toxicity', LABEL]).set_target_feature(LABEL) wit = WitWidget(config_builder) ###Output _____no_output_____ ###Markdown Render Fairness IndicatorsRender the Fairness Indicators widget with the exported evaluation results.Below you will see bar charts displaying performance of each slice of the data on selected metrics. You can adjust the baseline comparison slice as well as the displayed threshold(s) using the drop down menus at the top of the visualization. The Fairness Indicator widget is integrated with the What-If Tool rendered above. If you select one slice of the data in the bar chart, the What-If Tool will update to show you examples from the selected slice. When the data reloads in the What-If Tool above, try modifying **Color By** to **toxicity**. This can give you a visual understanding of the toxicity balance of examples by slice. 
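The widget gives a visual impression; for a numeric view of the same balance, the examples already loaded for the What-If Tool can be tallied directly. The sketch below counts toxic versus non-toxic comments per `sexual_orientation` value in `wit_data` (it assumes the cells above have run, and the 0.5 cut-off on the label is an illustrative choice, not something prescribed by the notebook).
###Code
from collections import Counter

slice_counts = Counter()
for ex in wit_data:
    feats = ex.features.feature
    label = feats['toxicity'].float_list.value[0]
    groups = feats['sexual_orientation'].bytes_list.value or [b'(none)']
    for group in groups:
        slice_counts[(group.decode('utf-8'), label >= 0.5)] += 1

for (group, is_toxic), count in sorted(slice_counts.items()):
    print(group, 'toxic' if is_toxic else 'non-toxic', count)
###Output
_____no_output_____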
###Code event_handlers={'slice-selected': wit.create_selection_callback(wit_data, DEFAULT_MAX_EXAMPLES)} widget_view.render_fairness_indicator(eval_result, slicing_column=slice_selection, event_handlers=event_handlers ) ###Output _____no_output_____ ###Markdown Fairness Indicators Example Colab OverviewIn this activity, you'll use Fairness Indicators to explore the Civil Comments dataset. Fairness Indicators is a suite of tools built on top of [TensorFlow Model Analysis](https://www.tensorflow.org/tfx/model_analysis/get_started) that enable regular evaluation of fairness metrics in product pipelines. This [Introductory Video](https://www.youtube.com/watch?v=pHT-ImFXPQo) provides more details and context on the real-world scenario we are presenting here, one of primary motivations for creating Fairness Indicators.About the DatasetIn this exercise, you'll work with the [Civil Comments dataset](https://www.kaggle.com/c/jigsaw-unintended-bias-in-toxicity-classification), approximately 2 million public comments made public by the [Civil Comments platform](https://medium.com/@aja_15265/saying-goodbye-to-civil-comments-41859d3a2b1d) in 2017 for ongoing research. This effort was sponsored by Jigsaw, who have hosted competitions on Kaggle to help classify toxic comments as well as minimize unintended model bias. Each individual text comment in the dataset has a toxicity label. Within the data, a subset of comments are labeled with a variety of identity attributes, including categories for gender, sexual orientation, religion, and race or ethnicity.About the Tools[TensorFlow Model Analysis](https://www.tensorflow.org/tfx/model_analysis/get_started) is a library for evaluating both TensorFlow and non-TensorFlow machine learning models. It allows users to evaluate their models on large amounts of data in a distributed manner, computing in-graph and other metrics over different slices of data and visualized in notebooks. Fairness Indicators is built on top of TFMA. With Fairness Indicators, users will be able to: * Evaluate model performance, sliced across defined groups of users* Feel confident about results with confidence intervals and evaluations at multiple thresholdsFairness Indicators is packaged with [TensorFlow Data Validation](https://www.tensorflow.org/tfx/data_validation/get_started) and [What-If Tool](https://pair-code.github.io/what-if-tool/) to allow users to:* Evaluate the distribution of datasets* Dive deep into individual slices to explore root causes and opportunities for improvement with the What-If Tool ImportingRun the following code to install the fairness_indicators library. This package contains the tools we'll be using in this exercise. Restart Runtime may be requested but is not necessary. 
###Code !pip install fairness-indicators %tensorflow_version 2.x import os import tempfile import apache_beam as beam import numpy as np import pandas as pd from datetime import datetime import tensorflow_hub as hub import tensorflow as tf import tensorflow_model_analysis as tfma import tensorflow_data_validation as tfdv from tensorflow_model_analysis.addons.fairness.post_export_metrics import fairness_indicators from tensorflow_model_analysis.addons.fairness.view import widget_view from fairness_indicators.examples import util from witwidget.notebook.visualization import WitConfigBuilder from witwidget.notebook.visualization import WitWidget ###Output _____no_output_____ ###Markdown Download and Understand the Data In this exercise, you'll work with the Civil Comments dataset, approximately 2 million public comments made public by the Civil Comments platform in 2017. Additionally, a subset of comments have been labelled with a variety of identity attributes, representing the identities that are mentioned in the comment.We've hosted the dataset on Google Cloud Platform for convenience. Run the following code to download the data from GCP, the data will take about a minute to download and analyze.TensorFlow Data Validation is one tool you can use to analyze your data. You can use it to find potential problems in your data, such as missing values and data imbalances, that can lead to Fairness disparities. ###Code #@title Options for Dowloading data #@markdown You can choose to download the original and process the data in #@markdown the colab, which may take minutes. By default, we will download the #@markdown data that we have already prepocessed for you. In the original #@markdown dataset, for each indentity annotation columns, the value represents #@markdown the percent of raters who thought the comment references the identity. #@markdown When processing the raw data, the threshold 0.5 is chosen and the #@markdown identities are grouped together by their categories. For example #@markdown if one comment has { male: 0.3, female: 1.0, transgender: 0.0, #@markdown heterosexual: 0.8, homosexual_gay_or_lesbian: 1.0 }, after the #@markdown processing, the data will be { gender: [female], #@markdown sexual_orientation: [heterosexual, homosexual_gay_or_lesbian] }. download_original_data = True #@param {type:"boolean"} if download_original_data: train_tf_file = tf.keras.utils.get_file('train_tf.tfrecord', 'https://storage.googleapis.com/civil_comments_dataset/train_tf.tfrecord') validate_tf_file = tf.keras.utils.get_file('validate_tf.tfrecord', 'https://storage.googleapis.com/civil_comments_dataset/validate_tf.tfrecord') # The identity terms list will be grouped together by their categories # (see 'IDENTITY_COLUMNS') on threshould 0.5. Only the identity term column, # text column and label column will be kept after processing. train_tf_file = util.convert_comments_data(train_tf_file) validate_tf_file = util.convert_comments_data(validate_tf_file) else: train_tf_file = tf.keras.utils.get_file('train_tf_processed.tfrecord', 'https://storage.googleapis.com/civil_comments_dataset/train_tf_processed.tfrecord') validate_tf_file = tf.keras.utils.get_file('validate_tf_processed.tfrecord', 'https://storage.googleapis.com/civil_comments_dataset/validate_tf_processed.tfrecord') stats = tfdv.generate_statistics_from_tfrecord(data_location=train_tf_file) tfdv.visualize_statistics(stats) ###Output _____no_output_____ ###Markdown There are several interesting things that we may want to note in this data. 
The first is that the toxicity label, which is what we are predicting, is unbalanced. Only 8% of examples in the training set are toxic, which means that a classifier could get 92% accuracy by predicting that all comments are non-toxic.For the fields relating to identity terms note that out of 1.08 million training examples, only around 6.6k examples deal with homosexuality, and those related to bisexuality are even more rare. This might indicate that performance on these slices may suffer due to lack of training data. Defining Constants Here, we define the feature map that will be used to parse the data. Each example will have a label, comment text, and identity features `sexual orientation`, `gender`, `religion`, `race`, and `disability` that are associated with the text. ###Code BASE_DIR = tempfile.gettempdir() TEXT_FEATURE = 'comment_text' LABEL = 'toxicity' FEATURE_MAP = { # Label: LABEL: tf.io.FixedLenFeature([], tf.float32), # Text: TEXT_FEATURE: tf.io.FixedLenFeature([], tf.string), # Identities: 'sexual_orientation':tf.io.VarLenFeature(tf.string), 'gender':tf.io.VarLenFeature(tf.string), 'religion':tf.io.VarLenFeature(tf.string), 'race':tf.io.VarLenFeature(tf.string), 'disability':tf.io.VarLenFeature(tf.string), } ###Output _____no_output_____ ###Markdown Train the Model First, set up the input function to feed data into the model. Note that since we identified a class imbalance by our earlier TensorFlow Data Validation run, we will add a weight column to each example and upweight the toxic examples to account for this. We only use identity features during the evaluation phase, as only the comments are fed into the model at training time. ###Code def train_input_fn(): def parse_function(serialized): parsed_example = tf.io.parse_single_example( serialized=serialized, features=FEATURE_MAP) # Adds a weight column to deal with unbalanced classes. parsed_example['weight'] = tf.add(parsed_example[LABEL], 0.1) return (parsed_example, parsed_example[LABEL]) train_dataset = tf.data.TFRecordDataset( filenames=[train_tf_file]).map(parse_function).batch(512) return train_dataset ###Output _____no_output_____ ###Markdown Next, create a deep neural network model, and train it on the data: ###Code model_dir = os.path.join(BASE_DIR, 'train', datetime.now().strftime( "%Y%m%d-%H%M%S")) embedded_text_feature_column = hub.text_embedding_column( key=TEXT_FEATURE, module_spec='https://tfhub.dev/google/nnlm-en-dim128/1') classifier = tf.estimator.DNNClassifier( hidden_units=[500, 100], weight_column='weight', feature_columns=[embedded_text_feature_column], optimizer=tf.optimizers.Adagrad(learning_rate=0.003), loss_reduction=tf.losses.Reduction.SUM, n_classes=2, model_dir=model_dir) classifier.train(input_fn=train_input_fn, steps=1000) ###Output _____no_output_____ ###Markdown Run TensorFlow Model Analysis with Fairness Indicators Export Saved Model ###Code def eval_input_receiver_fn(): serialized_tf_example = tf.compat.v1.placeholder( dtype=tf.string, shape=[None], name='input_example_placeholder') # This *must* be a dictionary containing a single key 'examples', which # points to the input placeholder. 
receiver_tensors = {'examples': serialized_tf_example} features = tf.io.parse_example(serialized_tf_example, FEATURE_MAP) features['weight'] = tf.ones_like(features[LABEL]) return tfma.export.EvalInputReceiver( features=features, receiver_tensors=receiver_tensors, labels=features[LABEL]) tfma_export_dir = tfma.export.export_eval_savedmodel( estimator=classifier, export_dir_base=os.path.join(BASE_DIR, 'tfma_eval_model'), eval_input_receiver_fn=eval_input_receiver_fn) ###Output _____no_output_____ ###Markdown Compute Fairness Metrics Select the identity to compute metrics for and whether to run with confidence intervals in the panel on the right-hand side. Depending on your configurations, this step will take 2-10 minutes to run. ###Code #@title Fairness Indicators Computation Options tfma_eval_result_path = os.path.join(BASE_DIR, 'tfma_eval_result') #@markdown Modify the slice_selection for experiments on other identities. slice_selection = 'sexual_orientation' #@param ["sexual_orientation", "gender", "religion", "race", "disability"] #@markdown Confidence Intervals can help you make better decisions regarding your data, but as it requires computing multiple resamples, is slower particularly in the colab environment that cannot take advantage of parallelization. compute_confidence_intervals = False #@param {type:"boolean"} # Define slices that you want the evaluation to run on. slice_spec = [ tfma.slicer.SingleSliceSpec(), # Overall slice tfma.slicer.SingleSliceSpec(columns=[slice_selection]), ] # Add the fairness metrics. add_metrics_callbacks = [ tfma.post_export_metrics.fairness_indicators( thresholds=[0.1, 0.3, 0.5, 0.7, 0.9], labels_key=LABEL ) ] eval_shared_model = tfma.default_eval_shared_model( eval_saved_model_path=tfma_export_dir, add_metrics_callbacks=add_metrics_callbacks) # Run the fairness evaluation. with beam.Pipeline() as pipeline: _ = ( pipeline | 'ReadData' >> beam.io.ReadFromTFRecord(validate_tf_file) | 'ExtractEvaluateAndWriteResults' >> tfma.ExtractEvaluateAndWriteResults( eval_shared_model=eval_shared_model, slice_spec=slice_spec, compute_confidence_intervals=compute_confidence_intervals, output_path=tfma_eval_result_path) ) eval_result = tfma.load_eval_result(output_path=tfma_eval_result_path) ###Output _____no_output_____ ###Markdown Render What-if Tool In this section, you'll use the [What-If Tool's ](https://pair-code.github.io/what-if-tool/)interactive visual interface to explore and manipulate data at a micro-level.On the right-hand panel in the visualization, you will see a scatter plot where each point represents one of the examples in the subset loaded into the tool. Click on one of the points. In the left-hand panel, you should now see details about this particular example. The comment text, ground truth toxicity, and applicable identities are shown. At the bottom of this left-hand panel, you see the inference results from the model you just trained.Modify the text of the example. You can then click the "Run inference" button to view how your changes caused the perceived toxicity prediction to change. ###Code DEFAULT_MAX_EXAMPLES = 1000 # Load 100000 examples in memory. When first rendered, # What-If Tool should only display 1000 of these due to browser constraints. 
def wit_dataset(file, num_examples=100000): dataset = tf.data.TFRecordDataset( filenames=[train_tf_file]).take(num_examples) return [tf.train.Example.FromString(d.numpy()) for d in dataset] wit_data = wit_dataset(train_tf_file) config_builder = WitConfigBuilder(wit_data[:DEFAULT_MAX_EXAMPLES]).set_estimator_and_feature_spec( classifier, FEATURE_MAP).set_label_vocab(['non-toxicity', LABEL]).set_target_feature(LABEL) wit = WitWidget(config_builder) ###Output _____no_output_____ ###Markdown Render Fairness IndicatorsRender the Fairness Indicators widget with the exported evaluation results.Below you will see bar charts displaying performance of each slice of the data on selected metrics. You can adjust the baseline comparison slice as well as the displayed threshold(s) using the drop down menus at the top of the visualization. The Fairness Indicator widget is integrated with the What-If Tool rendered above. If you select one slice of the data in the bar chart, the What-If Tool will update to show you examples from the selected slice. When the data reloads in the What-If Tool above, try modifying **Color By** to **toxicity**. This can give you a visual understanding of the toxicity balance of examples by slice. ###Code event_handlers={'slice-selected': wit.create_selection_callback(wit_data, DEFAULT_MAX_EXAMPLES)} widget_view.render_fairness_indicator(eval_result, slicing_column=slice_selection, event_handlers=event_handlers) ###Output _____no_output_____
examples/guides/detailed-example.ipynb
###Markdown Detailed exampleThis overview of the most important functions repeats the previous 30-seconds-example, but in more detail and shows additional functionality and alternative steps. Authentificate & access project ###Code import up42 up42.authenticate(project_id="12345", project_api_key="12345") #up42.authenticate(cfg_file="config.json") project = up42.initialize_project() project ###Output _____no_output_____ ###Markdown Get information about the available blocks to later construct your workflow. ###Code up42.get_blocks(basic=True) ###Output _____no_output_____ ###Markdown Create or access the workflowYou can either create a new workflow, use project.get_workflows() to get all existing workflows within the project, or access an exisiting workflow directly via its workflow_id. A new workflow is created and filled with tasks ([Sentinel-2 data](https://marketplace.up42.com/block/018dfb34-fc19-4334-8125-14fd7535f979), [Land-Surface-Temperature](https://marketplace.up42.com/block/34767300-5caf-472b-a684-a351212b5c14)). The area of interest and workflow parameters are defined. After running the job, the results are downloaded and visualized. ###Code # Create a new, empty workflow. workflow = project.create_workflow(name="30-seconds-workflow", use_existing=False) workflow # Add workflow tasks input_tasks = ["Sentinel-2 L2A (GeoTIFF)", "Sharpening Filter"] workflow.add_workflow_tasks(input_tasks=input_tasks) # Check the added tasks. workflow.get_workflow_tasks(basic=True) # Alternative: Get all existing workflows within the project. all_workflows = project.get_workflows() workflow = all_workflows[0] workflow # Alternative: Directly access the existing workflow the id (has to exist within the accessed project). UP42_WORKFLOW_ID="7fb2ec8a-45be-41ad-a50f-98ba6b528b98" workflow = up42.initialize_workflow(workflow_id=UP42_WORKFLOW_ID) workflow ###Output _____no_output_____ ###Markdown Select the aoiThere are multiple ways to select an aoi, you can:- Provide aoi the geometry directly in code as a FeatureCollection, Feature, GeoDataFrame, shapely Polygon or list of bounds coordinates.- Use up42.draw_aoi() to draw the aoi and export it as a geojson.- Use up42.read_vector_file() to read a geojson, json, shapefile, kml or wkt file.- Use up42.get_example_aoi() to read multiple provided sample aois. ###Code aoi = [13.375966, 52.515068, 13.378314, 52.516639] aoi = up42.read_vector_file("data/aoi_berlin.geojson", as_dataframe=True) aoi.head(1) #aoi = up42.get_example_aoi(location="Berlin") #aoi #up42.draw_aoi() ###Output _____no_output_____ ###Markdown Select the workflow parametersThere are also multiple ways to construct the workflow input parameters, you can:- Provide the parameters directly in code as a json string.- Use .get_parameters_info() to get a an overview of all potential parameters for the selected workflow and information about the parameter defaults and ranges.- Use .get_input_parameters(aoi_type="bbox", aoi_geometry=aoi) to construct the parameters with the provided aoi and all default parameters. Selecting the aoi_type is independent from the provided aoi, you can e.g. provide a irregular Polygon and still select aoi_type="bbox", then the bounding box of the polygon will be selected. 
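To make that last point concrete, the snippet below shows the bounding box a polygon collapses to; the triangle coordinates are arbitrary values near central Berlin, used only for illustration.
###Code
from shapely.geometry import Polygon

# An irregular (triangular) area of interest.
irregular_aoi = Polygon([(13.3760, 52.5151), (13.3783, 52.5152), (13.3771, 52.5166)])

# bounds = (minx, miny, maxx, maxy): the box a "bbox" geometry_operation works from.
print(irregular_aoi.bounds)
###Output
_____no_output_____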
###Code workflow.get_parameters_info() input_parameters = workflow.construct_parameters(geometry=aoi, geometry_operation="bbox", limit=1) # Further update the input_parameters manually input_parameters["esa-s2-l2a-gtiff:1"].update({"max_cloud_cover":10}) input_parameters ###Output _____no_output_____ ###Markdown Price estimation & Test Job ###Code workflow.estimate_job(input_parameters) # Run a test job to query data availability and check the configuration. test_job = workflow.test_job(input_parameters=input_parameters, track_status=True) test_results = test_job.get_results_json() print(test_results) ###Output _____no_output_____ ###Markdown Run the workflow & download results ###Code # Run the actual job. job = workflow.run_job(input_parameters=input_parameters, track_status=True) ###Output _____no_output_____ ###Markdown Download & Display results ###Code # Download job result (default downloads to Desktop). Only works after download is finished. results_fp = job.download_results() job.plot_results(figsize=(6,6)) #job.map_results(bands=[1]) ###Output _____no_output_____ ###Markdown Detailed exampleThis overview of the most important functions repeats the previous 30-seconds-example, but in more detail and shows additional functionality and alternative steps. Authentificate & access project ###Code import up42 up42.authenticate(project_id="12345", project_api_key="12345") #up42.authenticate(cfg_file="config.json") project = up42.initialize_project() project ###Output _____no_output_____ ###Markdown Get information about the available blocks to later construct your workflow. ###Code up42.get_blocks(basic=True) ###Output _____no_output_____ ###Markdown Create or access the workflowYou can either create a new workflow, use project.get_workflows() to get all existing workflows within the project, or access an exisiting workflow directly via its workflow_id. A new workflow is created and filled with tasks ([Sentinel-2 data](https://marketplace.up42.com/block/018dfb34-fc19-4334-8125-14fd7535f979), [Land-Surface-Temperature](https://marketplace.up42.com/block/34767300-5caf-472b-a684-a351212b5c14)). The area of interest and workflow parameters are defined. After running the job, the results are downloaded and visualized. ###Code # Create a new, empty workflow. workflow = project.create_workflow(name="30-seconds-workflow", use_existing=False) workflow # Add workflow tasks input_tasks = ["Sentinel-2 L2A Visual (GeoTIFF)", "Sharpening Filter"] workflow.add_workflow_tasks(input_tasks=input_tasks) # Check the added tasks. workflow.get_workflow_tasks(basic=True) # Alternative: Get all existing workflows within the project. all_workflows = project.get_workflows() workflow = all_workflows[0] workflow # Alternative: Directly access the existing workflow the id (has to exist within the accessed project). UP42_WORKFLOW_ID="7fb2ec8a-45be-41ad-a50f-98ba6b528b98" workflow = up42.initialize_workflow(workflow_id=UP42_WORKFLOW_ID) workflow ###Output _____no_output_____ ###Markdown Select the aoiThere are multiple ways to select an aoi, you can:- Provide aoi the geometry directly in code as a FeatureCollection, Feature, GeoDataFrame, shapely Polygon or list of bounds coordinates.- Use up42.draw_aoi() to draw the aoi and export it as a geojson.- Use up42.read_vector_file() to read a geojson, json, shapefile, kml or wkt file.- Use up42.get_example_aoi() to read multiple provided sample aois. 
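As an illustration of the first option above (passing the geometry directly in code), the Berlin extent used in the next cell can also be expressed as a shapely polygon or a GeoJSON-style dictionary; the coordinates below are simply those bounds.
###Code
from shapely.geometry import box, mapping

berlin_bounds = [13.375966, 52.515068, 13.378314, 52.516639]
berlin_polygon = box(*berlin_bounds)      # shapely Polygon built from the bounds
berlin_geojson = mapping(berlin_polygon)  # plain GeoJSON-style dict

print(berlin_polygon.wkt)
print(berlin_geojson['type'])
###Output
_____no_output_____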
###Code aoi = [13.375966, 52.515068, 13.378314, 52.516639] aoi = up42.read_vector_file("data/aoi_berlin.geojson", as_dataframe=True) aoi.head(1) #aoi = up42.get_example_aoi(location="Berlin") #aoi #up42.draw_aoi() ###Output _____no_output_____ ###Markdown Select the workflow parametersThere are also multiple ways to construct the workflow input parameters, you can:- Provide the parameters directly in code as a json string.- Use .get_parameters_info() to get a an overview of all potential parameters for the selected workflow and information about the parameter defaults and ranges.- Use .get_input_parameters(aoi_type="bbox", aoi_geometry=aoi) to construct the parameters with the provided aoi and all default parameters. Selecting the aoi_type is independent from the provided aoi, you can e.g. provide a irregular Polygon and still select aoi_type="bbox", then the bounding box of the polygon will be selected. ###Code workflow.get_parameters_info() input_parameters = workflow.construct_parameters(geometry=aoi, geometry_operation="bbox", limit=1) # Further update the input_parameters manually input_parameters["esa-s2-l2a-gtiff-visual:1"].update({"max_cloud_cover":10}) input_parameters workflow ###Output _____no_output_____ ###Markdown Price estimation & Test Job ###Code workflow.estimate_job(input_parameters) # Run a test job to query data availability and check the configuration. test_job = workflow.test_job(input_parameters=input_parameters, track_status=True) test_results = test_job.get_results_json() print(test_results) ###Output _____no_output_____ ###Markdown Run the workflow & download results ###Code # Run the actual job. job = workflow.run_job(input_parameters=input_parameters, track_status=True) ###Output _____no_output_____ ###Markdown Download & Display results ###Code # Download job result (default downloads to Desktop). Only works after download is finished. results_fp = job.download_results() job.plot_results(figsize=(6,6)) #job.map_results(bands=[1]) ###Output _____no_output_____
mdm163-week-6-lab-Copy1.ipynb
###Markdown Lab 6 (Project Part 1): ###Code
# Prompt for a name and an occupation, then echo them back.
name = input("Please enter your name: ")
occupation = input("Please enter your occupation: ")
print("Your name is " + name + " and your occupation is " + occupation)
###Output Please enter your name: Mikey Please enter your occupation: CS
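###Markdown For reference, the same sentence can be produced with an f-string instead of manual concatenation, using the variables read above.
###Code
print(f"Your name is {name} and your occupation is {occupation}")
###Output
_____no_output_____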
CW3.ipynb
###Markdown Classwork 3 Sakthi and Will September 20, 2016 Problem 1 Exercise 5.3This problem asked us to fill two arrays: the first, x, using linspace to just create an array of numbers, and the second, y, as a function of x. It asked us to do this using a vectorized function, as opposed to a for loop. In this case, we created an x array from -4 to 4, split into 41 segments. The return will be an array y which is a result of the function: $$y = \frac{1}{\sqrt{2\pi}}e^{\frac{1}{2}x^2}$$ ###Code fav.main() ###Output _____no_output_____ ###Markdown As you can see, the array has 41 entries, from y(-4) to y(4), just as desired. Problem 2 Exercise 5.9This problem asked us to plot the function $$y(t) = v_0t - \frac{1}{2}gt^2$$This function gives the height of an object given an initial velocity directly up into the air.It asked for a plot of this function first given $v_0 = 10$, and second given a set of $v_0$'s specified by user input. To do this, we created a function that asked the user to input values of $v_0$, created sets of data for $t$ and the corresponding $y(t)$ values, and plotted them all on one graph, as asked in the second part of the question. To plot just $v_0 = 10$ we didn't write a separate function into the file, we simply implemented the more general function and input only $10$ as a $v_0$ value. So, here is the plot for $v_0 = 10$: ###Code v = plotter.getV() l = plotter.getTY(v) plotter.plot1(v, l[0],l[1]) ###Output Enter your list of v_0's, separated by commas: 10 ###Markdown The plot above shows the height vs time of an object thrown directly upward at a velocity of 10m/s. Here is the plot for a more interesting set of $v_0$'s - the speed of a home run leaving the bat, the speed of sound waves through air, and the speed of a .22 leaving the barrel: ###Code v = plotter.getV() l = plotter.getTY(v) plotter.plot1(v, l[0],l[1]) ###Output Enter your list of v_0's, separated by commas: 49,343,460 ###Markdown Problem 3 Exercise 5.13This exercise asked us to plot the trajectory of an object using this function: $$f(x) = x \tan(\theta) - \frac{1}{2v_0^2} \frac{gx^2}{{\cos^2(\theta)}} + y_0$$The exercise asked us to read the input data for initial values directly from the command line, so we created a main() function which executes everything at once; the input, the function, and the plot. Here is a sample plot: ###Code pt.main() ###Output Please enter the starting height of the ball: 10 Please enter the starting angle you want your ball to be thrown at: 45 Please enter your initial velocity: 300 ###Markdown Problem 4 Exercise 5.14This exercise asked us to read data points from a file. The file was formatted as two columns, one for $x$, one for $y$. To do so, we used urllib2 as suggested by the Python documentation, and read from the url at which the file was located. Then we split each line with spaces as delimiters, and appended the lists for $x$ and $y$ coordinates accordingly. Then we created a function to plot a simple graph of $x$ vs $y$, and a function to return $y_{mean}$, $y_{max}$, and $y_{min}$. Here is the graph of $x$ vs $y$: ###Code l = rc.getData() mmm = rc.yData(l[1]) rc.plotData(l[0],l[1]) ###Output _____no_output_____ ###Markdown And here is the mean, max, and min of y: ###Code print ('Mean of y is: ' + str(mmm[0])) print ('Max of y is: ' + str(mmm[1])) print ('Min of y is: ' + str(mmm[2])) ###Output Mean of y is: 2.58191401076e-18 Max of y is: 1.0 Min of y is: -1.0
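###Markdown The plotting and statistics in this last problem live in the external module `rc`, which is not included in the notebook. A minimal sketch of what `getData` and `yData` could look like, following the description above, is shown below; `DATA_URL` is a placeholder, since the original file location is not recorded here, and the modern `urllib.request` stands in for the `urllib2` module mentioned in the write-up.
###Code
from urllib.request import urlopen
import numpy as np

DATA_URL = "https://example.com/xy_data.txt"  # placeholder, not the original location

def get_data(url=DATA_URL):
    """Read whitespace-separated x/y columns from a text file at `url`."""
    x, y = [], []
    for line in urlopen(url).read().decode().splitlines():
        parts = line.split()
        if len(parts) >= 2:
            x.append(float(parts[0]))
            y.append(float(parts[1]))
    return x, y

def y_stats(y):
    """Return the mean, max and min of the y values."""
    y = np.asarray(y)
    return y.mean(), y.max(), y.min()
###Output
_____no_output_____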
homework01.ipynb
###Markdown Задание 1 вычислить: $$7 \cdot \begin{pmatrix}5 & 10 \\7 & 12 \\11.3 & 5 \\25 & 30 \\\end{pmatrix} + 2 \cdot\begin{pmatrix}5 & 10 \\7 & 12 \\11.3 & 5 \\25 & 30 \\\end{pmatrix}$$ Решение: $$\begin{pmatrix}35 & 70 \\49 & 84 \\79.1 & 35 \\175 & 210 \\\end{pmatrix} + \begin{pmatrix}10 & 20 \\14 & 24 \\22.6 & 10 \\50 & 60 \\\end{pmatrix} = \begin{pmatrix}45 & 90 \\63 & 108 \\101.7 & 45 \\225 & 270 \\\end{pmatrix}$$ Задание 2.1 Решите систему уравнений: $$3x - 2y + 5z = 7 \\7x + 4y - 8z = 3 \\5x - 3y - 4z = -12$$ Система линейная, как и все уравнения в ней Решение методом Жордана-Гаусса: ###Code m = np.matrix([[3,-2,5,7],[7,4,-8,3],[5,-3,-4,-12]], dtype=float) m ###Output _____no_output_____ ###Markdown Умножим вторую и третью строки на 3: ###Code m[1] = m[1]*3 m[2] = m[2]*3 m ###Output _____no_output_____ ###Markdown из второй строки вычтем 1ю уноженную на 7, а из третьей первую умноженную на 5 ###Code m[1] = m[1] - m[0]*7 m[2] = m[2] - m[0]*5 m ###Output _____no_output_____ ###Markdown Разделим первую строку на 3, а вторую на 26 ###Code m[0] = m[0]/3 m[1] = m[1]/26 m ###Output _____no_output_____ ###Markdown из третьей строки вычтем вторую: ###Code m[2] = m[2]-m[1] m t = 1/m[2,2] t ###Output _____no_output_____ ###Markdown умножим третью на -0.028.. ###Code m[2] = m[2]*t m m[0,2], m[1,2] ###Output _____no_output_____ ###Markdown из первой вычтем третью, умноженную на 1.66.. а из второй третью, умноженную на -2.269... ###Code m[0] = m[0] - m[2]*m[0,2] m[1] = m[1] - m[2]*m[1,2] m m[0,1] ###Output _____no_output_____ ###Markdown из первой вычтем вторую, уноженную на -0.(6) ###Code m[0] = m[0] - m[1]*m[0,1] m #ответ x = 1 y = 3 z = 2 #проверим print(3*x - 2*y + 5*z) print(7*x + 4*y - 8*z) print(5*x - 3*y - 4*z) ###Output 7 3 -12 ###Markdown Решение правильное Задание 2.2 Решите систему уравнений: $$x^2 + yx - 9 = 0 \\x - y/5 = 0$$ Решение: выразим y из второго уравнения и подставим в первое $$x = y/5 \\y = 5x$$ $$x^2 + 5x^2 - 9 = 0 \\6x^2 = 9 \\x^2 = 3/2$$ $$x = \pm \sqrt{3/2}$$ ###Code x = np.sqrt(3/2) x x1 = x x2 = -x print(x1, x2) y1 = x1*5 y2 = x2*5 print(y1, y2) ###Output 6.123724356957945 -6.123724356957945 ###Markdown Проверим: ###Code f1 = x1**2 + y1*x1 - 9 f2 = x2**2 + y2*x2 - 9 print(f1, f2) #почти 0 - учитывая погрешности вычислений f1 = x1 - y1/5 f2 = x2 - y2/5 print(f1, f2) ###Output 0.0 0.0 ###Markdown Задание 3 Решите задачу:Площадь пола прямоугольной комнаты равна 48 м2,а его периметр равен 28 м. Найдите длину и ширину комнаты. Решение: $$x*y = 48 \\2(x+y) = 28, x+y = 14$$ Выразим y из одного уравнения и подставим в другое: $$y = 14 - x \\x * (14 - x) = 48$$ $$x^2 - 14x + 48 = 0$$ $$D = b^2 - 4ac \\x = \frac{-b \pm \sqrt{D}}{2a}$$ ###Code D = (-14)**2 - 4*48 D x1 = (14 + np.sqrt(4))/2 x1 x2 = (14 - np.sqrt(4))/2 x2 y1 = 14 - x1 y1 y2 = 14 - x2 y2 ###Output _____no_output_____ ###Markdown пары корней системы уравнения одинаковые, проверим: ###Code f1 = x1*y1 f2 = x1+y1 print(f1, f2) ###Output 48.0 14.0 ###Markdown Задание 4 Постройте на одном графике две кривые y(x) для функции двух переменной y(k,x)=cos(k∙x), взяв для одной кривой значение k=1, а для другой – любое другое k, не равное 1. 
###Code from matplotlib import pyplot as plt %matplotlib inline x = np.linspace(-10, 10, 100) plt.plot(x, np.cos(x)) plt.plot(x, np.cos(2*x)) plt.show() ###Output _____no_output_____ ###Markdown $$C_n^k = \frac{n!}{k!(n-k)!}$$ $$A_n^k = \frac{n!}{(n-k)!}$$ $$P_n = n!$$ ###Code import numpy as np from math import factorial def C(n, k): return factorial(n)/(factorial(k)*factorial(n-k)) def A(n, k): return factorial(n)/(factorial(n-k)) def P(n): return factorial(n) ###Output _____no_output_____ ###Markdown Задание 1 Из колоды в 52 карты извлекаются случайным образом 4 карты. 1.1 Найти вероятность того, что все карты – крести вариант 1 ###Code prob = C(13, 4)/C(52,4) prob ###Output _____no_output_____ ###Markdown вариант 2 поочередно вытащим 4 карты крести: ###Code prob = (13/52)*(12/51)*(11/50)*(10/49) prob ###Output _____no_output_____ ###Markdown 1. 2 Найти вероятность, что среди 4-х карт окажется хотя бы один туз. сначала найдем вероятность вытащить 4 карты без тузов ###Code p1 = C(48, 4)/C(52, 4) p1 prob = 1 - p1 prob # вариант 2 prob = 1 - (48/52)*(47/51)*(46/50)*(45/49) prob ###Output _____no_output_____ ###Markdown Задание 2 На входной двери подъезда установлен кодовый замок, содержащий десять кнопок с цифрами от 0 до 9. Код содержит три цифры, которые нужно нажать одновременно. Какова вероятность того, что человек, не знающий код, откроет дверь с первой попытки? ###Code prob = 1/C(10,3) prob ###Output _____no_output_____ ###Markdown Задание 3 В ящике имеется 15 деталей, из которых 9 окрашены. Рабочий случайным образом извлекает 3 детали. Какова вероятность того, что все извлеченные детали окрашены? ###Code prob = (9/15)*(8/14)*(7/13) prob prob = C(9,3)/C(15,3) prob ###Output _____no_output_____ ###Markdown Задание 4 В лотерее 100 билетов. Из них 2 выигрышных. Какова вероятность того, что 2 приобретенных билета окажутся выигрышными? ###Code prob = (2/100)*(1/99) prob prob = C(2,2)/C(100,2) prob ###Output _____no_output_____
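###Markdown As a cross-check on the helper functions defined above, Python 3.8 and later ship equivalent routines in the standard library (`math.comb` and `math.perm`), so the same lottery probability can be computed without writing the factorial formulas by hand.
###Code
import math

# Same quantity as C(2, 2) / C(100, 2) above.
print(math.comb(2, 2) / math.comb(100, 2))
print(math.perm(10, 3))  # equivalent to A(10, 3)
###Output
_____no_output_____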
notebooks/1_data_wrangling.ipynb
###Markdown 1. Data Wrangling In this part of notebook I will cover how to wrangle data so we can extract the feature easily. We will try to label each event whether it is belong to users subscription phase. example: | userId | upgrade_time | downgrade_time | ...event | |--------|--------------|----------------|----------| | 1111 | 2020-12-05 | 2020-12-29 | ...event | | 2222 | 2020-11-12 | null | ...event | | 3333 | 2020-10-15 | 2020-10-29 | ...event | the null value in downgrade time means that the user isn't churning Import Needed Library and Initialiaze PySpark ###Code from pyspark.sql import SparkSession import pyspark.sql.functions as F import pyspark.sql.types as T from pyspark import SparkContext, SparkConf spark = SparkSession.builder.appName('sparkify') \ .config('spark.driver.maxResultSize', '3g') \ .getOrCreate() ###Output _____no_output_____ ###Markdown Load the Dataset from Google Cloud Storage ###Code # Load dataset from GCS and change ts from bigint to datetime format df = spark.read.parquet('gs://udacity-dsnd/sparkify_event_data.parquet/') df = df.withColumnRenamed("ts","ts_temp").withColumn("ts", (F.col("ts_temp") / 1000).cast(T.TimestampType())).drop("ts_temp") df.cache() df.printSchema() ###Output root |-- artist: string (nullable = true) |-- auth: string (nullable = true) |-- firstName: string (nullable = true) |-- gender: string (nullable = true) |-- itemInSession: long (nullable = true) |-- lastName: string (nullable = true) |-- length: double (nullable = true) |-- level: string (nullable = true) |-- location: string (nullable = true) |-- method: string (nullable = true) |-- page: string (nullable = true) |-- registration: long (nullable = true) |-- sessionId: long (nullable = true) |-- song: string (nullable = true) |-- status: long (nullable = true) |-- userAgent: string (nullable = true) |-- userId: string (nullable = true) |-- ts: timestamp (nullable = true) ###Markdown Wrangle Dataframe to Get Event Labeled Dataframe First, we want to find when each user upgrading and downgrading subscription ###Code # up_df is data when user upgrading # | userId | ts | page | # | 234124 | 2018-1-1 12:00:00 | Submit Upgrade | up_df = df.select(["userId", "ts"]) \ .filter(df.page == "Submit Upgrade") \ .withColumnRenamed('ts', 'up_ts') # down_df is data when user upgrading # | userId | ts | page | # | 234124 | 2018-1-5 12:00:00 | Submit Downgrade | down_df = df.select(["userId", "ts"]) \ .filter(df.page == "Submit Downgrade") \ .withColumnRenamed('ts', 'down_ts') \ .withColumnRenamed("userId", "userIdTemp") ###Output _____no_output_____ ###Markdown Second, We query to get every upgrade event and the following downgrade event time ###Code # key_df join up_df and down_df to create dataframe when user upgrade and following downgrade in the same row like below # | userId | up_ts | down_ts | isChurn | # | 234124 | 2018-1-2 12:00:00 | 2018-1-5 12:00:00 | True | # | 234124 | 2018-1-6 12:00:00 | 2018-1-9 12:00:00 | True | key_df = up_df.join(down_df, (down_df.userIdTemp == up_df.userId) & (down_df.down_ts > up_df.up_ts), how="left") \ .drop(F.col("userIdTemp")) \ .groupBy(F.col("userId"), up_df.up_ts) \ .agg(F.min(down_df.down_ts)) \ .withColumnRenamed("max(userId)", "userId") \ .withColumn("down_ts", F.when(F.col("min(down_ts)").isNull(), '2099-12-31 00:00:00') \ .otherwise(F.col("min(down_ts)"))) \ .withColumn("isChurn", F.when(F.col("min(down_ts)").isNull(), False).otherwise(True)) \ .orderBy(up_df.up_ts) key_df.cache() key_df.show() # save the result then read it again to reduce query 
complexity key_df.drop("min(down_ts)").write.parquet("gs://udacity-dsnd/key_df.parquet") key_df = spark.read.parquet("hdfs:///user/key_df.parquet") key_df.printSchema() ###Output root |-- userId: string (nullable = true) |-- up_ts: timestamp (nullable = true) |-- down_ts: string (nullable = true) |-- isChurn: boolean (nullable = true) ###Markdown Last, we query to label every event with value from key_df and save it to GCS ###Code # label every event that fall between key_df's up_ts and down_ts with the same userId and save it to GCS # the resulting table will look like below # | userId | up_ts | down_ts | isChurn | event | # | 234124 | 2018-1-5 12:00:00 | 2018-1-5 12:00:00 | True | event1 | # | 234124 | 2018-1-5 12:00:00 | 2018-1-5 12:00:00 | True | event2 | df = df.withColumnRenamed("userId", "userIdTemp") key_df.join(df, (key_df.up_ts <= df.ts) & (df.ts <= key_df.down_ts) & (key_df.userId == df.userIdTemp),how='left') \ .write.parquet('gs://udacity-dsnd/event_labeled.parquet') ###Output _____no_output_____
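###Markdown A quick sanity check after the write is to read the labeled events back and confirm that both churn classes are present; this reuses the output path and the `isChurn` column from the cells above.
###Code
labeled_df = spark.read.parquet('gs://udacity-dsnd/event_labeled.parquet')
labeled_df.groupBy('isChurn').count().show()
###Output
_____no_output_____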
test-features.ipynb
###Markdown Welcome to pyRT - The Python Raytracer https://github.com/martinchristen/pyRTThe goal of pyRT is teaching computer graphics.One part of pyrt is the virtual framebuffer where you can draw Pixels using standard algorithms such as Bresenham's line drawing algorithm.From Version 0.5.0 an additional goal is better Jupyter integration, this is now done in RGBImage.Dependencies:This notebook requires pyrt, pillow, numpy, and numba 1. Virtual Framebuffer for Pixel Operations ###Code from pyrt.renderer import RGBImage from pyrt.math import Vec2, Vec3 import random ###Output _____no_output_____ ###Markdown 1.2 Animated Virtual Framebuffer in Jupyter ###Code w = 320 h = 240 image = RGBImage(w, h) image.clear(Vec3(0.0,0.0,0.4)) for i in range(5000): position = Vec2(random.randint(0, w - 1), random.randint(0, h - 1)) color = Vec3(random.uniform(0, 1), random.uniform(0, 1), random.uniform(0, 1)) image.drawPoint(position, color, 1) image.framebuffer() for i in range(100): pos1 = Vec2(random.randint(0, w - 1), random.randint(0, h - 1)) pos2 = Vec2(random.randint(0, w - 1), random.randint(0, h - 1)) color = Vec3(random.uniform(0, 1), random.uniform(0, 1), random.uniform(0, 1)) image.drawLine(pos1, pos2, color, 2) image.update(fps=30) ###Output _____no_output_____ ###Markdown 1.3 Loading Images ###Code from pyrt.renderer import loadimage image2 = loadimage("data/worldmap/world600.jpg") image2.framebuffer("world") image2.drawCircleFilled(Vec2(300,150), radius=10, color=Vec3(1,0,0), fillcolor=Vec3(1,1,0), size=1) image2.update("world") for i in range(100): pos = Vec2(random.randint(0, image2.width - 1), random.randint(0, image2.height - 1)) radius = random.randint(2,20) color = Vec3(random.uniform(0, 1), random.uniform(0, 1), random.uniform(0, 1)) fillcolor = Vec3(random.uniform(0, 1), random.uniform(0, 1), random.uniform(0, 1)) image2.drawCircleFilled(pos, radius, color, fillcolor, 1) image2.update("world", fps=30) ###Output _____no_output_____ ###Markdown 1.4 Example: Display Recent Earthquakes on Mapdata from USGS: https://earthquake.usgs.gov/earthquakes/feed/v1.0/geojson.php ###Code import requests import json from pyrt.renderer import RGBImage, loadimage from pyrt.math import Vec2, Vec3 data = requests.get("https://earthquake.usgs.gov/earthquakes/feed/v1.0/summary/2.5_week.geojson") with open("earthquakes.geojson","wb") as file: file.write(data.content) with open("earthquakes.geojson") as json_file: data = json.load(json_file) image3 = loadimage("data/worldmap/world600.jpg") for element in data["features"]: mag = element["properties"]["mag"] coord = element["geometry"]["coordinates"] x = int(image3.width*((coord[0] + 180) / 360)) y = int(image3.height*((coord[1] + 90) / 180)) if mag>4.5: image3.drawCircleFilled(Vec2(x,y), int(mag), Vec3(0,0,0), Vec3(1,0,0), 1) image3 ###Output _____no_output_____ ###Markdown 1.5 Procedural Images ###Code from numba import jit import numpy as np import math from pyrt.renderer import RGBImage, loadimage from pyrt.math import Vec2, Vec3 from pyrt.math import clamp3, cross3, step from pyrt.math import SimplexNoise, TileableNoise from pyrt.math import mod w = 256 h = 256 image = RGBImage(w, h) rgb = Vec3() noise = SimplexNoise() ###Output _____no_output_____ ###Markdown 1.5.1 Stripes and Checkerboard ###Code for x in range(w): for y in range(h): xx = x/w # range [0,1[ yy = y/h # range [0,1[ if mod(6.0*xx, 1.0) < 0.5: # if 6.0*xx % 1.0 < 0.5: rgb[0] = rgb[1] = rgb[2] = 0 else: rgb[0] = rgb[1] = rgb[2] = 255 image.data[y][x][0] = rgb[0] image.data[y][x][1] = rgb[1] 
image.data[y][x][2] = rgb[2] image for x in range(w): for y in range(h): xx = x/w # range [0,1[ yy = y/h # range [0,1[ if mod(8.0*yy, 1.0) < 0.5: # if 8.0*yy % 1.0 < 0.5: rgb[0] = rgb[1] = rgb[2] = 0 else: rgb[0] = rgb[1] = rgb[2] = 255 image.data[y][x][0] = rgb[0] image.data[y][x][1] = rgb[1] image.data[y][x][2] = rgb[2] image for x in range(w): for y in range(h): xx = x/w # range [0,1[ yy = y/h # range [0,1[ if (mod(4.0*xx, 1.0) < 0.5) ^ (mod(4.0*yy, 1.0) < 0.5): rgb[0] = rgb[1] = rgb[2] = 0 else: rgb[0] = rgb[1] = rgb[2] = 255 image.data[y][x][0] = rgb[0] image.data[y][x][1] = rgb[1] image.data[y][x][2] = rgb[2] image ###Output _____no_output_____ ###Markdown 1.5.2 Mandelbrot ###Code def CreateMandelbrotImage(w=256, h=256, maxiteration=200): image = RGBImage(w, h) rgb = Vec3() for x in range(w): for y in range(h): xx = 2*(x/w-0.5)-0.5 yy = 2*(y/h-0.5) xpos = 0.0 ypos = 0.0 iteration = 0.0 while (xpos*xpos + ypos*ypos < 4) and (iteration < maxiteration): xpos, ypos = xpos*xpos - ypos*ypos + xx, 2.0*xpos*ypos + yy iteration += 1.0 rgb[0] = (iteration % 20.0) / 20.0 rgb[1] = (iteration % 10.0) / 10.0 rgb[2] = (iteration % 20.0) / 20.0 image.data[y][x][0] = int(255*rgb[0]) image.data[y][x][1] = int(255*rgb[1]) image.data[y][x][2] = int(255*rgb[2]) return image %%time CreateMandelbrotImage(256,256,200) ###Output _____no_output_____ ###Markdown Using Numba to speed up:Please note that pyrt functions can't be used with numba at this time, we create an external numpy array for the framebuffer. ###Code @jit def CreateMandelbrotImageNumba(w=256, h=256, maxiteration=200): imagedata = np.zeros((w, h, 3), dtype=np.uint8) rgb = np.zeros(3, dtype=np.float_) for x in range(w): for y in range(h): xx = 2*(x/w-0.5)-0.5 yy = 2*(y/h-0.5) xpos = 0.0 ypos = 0.0 iteration = 0.0 while (xpos*xpos + ypos*ypos < 4) and (iteration < maxiteration): xpos, ypos = xpos*xpos - ypos*ypos + xx, 2.0*xpos*ypos + yy iteration += 1.0 rgb[0] = (iteration % 20.0) / 20.0 rgb[1] = (iteration % 10.0) / 10.0 rgb[2] = (iteration % 20.0) / 20.0 imagedata[y][x][0] = int(255*rgb[0]) imagedata[y][x][1] = int(255*rgb[1]) imagedata[y][x][2] = int(255*rgb[2]) return imagedata %%time w = 256 h = 256 data = CreateMandelbrotImageNumba(w,h,200) image = RGBImage(w, h, init_memory=False) image.data = data image ###Output _____no_output_____ ###Markdown 1.5.3 Perlin Noise ###Code for x in range(w): for y in range(h): xx = x/w yy = y/h n = noise.noise2(10*xx,20*yy) rgb[0] = 0 rgb[1] = n*n*256 rgb[2] = abs(n*256) rgb = clamp3(rgb, Vec3(0,0,0), Vec3(255,255,255)) image.data[y][x][0] = int(rgb[0]) image.data[y][x][1] = int(rgb[1]) image.data[y][x][2] = int(rgb[2]) image ###Output _____no_output_____ ###Markdown Perlin Noise 3D ###Code z=100 for x in range(w): for y in range(h): xx = x/w yy = y/h n = abs(noise.noise3(5*xx,5*yy,z))*256 if n<=50: rgb[0] = 60 rgb[1] = 0 rgb[2] = 0 else: rgb[0] = 0 rgb[1] = n rgb[2] = n rgb = clamp3(rgb, Vec3(0,0,0), Vec3(255,255,255)) image.data[y][x][0] = int(rgb[0]) image.data[y][x][1] = int(rgb[1]) image.data[y][x][2] = int(rgb[2]) image ###Output _____no_output_____ ###Markdown Cumulative Noise ###Code for x in range(w): for y in range(h): xx = x/w yy = y/h n = abs(noise.noise2(3*xx*xx,2*yy*yy))*256 q = abs(noise.noise2(12*xx,12*yy))*256 rgb[0] = n rgb[1] = q rgb[2] = n rgb = clamp3(rgb, Vec3(0,0,0), Vec3(255,255,255)) image.data[y][x][0] = int(rgb[0]) image.data[y][x][1] = int(rgb[1]) image.data[y][x][2] = int(rgb[2]) image ###Output _____no_output_____ ###Markdown Welcome to pyRT - The Python Raytracer 
https://github.com/martinchristen/pyRTThe goal of pyRT is teaching computer graphics.One part of pyrt is the virtual framebuffer where you can draw Pixels using standard algorithms such as Bresenham's line drawing algorithm.From Version 0.5.0 an additional goal is better Jupyter integration, this is now done in RGBImage.Dependencies:This notebook requires pyrt, pillow, numpy, and numba 1. Virtual Framebuffer for Pixel Operations ###Code from pyrt.renderer import RGBImage from pyrt.math import Vec2, Vec3 import random ###Output _____no_output_____ ###Markdown 1.2 Animated Virtual Framebuffer in Jupyter ###Code w = 320 h = 240 image = RGBImage(w, h) image.clear(Vec3(0.0,0.0,0.4)) for i in range(5000): position = Vec2(random.randint(0, w - 1), random.randint(0, h - 1)) color = Vec3(random.uniform(0, 1), random.uniform(0, 1), random.uniform(0, 1)) image.drawPoint(position, color, 1) image.framebuffer() for i in range(100): pos1 = Vec2(random.randint(0, w - 1), random.randint(0, h - 1)) pos2 = Vec2(random.randint(0, w - 1), random.randint(0, h - 1)) color = Vec3(random.uniform(0, 1), random.uniform(0, 1), random.uniform(0, 1)) image.drawLine(pos1, pos2, color, 2) image.update(fps=30) ###Output _____no_output_____ ###Markdown 1.3 Loading Images ###Code from pyrt.renderer import loadimage image2 = loadimage("data/worldmap/world600.jpg") image2.framebuffer("world") image2.drawCircleFilled(Vec2(300,150), radius=10, color=Vec3(1,0,0), fillcolor=Vec3(1,1,0), size=1) image2.update("world") for i in range(100): pos = Vec2(random.randint(0, image2.width - 1), random.randint(0, image2.height - 1)) radius = random.randint(2,20) color = Vec3(random.uniform(0, 1), random.uniform(0, 1), random.uniform(0, 1)) fillcolor = Vec3(random.uniform(0, 1), random.uniform(0, 1), random.uniform(0, 1)) image2.drawCircleFilled(pos, radius, color, fillcolor, 1) image2.update("world", fps=30) ###Output _____no_output_____ ###Markdown 1.4 Example: Display Recent Earthquakes on Mapdata from USGS: https://earthquake.usgs.gov/earthquakes/feed/v1.0/geojson.php ###Code import requests import json from pyrt.renderer import RGBImage, loadimage from pyrt.math import Vec2, Vec3 data = requests.get("https://earthquake.usgs.gov/earthquakes/feed/v1.0/summary/2.5_week.geojson") with open("earthquakes.geojson","wb") as file: file.write(data.content) with open("earthquakes.geojson") as json_file: data = json.load(json_file) image3 = loadimage("data/worldmap/world600.jpg") for element in data["features"]: mag = element["properties"]["mag"] coord = element["geometry"]["coordinates"] x = int(image3.width*((coord[0] + 180) / 360)) y = int(image3.height*((coord[1] + 90) / 180)) if mag>4.5: image3.drawCircleFilled(Vec2(x,y), int(mag), Vec3(0,0,0), Vec3(1,0,0), 1) image3 ###Output _____no_output_____ ###Markdown 1.5 Procedural Images ###Code from numba import jit import numpy as np import math from pyrt.renderer import RGBImage, loadimage from pyrt.math import Vec2, Vec3 from pyrt.math import clamp3, cross3, step from pyrt.math import SimplexNoise, TileableNoise from pyrt.math import mod w = 256 h = 256 image = RGBImage(w, h) rgb = Vec3() noise = SimplexNoise() ###Output _____no_output_____ ###Markdown 1.5.1 Stripes and Checkerboard ###Code for x in range(w): for y in range(h): xx = x/w # range [0,1[ yy = y/h # range [0,1[ if mod(6.0*xx, 1.0) < 0.5: # if 6.0*xx % 1.0 < 0.5: rgb[0] = rgb[1] = rgb[2] = 0 else: rgb[0] = rgb[1] = rgb[2] = 255 image.data[y][x][0] = rgb[0] image.data[y][x][1] = rgb[1] image.data[y][x][2] = rgb[2] image for x in 
range(w): for y in range(h): xx = x/w # range [0,1[ yy = y/h # range [0,1[ if mod(8.0*yy, 1.0) < 0.5: # if 8.0*yy % 1.0 < 0.5: rgb[0] = rgb[1] = rgb[2] = 0 else: rgb[0] = rgb[1] = rgb[2] = 255 image.data[y][x][0] = rgb[0] image.data[y][x][1] = rgb[1] image.data[y][x][2] = rgb[2] image for x in range(w): for y in range(h): xx = x/w # range [0,1[ yy = y/h # range [0,1[ if (mod(4.0*xx, 1.0) < 0.5) ^ (mod(4.0*yy, 1.0) < 0.5): rgb[0] = rgb[1] = rgb[2] = 0 else: rgb[0] = rgb[1] = rgb[2] = 255 image.data[y][x][0] = rgb[0] image.data[y][x][1] = rgb[1] image.data[y][x][2] = rgb[2] image ###Output _____no_output_____ ###Markdown 1.5.2 Mandelbrot ###Code def CreateMandelbrotImage(w=256, h=256, maxiteration=200): image = RGBImage(w, h) rgb = Vec3() for x in range(w): for y in range(h): xx = 2*(x/w-0.5)-0.5 yy = 2*(y/h-0.5) xpos = 0.0 ypos = 0.0 iteration = 0.0 while (xpos*xpos + ypos*ypos < 4) and (iteration < maxiteration): xpos, ypos = xpos*xpos - ypos*ypos + xx, 2.0*xpos*ypos + yy iteration += 1.0 rgb[0] = (iteration % 20.0) / 20.0 rgb[1] = (iteration % 10.0) / 10.0 rgb[2] = (iteration % 20.0) / 20.0 image.data[y][x][0] = int(255*rgb[0]) image.data[y][x][1] = int(255*rgb[1]) image.data[y][x][2] = int(255*rgb[2]) return image %%time CreateMandelbrotImage(256,256,200) ###Output _____no_output_____ ###Markdown Using Numba to speed up:Please note that pyrt functions can't be used with numba at this time, we create an external numpy array for the framebuffer. ###Code @jit def CreateMandelbrotImageNumba(w=256, h=256, maxiteration=200): imagedata = np.zeros((w, h, 3), dtype=np.uint8) rgb = np.zeros(3, dtype=np.float_) for x in range(w): for y in range(h): xx = 2*(x/w-0.5)-0.5 yy = 2*(y/h-0.5) xpos = 0.0 ypos = 0.0 iteration = 0.0 while (xpos*xpos + ypos*ypos < 4) and (iteration < maxiteration): xpos, ypos = xpos*xpos - ypos*ypos + xx, 2.0*xpos*ypos + yy iteration += 1.0 rgb[0] = (iteration % 20.0) / 20.0 rgb[1] = (iteration % 10.0) / 10.0 rgb[2] = (iteration % 20.0) / 20.0 imagedata[y][x][0] = int(255*rgb[0]) imagedata[y][x][1] = int(255*rgb[1]) imagedata[y][x][2] = int(255*rgb[2]) return imagedata %%time w = 256 h = 256 data = CreateMandelbrotImageNumba(w,h,200) image = RGBImage(w, h, init_memory=False) image.data = data image ###Output _____no_output_____ ###Markdown 1.5.3 Perlin Noise ###Code for x in range(w): for y in range(h): xx = x/w yy = y/h n = noise.noise2(10*xx,20*yy) rgb[0] = 0 rgb[1] = n*n*256 rgb[2] = abs(n*256) rgb = clamp3(rgb, Vec3(0,0,0), Vec3(255,255,255)) image.data[y][x][0] = int(rgb[0]) image.data[y][x][1] = int(rgb[1]) image.data[y][x][2] = int(rgb[2]) image ###Output _____no_output_____ ###Markdown Perlin Noise 3D ###Code z=100 for x in range(w): for y in range(h): xx = x/w yy = y/h n = abs(noise.noise3(5*xx,5*yy,z))*256 if n<=50: rgb[0] = 60 rgb[1] = 0 rgb[2] = 0 else: rgb[0] = 0 rgb[1] = n rgb[2] = n rgb = clamp3(rgb, Vec3(0,0,0), Vec3(255,255,255)) image.data[y][x][0] = int(rgb[0]) image.data[y][x][1] = int(rgb[1]) image.data[y][x][2] = int(rgb[2]) image ###Output _____no_output_____ ###Markdown Cumulative Noise ###Code for x in range(w): for y in range(h): xx = x/w yy = y/h n = abs(noise.noise2(3*xx*xx,2*yy*yy))*256 q = abs(noise.noise2(12*xx,12*yy))*256 rgb[0] = n rgb[1] = q rgb[2] = n rgb = clamp3(rgb, Vec3(0,0,0), Vec3(255,255,255)) image.data[y][x][0] = int(rgb[0]) image.data[y][x][1] = int(rgb[1]) image.data[y][x][2] = int(rgb[2]) image ###Output _____no_output_____
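###Markdown A common next step after combining two noise lookups like this is fractal Brownian motion: summing several octaves of the same noise at increasing frequency and decreasing amplitude. The sketch below reuses `w`, `h`, `noise` and `RGBImage` from the cells above; the octave count, gain and gray-level mapping are arbitrary choices for illustration.
###Code
# Fractal Brownian motion: sum octaves of simplex noise.
fbm_image = RGBImage(w, h)
octaves = 5
for x in range(w):
    for y in range(h):
        xx = x / w
        yy = y / h
        value = 0.0
        amplitude = 0.5
        frequency = 4.0
        for _ in range(octaves):
            value += amplitude * noise.noise2(frequency * xx, frequency * yy)
            amplitude *= 0.5
            frequency *= 2.0
        # Map the roughly [-1, 1] sum to a gray level.
        shade = int(max(0.0, min(255.0, 128.0 + 127.0 * value)))
        fbm_image.data[y][x][0] = shade
        fbm_image.data[y][x][1] = shade
        fbm_image.data[y][x][2] = shade
fbm_image
###Output
_____no_output_____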
Getting_information_from_stock.ipynb
###Markdown ###Code
# Getting information from a stock: install yfinance first so the imports below succeed.
!pip install yfinance
import pandas as pd
import numpy as np
import yfinance as yf
import datetime as dt
from pandas_datareader import data as pdr

yf.pdr_override()

stock = input('Enter a stock ticker symbol: ')
print(stock)

startyear = 2021
startmonth = 1
startday = 1
start = dt.datetime(startyear, startmonth, startday)
now = dt.datetime.now()

df = pdr.get_data_yahoo(stock, start, now)
print(df)
###Output _____no_output_____
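###Markdown If the download succeeds, `df` is a DataFrame indexed by date with the usual price columns, so a natural follow-up is a quick plot of the closing price. This is a sketch and assumes the cell above ran without errors.
###Code
import matplotlib.pyplot as plt

df['Close'].plot(title=stock + ' closing price')
plt.xlabel('Date')
plt.ylabel('Price')
plt.show()
###Output
_____no_output_____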
VGG16_in_Keras.ipynb
###Markdown ###Code !wget --no-check-certificate \ https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip \ -O cats_and_dogs_filtered.zip ! unzip cats_and_dogs_filtered.zip import keras,os from keras.models import Sequential from keras.layers import Dense, Conv2D, MaxPool2D , Flatten from keras.preprocessing.image import ImageDataGenerator import numpy as np trdata = ImageDataGenerator() traindata = trdata.flow_from_directory(directory="cats_and_dogs_filtered/train",target_size=(224,224)) tsdata = ImageDataGenerator() testdata = tsdata.flow_from_directory(directory="cats_and_dogs_filtered/validation", target_size=(224,224)) model = Sequential() model.add(Conv2D(input_shape=(224,224,3),filters=64,kernel_size=(3,3),padding="same", activation="relu")) model.add(Conv2D(filters=64,kernel_size=(3,3),padding="same", activation="relu")) model.add(MaxPool2D(pool_size=(2,2),strides=(2,2))) model.add(Conv2D(filters=128, kernel_size=(3,3), padding="same", activation="relu")) model.add(Conv2D(filters=128, kernel_size=(3,3), padding="same", activation="relu")) model.add(MaxPool2D(pool_size=(2,2),strides=(2,2))) model.add(Conv2D(filters=256, kernel_size=(3,3), padding="same", activation="relu")) model.add(Conv2D(filters=256, kernel_size=(3,3), padding="same", activation="relu")) model.add(Conv2D(filters=256, kernel_size=(3,3), padding="same", activation="relu")) model.add(MaxPool2D(pool_size=(2,2),strides=(2,2))) model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu")) model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu")) model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu")) model.add(MaxPool2D(pool_size=(2,2),strides=(2,2))) model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu")) model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu")) model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu")) model.add(MaxPool2D(pool_size=(2,2),strides=(2,2))) model.add(Flatten()) model.add(Dense(units=4096,activation="relu")) model.add(Dense(units=4096,activation="relu")) model.add(Dense(units=2, activation="softmax")) from tensorflow.keras.optimizers import Adam # - Works from keras.optimizers import adam_v2 from keras.models import Sequential from keras.layers import Dense from keras.models import model_from_json import numpy import os opt = adam_v2.Adam(learning_rate=0.001 ) model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['acc', 'mse']) model.summary() from keras.callbacks import ModelCheckpoint, EarlyStopping checkpoint = ModelCheckpoint("vgg16_1.h5", monitor='val_acc', verbose=1, save_best_only=True, save_weights_only=False, mode='auto', period=1) early = EarlyStopping(monitor='val_acc', min_delta=0, patience=20, verbose=1, mode='auto') hist = model.fit_generator(steps_per_epoch=10,generator=traindata, validation_data= testdata, validation_steps=10,epochs=10,callbacks=[checkpoint,early]) import matplotlib.pyplot as plt plt.plot(hist.history["acc"]) plt.plot(hist.history['val_acc']) plt.plot(hist.history['loss']) plt.plot(hist.history['val_loss']) plt.title("model accuracy") plt.ylabel("Accuracy") plt.xlabel("Epoch") plt.legend(["Accuracy","Validation Accuracy","loss","Validation Loss"]) plt.show() from keras.preprocessing import image img = image.load_img("/content/cats_and_dogs_filtered/validation/dogs/dog.2000.jpg",target_size=(224,224)) img = np.asarray(img) plt.imshow(img) img = 
np.expand_dims(img, axis=0)
from keras.models import load_model
saved_model = load_model("vgg16_1.h5")
output = saved_model.predict(img)
if output[0][0] > output[0][1]:
    print("cat")
else:
    print('dog')
###Output
_____no_output_____
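###Markdown
One thing to note about the training setup earlier in this notebook: an Adam optimizer is created with `adam_v2.Adam(learning_rate=0.001)`, but `model.compile` is then called with the string `'sgd'`, so that Adam instance is never used; `fit_generator` and the `period` argument of `ModelCheckpoint` are also deprecated in current Keras releases. Below is a minimal sketch, assuming TF 2.x, of how the same training could be wired so the configured optimizer is actually applied; the step and epoch counts simply mirror the values used above.
###Code
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping

opt = Adam(learning_rate=0.001)
# Pass the optimizer object instead of the 'sgd' string so the learning rate above is actually used
model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['acc'])

checkpoint = ModelCheckpoint("vgg16_1.h5", monitor='val_acc', save_best_only=True, mode='auto')
early = EarlyStopping(monitor='val_acc', patience=20, mode='auto')

# In TF 2.x, model.fit accepts the ImageDataGenerator iterators directly (fit_generator is deprecated)
hist = model.fit(traindata, validation_data=testdata,
                 steps_per_epoch=10, validation_steps=10,
                 epochs=10, callbacks=[checkpoint, early])
###Output
_____no_output_____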
cnn_tpu.ipynb
###Markdown Pytorch hands-on (CNN on TPU)Adapted from [here](https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html) ###Code !rm -r ./log %tensorflow_version 2.x %load_ext tensorboard %tensorboard --logdir ./log import os assert os.environ['COLAB_TPU_ADDR'], 'Make sure to select TPU from Edit > Notebook settings > Hardware accelerator' os.environ["XLA_USE_BF16"] = "1" ###Output _____no_output_____ ###Markdown Installing Pytorch/XLA ###Code VERSION = "20200325" #@param ["1.5" , "20200325", "nightly"] !curl https://raw.githubusercontent.com/pytorch/xla/master/contrib/scripts/env-setup.py -o pytorch-xla-env-setup.py !python pytorch-xla-env-setup.py --version $VERSION from time import time import torch from torch import nn from torch import optim import torch.nn.functional as F from torch.utils.tensorboard import SummaryWriter import torchvision import torchvision.transforms as transforms import torch_xla import torch_xla.core.xla_model as xm ###Output _____no_output_____ ###Markdown Load image data ###Code def get_data(batch_size: int=64): transform = transforms.Compose( [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform) trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=2) testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform) testloader = torch.utils.data.DataLoader(testset, batch_size=4, shuffle=False, num_workers=2) classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck') return trainloader, testloader, classes ###Output _____no_output_____ ###Markdown CNN model ###Code class Net(nn.Module): def __init__(self): super(Net, self).__init__() self.conv1 = nn.Conv2d(3, 6, 5).bfloat16() self.pool = nn.MaxPool2d(2, 2).bfloat16() self.conv2 = nn.Conv2d(6, 16, 5).bfloat16() self.fc1 = nn.Linear(16 * 5 * 5, 120).bfloat16() self.fc2 = nn.Linear(120, 84).bfloat16() self.fc3 = nn.Linear(84, 10).bfloat16() def forward(self, x): x = self.pool(F.relu(self.conv1(x))) x = self.pool(F.relu(self.conv2(x))) x = x.view(-1, 16 * 5 * 5) x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) x = self.fc3(x) return x ###Output _____no_output_____ ###Markdown Define functions Training ###Code def train_tpu(model: nn.Module, trainloader, log_dir: str, device): model.to(device) loss = nn.CrossEntropyLoss() opt = optim.Adam(model.parameters(), lr=0.001) writer = SummaryWriter(log_dir) running_loss = 0.0 prev_time = time() n_minibatches = 0 for epoch in range(4): for i, data in enumerate(trainloader, 0): # get the inputs; data is a list of [inputs, labels] inputs = data[0].to(device) labels = data[1].to(device) # zero the parameter gradients opt.zero_grad() # forward + backward + optimize outputs = model(inputs) loss_value = loss(outputs, labels) loss_value.backward() # opt.step() # For CPU/GPU xm.optimizer_step(opt, barrier=True) # Note: Cloud TPU-specific code! 
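            # With barrier=True, optimizer_step also issues an XLA step barrier (like xm.mark_step()),
            # which makes the lazily recorded graph compile and execute on the TPU every iteration.
            # When batches are fed through torch_xla's ParallelLoader, the loader marks the step
            # itself and this explicit barrier is not required.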
writer.add_scalar("loss_value", loss_value, n_minibatches) n_minibatches += 1 # print statistics running_loss += loss_value.item() if i % 100 == 99: # print every 100 mini-batches print('[{}, {:5d}] loss: {:.3f}, elapsed time: {:.1f} [sec]'.format( epoch + 1, i + 1, running_loss / 2000, time() - prev_time)) running_loss = 0.0 prev_time = time() ###Output _____no_output_____ ###Markdown Prediction ###Code def evaluate(model: nn.Module, testloader, device): correct = 0 total = 0 with torch.no_grad(): for data in testloader: inputs = data[0].to(device) labels = data[1].to(device) outputs = model(inputs) _, predicted = torch.max(outputs.data, 1) total += labels.size(0) correct += (predicted == labels).sum().item() print('Accuracy of the network on the 10000 test images: %d %%' % ( 100 * correct / total)) ###Output _____no_output_____ ###Markdown Training and evaluation of the model on TPU ###Code trainloader, testloader, classes = get_data() model = Net() dev = xm.xla_device() train_tpu(model, trainloader, "./log/2", dev) evaluate(model, testloader, dev) model.to("cpu") torch.save({ "model": model.state_dict(), }, "./model_tpu.pt") ###Output _____no_output_____ ###Markdown Load trained model ###Code trainloader, testloader, classes = get_data() model = Net() dev = xm.xla_device() checkpoint = torch.load("./model_tpu.pt") model.load_state_dict(checkpoint["model"]) model.to(dev) evaluate(model, testloader, dev) ###Output _____no_output_____
notebooks/Logistic_Regression.ipynb
###Markdown Logistic Regression ###Code import pandas as pd import numpy as np import seaborn as sns from matplotlib import pyplot as plt import warnings from sklearn.model_selection import train_test_split from sklearn.linear_model import LogisticRegression from sklearn import metrics import seaborn as sn import matplotlib.pyplot as plt import statsmodels.api as sm from sklearn.feature_selection import RFE warnings.filterwarnings('ignore') results = pd.DataFrame(columns=["data", "regularization", "package", "accuracy", "precision", "recall", "f1", "rSquared", "AUC"]) # store results hcc_median = pd.read_csv('../data/raw/median.csv') hcc_mean = pd.read_csv('../data/raw/mean.csv') hcc_mode = pd.read_csv('../data/raw/mode.csv') hcc_iterative = pd.read_csv('../data/raw/iterative.csv') def get_data(data_name): if data_name == 'median': data = hcc_median elif data_name == 'mean': data = hcc_mean elif data_name == 'mode': data = hcc_mode else: data = hcc_iterative X = data.drop(['Class'], axis=1) # get independent variable y = data['Class'] # get dependent variable # split data 70% to 30% X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=0) return X_train, X_test, y_train, y_test ###Output _____no_output_____ ###Markdown Feature Selection using Recursive Feature Elimination(RFE) ###Code def feature_selection(X_train, X_test, y_train): model = LogisticRegression() #rfe = RFECV(estimator=model, step=1, cv=7) rfe = RFE(estimator=model, n_features_to_select = 35, step=1) rfe = rfe.fit(X_train, y_train) columns = X_train.columns[rfe.support_] X_train = rfe.transform(X_train) X_test = rfe.transform(X_test) X_train = pd.DataFrame(X_train, columns = columns) X_test = pd.DataFrame(X_test, columns = columns) return X_train, X_test, y_train ### R^2 for SkLearn def full_log_likelihood(w, X, y): score = np.dot(X, w).reshape(1, X.shape[0]) return np.sum(-np.log(1 + np.exp(score))) + np.sum(y * score) def null_log_likelihood(w, X, y): z = np.array([w if i == 0 else 0.0 for i, w in enumerate(w.reshape(1, X.shape[1])[0])]).reshape(X.shape[1], 1) score = np.dot(X, z).reshape(1, X.shape[0]) return np.sum(-np.log(1 + np.exp(score))) + np.sum(y * score) def mcfadden_rsquare(w, X, y): return 1.0 - (full_log_likelihood(w, X, y) / null_log_likelihood(w, X, y)) def mcfadden_adjusted_rsquare(w, X, y): k = float(X.shape[1]) return 1.0 - ((full_log_likelihood(w, X, y) - k) / null_log_likelihood(w, X, y)) ###Output _____no_output_____ ###Markdown Using StatsModels ###Code data_list = ['mean', 'mode', 'median', 'iterative'] for data in data_list: X_train, X_test, y_train, y_test = get_data(data) X_train, X_test, y_train = feature_selection(X_train, X_test, y_train) print('\n') print(data.upper(), ' IMPUTED DATASET') ## run logistic regression using stat models logistic_sm = sm.Logit(y_train.values.reshape(-1,1), X_train).fit() print(logistic_sm.summary()) y_pred = logistic_sm.predict(X_test) y_pred = (y_pred >= 0.5).astype(int).to_numpy() print("Accuracy ({}): {:.2f}".format(data, metrics.accuracy_score(y_test, y_pred))) print("Precision ({}): {:.2f}".format(data, metrics.precision_score(y_test, y_pred))) print("Recall ({}): {:.2f}".format(data, metrics.recall_score(y_test, y_pred))) confusion_matrix = pd.crosstab(y_test, y_pred, rownames=['Actual'], colnames=['Predicted']) sn.heatmap(confusion_matrix, annot=True) plt.show() ## save data for comparison results = results.append(pd.DataFrame([{"data" : data, "regularization": "default", "package": "StatsModels", "accuracy": 
np.round(metrics.accuracy_score(y_test, y_pred), 2), "precision": np.round(metrics.precision_score(y_test, y_pred), 2), "recall": np.round(metrics.recall_score(y_test, y_pred), 2), "f1": np.round(metrics.f1_score(y_test, y_pred), 2), "rSquared": np.round(logistic_sm.prsquared, 2), "AUC": np.round(metrics.roc_auc_score(y_test, y_pred), 2)}]), ignore_index=True) ###Output MEAN IMPUTED DATASET Optimization terminated successfully. Current function value: 0.228057 Iterations 10 Logit Regression Results ============================================================================== Dep. Variable: y No. Observations: 115 Model: Logit Df Residuals: 80 Method: MLE Df Model: 34 Date: Mon, 26 Oct 2020 Pseudo R-squ.: 0.6525 Time: 13:39:36 Log-Likelihood: -26.227 converged: True LL-Null: -75.482 Covariance Type: nonrobust LLR p-value: 3.406e-08 ================================================================================================== coef std err z P>|z| [0.025 0.975] -------------------------------------------------------------------------------------------------- Gender 3.6851 2.226 1.655 0.098 -0.678 8.048 Symptoms -2.9455 1.445 -2.039 0.041 -5.777 -0.114 Alcohol 2.6879 1.788 1.504 0.133 -0.816 6.192 Hepatitis B Surface Antigen -1.2358 2.347 -0.527 0.598 -5.836 3.364 Hepatitis B Core Antibody 1.9883 1.847 1.077 0.282 -1.632 5.608 Hepatitis C Virus Antibody -1.6356 1.607 -1.018 0.309 -4.786 1.514 Cirrhosis -4.3147 2.956 -1.460 0.144 -10.107 1.478 Endemic Countries 9.2102 7.797 1.181 0.238 -6.072 24.492 Smoking -0.8083 1.822 -0.444 0.657 -4.380 2.763 Diabetes -4.4885 1.780 -2.521 0.012 -7.978 -0.999 Obesity 3.5196 2.656 1.325 0.185 -1.686 8.726 Arterial Hypertension 5.1655 2.149 2.404 0.016 0.954 9.377 Chronic Renal Insufficiency -1.3153 1.600 -0.822 0.411 -4.451 1.821 Nonalcoholic Steatohepatitis 2.2509 3.333 0.675 0.499 -4.281 8.783 Esophageal Varices 3.0143 2.482 1.215 0.224 -1.849 7.878 Splenomegaly -0.3097 1.416 -0.219 0.827 -3.085 2.466 Portal Vein Thrombosis -0.2625 1.267 -0.207 0.836 -2.745 2.220 Liver Metastasis -0.8768 1.342 -0.653 0.514 -3.507 1.754 Age at diagnosis -0.1460 0.073 -1.995 0.046 -0.289 -0.003 Performance Status -0.0389 0.567 -0.069 0.945 -1.150 1.073 Encefalopathy degree 0.4672 1.609 0.290 0.772 -2.687 3.621 Ascites degree -2.7644 1.396 -1.980 0.048 -5.501 -0.027 International Normalised Ratio -5.3986 2.570 -2.100 0.036 -10.437 -0.361 Haemoglobin 0.0915 0.227 0.404 0.686 -0.353 0.536 Mean Corpuscular Volume 0.1796 0.087 2.059 0.040 0.009 0.351 Albumin 1.0345 1.028 1.006 0.314 -0.981 3.050 Total Bilirubin 0.3828 0.463 0.827 0.408 -0.524 1.290 Alanine transaminase 0.0488 0.022 2.205 0.027 0.005 0.092 Aspartate transaminase -0.0446 0.018 -2.513 0.012 -0.079 -0.010 Total Proteins -0.0206 0.084 -0.245 0.806 -0.185 0.144 Creatinine 0.8390 0.734 1.142 0.253 -0.601 2.279 Major dimension of nodule -0.1951 0.131 -1.493 0.136 -0.451 0.061 Direct Bilirubin -0.6758 0.684 -0.989 0.323 -2.016 0.664 Iron 0.0775 0.039 2.004 0.045 0.002 0.153 Oxygen Saturation -0.1447 0.073 -1.970 0.049 -0.289 -0.001 ================================================================================================== Possibly complete quasi-separation: A fraction 0.18 of observations can be perfectly predicted. This might indicate that there is complete quasi-separation. In this case some parameters will not be identified. 
Accuracy (mean): 0.72 Precision (mean): 0.78 Recall (mean): 0.72 ###Markdown Using ScikitLearn ###Code data_list = ['mean', 'mode', 'median', 'iterative'] for data in data_list: X_train, X_test, y_train, y_test = get_data(data) X_train, X_test, y_train = feature_selection(X_train, X_test, y_train) print('\n') print(data.upper(), ' IMPUTED DATASET') ## run logistic regression using sklearn logistic = LogisticRegression(fit_intercept=False) logistic = logistic.fit(X_train,y_train) y_pred = logistic.predict_proba(X_test)[::, 1] y_pred = (y_pred >= 0.5).astype(int) w = np.array(logistic.coef_).transpose() # printing values = np.append(logistic.intercept_, logistic.coef_) # get the names of the values names = np.append('intercept', X_train.columns) table_ = pd.DataFrame(values, index = names, columns=['coef']) table_['exp_coef'] = np.exp(table_['coef']) print(table_) print('\n') print("Accuracy ({}): {:.2f}".format(data, metrics.accuracy_score(y_test, y_pred))) print("Precision ({}): {:.2f}".format(data, metrics.precision_score(y_test, y_pred))) print("Recall ({}): {:.2f}".format(data, metrics.recall_score(y_test, y_pred))) confusion_matrix = pd.crosstab(y_test, y_pred, rownames=['Actual'], colnames=['Predicted']) sn.heatmap(confusion_matrix, annot=True) plt.show() y_pred_proba = logistic.predict_proba(X_test)[::,1] fpr, tpr, _ = metrics.roc_curve(y_test, y_pred_proba) auc = metrics.roc_auc_score(y_test, y_pred_proba) plt.plot(fpr,tpr,label="data, auc="+str(auc)) plt.legend(loc=4) plt.show() ## save data for comparison results = results.append(pd.DataFrame([{"data" : data, "regularization": "default", "package": "SkLearn", "accuracy": np.round(metrics.accuracy_score(y_test, y_pred), 2), "precision": np.round(metrics.precision_score(y_test, y_pred), 2), "recall": np.round(metrics.recall_score(y_test, y_pred), 2), "f1": np.round(metrics.f1_score(y_test, y_pred), 2), "rSquared": np.round(mcfadden_rsquare(w, X_test, y_pred), 2), "AUC": np.round(metrics.roc_auc_score(y_test, y_pred), 2)}]), ignore_index=True) ###Output MEAN IMPUTED DATASET coef exp_coef intercept 0.000000 1.000000 Gender 0.431590 1.539703 Symptoms -0.586055 0.556518 Alcohol 0.385811 1.470806 Hepatitis B Surface Antigen 0.516934 1.676879 Hepatitis B Core Antibody 0.204852 1.227344 Hepatitis C Virus Antibody -0.481773 0.617687 Cirrhosis -0.086655 0.916994 Endemic Countries 0.204813 1.227295 Smoking 0.385643 1.470559 Diabetes -0.550968 0.576392 Obesity 0.169974 1.185274 Arterial Hypertension 0.807599 2.242516 Chronic Renal Insufficiency -0.131133 0.877101 Nonalcoholic Steatohepatitis 0.063077 1.065108 Esophageal Varices -0.150364 0.860395 Splenomegaly -0.071636 0.930870 Portal Vein Thrombosis -0.307422 0.735340 Liver Metastasis -0.309711 0.733659 Age at diagnosis -0.027569 0.972807 Performance Status -0.507842 0.601793 Encefalopathy degree -0.263032 0.768717 Ascites degree -0.560014 0.571201 International Normalised Ratio -0.841142 0.431218 Haemoglobin 0.061740 1.063686 Mean Corpuscular Volume 0.049255 1.050488 Albumin 0.169465 1.184671 Total Bilirubin 0.136933 1.146751 Alanine transaminase 0.018462 1.018633 Aspartate transaminase -0.015600 0.984521 Total Proteins -0.029252 0.971172 Creatinine -0.164660 0.848182 Major dimension of nodule -0.162438 0.850069 Direct Bilirubin -0.376935 0.685960 Iron 0.045785 1.046849 Oxygen Saturation -0.067500 0.934728 Accuracy (mean): 0.74 Precision (mean): 0.75 Recall (mean): 0.83 ###Markdown Regularizations Using StatsModels ###Code data_list = ['mean', 'mode', 'median', 'iterative'] for data in 
data_list: X_train, X_test, y_train, y_test = get_data(data) X_train, X_test, y_train = feature_selection(X_train, X_test, y_train) for i in [0, 1]: print('\n') print(data.upper(), ' IMPUTED DATASET using ', 'Lasso' if i == 1 else 'Ridge') ## run logistic regression using stat models logistic_sm = sm.Logit(y_train.values.reshape(-1,1), X_train).fit_regularized(L1_wt = i) # if L1_wt = 1, Lasso: 0, Ridge print(logistic_sm.summary()) y_pred = logistic_sm.predict(X_test) y_pred = (y_pred >= 0.5).astype(int).to_numpy() print("Accuracy ({}): {:.2f}".format(data, metrics.accuracy_score(y_test, y_pred))) print("Precision ({}): {:.2f}".format(data, metrics.precision_score(y_test, y_pred))) print("Recall ({}): {:.2f}".format(data, metrics.recall_score(y_test, y_pred))) confusion_matrix = pd.crosstab(y_test, y_pred, rownames=['Actual'], colnames=['Predicted']) sn.heatmap(confusion_matrix, annot=True) plt.show() ## save data for comparison results = results.append(pd.DataFrame([{"data" : data, "regularization": 'Lasso' if i == 1 else 'Ridge', "package": "StatsModels", "accuracy": np.round(metrics.accuracy_score(y_test, y_pred), 2), "precision": np.round(metrics.precision_score(y_test, y_pred), 2), "recall": np.round(metrics.recall_score(y_test, y_pred), 2), "f1": np.round(metrics.f1_score(y_test, y_pred), 2), "rSquared": np.round(logistic_sm.prsquared, 2), "AUC": np.round(metrics.roc_auc_score(y_test, y_pred), 2)}]), ignore_index=True) ###Output MEAN IMPUTED DATASET using Ridge Optimization terminated successfully. (Exit mode 0) Current function value: 0.2280574133501607 Iterations: 210 Function evaluations: 232 Gradient evaluations: 210 Logit Regression Results ============================================================================== Dep. Variable: y No. 
Observations: 115 Model: Logit Df Residuals: 80 Method: MLE Df Model: 34 Date: Mon, 26 Oct 2020 Pseudo R-squ.: 0.6525 Time: 13:39:41 Log-Likelihood: -26.227 converged: True LL-Null: -75.482 Covariance Type: nonrobust LLR p-value: 3.406e-08 ================================================================================================== coef std err z P>|z| [0.025 0.975] -------------------------------------------------------------------------------------------------- Gender 3.6844 2.226 1.655 0.098 -0.678 8.047 Symptoms -2.9454 1.445 -2.039 0.041 -5.777 -0.114 Alcohol 2.6880 1.788 1.504 0.133 -0.816 6.192 Hepatitis B Surface Antigen -1.2357 2.347 -0.527 0.599 -5.836 3.364 Hepatitis B Core Antibody 1.9882 1.847 1.076 0.282 -1.632 5.608 Hepatitis C Virus Antibody -1.6360 1.607 -1.018 0.309 -4.786 1.514 Cirrhosis -4.3147 2.955 -1.460 0.144 -10.107 1.478 Endemic Countries 9.2101 7.797 1.181 0.238 -6.072 24.492 Smoking -0.8079 1.822 -0.443 0.658 -4.380 2.764 Diabetes -4.4881 1.780 -2.521 0.012 -7.977 -0.999 Obesity 3.5197 2.656 1.325 0.185 -1.686 8.726 Arterial Hypertension 5.1649 2.149 2.404 0.016 0.953 9.376 Chronic Renal Insufficiency -1.3154 1.600 -0.822 0.411 -4.451 1.821 Nonalcoholic Steatohepatitis 2.2512 3.333 0.676 0.499 -4.280 8.783 Esophageal Varices 3.0142 2.482 1.215 0.224 -1.849 7.878 Splenomegaly -0.3094 1.416 -0.218 0.827 -3.085 2.466 Portal Vein Thrombosis -0.2630 1.266 -0.208 0.836 -2.745 2.219 Liver Metastasis -0.8767 1.342 -0.653 0.514 -3.507 1.754 Age at diagnosis -0.1460 0.073 -1.995 0.046 -0.289 -0.003 Performance Status -0.0389 0.567 -0.069 0.945 -1.150 1.073 Encefalopathy degree 0.4670 1.609 0.290 0.772 -2.687 3.621 Ascites degree -2.7641 1.396 -1.980 0.048 -5.501 -0.027 International Normalised Ratio -5.3983 2.570 -2.100 0.036 -10.436 -0.360 Haemoglobin 0.0915 0.227 0.404 0.686 -0.353 0.536 Mean Corpuscular Volume 0.1795 0.087 2.058 0.040 0.009 0.351 Albumin 1.0346 1.028 1.006 0.314 -0.981 3.050 Total Bilirubin 0.3827 0.463 0.827 0.408 -0.524 1.290 Alanine transaminase 0.0488 0.022 2.205 0.027 0.005 0.092 Aspartate transaminase -0.0446 0.018 -2.513 0.012 -0.079 -0.010 Total Proteins -0.0206 0.084 -0.245 0.806 -0.185 0.144 Creatinine 0.8390 0.734 1.142 0.253 -0.601 2.278 Major dimension of nodule -0.1951 0.131 -1.493 0.136 -0.451 0.061 Direct Bilirubin -0.6757 0.684 -0.989 0.323 -2.015 0.664 Iron 0.0775 0.039 2.004 0.045 0.002 0.153 Oxygen Saturation -0.1447 0.073 -1.970 0.049 -0.289 -0.001 ================================================================================================== Possibly complete quasi-separation: A fraction 0.18 of observations can be perfectly predicted. This might indicate that there is complete quasi-separation. In this case some parameters will not be identified. 
Accuracy (mean): 0.72 Precision (mean): 0.78 Recall (mean): 0.72 ###Markdown Using SkLearn ###Code data_list = ['mean', 'mode', 'median', 'iterative'] for data in data_list: X_train, X_test, y_train, y_test = get_data(data) X_train, X_test, y_train = feature_selection(X_train, X_test, y_train) penalties = ['l1', 'l2', 'elasticnet'] for penalty in penalties: if penalty == 'l1': solver = 'liblinear' name = 'Lasso' l1_ratio = None multi_class = 'auto' elif penalty == 'l2': solver = 'lbfgs' name = 'Ridge' l1_ratio = None multi_class = 'auto' elif penalty == 'elasticnet': solver='saga' name = 'ElasticNet' l1_ratio = 0.5 multi_class = 'ovr' print('\n') print(data.upper(), ' IMPUTED DATASET using ', name) ## run logistic regression using sklearn logistic = LogisticRegression(fit_intercept=False, penalty=penalty, solver=solver, multi_class=multi_class, l1_ratio = l1_ratio) logistic = logistic.fit(X_train,y_train) y_pred = logistic.predict_proba(X_test)[::, 1] y_pred = (y_pred >= 0.5).astype(int) w = np.array(logistic.coef_).transpose() # printing values = np.append(logistic.intercept_, logistic.coef_) # get the names of the values names = np.append('intercept', X_train.columns) table_ = pd.DataFrame(values, index = names, columns=['coef']) table_['exp_coef'] = np.exp(table_['coef']) print(table_) print('\n') print("Accuracy ({}): {:.2f}".format(data, metrics.accuracy_score(y_test, y_pred))) print("Precision ({}): {:.2f}".format(data, metrics.precision_score(y_test, y_pred))) print("Recall ({}): {:.2f}".format(data, metrics.recall_score(y_test, y_pred))) confusion_matrix = pd.crosstab(y_test, y_pred, rownames=['Actual'], colnames=['Predicted']) sn.heatmap(confusion_matrix, annot=True) plt.show() y_pred_proba = logistic.predict_proba(X_test)[::,1] fpr, tpr, _ = metrics.roc_curve(y_test, y_pred_proba) auc = metrics.roc_auc_score(y_test, y_pred_proba) plt.plot(fpr,tpr,label="data, auc="+str(auc)) plt.legend(loc=4) plt.show() ## save data for comparison results = results.append(pd.DataFrame([{"data" : data, "regularization": name, "package": "SkLearn", "accuracy": np.round(metrics.accuracy_score(y_test, y_pred), 2), "precision": np.round(metrics.precision_score(y_test, y_pred), 2), "recall": np.round(metrics.recall_score(y_test, y_pred), 2), "f1": np.round(metrics.f1_score(y_test, y_pred), 2), "rSquared": np.round(mcfadden_rsquare(w, X_test, y_pred), 2), "AUC": np.round(metrics.roc_auc_score(y_test, y_pred), 2)}]), ignore_index=True) results_sklearn = results[results.package == 'SkLearn'] final_sklearn = results_sklearn.pivot(index=['data', 'regularization'], columns="package", values=['accuracy', 'precision', 'recall', 'f1', 'rSquared', 'AUC']) final_sklearn.columns = final_sklearn.columns.swaplevel(0,1) final_sklearn results_statsmodels = results[results.package == 'StatsModels'] final_statsmodels = results_statsmodels.pivot(index=['data', 'regularization'], columns="package", values=['accuracy', 'precision', 'recall', 'f1', 'rSquared', 'AUC']) final_statsmodels.columns = final_statsmodels.columns.swaplevel(0,1) final_statsmodels ###Output _____no_output_____ ###Markdown Обучение базовой модели логистической регрессии ###Code import pandas as pd import numpy as np import os import pickle from sklearn.linear_model import SGDClassifier from sklearn.preprocessing import StandardScaler from google.colab import drive drive.mount('/content/drive') os.chdir('/content/drive/Shared drives/Кредитные риски') from CreditRisks.metrics_library.Metrics import * from CreditRisks.metrics_library.rosstat_utils import 
* DIR_OUT='ReadyModels/SGD_final/' ###Output _____no_output_____ ###Markdown Считывание данных ###Code df = pd.read_csv('Датасеты/revision_003/companies_ready_train.csv', dtype=RESULT_DTYPES) y_train = df['target'] X_train = df.drop(columns=['inn', 'year_-1', 'year_0', 'target']) df.head() df1 = pd.read_csv('Датасеты/revision_003/companies_ready_test.csv', dtype=RESULT_DTYPES) y_test = df1['target'] X_test = df1.drop(columns=['inn', 'year_-1', 'year_0', 'target']) df1.head() ###Output _____no_output_____ ###Markdown Предобработка данных Добавление новых признаков ###Code methodCols = ['financialDebt', 'CreditLeverage', 'FinancialIndependence', 'DebtBurden', 'CoverageDebtWithAccumulatedProfit', 'ReturnAssetsNetProfit', 'ReturnAssetsOperatingProfit', 'OperatingMargin', 'NetProfitMargin', 'LiabilityCoverageOperatingProfit', 'OperatingProfitFinancialDebtRatio', 'FinancialDebtRevenueRatio', 'CurrentLiquidity', 'QuickLiquidity', 'InstantLiquidity', 'LevelOfOperatingAssets', 'turnoverDebtorDebt', 'turnoverReserves', 'turnoverCreditDebt', 'FinancialCycle', 'AssetTurnover'] def addFeatures(X:pd.Series)->pd.Series: X['financialDebt'] = X['year_0_15003'] + X['year_0_14003'] + X['year_0_12503'] financialDebt = X['financialDebt'] X['CreditLeverage'] = X['year_0_13003'] / X['year_0_15003'] X['FinancialIndependence'] = X['year_0_13003'] / X['year_0_16003'] X['DebtBurden'] = financialDebt / X['year_0_16003'] X['CoverageDebtWithAccumulatedProfit'] = X['year_0_13003'] / financialDebt X['ReturnAssetsNetProfit'] = X['year_0_24003'] / X['year_0_16003'] X['ReturnAssetsOperatingProfit'] = X['year_0_22003'] / X['year_0_16003'] X['OperatingMargin'] = X['year_0_22003'] / pd.concat([X['year_0_21103'], financialDebt], axis=1).max(axis=1) X['NetProfitMargin'] = X['year_0_24003'] / pd.concat([X['year_0_21103'], financialDebt], axis=1).max(axis=1) # impotant X['LiabilityCoverageOperatingProfit'] = X['year_0_22003'] / (X['year_0_14003'] + X['year_0_15003']) X['OperatingProfitFinancialDebtRatio'] = X['year_0_22003'] / financialDebt X['FinancialDebtRevenueRatio'] = financialDebt / X['year_0_21103'] # impotant X['CurrentLiquidity'] = X['year_0_12003'] / X['year_0_15003'] X['QuickLiquidity'] = (X['year_0_12003'] - X['year_0_12103']) / X['year_0_15003'] X['InstantLiquidity'] = X['year_0_12503'] / X['year_0_15003'] # impotant X['LevelOfOperatingAssets'] = (X['year_0_12103'] + X['year_0_12303'] - X['year_0_15203']) / X['year_0_21103'] X['turnoverDebtorDebt'] = 365 * (X['year_0_12303'] + X['year_0_12304']) / (2 * X['year_0_21103']) X['turnoverReserves'] = 365 * (X['year_0_12103'] + X['year_0_12104']) / (2 * X['year_0_21103']) X['turnoverCreditDebt'] = 365 * (X['year_0_15203'] + X['year_0_15204']) / (2 * X['year_0_21103']) X['FinancialCycle'] = X['turnoverDebtorDebt'] + X['turnoverReserves'] - X['turnoverCreditDebt'] X['AssetTurnover'] = X['year_0_21103'] / X['year_0_16003'] for col in methodCols: m = X.loc[X[col] != np.inf, col].max() X[col].replace(np.inf,m,inplace=True) _m = X.loc[X[col] != -np.inf, col].min() X[col].replace(-np.inf,_m,inplace=True) X[col].replace(np.nan,0,inplace=True) return X X_train = addFeatures(X_train) X_test = addFeatures(X_test) ###Output _____no_output_____ ###Markdown Стандартизация, винзоризация и выброс категориальных признаков ###Code class Winsorizator(): def __init__(self, left, right): assert 0 <= left <= 1 and 0 <= right <= 1 self.left = left self.right = right self.data = None def fit(self, X:pd.DataFrame): self.data = X.quantile([self.left, self.right], axis=0) def transform(self, 
X:pd.DataFrame): X.clip(self.data.iloc[0], self.data.iloc[1], axis='columns', inplace=True) def fit_transform(self, X): self.fit(X) return self.transform(X) categorical_all = ['region','year_-1_okopf', 'year_-1_okfs', 'year_-1_okved', 'year_-1_type', 'year_0_okopf', 'year_0_okfs', 'year_0_okved', 'year_0_type'] sc = StandardScaler() winz = Winsorizator(0.3, 0.7) X_train_1 = X_train.drop(columns=categorical_all).astype(np.float32) winz.fit_transform(X_train_1) X_train_1 = pd.DataFrame(sc.fit_transform(X_train_1), columns=X_train_1.columns, index=X_train_1.index) X_test_1 = X_test.drop(columns=categorical_all).astype(np.float32) winz.transform(X_test_1) X_test_1 = pd.DataFrame(sc.transform(X_test_1), columns=X_test_1.columns, index=X_test_1.index) with open(f'{DIR_OUT}StandardScaler.pkl','wb') as f: pickle.dump(sc, f) with open(f'{DIR_OUT}Winsorizator.pkl','wb') as f: pickle.dump(winz, f) ###Output _____no_output_____ ###Markdown Обучение ###Code lr = SGDClassifier(random_state=42, loss='log', verbose=1, max_iter=1000).fit(X_train_1, y_train) with open(f'{DIR_OUT}SGD_model.pkl','wb') as f: pickle.dump(lr, f) ###Output _____no_output_____ ###Markdown Результаты ###Code predict = lr.predict_proba(X_test_1)[:,1] import CreditRisks.metrics_library.profits as metric plt_roc(y_test, predict) plt_pr(y_test, predict) metric.plt_profit_2_experimental(y_test, predict, percent_space=[0.10, 0.15, 0.20, 0.25, 0.35], title='Алгоритм SGD') metric.plt_popularity(predict, title='Алгоритм SGD') ###Output _____no_output_____ ###Markdown My version ###Code # First, get the regression coefficents coeffs = [] n = X_train.shape[0] for i in range(X_train.shape[1]): p = n*(X_train[:, i]**2).sum() - X_train[:, i].sum()**2 a = ( n*(X_train[:, i]*y_train).sum() - X_train[:, i].sum()*y_train.sum() ) / p b = ( y_train.sum()*(X_train[:, i]**2).sum() - X_train[:, i].sum()*(X_train[:, i]*y_train).sum() ) / p coeffs.append([a, b]) def predict(X_test): ret = 1 / ( 1 + np.exp(sum(-a*X_test[:, i] - b for i in range(X_test.shape[1]))) ) ret[ret > .5] = 1 ret[ret <= .5] = 0 return ret ###Output _____no_output_____
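###Markdown
A note on the "My version" cell above: the loop stores one `(a, b)` pair per feature in `coeffs`, but `predict` closes over the loop variables `a` and `b`, so only the coefficients of the last feature are ever used. Below is a sketch that actually uses the stored pairs; it is still a stack of per-feature least-squares fits pushed through a sigmoid rather than a true multivariate logistic regression, and is kept only to mirror the original idea.
###Code
def predict_from_coeffs(X, coeffs):
    # Sum the per-feature linear scores a_i * x_i + b_i, then squash with a sigmoid
    score = sum(a * X[:, i] + b for i, (a, b) in enumerate(coeffs))
    proba = 1.0 / (1.0 + np.exp(-score))
    # Same 0.5 threshold as the original predict()
    return (proba > 0.5).astype(int)
###Output
_____no_output_____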
ModelTrain_Data.ipynb
###Markdown Importing relevant packages for data processing. ###Code import numpy as np from random import randint from sklearn.utils import shuffle from sklearn.preprocessing import MinMaxScaler train_labels = [] train_samples = [] ###Output _____no_output_____ ###Markdown Data creationCreating our own example data set. *As motivation for this data, let's assume that an experimental drug was tested on individuals ranging from age 13 to 100 in a clinical trial. The trial had 2100 partcipants. Half of the participants were under 65 years old, and the other half was 65 years of age or older.* GoalUltimately, we want to build a model to tell us whether or not a patient will experience side effects solely based on the patient's age. ###Code for i in range(50): # The ~5% of younger individuals who experienced side effects random_younger = randint(13,64) train_samples.append(random_younger) train_labels.append(1) # The ~5% of older individuals who did not experience side effects random_older = randint(65,100) train_samples.append(random_older) train_labels.append(0) for i in range(1000): # The 95% of younger individuals who did not experience side effects. random_younger = randint(13,64) train_samples.append(random_younger) train_labels.append(0) # The 95% of older individuals who did experience side effects. random_older = randint(65,100) train_samples.append(random_older) train_labels.append(1) ###Output _____no_output_____ ###Markdown Data processingNow we convert both lists into numpy arrays. ###Code train_labels = np.array(train_labels) train_samples = np.array(train_samples) train_labels, train_samples = shuffle(train_labels, train_samples) ###Output _____no_output_____ ###Markdown **We'll use scikit-learn's MinMaxScaler class to scale all the data down from a scale ranging from 13 to 100 to be on a scale from 0 to 1** ###Code scaler = MinMaxScaler(feature_range=(0,1)) scaled_train_samples = scaler.fit_transform(train_samples.reshape(-1,1)) x_val = scaled_train_samples[0:101] y_val = train_labels[0:101] valid_set = (x_val, y_val) #for i in scaled_train_samples: # print(i) ###Output _____no_output_____
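###Markdown
The stated goal is a binary classifier on the scaled ages, but this notebook stops after preparing the data (note also that `valid_set` is simply the first 101 rows of the shuffled training data, so it overlaps the training set rather than being a held-out split). A minimal sketch of a model that could be trained on this data follows; the layer sizes, learning rate and epoch count are illustrative choices, not values taken from the original notebook.
###Code
import tensorflow as tf

model = tf.keras.Sequential([
    tf.keras.layers.Dense(16, activation='relu', input_shape=(1,)),
    tf.keras.layers.Dense(32, activation='relu'),
    tf.keras.layers.Dense(2, activation='softmax'),  # 2 classes: side effects / no side effects
])
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001),
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.fit(scaled_train_samples, train_labels,
          validation_data=valid_set,
          batch_size=10, epochs=30, verbose=2)
###Output
_____no_output_____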
study_roadmaps/2_transfer_learning_roadmap/5_exploring_model_families/2_vgg/1.1) Intro to vgg network - mxnet backend.ipynb
###Markdown Goals Train a architectural heritage site classifier using vgg16 Understand what lies inside vgg network What is vgg Readings on vgg 1) Points from https://towardsdatascience.com/vgg-neural-networks-the-next-step-after-alexnet-3f91fa9ffe2c - VGG addresses another very important aspect of CNNs: depth - All of VGG’s hidden layers use ReLU - Unlike 11x11 kernels of alexnet, it uses smaller ones 1x1 and 3x3 kernels 2) Points from https://becominghuman.ai/what-is-the-vgg-neural-network-a590caa72643 - Intuitively, more layer is better. However, the authors found that VGG-16 is better than VGG-19 - Authors introduce multi-scale evaluationin the paper 3) Read more here - - https://arxiv.org/abs/1409.1556 - https://machinelearningmastery.com/use-pre-trained-vgg-model-classify-objects-photographs/ - https://www.cs.toronto.edu/~frossard/post/vgg16/ - https://d2l.ai/chapter_convolutional-modern/vgg.html Table of Contents [Install](0) [Load experiment with vgg base architecture](1) [Visualize vgg](2) [Train the classifier](3) [Run inference on trained classifier](4) Install Monk Using pip (Recommended) - colab (gpu) - All bakcends: `pip install -U monk-colab` - kaggle (gpu) - All backends: `pip install -U monk-kaggle` - cuda 10.2 - All backends: `pip install -U monk-cuda102` - Gluon bakcned: `pip install -U monk-gluon-cuda102` - Pytorch backend: `pip install -U monk-pytorch-cuda102` - Keras backend: `pip install -U monk-keras-cuda102` - cuda 10.1 - All backend: `pip install -U monk-cuda101` - Gluon bakcned: `pip install -U monk-gluon-cuda101` - Pytorch backend: `pip install -U monk-pytorch-cuda101` - Keras backend: `pip install -U monk-keras-cuda101` - cuda 10.0 - All backend: `pip install -U monk-cuda100` - Gluon bakcned: `pip install -U monk-gluon-cuda100` - Pytorch backend: `pip install -U monk-pytorch-cuda100` - Keras backend: `pip install -U monk-keras-cuda100` - cuda 9.2 - All backend: `pip install -U monk-cuda92` - Gluon bakcned: `pip install -U monk-gluon-cuda92` - Pytorch backend: `pip install -U monk-pytorch-cuda92` - Keras backend: `pip install -U monk-keras-cuda92` - cuda 9.0 - All backend: `pip install -U monk-cuda90` - Gluon bakcned: `pip install -U monk-gluon-cuda90` - Pytorch backend: `pip install -U monk-pytorch-cuda90` - Keras backend: `pip install -U monk-keras-cuda90` - cpu - All backend: `pip install -U monk-cpu` - Gluon bakcned: `pip install -U monk-gluon-cpu` - Pytorch backend: `pip install -U monk-pytorch-cpu` - Keras backend: `pip install -U monk-keras-cpu` Install Monk Manually (Not recommended) Step 1: Clone the library - git clone https://github.com/Tessellate-Imaging/monk_v1.git Step 2: Install requirements - Linux - Cuda 9.0 - `cd monk_v1/installation/Linux && pip install -r requirements_cu90.txt` - Cuda 9.2 - `cd monk_v1/installation/Linux && pip install -r requirements_cu92.txt` - Cuda 10.0 - `cd monk_v1/installation/Linux && pip install -r requirements_cu100.txt` - Cuda 10.1 - `cd monk_v1/installation/Linux && pip install -r requirements_cu101.txt` - Cuda 10.2 - `cd monk_v1/installation/Linux && pip install -r requirements_cu102.txt` - CPU (Non gpu system) - `cd monk_v1/installation/Linux && pip install -r requirements_cpu.txt` - Windows - Cuda 9.0 (Experimental support) - `cd monk_v1/installation/Windows && pip install -r requirements_cu90.txt` - Cuda 9.2 (Experimental support) - `cd monk_v1/installation/Windows && pip install -r requirements_cu92.txt` - Cuda 10.0 (Experimental support) - `cd monk_v1/installation/Windows && pip install -r 
requirements_cu100.txt` - Cuda 10.1 (Experimental support) - `cd monk_v1/installation/Windows && pip install -r requirements_cu101.txt` - Cuda 10.2 (Experimental support) - `cd monk_v1/installation/Windows && pip install -r requirements_cu102.txt` - CPU (Non gpu system) - `cd monk_v1/installation/Windows && pip install -r requirements_cpu.txt` - Mac - CPU (Non gpu system) - `cd monk_v1/installation/Mac && pip install -r requirements_cpu.txt` - Misc - Colab (GPU) - `cd monk_v1/installation/Misc && pip install -r requirements_colab.txt` - Kaggle (GPU) - `cd monk_v1/installation/Misc && pip install -r requirements_kaggle.txt` Step 3: Add to system path (Required for every terminal or kernel run) - `import sys` - `sys.path.append("monk_v1/");` Dataset - Architectural Heritage site Classification - https://old.datahub.io/dataset/architectural-heritage-elements-image-dataset ###Code ! wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1MFu7cnxwDM7LWKgeLggMLvWIBW_-YCWC' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=1MFu7cnxwDM7LWKgeLggMLvWIBW_-YCWC" -O architectural_heritage.zip && rm -rf /tmp/cookies.txt ! unzip -qq architectural_heritage.zip ###Output _____no_output_____ ###Markdown Imports ###Code #Using mxnet-gluon backend # When installed using pip from monk.gluon_prototype import prototype # When installed manually (Uncomment the following) #import os #import sys #sys.path.append("monk_v1/"); #sys.path.append("monk_v1/monk/"); #from monk.gluon_prototype import prototype ###Output _____no_output_____ ###Markdown Load experiment with vgg base architecture Creating and managing experiments - Provide project name - Provide experiment name - For a specific data create a single project - Inside each project multiple experiments can be created - Every experiment can be have diferent hyper-parameters attached to it ###Code gtf = prototype(verbose=1); gtf.Prototype("Project", "vgg-intro"); ###Output Mxnet Version: 1.5.1 Experiment Details Project: Project Experiment: vgg-intro Dir: /home/ubuntu/Desktop/monk_pip_test/monk_v1/study_roadmaps/2_transfer_learning_roadmap/5_exploring_model_families/2_vgg/workspace/Project/vgg-intro/ ###Markdown This creates files and directories as per the following structure workspace | |--------Project | | |-----vgg-intro | |-----experiment-state.json | |-----output | |------logs (All training logs and graphs saved here) | |------models (all trained models saved here) Set dataset and select the model Quick mode training - Using Default Function - dataset_path - model_name - freeze_base_network - num_epochs Sample Dataset folder structure architectural_heritage | |-----train |------dome | |------img1.jpg |------img2.jpg |------.... (and so on) |------altal | |------img1.jpg |------img2.jpg |------.... (and so on) |------.... (and so on) | | |-----val |------dome | |------img1.jpg |------img2.jpg |------.... (and so on) |------altal | |------img1.jpg |------img2.jpg |------.... (and so on) |------.... 
(and so on) ###Code gtf.Default(dataset_path="architectural_heritage/train", model_name="vgg16", freeze_base_network=False, num_epochs=5); ###Output Dataset Details Train path: architectural_heritage/train Val path: None CSV train path: None CSV val path: None Label Type: single Dataset Params Input Size: 224 Batch Size: 4 Data Shuffle: True Processors: 8 Train-val split: 0.7 Pre-Composed Train Transforms [{'RandomHorizontalFlip': {'p': 0.8}}, {'Normalize': {'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225]}}] Pre-Composed Val Transforms [{'RandomHorizontalFlip': {'p': 0.8}}, {'Normalize': {'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225]}}] Dataset Numbers Num train images: 7164 Num val images: 3071 Num classes: 10 Model Params Model name: vgg16 Use Gpu: True Use pretrained: True Freeze base network: False Model Details Loading pretrained model Downloading /home/ubuntu/.mxnet/models/vgg16-e660d456.zip from https://apache-mxnet.s3-accelerate.dualstack.amazonaws.com/gluon/models/vgg16-e660d456.zip... ###Markdown From the summary above - Model Params Model name: vgg16 Num of potentially trainable layers: 16 Num of actual trainable layers: 16 Visualize vgg ###Code gtf.Visualize_With_Netron(data_shape=(3, 224, 224), port=8082); ###Output Using Netron To Visualize Not compatible on kaggle Compatible only for Jupyter Notebooks Serving 'model-symbol.json' at http://localhost:8082 ###Markdown vgg block - 1 - Creating network and blocks using monk from scratch will be dealt in different roadmap series ###Code from IPython.display import Image Image(filename='imgs/vgg_block1_mxnet.png') ###Output _____no_output_____ ###Markdown Properties - This block has 3 layers - conv -> relu vgg block - 2 - Creating network and blocks using monk from scratch will be dealt in different roadmap series ###Code from IPython.display import Image Image(filename='imgs/vgg_block2_mxnet.png') ###Output _____no_output_____ ###Markdown Properties - This block has 3 layers - conv -> relu -> max_pool vgg fully connected chain ###Code from IPython.display import Image Image(filename='imgs/vgg_block_fc_mxnet.png') ###Output _____no_output_____ ###Markdown vgg Network - Creating network and blocks using monk from scratch will be dealt in different roadmap series ###Code from IPython.display import Image Image(filename='imgs/vgg16_mxnet.png') ###Output _____no_output_____ ###Markdown Properties - This network - has 9 type-1 blocks - has 5 type-2 blocks - post these blocks the type-3 (fc) block exists Train the classifier ###Code #Start Training gtf.Train(); #Read the training summary generated once you run the cell and training is completed ###Output Training Start Epoch 1/5 ---------- ###Markdown Run inference on trained classifier ###Code gtf = prototype(verbose=1); gtf.Prototype("Project", "vgg-intro", eval_infer=True); output = gtf.Infer(img_name = "architectural_heritage/test/test1.jpg"); from IPython.display import Image Image(filename='architectural_heritage/test/test1.jpg') output = gtf.Infer(img_name = "architectural_heritage/test/test2.jpg"); from IPython.display import Image Image(filename='architectural_heritage/test/test2.jpg') output = gtf.Infer(img_name = "architectural_heritage/test/test3.jpg"); from IPython.display import Image Image(filename='architectural_heritage/test/test3.jpg') ###Output Prediction Image name: architectural_heritage/test/test3.jpg Predicted class: dome(outer) Predicted score: 0.999998152256012 ###Markdown Goals Train a architectural heritage site classifier using vgg16 
Understand what lies inside vgg network What is vgg Readings on vgg 1) Points from https://towardsdatascience.com/vgg-neural-networks-the-next-step-after-alexnet-3f91fa9ffe2c - VGG addresses another very important aspect of CNNs: depth - All of VGG’s hidden layers use ReLU - Unlike 11x11 kernels of alexnet, it uses smaller ones 1x1 and 3x3 kernels 2) Points from https://becominghuman.ai/what-is-the-vgg-neural-network-a590caa72643 - Intuitively, more layer is better. However, the authors found that VGG-16 is better than VGG-19 - Authors introduce multi-scale evaluationin the paper 3) Read more here - - https://arxiv.org/abs/1409.1556 - https://machinelearningmastery.com/use-pre-trained-vgg-model-classify-objects-photographs/ - https://www.cs.toronto.edu/~frossard/post/vgg16/ - https://d2l.ai/chapter_convolutional-modern/vgg.html Table of Contents [0. Install](0) [1. Load experiment with vgg base architecture](1) [2. Visualize vgg](2) [3. Train the classifier](3) [4. Run inference on trained classifier](5) Install Monk - git clone https://github.com/Tessellate-Imaging/monk_v1.git - cd monk_v1/installation/Linux && pip install -r requirements_cu9.txt - (Select the requirements file as per OS and CUDA version) ###Code !git clone https://github.com/Tessellate-Imaging/monk_v1.git # If using Colab install using the commands below !cd monk_v1/installation/Misc && pip install -r requirements_colab.txt # If using Kaggle uncomment the following command #!cd monk_v1/installation/Misc && pip install -r requirements_kaggle.txt # Select the requirements file as per OS and CUDA version when using a local system or cloud #!cd monk_v1/installation/Linux && pip install -r requirements_cu9.txt ###Output _____no_output_____ ###Markdown Dataset - Architectural Heritage site Classification - https://old.datahub.io/dataset/architectural-heritage-elements-image-dataset ###Code ! wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1MFu7cnxwDM7LWKgeLggMLvWIBW_-YCWC' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=1MFu7cnxwDM7LWKgeLggMLvWIBW_-YCWC" -O architectural_heritage.zip && rm -rf /tmp/cookies.txt ! 
unzip -qq architectural_heritage.zip ###Output _____no_output_____ ###Markdown Imports ###Code # Monk import os import sys sys.path.append("monk_v1/monk/"); #Using mxnet-gluon backend from gluon_prototype import prototype ###Output _____no_output_____ ###Markdown Load experiment with vgg base architecture Creating and managing experiments - Provide project name - Provide experiment name - For a specific data create a single project - Inside each project multiple experiments can be created - Every experiment can be have diferent hyper-parameters attached to it ###Code gtf = prototype(verbose=1); gtf.Prototype("Project", "vgg-intro"); ###Output Mxnet Version: 1.5.0 Experiment Details Project: Project Experiment: vgg-intro Dir: /home/abhi/Desktop/Work/tess_tool/gui/v0.3/finetune_models/Organization/development/v5.0_blocks/study_roadmap/change_post_num_layers/6_transfer_learning_model_params/1_exploring_model_families/2_vgg/workspace/Project/vgg-intro/ ###Markdown This creates files and directories as per the following structure workspace | |--------Project | | |-----vgg-intro | |-----experiment-state.json | |-----output | |------logs (All training logs and graphs saved here) | |------models (all trained models saved here) Set dataset and select the model Quick mode training - Using Default Function - dataset_path - model_name - freeze_base_network - num_epochs Sample Dataset folder structure architectural_heritage | |-----train |------dome | |------img1.jpg |------img2.jpg |------.... (and so on) |------altal | |------img1.jpg |------img2.jpg |------.... (and so on) |------.... (and so on) | | |-----val |------dome | |------img1.jpg |------img2.jpg |------.... (and so on) |------altal | |------img1.jpg |------img2.jpg |------.... (and so on) |------.... 
(and so on) ###Code gtf.Default(dataset_path="architectural_heritage/train", model_name="vgg16", freeze_base_network=False, num_epochs=5); ###Output Dataset Details Train path: architectural_heritage/train Val path: None CSV train path: None CSV val path: None Dataset Params Input Size: 224 Batch Size: 4 Data Shuffle: True Processors: 4 Train-val split: 0.7 Pre-Composed Train Transforms [{'RandomHorizontalFlip': {'p': 0.8}}, {'Normalize': {'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225]}}] Pre-Composed Val Transforms [{'RandomHorizontalFlip': {'p': 0.8}}, {'Normalize': {'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225]}}] Dataset Numbers Num train images: 7164 Num val images: 3071 Num classes: 10 Model Params Model name: vgg16 Use Gpu: True Use pretrained: True Freeze base network: False Model Details Loading pretrained model Model Loaded on device Model name: vgg16 Num of potentially trainable layers: 16 Num of actual trainable layers: 16 Optimizer Name: sgd Learning rate: 0.01 Params: {'lr': 0.01, 'momentum': 0, 'weight_decay': 0, 'momentum_dampening_rate': 0, 'clipnorm': 0.0, 'clipvalue': 0.0} Learning rate scheduler Name: steplr Params: {'step_size': 1, 'gamma': 0.98, 'last_epoch': -1} Loss Name: softmaxcrossentropy Params: {'weight': None, 'batch_axis': 0, 'axis_to_sum_over': -1, 'label_as_categories': True, 'label_smoothing': False} Training params Num Epochs: 5 Display params Display progress: True Display progress realtime: True Save Training logs: True Save Intermediate models: True Intermediate model prefix: intermediate_model_ ###Markdown From the summary above - Model Params Model name: vgg16 Num of potentially trainable layers: 16 Num of actual trainable layers: 16 Visualize vgg ###Code gtf.Visualize_With_Netron(data_shape=(3, 224, 224), port=8082); ###Output Using Netron To Visualize Not compatible on kaggle Compatible only for Jupyter Notebooks Stopping http://localhost:8082 Serving 'model-symbol.json' at http://localhost:8082 ###Markdown vgg block - 1 - Creating network and blocks using monk from scratch will be dealt in different roadmap series ###Code from IPython.display import Image Image(filename='imgs/vgg_block1_mxnet.png') ###Output _____no_output_____ ###Markdown Properties - This block has 3 layers - conv -> relu vgg block - 2 - Creating network and blocks using monk from scratch will be dealt in different roadmap series ###Code from IPython.display import Image Image(filename='imgs/vgg_block2_mxnet.png') ###Output _____no_output_____ ###Markdown Properties - This block has 3 layers - conv -> relu -> max_pool vgg fully connected chain ###Code from IPython.display import Image Image(filename='imgs/vgg_block_fc_mxnet.png') ###Output _____no_output_____ ###Markdown vgg Network - Creating network and blocks using monk from scratch will be dealt in different roadmap series ###Code from IPython.display import Image Image(filename='imgs/vgg16_mxnet.png') ###Output _____no_output_____ ###Markdown Properties - This network - has 9 type-1 blocks - has 5 type-2 blocks - post these blocks the type-3 (fc) block exists Train the classifier ###Code #Start Training gtf.Train(); #Read the training summary generated once you run the cell and training is completed ###Output Training Start Epoch 1/5 ---------- ###Markdown Run inference on trained classifier ###Code gtf = prototype(verbose=1); gtf.Prototype("Project", "vgg-intro", eval_infer=True); output = gtf.Infer(img_name = "architectural_heritage/test/test1.jpg"); from IPython.display import Image 
Image(filename='architectural_heritage/test/test1.jpg') output = gtf.Infer(img_name = "architectural_heritage/test/test2.jpg"); from IPython.display import Image Image(filename='architectural_heritage/test/test2.jpg') output = gtf.Infer(img_name = "architectural_heritage/test/test3.jpg"); from IPython.display import Image Image(filename='architectural_heritage/test/test3.jpg') ###Output Prediction Image name: architectural_heritage/test/test3.jpg Predicted class: dome(outer) Predicted score: 15.606271743774414 ###Markdown Goals Train a architectural heritage site classifier using vgg16 Understand what lies inside vgg network What is vgg Readings on vgg 1) Points from https://towardsdatascience.com/vgg-neural-networks-the-next-step-after-alexnet-3f91fa9ffe2c - VGG addresses another very important aspect of CNNs: depth - All of VGG’s hidden layers use ReLU - Unlike 11x11 kernels of alexnet, it uses smaller ones 1x1 and 3x3 kernels 2) Points from https://becominghuman.ai/what-is-the-vgg-neural-network-a590caa72643 - Intuitively, more layer is better. However, the authors found that VGG-16 is better than VGG-19 - Authors introduce multi-scale evaluationin the paper 3) Read more here - - https://arxiv.org/abs/1409.1556 - https://machinelearningmastery.com/use-pre-trained-vgg-model-classify-objects-photographs/ - https://www.cs.toronto.edu/~frossard/post/vgg16/ - https://d2l.ai/chapter_convolutional-modern/vgg.html Table of Contents [0. Install](0) [1. Load experiment with vgg base architecture](1) [2. Visualize vgg](2) [3. Train the classifier](3) [4. Run inference on trained classifier](5) Install Monk - git clone https://github.com/Tessellate-Imaging/monk_v1.git - cd monk_v1/installation/Linux && pip install -r requirements_cu9.txt - (Select the requirements file as per OS and CUDA version) ###Code !git clone https://github.com/Tessellate-Imaging/monk_v1.git # Select the requirements file as per OS and CUDA version !cd monk_v1/installation/Linux && pip install -r requirements_cu9.txt ###Output _____no_output_____ ###Markdown Dataset - Architectural Heritage site Classification - https://old.datahub.io/dataset/architectural-heritage-elements-image-dataset ###Code ! wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1MFu7cnxwDM7LWKgeLggMLvWIBW_-YCWC' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=1MFu7cnxwDM7LWKgeLggMLvWIBW_-YCWC" -O architectural_heritage.zip && rm -rf /tmp/cookies.txt ! 
unzip -qq architectural_heritage.zip ###Output _____no_output_____ ###Markdown Imports ###Code # Monk import os import sys sys.path.append("monk_v1/monk/"); #Using mxnet-gluon backend from gluon_prototype import prototype ###Output _____no_output_____ ###Markdown Load experiment with vgg base architecture Creating and managing experiments - Provide project name - Provide experiment name - For a specific data create a single project - Inside each project multiple experiments can be created - Every experiment can be have diferent hyper-parameters attached to it ###Code gtf = prototype(verbose=1); gtf.Prototype("Project", "vgg-intro"); ###Output Mxnet Version: 1.5.0 Experiment Details Project: Project Experiment: vgg-intro Dir: /home/abhi/Desktop/Work/tess_tool/gui/v0.3/finetune_models/Organization/development/v5.0_blocks/study_roadmap/change_post_num_layers/6_transfer_learning_model_params/1_exploring_model_families/2_vgg/workspace/Project/vgg-intro/ ###Markdown This creates files and directories as per the following structure workspace | |--------Project | | |-----vgg-intro | |-----experiment-state.json | |-----output | |------logs (All training logs and graphs saved here) | |------models (all trained models saved here) Set dataset and select the model Quick mode training - Using Default Function - dataset_path - model_name - freeze_base_network - num_epochs Sample Dataset folder structure architectural_heritage | |-----train |------dome | |------img1.jpg |------img2.jpg |------.... (and so on) |------altal | |------img1.jpg |------img2.jpg |------.... (and so on) |------.... (and so on) | | |-----val |------dome | |------img1.jpg |------img2.jpg |------.... (and so on) |------altal | |------img1.jpg |------img2.jpg |------.... (and so on) |------.... 
(and so on) ###Code gtf.Default(dataset_path="architectural_heritage/train", model_name="vgg16", freeze_base_network=False, num_epochs=5); ###Output Dataset Details Train path: architectural_heritage/train Val path: None CSV train path: None CSV val path: None Dataset Params Input Size: 224 Batch Size: 4 Data Shuffle: True Processors: 4 Train-val split: 0.7 Pre-Composed Train Transforms [{'RandomHorizontalFlip': {'p': 0.8}}, {'Normalize': {'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225]}}] Pre-Composed Val Transforms [{'RandomHorizontalFlip': {'p': 0.8}}, {'Normalize': {'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225]}}] Dataset Numbers Num train images: 7164 Num val images: 3071 Num classes: 10 Model Params Model name: vgg16 Use Gpu: True Use pretrained: True Freeze base network: False Model Details Loading pretrained model Model Loaded on device Model name: vgg16 Num of potentially trainable layers: 16 Num of actual trainable layers: 16 Optimizer Name: sgd Learning rate: 0.01 Params: {'lr': 0.01, 'momentum': 0, 'weight_decay': 0, 'momentum_dampening_rate': 0, 'clipnorm': 0.0, 'clipvalue': 0.0} Learning rate scheduler Name: steplr Params: {'step_size': 1, 'gamma': 0.98, 'last_epoch': -1} Loss Name: softmaxcrossentropy Params: {'weight': None, 'batch_axis': 0, 'axis_to_sum_over': -1, 'label_as_categories': True, 'label_smoothing': False} Training params Num Epochs: 5 Display params Display progress: True Display progress realtime: True Save Training logs: True Save Intermediate models: True Intermediate model prefix: intermediate_model_ ###Markdown From the summary above - Model Params Model name: vgg16 Num of potentially trainable layers: 16 Num of actual trainable layers: 16 Visualize vgg ###Code gtf.Visualize_With_Netron(data_shape=(3, 224, 224), port=8082); ###Output Using Netron To Visualize Not compatible on kaggle Compatible only for Jupyter Notebooks Stopping http://localhost:8082 Serving 'model-symbol.json' at http://localhost:8082 ###Markdown vgg block - 1 - Creating network and blocks using monk from scratch will be dealt in different roadmap series ###Code from IPython.display import Image Image(filename='imgs/vgg_block1_mxnet.png') ###Output _____no_output_____ ###Markdown Properties - This block has 3 layers - conv -> relu vgg block - 2 - Creating network and blocks using monk from scratch will be dealt in different roadmap series ###Code from IPython.display import Image Image(filename='imgs/vgg_block2_mxnet.png') ###Output _____no_output_____ ###Markdown Properties - This block has 3 layers - conv -> relu -> max_pool vgg fully connected chain ###Code from IPython.display import Image Image(filename='imgs/vgg_block_fc_mxnet.png') ###Output _____no_output_____ ###Markdown vgg Network - Creating network and blocks using monk from scratch will be dealt in different roadmap series ###Code from IPython.display import Image Image(filename='imgs/vgg16_mxnet.png') ###Output _____no_output_____ ###Markdown Properties - This network - has 9 type-1 blocks - has 5 type-2 blocks - post these blocks the type-3 (fc) block exists Train the classifier ###Code #Start Training gtf.Train(); #Read the training summary generated once you run the cell and training is completed ###Output Training Start Epoch 1/5 ---------- ###Markdown Run inference on trained classifier ###Code gtf = prototype(verbose=1); gtf.Prototype("Project", "vgg-intro", eval_infer=True); output = gtf.Infer(img_name = "architectural_heritage/test/test1.jpg"); from IPython.display import Image 
Image(filename='architectural_heritage/test/test1.jpg') output = gtf.Infer(img_name = "architectural_heritage/test/test2.jpg"); from IPython.display import Image Image(filename='architectural_heritage/test/test2.jpg') output = gtf.Infer(img_name = "architectural_heritage/test/test3.jpg"); from IPython.display import Image Image(filename='architectural_heritage/test/test3.jpg') ###Output Prediction Image name: architectural_heritage/test/test3.jpg Predicted class: dome(outer) Predicted score: 15.606271743774414
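###Markdown The three calls above score one image at a time. To run the trained classifier over a whole folder, the loop below is a minimal sketch: it assumes the extra test images live in `architectural_heritage/test/` (the folder used above) and relies only on the `gtf.Infer(img_name=...)` call already demonstrated; the file-extension filter is an illustrative detail, not part of the original experiment. ###Code
import os

test_dir = "architectural_heritage/test/"  # assumed location of the test images
outputs = []
for fname in sorted(os.listdir(test_dir)):
    if not fname.lower().endswith((".jpg", ".jpeg", ".png")):
        continue  # skip non-image files
    # Infer prints the prediction for each image; we also collect the returned outputs
    outputs.append(gtf.Infer(img_name=os.path.join(test_dir, fname)))
###Output _____no_output_____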
day12_Convolutional_neural_networks/12_cnn_seminar_solved.ipynb
###Markdown 12: Convolutional neural networks Inspiration for this notebook is taken from YSDA materials__Colab is highly recommended to work with this notebook__ About CNNsConvolutional layers extract features - quantitative representations of some attributes. After the extraction you can use these features for classification, for example. Convolution: Pooling: Deeper layer $\to$ more complex features. Task: Cats vs. Dogs Classification Let's try to build a small convolutional neural network capable of separating cat images from dog images. Datasets in pyTorch Generally, when you have to deal with image, text, audio or video data, you can use standard python packages that load data into a numpy array. Then you can convert this array into a torch.*Tensor.- For images, packages such as *Pillow*, *OpenCV* are useful- For audio, packages such as *scipy* and *librosa*- For text, either raw *Python* or *Cython* based loading, or *NLTK* and *SpaCy* are useful We are dealing with images, so let's have a look at image data loading in pyTorch for the [Dogs vs. Cats](https://www.kaggle.com/c/dogs-vs-cats) classification competition.The link for downloading the data is in the cell below.The training set size is reduced for performance. If you have enough computational resources, use [this link](https://www.dropbox.com/s/h2vhfxb0j3eazu1/train.zip) to download the full training set instead. ###Code # Uncomment only on Google Colab # from google.colab import drive # drive.mount('/content/drive') # Training set with 11K images ! wget -nc https://www.dropbox.com/s/gqdo90vhli893e0/data.zip ! unzip -n -qq data.zip -d data ###Output File ‘data.zip’ already there; not retrieving. ###Markdown Now let's look at the way datasets are processed in PyTorch. ###Code import os import time from matplotlib import pyplot as plt import numpy as np from tqdm import tqdm_notebook as tqdm import torch import torchvision import torch.nn as nn import torch.nn.functional as F from torchvision import transforms from torchsummary import summary device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") device ###Output _____no_output_____ ###Markdown Images should be stored class-wise on disk: each image class has to be represented as a subfolder with the corresponding image data. `ImageFolder` takes the path to the 'root' directory of such a structure, e.g. DATA_PATH:- DATA_PATH/dog/xxx.png- DATA_PATH/dog/xxy.png- DATA_PATH/dog/xxz.png- DATA_PATH/cat/123.png- DATA_PATH/cat/nsdf3.png- DATA_PATH/cat/asd932_.png Dataset images are of different sizes.The batch generator expects a batch of tensors of the same dimensions, so we need to rescale images in the dataset during data loading.Let's look at the image size distributions.
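The cell below is a small sketch for doing exactly that (it was not part of the original seminar code): it assumes the archive was unpacked into `data/train_11k/<class>/...`, the same folder used for the training set later in this notebook, and it only reads image headers with Pillow. ###Code
import os
from PIL import Image

root = "data/train_11k"  # assumed location of the unpacked training images
sizes = []
for cls in os.listdir(root):
    cls_dir = os.path.join(root, cls)
    if not os.path.isdir(cls_dir):
        continue
    for fname in os.listdir(cls_dir):
        if not fname.lower().endswith((".jpg", ".jpeg", ".png")):
            continue
        with Image.open(os.path.join(cls_dir, fname)) as img:
            sizes.append(img.size)  # (width, height)

widths = sorted(w for w, _ in sizes)
heights = sorted(h for _, h in sizes)
print(f"{len(sizes)} images")
print(f"width:  min {widths[0]}, median {widths[len(widths) // 2]}, max {widths[-1]}")
print(f"height: min {heights[0]}, median {heights[len(heights) // 2]}, max {heights[-1]}")
###Output _____no_output_____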
###Code ### Let's have a cell with global hyperparameters for the CNNs in this notebook # Path to a directory with image dataset and subfolders for training, validation and final testing DATA_PATH = 'data' # PATH TO THE DATASET # Number of threads for data loader NUM_WORKERS = 4 # Image size: even though image sizes are bigger than 96, we use this to speed up training SIZE_H = SIZE_W = 96 N_CHANNELS = 3 # Number of classes in the dataset NUM_CLASSES = 2 # Epochs: number of passes over the training data, we use it this small to reduce training babysitting time EPOCH_NUM = 30 # Batch size: for batch gradient descent optimization, usually selected as 2**K elements BATCH_SIZE = 128 # Images mean and std channelwise image_mean = [0.485, 0.456, 0.406] image_std = [0.229, 0.224, 0.225] # Last layer (embeddings) size for CNN models EMBEDDING_SIZE = 256 ###Output _____no_output_____ ###Markdown Let's define a transformer to be used as image preprocessing step prior to creating pyTorch image dataset ###Code transformer = transforms.Compose([ transforms.Resize((SIZE_H, SIZE_W)), # scaling images to fixed size transforms.ToTensor(), # converting to tensors transforms.Normalize(image_mean, image_std) # normalize image data per-channel ]) ###Output _____no_output_____ ###Markdown Create an ImageFolder instance to be used during training, validation and testing phases. ###Code # Define the network architecture from torch import nn, optim import torch.nn.functional as F model = nn.Sequential(nn.Linear(784, 256), nn.ReLU(), nn.Dropout(0.2), nn.Linear(256, 128), nn.ReLU(), nn.Dropout(0.2), nn.Linear(128, 64), nn.ReLU(), nn.Dropout(0.2), nn.Linear(64, 10), nn.LogSoftmax(dim = 1) ) train_dataset = torchvision.datasets.ImageFolder(os.path.join(DATA_PATH, 'train_11k'), transform=transformer) val_dataset = torchvision.datasets.ImageFolder(os.path.join(DATA_PATH, 'val'), transform=transformer) test_dataset = torchvision.datasets.ImageFolder(os.path.join(DATA_PATH, 'test_labeled'), transform=transformer) ###Output _____no_output_____ ###Markdown Save sample num for further use ###Code n_train, n_val, n_test = len(train_dataset), len(val_dataset), len(test_dataset) ###Output _____no_output_____ ###Markdown Now let's create a DataLoader instance, which uses ImageFolder instance to generate batches of data. ###Code train_loader = torch.utils.data.DataLoader( train_dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=NUM_WORKERS, ) val_loader = torch.utils.data.DataLoader( val_dataset, batch_size=BATCH_SIZE, num_workers=NUM_WORKERS, ) test_loader = torch.utils.data.DataLoader( test_dataset, batch_size=BATCH_SIZE, num_workers=NUM_WORKERS, ) ###Output _____no_output_____ ###Markdown Let's create a helper function to vizualize images from our data loaders (and also make sure data was properly loaded). 
###Code def plot_from_loader(loader): data_batch, label_batch = next(iter(loader)) grid_size = (3, 3) f, axarr = plt.subplots(*grid_size) f.set_size_inches(15,10) class_names = loader.dataset.classes for i in range(grid_size[0] * grid_size[1]): # read images from batch to numpy.ndarray and change axes order [C, H, W] -> [H, W, C] batch_image_ndarray = np.transpose(data_batch[i].numpy(), [1, 2, 0]) # inverse normalization for image data values back to [0,1] and clipping the values for correct pyplot.imshow() src = np.clip(image_std * batch_image_ndarray + image_mean, 0, 1) # display batch samples with labels sample_title = 'Label = %d (%s)' % (label_batch[i], class_names[label_batch[i]]) axarr[i // grid_size[0], i % grid_size[0]].imshow(src) axarr[i // grid_size[0], i % grid_size[0]].set_title(sample_title) plot_from_loader(train_loader) ###Output _____no_output_____ ###Markdown Building the training pipeline The training function is the same as the one we used in the previous seminar ###Code def train_model(model, train_loader, val_loader, loss_fn, opt, n_epochs: int): train_loss = [] val_loss = [] val_accuracy = [] top_val_accuracy = -1 best_model = None for epoch in range(n_epochs): ep_train_loss = [] ep_val_loss = [] ep_val_accuracy = [] start_time = time.time() model.train(True) # enable dropout / batch_norm training behavior for X_batch, y_batch in train_loader: # move data to target device X_batch, y_batch = X_batch.to(device), y_batch.to(device) # train on batch: compute loss, calc grads, perform optimizer step and zero the grads predicts = model(X_batch) loss = loss_fn(predicts, y_batch) loss.backward() opt.step() opt.zero_grad() ep_train_loss.append(loss.item()) model.train(False) # disable dropout / use averages for batch_norm with torch.no_grad(): for X_batch, y_batch in val_loader: # move data to target device X_batch, y_batch = X_batch.to(device), y_batch.to(device) # compute predictions preds = model(X_batch) ep_val_loss.append(loss_fn(preds, y_batch).item()) y_pred = preds.max(1)[1].data ep_val_accuracy.append((y_pred == y_batch).to(torch.float32).mean().item()) # print the results for this epoch: print(f'Epoch {epoch + 1} of {n_epochs} took {time.time() - start_time:.3f}s') train_loss.append(np.mean(ep_train_loss)) val_loss.append(np.mean(ep_val_loss)) val_accuracy.append(np.mean(ep_val_accuracy)) print(f"\t training loss: {train_loss[-1]:.6f}") print(f"\tvalidation loss: {val_loss[-1]:.6f}") print(f"\tvalidation accuracy: {val_accuracy[-1]:.3f}") # keep track of the best validation accuracy seen so far if val_accuracy[-1] > top_val_accuracy: top_val_accuracy = val_accuracy[-1] best_model = model return train_loss, val_loss, val_accuracy, best_model @torch.no_grad() def test_model(model, test_loader, subset='test'): model.train(False) # disable dropout / use averages for batch_norm test_batch_acc = [] for X_batch, y_batch in test_loader: logits = model(X_batch.to(device)) y_pred = logits.max(1)[1].data test_batch_acc.append(np.mean( (y_batch.cpu() == y_pred.cpu()).numpy() )) test_accuracy = np.mean(test_batch_acc) print("Results:") print(f" {subset} accuracy: {test_accuracy * 100:.2f} %") if test_accuracy > 0.9: print(" Amazing!") elif test_accuracy > 0.7: print(" Good!") else: print(" We need more magic!
Follow instructions below") return test_accuracy ###Output _____no_output_____ ###Markdown Task 0: Multi-layer fully-connected networkLook at the NN structure proposed below.We will use this model as a baseline for the classification task.As you already know, fully-connected networks are not translation invariant and perform worse on image data, so the resulting accuracy will be lower than for convolutional neural networks. (Also note that the final `nn.Softmax` layer is technically redundant here: `nn.CrossEntropyLoss` expects raw logits and applies log-softmax internally, so an extra softmax only makes optimization slightly harder.) ###Code device model = nn.Sequential( nn.Flatten(), nn.Linear(N_CHANNELS * SIZE_H * SIZE_W, 256), nn.BatchNorm1d(256), nn.ReLU(), nn.Dropout(0.1), nn.Linear(256, 128), nn.ReLU(), nn.Dropout(0.05), nn.Linear(128, NUM_CLASSES), nn.Softmax(dim=1) ) model = model.to(device) ###Output _____no_output_____ ###Markdown Print model summary for sanity check: ###Code summary(model, (N_CHANNELS, SIZE_H, SIZE_W), device=str(device)) ###Output ---------------------------------------------------------------- Layer (type) Output Shape Param # ================================================================ Flatten-1 [-1, 27648] 0 Linear-2 [-1, 256] 7,078,144 BatchNorm1d-3 [-1, 256] 512 ReLU-4 [-1, 256] 0 Dropout-5 [-1, 256] 0 Linear-6 [-1, 128] 32,896 ReLU-7 [-1, 128] 0 Dropout-8 [-1, 128] 0 Linear-9 [-1, 2] 258 Softmax-10 [-1, 2] 0 ================================================================ Total params: 7,111,810 Trainable params: 7,111,810 Non-trainable params: 0 ---------------------------------------------------------------- Input size (MB): 0.11 Forward/backward pass size (MB): 0.22 Params size (MB): 27.13 Estimated Total Size (MB): 27.46 ---------------------------------------------------------------- ###Markdown Training on minibatches* We got 11k images (22k for full train set), that's way too many for a full-batch SGD. Let's train on minibatches instead* For visualization purposes we propose to plot train/val loss graphs and validation score distribution for CNN predictions over images of cats (class_0) and dogs (class_1). ###Code opt = torch.optim.Adam(model.parameters(), lr=3e-4) loss_fn = nn.CrossEntropyLoss() train_loss, val_loss, val_accuracy, best_model = train_model(model, train_loader, val_loader, loss_fn, opt, 5) def plot_train_process(train_loss, val_loss, val_accuracy): fig, axes = plt.subplots(1, 2, figsize=(15, 5)) axes[0].set_title('Loss') axes[0].plot(train_loss, label='train') axes[0].plot(val_loss, label='validation') axes[0].legend() axes[1].set_title('Validation accuracy') axes[1].plot(val_accuracy) plot_train_process(train_loss, val_loss, val_accuracy) best_model = model model ###Output _____no_output_____ ###Markdown Evaluate the best model using the test set ###Code val_stats = test_model(best_model, val_loader, 'validation') test_stats = test_model(best_model, test_loader, 'test') ###Output Results: validation accuracy: 63.64 % We need more magic! Follow instructions below Results: test accuracy: 65.52 % We need more magic!
Follow instructions below ###Markdown Task I: small convolution net First step**conv-pool-conv-pool-dense-dense-everybody!**Let's create a mini-convolutional network with roughly such architecture:* Input layer* 4 classic convolutional blocks `convolution->relu->pool`: * 3x3 convolution with 32 -> 32 -> 64 -> 128 filters and _ReLU_ activation * 2x2 pooling (or set previous convolution stride to 3) * Flatten* 30% Dropout * Dense layer with 128 neurons and _ReLU_ activation* 30% dropout* Output dense layer.__Convolutional layers__ in torch are just like all other layers, but with a specific set of parameters:__`...`____`model.add_module('conv1', nn.Conv2d(in_channels=3, out_channels=128, kernel_size=3)) convolution`____`model.add_module('pool1', nn.MaxPool2d(2)) max pooling 2x2`____`...`__Once you're done (and compute_loss no longer raises errors), train it with the __Adam__ optimizer with learning_rate=3e-4 (Karpathy Constant). If everything is right, you should get at least __75%__ validation accuracy.__HACK_OF_THE_DAY__: the number of channels should be on the order of the number of class labels__HACK_OF_THE_DAY_2__: you may set stride=2 for Conv2d layers to increase learning speed, but keep in mind tensor shapes__HACK_OF_THE_DAY_3__: it might be useful to use a 'VGG-like' structure as a baseline for this task: * every CNN layer with 2x2 maxpooling / stride=2 should be followed by increasing the number of output channels x2 * before the fully-connected layer the tensor H and W should be relatively small (less than 10) * in other words, the smaller H and W of the tensor are, the more you should increase C in order to keep more information ###Code model_cnn = nn.Sequential( nn.Conv2d(3, 32, 3, stride=2, padding=1), nn.ReLU(), nn.Conv2d(32, 64, 3, stride=1, padding=1), nn.ReLU(), nn.Conv2d(64, 64, 3, stride=2, padding=1), nn.ReLU(), nn.Conv2d(64, 128, 3, stride=1, padding=1), nn.ReLU(), nn.Conv2d(128, 128, 3, stride=2, padding=1), nn.ReLU(), nn.AdaptiveAvgPool2d(1), nn.Dropout(0.3), nn.Flatten(), nn.Linear(128, EMBEDDING_SIZE), nn.Dropout(0.3), nn.Linear(EMBEDDING_SIZE, NUM_CLASSES, bias=False), nn.Softmax(dim=1), ) model_cnn.to(device) ###Output _____no_output_____ ###Markdown __Hint:__ If you don't want to compute shapes by hand, just plug in any shape (e.g. 1 unit) and run compute_loss. You will see something like this:__`RuntimeError: size mismatch, m1: [5 x 1960], m2: [1 x 64] at /some/long/path/to/torch/operation`__See the __1960__ there? That's your actual input shape. Let's see the basic structure of our model and at the same time perform a sanity check for tensor dimensions.
###Code summary(model_cnn, (3, SIZE_H, SIZE_W), device='cuda') ###Output ---------------------------------------------------------------- Layer (type) Output Shape Param # ================================================================ Conv2d-1 [-1, 32, 48, 48] 896 ReLU-2 [-1, 32, 48, 48] 0 Conv2d-3 [-1, 64, 48, 48] 18,496 ReLU-4 [-1, 64, 48, 48] 0 Conv2d-5 [-1, 64, 24, 24] 36,928 ReLU-6 [-1, 64, 24, 24] 0 Conv2d-7 [-1, 128, 24, 24] 73,856 ReLU-8 [-1, 128, 24, 24] 0 Conv2d-9 [-1, 128, 12, 12] 147,584 ReLU-10 [-1, 128, 12, 12] 0 AdaptiveAvgPool2d-11 [-1, 128, 1, 1] 0 Dropout-12 [-1, 128, 1, 1] 0 Flatten-13 [-1, 128] 0 Linear-14 [-1, 256] 33,024 Dropout-15 [-1, 256] 0 Linear-16 [-1, 2] 512 Softmax-17 [-1, 2] 0 ================================================================ Total params: 311,296 Trainable params: 311,296 Non-trainable params: 0 ---------------------------------------------------------------- Input size (MB): 0.11 Forward/backward pass size (MB): 5.35 Params size (MB): 1.19 Estimated Total Size (MB): 6.64 ---------------------------------------------------------------- ###Markdown TrainingWe may use the same training pipeline, that we defined above, as it does not depend on model structure. ###Code model_cnn = model_cnn.to(device) opt = torch.optim.Adam(model_cnn.parameters(), lr=1e-3) opt.zero_grad() ckpt_name_cnn='model_cnn.ckpt' train_loss, val_loss, val_accuracy, best_model = train_model(\ model_cnn,\ train_loader,\ val_loader, \ loss_fn, \ opt, \ 10) ###Output Epoch 1 of 10 took 8.736s training loss: 0.663875 validation loss: 0.664546 validation accuracy: 0.581 Epoch 2 of 10 took 8.630s training loss: 0.630285 validation loss: 0.647163 validation accuracy: 0.624 Epoch 3 of 10 took 8.533s training loss: 0.620599 validation loss: 0.624910 validation accuracy: 0.659 Epoch 4 of 10 took 8.588s training loss: 0.589383 validation loss: 0.611298 validation accuracy: 0.674 Epoch 5 of 10 took 8.491s training loss: 0.583851 validation loss: 0.620155 validation accuracy: 0.668 Epoch 6 of 10 took 8.655s training loss: 0.572094 validation loss: 0.656461 validation accuracy: 0.627 Epoch 7 of 10 took 8.454s training loss: 0.559292 validation loss: 0.590685 validation accuracy: 0.710 Epoch 8 of 10 took 8.563s training loss: 0.555741 validation loss: 0.606333 validation accuracy: 0.691 Epoch 9 of 10 took 8.550s training loss: 0.540066 validation loss: 0.587310 validation accuracy: 0.713 Epoch 10 of 10 took 8.461s training loss: 0.544028 validation loss: 0.596310 validation accuracy: 0.701 ###Markdown **A kind reminder again:** don't wait for too many epochs. You can interrupt training after 5-20 epochs once validation accuracy stops going up. 
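If you would rather not watch the logs yourself, a simple patience rule can tell you when it is reasonable to stop. The helper below is only a sketch (the `patience` value is an arbitrary choice, and `train_model` above does not call it automatically): it inspects the list of validation accuracies returned by `train_model` and reports whether the best epoch is already more than `patience` epochs old. ###Code
def no_improvement(val_accuracy, patience=5):
    """Return True if the best validation accuracy was reached more than `patience` epochs ago."""
    if len(val_accuracy) <= patience:
        return False
    best_epoch = max(range(len(val_accuracy)), key=lambda i: val_accuracy[i])
    return best_epoch < len(val_accuracy) - patience

# toy usage: this accuracy history plateaus after the fourth epoch
history = [0.58, 0.62, 0.66, 0.71, 0.70, 0.69, 0.70, 0.69, 0.70]
print(no_improvement(history, patience=5))  # True -> probably safe to interrupt training
###Output _____no_output_____ ###Markdown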
Evaluation ###Code val_stats = test_model(best_model, val_loader, 'validation') test_stats = test_model(best_model, test_loader, 'test') plot_train_process(train_loss, val_loss, val_accuracy) ###Output _____no_output_____ ###Markdown Task 2: Fine-tuning In practice it is easier to use pre-trained NNWe may see, that our current model performs quite well even after a small number of training epochs.But for more complicated image classification or other computer vision tasks, it may be difficult to train CNN model from scratch.State-of-the-art models consist of huge number of layers (100-200 convolutional blocks) and require powerful hardware to converge.Thankfully, there are lots of pre-trained models available to be used for your own task, only slightly changing some of the final layers to your data.This is called fine-tuning.Let's try to load a pre-trained [ResNet-18](https://arxiv.org/abs/1512.03385) model from torch archive and fine-tune its final layers. ResNet (Shortcut + Batch Normalization) ###Code # Load pre-trained model model_resnet18 = torchvision.models.resnet18(pretrained=True) # Disable gradient updates for all the layers except the final layer for p in model_resnet18.parameters(): p.requires_grad = False # Parameters of newly constructed modules have requires_grad=True by default num_ftrs = model_resnet18.fc.in_features model_resnet18.fc = nn.Linear(num_ftrs, NUM_CLASSES, bias=False) # Use available device for calculations model_resnet18 = model_resnet18.to(device) summary(model_resnet18, (3, SIZE_H, SIZE_W)) ###Output ---------------------------------------------------------------- Layer (type) Output Shape Param # ================================================================ Conv2d-1 [-1, 64, 48, 48] 9,408 BatchNorm2d-2 [-1, 64, 48, 48] 128 ReLU-3 [-1, 64, 48, 48] 0 MaxPool2d-4 [-1, 64, 24, 24] 0 Conv2d-5 [-1, 64, 24, 24] 36,864 BatchNorm2d-6 [-1, 64, 24, 24] 128 ReLU-7 [-1, 64, 24, 24] 0 Conv2d-8 [-1, 64, 24, 24] 36,864 BatchNorm2d-9 [-1, 64, 24, 24] 128 ReLU-10 [-1, 64, 24, 24] 0 BasicBlock-11 [-1, 64, 24, 24] 0 Conv2d-12 [-1, 64, 24, 24] 36,864 BatchNorm2d-13 [-1, 64, 24, 24] 128 ReLU-14 [-1, 64, 24, 24] 0 Conv2d-15 [-1, 64, 24, 24] 36,864 BatchNorm2d-16 [-1, 64, 24, 24] 128 ReLU-17 [-1, 64, 24, 24] 0 BasicBlock-18 [-1, 64, 24, 24] 0 Conv2d-19 [-1, 128, 12, 12] 73,728 BatchNorm2d-20 [-1, 128, 12, 12] 256 ReLU-21 [-1, 128, 12, 12] 0 Conv2d-22 [-1, 128, 12, 12] 147,456 BatchNorm2d-23 [-1, 128, 12, 12] 256 Conv2d-24 [-1, 128, 12, 12] 8,192 BatchNorm2d-25 [-1, 128, 12, 12] 256 ReLU-26 [-1, 128, 12, 12] 0 BasicBlock-27 [-1, 128, 12, 12] 0 Conv2d-28 [-1, 128, 12, 12] 147,456 BatchNorm2d-29 [-1, 128, 12, 12] 256 ReLU-30 [-1, 128, 12, 12] 0 Conv2d-31 [-1, 128, 12, 12] 147,456 BatchNorm2d-32 [-1, 128, 12, 12] 256 ReLU-33 [-1, 128, 12, 12] 0 BasicBlock-34 [-1, 128, 12, 12] 0 Conv2d-35 [-1, 256, 6, 6] 294,912 BatchNorm2d-36 [-1, 256, 6, 6] 512 ReLU-37 [-1, 256, 6, 6] 0 Conv2d-38 [-1, 256, 6, 6] 589,824 BatchNorm2d-39 [-1, 256, 6, 6] 512 Conv2d-40 [-1, 256, 6, 6] 32,768 BatchNorm2d-41 [-1, 256, 6, 6] 512 ReLU-42 [-1, 256, 6, 6] 0 BasicBlock-43 [-1, 256, 6, 6] 0 Conv2d-44 [-1, 256, 6, 6] 589,824 BatchNorm2d-45 [-1, 256, 6, 6] 512 ReLU-46 [-1, 256, 6, 6] 0 Conv2d-47 [-1, 256, 6, 6] 589,824 BatchNorm2d-48 [-1, 256, 6, 6] 512 ReLU-49 [-1, 256, 6, 6] 0 BasicBlock-50 [-1, 256, 6, 6] 0 Conv2d-51 [-1, 512, 3, 3] 1,179,648 BatchNorm2d-52 [-1, 512, 3, 3] 1,024 ReLU-53 [-1, 512, 3, 3] 0 Conv2d-54 [-1, 512, 3, 3] 2,359,296 BatchNorm2d-55 [-1, 512, 3, 3] 1,024 Conv2d-56 [-1, 512, 3, 3] 
131,072 BatchNorm2d-57 [-1, 512, 3, 3] 1,024 ReLU-58 [-1, 512, 3, 3] 0 BasicBlock-59 [-1, 512, 3, 3] 0 Conv2d-60 [-1, 512, 3, 3] 2,359,296 BatchNorm2d-61 [-1, 512, 3, 3] 1,024 ReLU-62 [-1, 512, 3, 3] 0 Conv2d-63 [-1, 512, 3, 3] 2,359,296 BatchNorm2d-64 [-1, 512, 3, 3] 1,024 ReLU-65 [-1, 512, 3, 3] 0 BasicBlock-66 [-1, 512, 3, 3] 0 AdaptiveAvgPool2d-67 [-1, 512, 1, 1] 0 Linear-68 [-1, 2] 1,024 ================================================================ Total params: 11,177,536 Trainable params: 1,024 Non-trainable params: 11,176,512 ---------------------------------------------------------------- Input size (MB): 0.11 Forward/backward pass size (MB): 11.54 Params size (MB): 42.64 Estimated Total Size (MB): 54.28 ---------------------------------------------------------------- ###Markdown Training (only for final layer) ###Code # Observe that only parameters of final layer are being optimized as opposed to before opt_resnet = torch.optim.Adam(model_resnet18.fc.parameters(), lr=1e-3) ckpt_name_resnet18='model_resnet_18_finetune.ckpt' train_loss, val_loss, val_accuracy, best_model_resnet18 = train_model(\ model_resnet18,\ train_loader,\ val_loader, \ loss_fn, \ opt_resnet, \ 10) ###Output Epoch 1 of 10 took 8.535s training loss: 0.599295 validation loss: 0.550771 validation accuracy: 0.710 Epoch 2 of 10 took 8.579s training loss: 0.512135 validation loss: 0.530608 validation accuracy: 0.727 Epoch 3 of 10 took 8.509s training loss: 0.494768 validation loss: 0.516600 validation accuracy: 0.737 Epoch 4 of 10 took 8.552s training loss: 0.475611 validation loss: 0.497050 validation accuracy: 0.759 Epoch 5 of 10 took 9.114s training loss: 0.461734 validation loss: 0.482886 validation accuracy: 0.762 Epoch 6 of 10 took 8.844s training loss: 0.445673 validation loss: 0.462270 validation accuracy: 0.779 Epoch 7 of 10 took 8.735s training loss: 0.431096 validation loss: 0.447927 validation accuracy: 0.789 Epoch 8 of 10 took 8.610s training loss: 0.414886 validation loss: 0.435849 validation accuracy: 0.797 Epoch 9 of 10 took 8.547s training loss: 0.403419 validation loss: 0.424735 validation accuracy: 0.799 Epoch 10 of 10 took 8.612s training loss: 0.394624 validation loss: 0.418891 validation accuracy: 0.805 ###Markdown Evaluation ###Code val_stats = test_model(best_model, val_loader, 'validation') test_stats = test_model(best_model, test_loader, 'test') plot_train_process(train_loss, val_loss, val_accuracy) ###Output _____no_output_____ ###Markdown Use your own image ###Code from skimage.io import imread from skimage.transform import resize src_1_fp = r"img/example_1.png" src_2_fp = r"img/example_2.png" src_1 = imread(src_1_fp) src_2 = imread(src_2_fp) resized_1 = resize(src_1, (SIZE_H, SIZE_W), mode='reflect') resized_2 = resize(src_2, (SIZE_H, SIZE_W), mode='reflect') # convert to torch.Tensor tensor_1 = torch.Tensor(np.transpose((resized_1/255 - image_mean) / image_std, [2,0,1])[np.newaxis,:,:,:]).to(device) tensor_2 = torch.Tensor(np.transpose((resized_2/255 - image_mean) / image_std, [2,0,1])[np.newaxis,:,:,:]).to(device) # 'cat' scores score_1 = F.softmax(best_model_resnet18(tensor_1), 1)[0][0].detach().cpu().numpy() score_2 = F.softmax(best_model_resnet18(tensor_2), 1)[0][0].detach().cpu().numpy() get_label = lambda x: ('cat' if x > 0.5 else 'dog') + ': {:.4f}'.format(x) plt.figure(figsize=(10,5)) plt.subplot(121) plt.imshow(src_1) plt.title(get_label(score_1)) plt.subplot(122) plt.imshow(src_2) plt.title(get_label(score_2)) plt.show() ###Output _____no_output_____ ###Markdown Task 3: 
adding normalization and different model initialization Let's get back to hard work* Improve the task 1 CNN architecture with the following: * Add batch norm (with default params) between convolution and ReLU * nn.BatchNorm*d (1d for dense, 2d for conv) * usually better to put them after linear/conv but before nonlinearity* Re-train the network with the same optimizer; it should get at least __80%__ validation accuracy at peak.* Use the following model class to simplify the inferenceTo learn more about **batch_norm** and **data covariate shift**https://towardsdatascience.com/batch-normalization-in-neural-networks-1ac91516821chttps://www.youtube.com/watch?v=nUUqwaxLnWs ###Code # Custom model class def conv_block_3x3(in_channels, out_channels, stride=1): return nn.Sequential( # YOUR CODE HERE # CONV 3x3 -> BN -> ReLU # YOUR CODE ENDS HERE ) class MyModel(torch.nn.Module): def __init__(self, in_feature): super(MyModel, self).__init__() self.model = nn.Sequential( # YOUR CODE HERE: CONV_BLOCKS -> GLOBAL_POOLING (MAX/AVERAGE) nn.AdaptiveMaxPool2d(1), nn.Flatten() ) self.dropout = nn.Dropout(p=0.3) self.fc = nn.Sequential( # YOUR CODE HERE: FC->BN->RELU ) self.pred = nn.Sequential( nn.Linear(EMBEDDING_SIZE, NUM_CLASSES, bias=False) ) def forward(self, x): x = self.model(x) x = self.dropout(x) x = self.fc(x) x = self.dropout(x) x = self.pred(x) return x # outputs are here for convenience model_cnn_norm = MyModel(3) model_cnn_norm.to(device) summary(model_cnn_norm, (3, SIZE_H, SIZE_W), device='cuda') ###Output _____no_output_____ ###Markdown Training ###Code model_cnn_norm = model_cnn_norm.to(device) opt = torch.optim.Adam(model_cnn_norm.parameters(), lr=1e-3) ckpt_name_cnn_norm='model_cnn_norm.ckpt' model_cnn_norm, opt = train_model(model_cnn_norm, train_batch_gen, val_batch_gen, opt, ckpt_name=ckpt_name_cnn_norm) ###Output _____no_output_____ ###Markdown Evaluation ###Code best_model_cnn_norm = None with open(ckpt_name_cnn_norm, 'rb') as f: best_model_cnn_norm = torch.load(f) val_stats = test_model(best_model_cnn_norm, val_batch_gen, 'val') test_stats = test_model(best_model_cnn_norm, test_batch_gen, 'test') if val_stats['f1_score'] > 0.8 and test_stats['f1_score'] > 0.8: print('You have achieved the baseline for this task.') else: print('Train for some more time or change CNN architecture.') ###Output _____no_output_____ ###Markdown Task 4: Data Augmentation (bonus area)There's a powerful torchvision tool for image preprocessing and augmentation.Here's how it works: we define a pipeline that* makes random crops of data (augmentation)* randomly changes image color (augmentation)* randomly flips image horizontally (augmentation)* then normalizes it (preprocessing) ###Code transformer_augmented = transforms.Compose([ # YOUR CODE HERE transforms.ToTensor(), transforms.Normalize(image_mean, image_std)] ) # Load dataset using ImageFolder using transformer with augmentation # Note: We do not use augmentation for validation or testing train_dataset_aug = # YOUR CODE HERE: create a dataset using the transformer above train_aug_batch_gen = torch.utils.data.DataLoader(train_dataset_aug, batch_size=BATCH_SIZE, shuffle=True, num_workers=NUM_WORKERS) ###Output _____no_output_____ ###Markdown Let's look at some image examples ###Code plot_from_loader(train_aug_batch_gen) ###Output _____no_output_____ ###Markdown Note that we did not change test_dataset, as we do not need to augment image data in it.
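For reference, the placeholders in the augmentation cell above could be filled in roughly as follows. This is only a sketch, not the official solution: the crop scale, color-jitter strengths and flip probability are illustrative values, and the dataset path simply mirrors the one used for `train_dataset` earlier. ###Code
# one possible augmented preprocessing pipeline (illustrative parameter values)
transformer_augmented = transforms.Compose([
    transforms.RandomResizedCrop((SIZE_H, SIZE_W), scale=(0.7, 1.0)),          # random crops
    transforms.ColorJitter(brightness=0.3, contrast=0.3, saturation=0.3),      # random color changes
    transforms.RandomHorizontalFlip(p=0.5),                                    # random horizontal flips
    transforms.ToTensor(),
    transforms.Normalize(image_mean, image_std),                               # same normalization as before
])

# augmentation is applied to the training set only
train_dataset_aug = torchvision.datasets.ImageFolder(
    os.path.join(DATA_PATH, 'train_11k'), transform=transformer_augmented
)
train_aug_batch_gen = torch.utils.data.DataLoader(
    train_dataset_aug, batch_size=BATCH_SIZE, shuffle=True, num_workers=NUM_WORKERS
)
###Output _____no_output_____ ###Markdown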
Let's retrain our model, saving it to another variable Training ###Code model_cnn_aug = MyModel(3).to(device) opt = torch.optim.Adam(model_cnn_aug.parameters(), lr=1e-3) ckpt_name_aug='model_cnn_aug.ckpt' model_cnn_aug, opt = train_model(model_cnn_aug, train_aug_batch_gen, val_batch_gen, opt, ckpt_name=ckpt_name_aug, n_epochs=2 * EPOCH_NUM) ###Output _____no_output_____ ###Markdown Evaluation ###Code best_model_cnn_aug=None with open(ckpt_name_aug, 'rb') as f: best_model_cnn_aug = torch.load(f) val_stats = test_model(best_model_cnn_aug, val_batch_gen, 'val') test_stats = test_model(best_model_cnn_aug, test_batch_gen, 'test') if val_stats['f1_score'] > 0.9 and test_stats['f1_score'] > 0.9: print('You have achieved the baseline for this task.') else: print('Train for some more time or change augmentation scheme.') best_model_cnn_aug=None with open(ckpt_name_aug, 'rb') as f: best_model_cnn_aug = torch.load(f) val_stats = test_model(best_model_cnn_aug, val_batch_gen, 'val') test_stats = test_model(best_model_cnn_aug, test_batch_gen, 'test') ###Output _____no_output_____
examples/02_model_hybrid/ncf_deep_dive.ipynb
###Markdown Copyright (c) Microsoft Corporation. All rights reserved.Licensed under the MIT License. Neural Collaborative Filtering (NCF)This notebook serves as an introduction to Neural Collaborative Filtering (NCF), which is an innovative algorithm based on deep neural networks to tackle the key problem in recommendation — collaborative filtering — on the basis of implicit feedback. 0 Global Settings and Imports ###Code import sys import os import shutil import papermill as pm import scrapbook as sb import pandas as pd import numpy as np import tensorflow as tf tf.get_logger().setLevel('ERROR') # only show error messages from recommenders.utils.timer import Timer from recommenders.models.ncf.ncf_singlenode import NCF from recommenders.models.ncf.dataset import Dataset as NCFDataset from recommenders.datasets import movielens from recommenders.datasets.python_splitters import python_chrono_split from recommenders.evaluation.python_evaluation import (rmse, mae, rsquared, exp_var, map_at_k, ndcg_at_k, precision_at_k, recall_at_k, get_top_k_items) from recommenders.utils.constants import SEED as DEFAULT_SEED print("System version: {}".format(sys.version)) print("Pandas version: {}".format(pd.__version__)) print("Tensorflow version: {}".format(tf.__version__)) # top k items to recommend TOP_K = 10 # Select MovieLens data size: 100k, 1m, 10m, or 20m MOVIELENS_DATA_SIZE = '100k' # Model parameters EPOCHS = 100 BATCH_SIZE = 256 SEED = DEFAULT_SEED # Set None for non-deterministic results ###Output _____no_output_____ ###Markdown 1 Matrix factorization algorithmNCF is a neural matrix factorization model, which ensembles Generalized Matrix Factorization (GMF) and Multi-Layer Perceptron (MLP) to unify the strengths of linearity of MF and non-linearity of MLP for modelling the user–item latent structures. NCF can be demonstrated as a framework for GMF and MLP, which is illustrated as below: This figure shows how to utilize latent vectors of items and users, and then how to fuse outputs from GMF Layer (left) and MLP Layer (right). We will introduce this framework and show how to learn the model parameters in following sections. 1.1 The GMF modelIn ALS, the ratings are modeled as follows:$$\hat { r } _ { u , i } = q _ { i } ^ { T } p _ { u }$$GMF introduces a neural CF layer as the output layer of standard MF. In this way, MF can be easily generalizedand extended. For example, if we allow the edge weights of this output layer to be learnt from data without the uniform constraint, it will result in a variant of MF that allows varying importance of latent dimensions. And if we use a non-linear function for activation, it will generalize MF to a non-linear setting which might be more expressive than the linear MF model. GMF can be shown as follows:$$\hat { r } _ { u , i } = a _ { o u t } \left( h ^ { T } \left( q _ { i } \odot p _ { u } \right) \right)$$where $\odot$ is element-wise product of vectors. Additionally, ${a}_{out}$ and ${h}$ denote the activation function and edge weights of the output layer respectively. MF can be interpreted as a special case of GMF. Intuitively, if we use an identity function for aout and enforce h to be a uniform vector of 1, we can exactly recover the MF model. 1.2 The MLP modelNCF adopts two pathways to model users and items: 1) element-wise product of vectors, 2) concatenation of vectors. To learn interactions after concatenating of users and items latent features, the standard MLP model is applied. 
In this sense, we can endow the model a large level of flexibility and non-linearity to learn the interactions between $p_{u}$ and $q_{i}$. The details of MLP model are:For the input layer, there is concatention of user and item vectors:$$z _ { 1 } = \phi _ { 1 } \left( p _ { u } , q _ { i } \right) = \left[ \begin{array} { c } { p _ { u } } \\ { q _ { i } } \end{array} \right]$$So for the hidden layers and output layer of MLP, the details are:$$\phi _ { l } \left( z _ { l } \right) = a _ { o u t } \left( W _ { l } ^ { T } z _ { l } + b _ { l } \right) , ( l = 2,3 , \ldots , L - 1 )$$and:$$\hat { r } _ { u , i } = \sigma \left( h ^ { T } \phi \left( z _ { L - 1 } \right) \right)$$where ${ W }_{ l }$, ${ b }_{ l }$, and ${ a }_{ out }$ denote the weight matrix, bias vector, and activation function for the $l$-th layer’s perceptron, respectively. For activation functions of MLP layers, one can freely choose sigmoid, hyperbolic tangent (tanh), and Rectifier (ReLU), among others. Because we have a binary classification task, the activation function of the output layer is defined as sigmoid $\sigma(x)=\frac{1}{1+e^{-x}}$ to restrict the predicted score to be in (0,1). 1.3 Fusion of GMF and MLPTo provide more flexibility to the fused model, we allow GMF and MLP to learn separate embeddings, and combine the two models by concatenating their last hidden layer. We get $\phi^{GMF}$ from GMF:$$\phi _ { u , i } ^ { G M F } = p _ { u } ^ { G M F } \odot q _ { i } ^ { G M F }$$and obtain $\phi^{MLP}$ from MLP:$$\phi _ { u , i } ^ { M L P } = a _ { o u t } \left( W _ { L } ^ { T } \left( a _ { o u t } \left( \ldots a _ { o u t } \left( W _ { 2 } ^ { T } \left[ \begin{array} { c } { p _ { u } ^ { M L P } } \\ { q _ { i } ^ { M L P } } \end{array} \right] + b _ { 2 } \right) \ldots \right) \right) + b _ { L }\right.$$Lastly, we fuse output from GMF and MLP:$$\hat { r } _ { u , i } = \sigma \left( h ^ { T } \left[ \begin{array} { l } { \phi ^ { G M F } } \\ { \phi ^ { M L P } } \end{array} \right] \right)$$This model combines the linearity of MF and non-linearity of DNNs for modelling user–item latent structures. 1.4 Objective FunctionWe define the likelihood function as:$$P \left( \mathcal { R } , \mathcal { R } ^ { - } | \mathbf { P } , \mathbf { Q } , \Theta \right) = \prod _ { ( u , i ) \in \mathcal { R } } \hat { r } _ { u , i } \prod _ { ( u , j ) \in \mathcal { R } ^{ - } } \left( 1 - \hat { r } _ { u , j } \right)$$Where $\mathcal{R}$ denotes the set of observed interactions, and $\mathcal{ R } ^ { - }$ denotes the set of negative instances. $\mathbf{P}$ and $\mathbf{Q}$ denotes the latent factor matrix for users and items, respectively; and $\Theta$ denotes the model parameters. Taking the negative logarithm of the likelihood, we obtain the objective function to minimize for NCF method, which is known as [binary cross-entropy loss](https://en.wikipedia.org/wiki/Cross_entropy):$$L = - \sum _ { ( u , i ) \in \mathcal { R } \cup { \mathcal { R } } ^ { - } } r _ { u , i } \log \hat { r } _ { u , i } + \left( 1 - r _ { u , i } \right) \log \left( 1 - \hat { r } _ { u , i } \right)$$The optimization can be done by performing Stochastic Gradient Descent (SGD), which is described in the [Surprise SVD deep dive notebook](../02_model/surprise_svd_deep_dive.ipynb). Our SGD method is very similar to the SVD algorithm's. 
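As a quick numerical illustration of the objective above (this cell is not part of the original notebook), the snippet below evaluates the binary cross-entropy for a handful of made-up predicted scores $\hat{r}_{u,i}$ and their implicit-feedback labels; the numbers are arbitrary and only meant to make the formula concrete. ###Code
import numpy as np

# made-up scores for 3 observed interactions (label 1) and 3 sampled negatives (label 0)
r_hat = np.array([0.9, 0.7, 0.6, 0.3, 0.2, 0.4])
r = np.array([1, 1, 1, 0, 0, 0])

# L = -sum( r*log(r_hat) + (1-r)*log(1-r_hat) )
loss = -np.sum(r * np.log(r_hat) + (1 - r) * np.log(1 - r_hat))
print("binary cross-entropy: {:.4f}".format(loss))
###Output _____no_output_____ ###Markdown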
2 TensorFlow implementation of NCFWe will use the MovieLens dataset, which is composed of integer ratings from 1 to 5.We convert MovieLens into implicit feedback, and evaluate under our *leave-one-out* evaluation protocol.You can check the details of implementation in `recommenders/models/ncf` 3 TensorFlow NCF movie recommender 3.1 Load and split dataTo evaluate the performance of item recommendation, we adopt the leave-one-out evaluation.For each user, we held out his/her last interaction as the test set and utilized the remaining data for training. Since it is too time-consuming to rank all items for every user during evaluation, we followed the common strategy that randomly samples 100 items that are not interacted by the user, ranking the test item among the 100 items. Our test samples will be constructed by `NCFDataset`.We also show an alternative evaluation method, splitting the data chronologically using `python_chrono_split` to achieve a 75/25% training and test split. ###Code df = movielens.load_pandas_df( size=MOVIELENS_DATA_SIZE, header=["userID", "itemID", "rating", "timestamp"] ) df.head() train, test = python_chrono_split(df, 0.75) ###Output _____no_output_____ ###Markdown Filter out any users or items in the test set that do not appear in the training set. ###Code test = test[test["userID"].isin(train["userID"].unique())] test = test[test["itemID"].isin(train["itemID"].unique())] ###Output _____no_output_____ ###Markdown Create a test set containing the last interaction for each user as for the leave-one-out evaluation. ###Code leave_one_out_test = test.groupby("userID").last().reset_index() ###Output _____no_output_____ ###Markdown Write datasets to csv files. ###Code train_file = "./train.csv" test_file = "./test.csv" leave_one_out_test_file = "./leave_one_out_test.csv" train.to_csv(train_file, index=False) test.to_csv(test_file, index=False) leave_one_out_test.to_csv(leave_one_out_test_file, index=False) ###Output _____no_output_____ ###Markdown 3.2 Functions of NCF Dataset Important functions of the Dataset class for NCF:`train_loader(batch_size, shuffle_size)`, generate training batches of size `batch_size`. Positive examples are loaded from the training file and negative samples are added in memory. 'shuffle_size' determines the number of rows that are read into memory before the examples are shuffled. By default, the function will attempt to load all data before performing the shuffle. If memory constraints are encountered when using large datasets, try reducing 'shuffle_size'.`test_loader()`, generate test batch by every positive test instance, (eg. \[1, 2, 1\] is a positive user & item pair in test set (\[userID, itemID, rating\] for this tuple). This function returns data like \[\[1, 2, 1\], \[1, 3, 0\], \[1,6, 0\], ...\], ie. following our *leave-one-out* evaluation protocol. ###Code data = NCFDataset(train_file=train_file, test_file=leave_one_out_test_file, seed=SEED, overwrite_test_file_full=True) ###Output Indexing ./train.csv ... Indexing ./leave_one_out_test.csv ... Indexing ./leave_one_out_test_full.csv ... ###Markdown 3.3 Train NCF based on TensorFlowThe NCF has a lot of parameters. The most important ones are:`n_factors`, which controls the dimension of the latent space. 
Usually, the quality of the training set predictions grows as n_factors gets higher.`layer_sizes`, sizes of input layer (and hidden layers) of MLP, input type is list.`n_epochs`, which defines the number of iterations of the SGD procedure.Note that both parameters also affect the training time.`model_type`, we can train a single `"MLP"`, `"GMF"` or the combined `"NeuMF"` model by changing the type of model.We will here set `n_factors` to `4`, `layer_sizes` to `[16,8,4]`, `n_epochs` to `100`, `batch_size` to 256. To train the model, we simply need to call the `fit()` method. ###Code model = NCF ( n_users=data.n_users, n_items=data.n_items, model_type="NeuMF", n_factors=4, layer_sizes=[16,8,4], n_epochs=EPOCHS, batch_size=BATCH_SIZE, learning_rate=1e-3, verbose=10, seed=SEED ) with Timer() as train_time: model.fit(data) print("Took {} seconds for training.".format(train_time.interval)) ###Output Took 615.3995804620008 seconds for training. ###Markdown 3.4 Prediction and Evaluation 3.4.1 PredictionNow that our model is fitted, we can call `predict` to get some `predictions`. `predict` returns an internal object Prediction which can be easily converted back to a dataframe: ###Code predictions = [[row.userID, row.itemID, model.predict(row.userID, row.itemID)] for (_, row) in test.iterrows()] predictions = pd.DataFrame(predictions, columns=['userID', 'itemID', 'prediction']) predictions.head() ###Output _____no_output_____ ###Markdown 3.4.2 Generic EvaluationWe remove rated movies in the top k recommendations.To compute ranking metrics, we need predictions on all user, item pairs. We remove, though, the items already watched by the user, since we choose not to recommend them again. ###Code with Timer() as test_time: users, items, preds = [], [], [] item = list(train.itemID.unique()) for user in train.userID.unique(): user = [user] * len(item) users.extend(user) items.extend(item) preds.extend(list(model.predict(user, item, is_list=True))) all_predictions = pd.DataFrame(data={"userID": users, "itemID":items, "prediction":preds}) merged = pd.merge(train, all_predictions, on=["userID", "itemID"], how="outer") all_predictions = merged[merged.rating.isnull()].drop('rating', axis=1) print("Took {} seconds for prediction.".format(test_time.interval)) eval_map = map_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K) eval_ndcg = ndcg_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K) eval_precision = precision_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K) eval_recall = recall_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K) print("MAP:\t%f" % eval_map, "NDCG:\t%f" % eval_ndcg, "Precision@K:\t%f" % eval_precision, "Recall@K:\t%f" % eval_recall, sep='\n') ###Output MAP: 0.048144 NDCG: 0.198384 Precision@K: 0.176246 Recall@K: 0.098700 ###Markdown 3.4.3 "Leave-one-out" EvaluationWe implement the functions to reproduce the leave-one-out evaluation protocol mentioned in the original NCF paper.For each item in the test data, we randomly sample 100 items that the user has not interacted with, ranking the test item among those 101 items (1 positive item and 100 negative items). The performance of a ranked list is judged by **Hit Ratio (HR)** and **Normalized Discounted Cumulative Gain (NDCG)**. Finally, we average the values of those ranked lists to obtain the overall HR and NDCG on test data.We truncated the ranked list at 10 for both metrics.
As such, the HR intuitively measures whether the test item is present on the top-10 list, and the NDCG accounts for the position of the hit by assigning higher scores to hits at top ranks. ###Code k = TOP_K ndcgs = [] hit_ratio = [] for b in data.test_loader(): user_input, item_input, labels = b output = model.predict(user_input, item_input, is_list=True) output = np.squeeze(output) rank = sum(output >= output[0]) if rank <= k: ndcgs.append(1 / np.log(rank + 1)) hit_ratio.append(1) else: ndcgs.append(0) hit_ratio.append(0) eval_ndcg = np.mean(ndcgs) eval_hr = np.mean(hit_ratio) print("HR:\t%f" % eval_hr) print("NDCG:\t%f" % eval_ndcg) ###Output HR: 0.506893 NDCG: 0.401163 ###Markdown 3.5 Pre-trainingTo get better performance of NeuMF, we can adopt pre-training strategy. We first train GMF and MLP with random initializations until convergence. Then use their model parameters as the initialization for the corresponding parts of NeuMF’s parameters. Please pay attention to the output layer, where we concatenate weights of the two models with$$h ^ { N C F } \leftarrow \left[ \begin{array} { c } { \alpha h ^ { G M F } } \\ { ( 1 - \alpha ) h ^ { M L P } } \end{array} \right]$$where $h^{GMF}$ and $h^{MLP}$ denote the $h$ vector of the pretrained GMF and MLP model, respectively; and $\alpha$ is ahyper-parameter determining the trade-off between the two pre-trained models. We set $\alpha$ = 0.5. 3.5.1 Training GMF and MLP model`model.save`, we can set the `dir_name` to store the parameters of GMF and MLP ###Code model = NCF ( n_users=data.n_users, n_items=data.n_items, model_type="GMF", n_factors=4, layer_sizes=[16,8,4], n_epochs=EPOCHS, batch_size=BATCH_SIZE, learning_rate=1e-3, verbose=10, seed=SEED ) with Timer() as train_time: model.fit(data) print("Took {} seconds for training.".format(train_time.interval)) model.save(dir_name=".pretrain/GMF") model = NCF ( n_users=data.n_users, n_items=data.n_items, model_type="MLP", n_factors=4, layer_sizes=[16,8,4], n_epochs=EPOCHS, batch_size=BATCH_SIZE, learning_rate=1e-3, verbose=10, seed=SEED ) with Timer() as train_time: model.fit(data) print("Took {} seconds for training.".format(train_time.interval)) model.save(dir_name=".pretrain/MLP") ###Output Took 507.5963159920029 seconds for training. ###Markdown 3.5.2 Load pre-trained GMF and MLP model for NeuMF`model.load`, we can set the `gmf_dir` and `mlp_dir` to store the parameters for NeuMF. ###Code model = NCF ( n_users=data.n_users, n_items=data.n_items, model_type="NeuMF", n_factors=4, layer_sizes=[16,8,4], n_epochs=EPOCHS, batch_size=BATCH_SIZE, learning_rate=1e-3, verbose=10, seed=SEED ) model.load(gmf_dir=".pretrain/GMF", mlp_dir=".pretrain/MLP", alpha=0.5) with Timer() as train_time: model.fit(data) print("Took {} seconds for training.".format(train_time.interval)) ###Output Took 616.8741841240007 seconds for training. ###Markdown 3.5.3 Compare with not pre-trained NeuMFYou can use beforementioned evaluation methods to evaluate the pre-trained `NCF` Model. Usually, we will find the performance of pre-trained NCF is better than the not pre-trained. 
###Code with Timer() as test_time: users, items, preds = [], [], [] item = list(train.itemID.unique()) for user in train.userID.unique(): user = [user] * len(item) users.extend(user) items.extend(item) preds.extend(list(model.predict(user, item, is_list=True))) all_predictions = pd.DataFrame(data={"userID": users, "itemID":items, "prediction":preds}) merged = pd.merge(train, all_predictions, on=["userID", "itemID"], how="outer") all_predictions = merged[merged.rating.isnull()].drop('rating', axis=1) print("Took {} seconds for prediction.".format(test_time.interval)) eval_map2 = map_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K) eval_ndcg2 = ndcg_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K) eval_precision2 = precision_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K) eval_recall2 = recall_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K) print("MAP:\t%f" % eval_map2, "NDCG:\t%f" % eval_ndcg2, "Precision@K:\t%f" % eval_precision2, "Recall@K:\t%f" % eval_recall2, sep='\n') # Record results with papermill for tests sb.glue("map", eval_map) sb.glue("ndcg", eval_ndcg) sb.glue("precision", eval_precision) sb.glue("recall", eval_recall) sb.glue("map2", eval_map2) sb.glue("ndcg2", eval_ndcg2) sb.glue("precision2", eval_precision2) sb.glue("recall2", eval_recall2) ###Output _____no_output_____ ###Markdown 3.5.4 Delete pre-trained directory ###Code save_dir = ".pretrain" if os.path.exists(save_dir): shutil.rmtree(save_dir) print("Did \'%s\' exist?: %s" % (save_dir, os.path.exists(save_dir))) ###Output Did '.pretrain' exist?: False ###Markdown 3.4.2 Generic EvaluationWe remove rated movies in the top k recommendationsTo compute ranking metrics, we need predictions on all user, item pairs. We remove though the items already watched by the user, since we choose not to recommend them again. ###Code with Timer() as test_time: users, items, preds = [], [], [] item = list(train.itemID.unique()) for user in train.userID.unique(): user = [user] * len(item) users.extend(user) items.extend(item) preds.extend(list(model.predict(user, item, is_list=True))) all_predictions = pd.DataFrame(data={"userID": users, "itemID":items, "prediction":preds}) merged = pd.merge(train, all_predictions, on=["userID", "itemID"], how="outer") all_predictions = merged[merged.rating.isnull()].drop('rating', axis=1) print("Took {} seconds for prediction.".format(test_time.interval)) eval_map = map_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K) eval_ndcg = ndcg_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K) eval_precision = precision_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K) eval_recall = recall_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K) print("MAP:\t%f" % eval_map, "NDCG:\t%f" % eval_ndcg, "Precision@K:\t%f" % eval_precision, "Recall@K:\t%f" % eval_recall, sep='\n') ###Output MAP: 0.046273 NDCG: 0.190750 Precision@K: 0.173277 Recall@K: 0.096688 ###Markdown 3.4.3 "Leave-one-out" EvaluationWe implement the functions to repoduce the leave-one-out evaluation protocol mentioned in original NCF paper.For each item in test data, we randomly samples 100 items that are not interacted by the user, ranking the test item among the 101 items (1 positive item and 100 negative items). The performance of a ranked list is judged by **Hit Ratio (HR)** and **Normalized Discounted Cumulative Gain (NDCG)**. 
Finally, we average the values of those ranked lists to obtain the overall HR and NDCG on test data.We truncated the ranked list at 10 for both metrics. As such, the HR intuitively measures whether the test item is present on the top-10 list, and the NDCG accounts for the position of the hit by assigning higher scores to hits at top ranks.**Note 1:** In exact leave-one-out evaluation protocol, we select only one of the latest items interacted with a user as test data for each user. But in this notebook, to compare with other algorithms, we select latest 25% dataset as test data. So this is an artificial "leave-one-out" evaluation only showing how to use `test_loader` and how to calculate metrics like the original paper. You can reproduce the real leave-one-out evaluation by changing the way of splitting data.**Note 2:** Because of sampling 100 negative items for each positive test item, ###Code k = TOP_K ndcgs = [] hit_ratio = [] for b in data.test_loader(): user_input, item_input, labels = b output = model.predict(user_input, item_input, is_list=True) output = np.squeeze(output) rank = sum(output >= output[0]) if rank <= k: ndcgs.append(1 / np.log(rank + 1)) hit_ratio.append(1) else: ndcgs.append(0) hit_ratio.append(0) eval_ndcg = np.mean(ndcgs) eval_hr = np.mean(hit_ratio) print("HR:\t%f" % eval_hr) print("NDCG:\t%f" % eval_ndcg) ###Output HR: 0.488564 NDCG: 0.383339 ###Markdown 3.5 Pre-trainingTo get better performance of NeuMF, we can adopt pre-training strategy. We first train GMF and MLP with random initializations until convergence. Then use their model parameters as the initialization for the corresponding parts of NeuMF’s parameters. Please pay attention to the output layer, where we concatenate weights of the two models with$$h ^ { N C F } \leftarrow \left[ \begin{array} { c } { \alpha h ^ { G M F } } \\ { ( 1 - \alpha ) h ^ { M L P } } \end{array} \right]$$where $h^{GMF}$ and $h^{MLP}$ denote the $h$ vector of the pretrained GMF and MLP model, respectively; and $\alpha$ is ahyper-parameter determining the trade-off between the two pre-trained models. We set $\alpha$ = 0.5. 3.5.1 Training GMF and MLP model`model.save`, we can set the `dir_name` to store the parameters of GMF and MLP ###Code model = NCF ( n_users=data.n_users, n_items=data.n_items, model_type="GMF", n_factors=4, layer_sizes=[16,8,4], n_epochs=EPOCHS, batch_size=BATCH_SIZE, learning_rate=1e-3, verbose=10, seed=SEED ) with Timer() as train_time: model.fit(data) print("Took {} seconds for training.".format(train_time.interval)) model.save(dir_name=".pretrain/GMF") model = NCF ( n_users=data.n_users, n_items=data.n_items, model_type="MLP", n_factors=4, layer_sizes=[16,8,4], n_epochs=EPOCHS, batch_size=BATCH_SIZE, learning_rate=1e-3, verbose=10, seed=SEED ) with Timer() as train_time: model.fit(data) print("Took {} seconds for training.".format(train_time.interval)) model.save(dir_name=".pretrain/MLP") ###Output Took 566.8783325639997 seconds for training. ###Markdown 3.5.2 Load pre-trained GMF and MLP model for NeuMF`model.load`, we can set the `gmf_dir` and `mlp_dir` to store the parameters for NeuMF. 
###Code model = NCF ( n_users=data.n_users, n_items=data.n_items, model_type="NeuMF", n_factors=4, layer_sizes=[16,8,4], n_epochs=EPOCHS, batch_size=BATCH_SIZE, learning_rate=1e-3, verbose=10, seed=SEED ) model.load(gmf_dir=".pretrain/GMF", mlp_dir=".pretrain/MLP", alpha=0.5) with Timer() as train_time: model.fit(data) print("Took {} seconds for training.".format(train_time.interval)) ###Output Took 655.1110815689999 seconds for training. ###Markdown 3.5.3 Compare with not pre-trained NeuMFYou can use beforementioned evaluation methods to evaluate the pre-trained `NCF` Model. Usually, we will find the performance of pre-trained NCF is better than the not pre-trained. ###Code with Timer() as test_time: users, items, preds = [], [], [] item = list(train.itemID.unique()) for user in train.userID.unique(): user = [user] * len(item) users.extend(user) items.extend(item) preds.extend(list(model.predict(user, item, is_list=True))) all_predictions = pd.DataFrame(data={"userID": users, "itemID":items, "prediction":preds}) merged = pd.merge(train, all_predictions, on=["userID", "itemID"], how="outer") all_predictions = merged[merged.rating.isnull()].drop('rating', axis=1) print("Took {} seconds for prediction.".format(test_time.interval)) eval_map2 = map_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K) eval_ndcg2 = ndcg_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K) eval_precision2 = precision_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K) eval_recall2 = recall_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K) print("MAP:\t%f" % eval_map2, "NDCG:\t%f" % eval_ndcg2, "Precision@K:\t%f" % eval_precision2, "Recall@K:\t%f" % eval_recall2, sep='\n') # Record results with papermill for tests sb.glue("map", eval_map) sb.glue("ndcg", eval_ndcg) sb.glue("precision", eval_precision) sb.glue("recall", eval_recall) sb.glue("map2", eval_map2) sb.glue("ndcg2", eval_ndcg2) sb.glue("precision2", eval_precision2) sb.glue("recall2", eval_recall2) ###Output _____no_output_____ ###Markdown 3.5.4 Delete pre-trained directory ###Code save_dir = ".pretrain" if os.path.exists(save_dir): shutil.rmtree(save_dir) print("Did \'%s\' exist?: %s" % (save_dir, os.path.exists(save_dir))) ###Output Did '.pretrain' exist?: False ###Markdown Copyright (c) Microsoft Corporation. All rights reserved.Licensed under the MIT License. Neural Collaborative Filtering (NCF)This notebook serves as an introduction to Neural Collaborative Filtering (NCF), which is an innovative algorithm based on deep neural networks to tackle the key problem in recommendation — collaborative filtering — on the basis of implicit feedback. 
0 Global Settings and Imports ###Code import sys import os import shutil import papermill as pm import scrapbook as sb import pandas as pd import numpy as np import tensorflow as tf tf.get_logger().setLevel('ERROR') # only show error messages from reco_utils.common.timer import Timer from reco_utils.recommender.ncf.ncf_singlenode import NCF from reco_utils.recommender.ncf.dataset import Dataset as NCFDataset from reco_utils.dataset import movielens from reco_utils.dataset.python_splitters import python_chrono_split from reco_utils.evaluation.python_evaluation import (rmse, mae, rsquared, exp_var, map_at_k, ndcg_at_k, precision_at_k, recall_at_k, get_top_k_items) from reco_utils.common.constants import SEED as DEFAULT_SEED print("System version: {}".format(sys.version)) print("Pandas version: {}".format(pd.__version__)) print("Tensorflow version: {}".format(tf.__version__)) # top k items to recommend TOP_K = 10 # Select MovieLens data size: 100k, 1m, 10m, or 20m MOVIELENS_DATA_SIZE = '100k' # Model parameters EPOCHS = 100 BATCH_SIZE = 256 SEED = DEFAULT_SEED # Set None for non-deterministic results ###Output _____no_output_____ ###Markdown 1 Matrix factorization algorithmNCF is new neural matrix factorization model, which ensembles Generalized Matrix Factorization (GMF) and Multi-Layer Perceptron (MLP) to unify the strengths of linearity of MF and non-linearity of MLP for modelling the user–item latent structures. NCF can be demonstrated as a framework for GMF and MLP, which is illustrated as below: This figure shows how to utilize latent vectors of items and users, and then how to fuse outputs from GMF Layer (left) and MLP Layer (right). We will introduce this framework and show how to learn the model parameters in following sections. 1.1 The GMF modelIn ALS, the ratings are modeled as follows:$$\hat { r } _ { u , i } = q _ { i } ^ { T } p _ { u }$$GMF introduces neural CF layer as the output layer of standard MF. In this way, MF can be easily generalizedand extended. For example, if we allow the edge weights of this output layer to be learnt from data without the uniform constraint, it will result in a variant of MF that allows varying importance of latent dimensions. And if we use a non-linear function for activation, it will generalize MF to a non-linear setting which might be more expressive than the linear MF model. GMF can be shown as follows:$$\hat { r } _ { u , i } = a _ { o u t } \left( h ^ { T } \left( q _ { i } \odot p _ { u } \right) \right)$$where $\odot$ is element-wise product of vectors. Additionally, ${a}_{out}$ and ${h}$ denote the activation function and edge weights of the output layer respectively. MF can be interpreted as a special case of GMF. Intuitively, if we use an identity function for aout and enforce h to be a uniform vector of 1, we can exactly recover the MF model. 1.2 The MLP modelNCF adopts two pathways to model users and items: 1) element-wise product of vectors, 2) concatenation of vectors. To learn interactions after concatenating of users and items latent features, the standard MLP model is applied. In this sense, we can endow the model a large level of flexibility and non-linearity to learn the interactions between $p_{u}$ and $q_{i}$. 
In more detail, the MLP pathway is defined as follows. For the input layer, the user and item vectors are concatenated:
$$z _ { 1 } = \phi _ { 1 } \left( p _ { u } , q _ { i } \right) = \left[ \begin{array} { c } { p _ { u } } \\ { q _ { i } } \end{array} \right]$$
For the hidden layers and the output layer of the MLP, we have:
$$\phi _ { l } \left( z _ { l } \right) = a _ { o u t } \left( W _ { l } ^ { T } z _ { l } + b _ { l } \right) , ( l = 2,3 , \ldots , L - 1 )$$
and:
$$\hat { r } _ { u , i } = \sigma \left( h ^ { T } \phi \left( z _ { L - 1 } \right) \right)$$
where ${ W }_{ l }$, ${ b }_{ l }$, and ${ a }_{ out }$ denote the weight matrix, bias vector, and activation function of the $l$-th layer’s perceptron, respectively. For the activation functions of the MLP layers, one can freely choose sigmoid, hyperbolic tangent (tanh), and Rectifier (ReLU), among others. Because the task is a binary classification of implicit-feedback data, the activation function of the output layer is defined as the sigmoid $\sigma(x)=\frac{1}{1+e^{-x}}$ to restrict the predicted score to be in (0,1).

1.3 Fusion of GMF and MLP
To provide more flexibility to the fused model, we allow GMF and MLP to learn separate embeddings, and combine the two models by concatenating their last hidden layers. We get $\phi^{GMF}$ from GMF:
$$\phi _ { u , i } ^ { G M F } = p _ { u } ^ { G M F } \odot q _ { i } ^ { G M F }$$
and obtain $\phi^{MLP}$ from MLP:
$$\phi _ { u , i } ^ { M L P } = a _ { o u t } \left( W _ { L } ^ { T } \left( a _ { o u t } \left( \ldots a _ { o u t } \left( W _ { 2 } ^ { T } \left[ \begin{array} { c } { p _ { u } ^ { M L P } } \\ { q _ { i } ^ { M L P } } \end{array} \right] + b _ { 2 } \right) \ldots \right) \right) + b _ { L } \right)$$
Lastly, we fuse the outputs of GMF and MLP:
$$\hat { r } _ { u , i } = \sigma \left( h ^ { T } \left[ \begin{array} { l } { \phi ^ { G M F } } \\ { \phi ^ { M L P } } \end{array} \right] \right)$$
This model combines the linearity of MF and the non-linearity of DNNs for modelling the user–item latent structures.

1.4 Objective Function
We define the likelihood function as:
$$P \left( \mathcal { R } , \mathcal { R } ^ { - } | \mathbf { P } , \mathbf { Q } , \Theta \right) = \prod _ { ( u , i ) \in \mathcal { R } } \hat { r } _ { u , i } \prod _ { ( u , j ) \in \mathcal { R } ^{ - } } \left( 1 - \hat { r } _ { u , j } \right)$$
where $\mathcal{R}$ denotes the set of observed interactions and $\mathcal{ R } ^ { - }$ denotes the set of negative instances. $\mathbf{P}$ and $\mathbf{Q}$ denote the latent factor matrices for users and items, respectively, and $\Theta$ denotes the model parameters. Taking the negative logarithm of the likelihood, we obtain the objective function to minimize for the NCF method, which is known as the [binary cross-entropy loss](https://en.wikipedia.org/wiki/Cross_entropy):
$$L = - \sum _ { ( u , i ) \in \mathcal { R } \cup { \mathcal { R } } ^ { - } } r _ { u , i } \log \hat { r } _ { u , i } + \left( 1 - r _ { u , i } \right) \log \left( 1 - \hat { r } _ { u , i } \right)$$
The optimization can be done by performing Stochastic Gradient Descent (SGD), which is described in the [Surprise SVD deep dive notebook](../02_model/surprise_svd_deep_dive.ipynb). Our SGD method is very similar to the SVD algorithm's.
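To make the scoring function and the loss concrete, here is a minimal NumPy sketch of a NeuMF-style forward pass and the binary cross-entropy terms, with made-up dimensions and randomly initialized (untrained) weights; the actual trainable TensorFlow implementation is the `NCF` class used in the next section. ###Code
import numpy as np

rng = np.random.default_rng(0)
sigmoid = lambda x: 1.0 / (1.0 + np.exp(-x))
relu = lambda x: np.maximum(x, 0.0)

n_factors = 4
# Separate GMF and MLP embeddings for one (user, item) pair
p_u_gmf, q_i_gmf = rng.normal(size=n_factors), rng.normal(size=n_factors)
p_u_mlp, q_i_mlp = rng.normal(size=n_factors), rng.normal(size=n_factors)

# GMF pathway: element-wise product of the embeddings
phi_gmf = p_u_gmf * q_i_gmf

# MLP pathway: concatenate the embeddings, then two small hidden layers (8 -> 8 -> 4)
z1 = np.concatenate([p_u_mlp, q_i_mlp])
W2, b2 = rng.normal(size=(8, 8)), np.zeros(8)
W3, b3 = rng.normal(size=(8, 4)), np.zeros(4)
phi_mlp = relu(W3.T @ relu(W2.T @ z1 + b2) + b3)

# Fusion: concatenate both pathways and apply the output layer h with a sigmoid
h = rng.normal(size=phi_gmf.size + phi_mlp.size)
r_hat = sigmoid(h @ np.concatenate([phi_gmf, phi_mlp]))

# Binary cross-entropy for an observed interaction (r = 1) and a sampled negative (r = 0)
bce = lambda r, r_hat: -(r * np.log(r_hat) + (1 - r) * np.log(1 - r_hat))
print(r_hat, bce(1, r_hat), bce(0, r_hat))
###Output
_____no_output_____
###Markdown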
2 TensorFlow implementation of NCF
We will use the MovieLens dataset, which is composed of integer ratings from 1 to 5. We convert MovieLens into implicit feedback and evaluate under our *leave-one-out* evaluation protocol. You can check the details of the implementation in `reco_utils/recommender/ncf`.

3 TensorFlow NCF movie recommender

3.1 Load and split data
To evaluate the performance of item recommendation, we adopted the leave-one-out evaluation. For each user, we held out his/her latest interaction as the test set and utilized the remaining data for training. We use `python_chrono_split` to achieve this. And since it is too time-consuming to rank all items for every user during evaluation, we followed the common strategy of randomly sampling 100 items the user has not interacted with and ranking the test item among these 101 items (the held-out item plus the 100 sampled negatives). Our test samples will be constructed by `NCFDataset`. ###Code
df = movielens.load_pandas_df(
    size=MOVIELENS_DATA_SIZE,
    header=["userID", "itemID", "rating", "timestamp"]
)

df.head()

train, test = python_chrono_split(df, 0.75)
###Output
_____no_output_____
###Markdown
3.2 Functions of NCF Dataset
The Dataset class for NCF, whose important functions are:
`negative_sampling()`, which samples negative user & item pairs for every positive instance; the number of negatives per positive is set by the parameter `n_neg`.
`train_loader(batch_size, shuffle=True)`, which generates training batches of size `batch_size`; we can also set whether to `shuffle` the training set.
`test_loader()`, which generates a test batch for every positive test instance, e.g. \[1, 2, 1\] is a positive user & item pair in the test set (the tuple is \[userID, itemID, rating\]). The function returns batches such as \[\[1, 2, 1\], \[1, 3, 0\], \[1, 6, 0\], ...\], i.e. the positive pair followed by sampled negatives, following our *leave-one-out* evaluation protocol. ###Code
data = NCFDataset(train=train, test=test, seed=SEED)
###Output
_____no_output_____
###Markdown
3.3 Train NCF based on TensorFlow
NCF has a lot of parameters. The most important ones are:
`n_factors`, which controls the dimension of the latent space. Usually, the quality of the training set predictions grows as `n_factors` gets higher.
`layer_sizes`, the sizes of the input layer (and hidden layers) of the MLP, given as a list.
`n_epochs`, which defines the number of iterations of the SGD procedure.
Note that these parameters also affect the training time.
`model_type`: we can train a single `"MLP"` or `"GMF"`, or the combined `"NeuMF"` model, by changing the type of model.
We will here set `n_factors` to `4`, `layer_sizes` to `[16,8,4]`, `n_epochs` to `100`, and `batch_size` to `256`. To train the model, we simply need to call the `fit()` method. ###Code
model = NCF (
    n_users=data.n_users,
    n_items=data.n_items,
    model_type="NeuMF",
    n_factors=4,
    layer_sizes=[16,8,4],
    n_epochs=EPOCHS,
    batch_size=BATCH_SIZE,
    learning_rate=1e-3,
    verbose=10,
    seed=SEED
)

with Timer() as train_time:
    model.fit(data)

print("Took {} seconds for training.".format(train_time.interval))
###Output
Took 663.2377220259996 seconds for training.
###Markdown
3.4 Prediction and Evaluation

3.4.1 Prediction
Now that our model is fitted, we can call `predict` to get some `predictions`.
`predict` returns the predicted score for a (user, item) pair, which can easily be collected into a dataframe: ###Code
predictions = [[row.userID, row.itemID, model.predict(row.userID, row.itemID)]
               for (_, row) in test.iterrows()]

predictions = pd.DataFrame(predictions, columns=['userID', 'itemID', 'prediction'])
predictions.head()
###Output
_____no_output_____
###Markdown
3.4.2 Generic Evaluation
We remove rated movies in the top k recommendations. To compute ranking metrics, we need predictions on all (user, item) pairs. We then drop the items already watched by the user, since we choose not to recommend them again. ###Code
with Timer() as test_time:
    users, items, preds = [], [], []
    item = list(train.itemID.unique())
    for user in train.userID.unique():
        user = [user] * len(item)
        users.extend(user)
        items.extend(item)
        preds.extend(list(model.predict(user, item, is_list=True)))

    all_predictions = pd.DataFrame(data={"userID": users, "itemID": items, "prediction": preds})

    merged = pd.merge(train, all_predictions, on=["userID", "itemID"], how="outer")
    all_predictions = merged[merged.rating.isnull()].drop('rating', axis=1)

print("Took {} seconds for prediction.".format(test_time.interval))

eval_map = map_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K)
eval_ndcg = ndcg_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K)
eval_precision = precision_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K)
eval_recall = recall_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K)

print("MAP:\t%f" % eval_map,
      "NDCG:\t%f" % eval_ndcg,
      "Precision@K:\t%f" % eval_precision,
      "Recall@K:\t%f" % eval_recall, sep='\n')
###Output
MAP: 0.046273
NDCG: 0.190750
Precision@K: 0.173277
Recall@K: 0.096688
###Markdown
3.4.3 "Leave-one-out" Evaluation
We implement the functions to reproduce the leave-one-out evaluation protocol of the original NCF paper. For each item in the test data, we randomly sample 100 items the user has not interacted with and rank the test item among the 101 items (1 positive item and 100 negative items). The performance of a ranked list is judged by **Hit Ratio (HR)** and **Normalized Discounted Cumulative Gain (NDCG)**. Finally, we average the values over those ranked lists to obtain the overall HR and NDCG on the test data.
We truncated the ranked list at 10 for both metrics. As such, the HR intuitively measures whether the test item is present on the top-10 list, and the NDCG accounts for the position of the hit by assigning higher scores to hits at top ranks.
**Note 1:** In the exact leave-one-out evaluation protocol, we select only the latest item each user interacted with as that user's test data. But in this notebook, to compare with other algorithms, we use the latest 25% of the dataset as test data. So this is an artificial "leave-one-out" evaluation that only shows how to use `test_loader` and how to calculate the metrics as in the original paper.
You can reproduce the real leave-one-out evaluation by changing the way the data is split.
**Note 2:** Because 100 negative items are sampled for each positive test item, the reported HR and NDCG depend on which negatives are drawn (so results can vary with the sampling seed). ###Code
k = TOP_K

ndcgs = []
hit_ratio = []

for b in data.test_loader():
    user_input, item_input, labels = b
    output = model.predict(user_input, item_input, is_list=True)

    output = np.squeeze(output)
    rank = sum(output >= output[0])
    if rank <= k:
        ndcgs.append(1 / np.log(rank + 1))
        hit_ratio.append(1)
    else:
        ndcgs.append(0)
        hit_ratio.append(0)

eval_ndcg = np.mean(ndcgs)
eval_hr = np.mean(hit_ratio)

print("HR:\t%f" % eval_hr)
print("NDCG:\t%f" % eval_ndcg)
###Output
HR: 0.488564
NDCG: 0.383339
###Markdown
3.5 Pre-training
To get better performance out of NeuMF, we can adopt a pre-training strategy. We first train GMF and MLP with random initializations until convergence, and then use their model parameters as the initialization for the corresponding parts of NeuMF's parameters. Please pay attention to the output layer, where we concatenate the weights of the two models with
$$h ^ { N C F } \leftarrow \left[ \begin{array} { c } { \alpha h ^ { G M F } } \\ { ( 1 - \alpha ) h ^ { M L P } } \end{array} \right]$$
where $h^{GMF}$ and $h^{MLP}$ denote the $h$ vector of the pretrained GMF and MLP model, respectively, and $\alpha$ is a hyper-parameter determining the trade-off between the two pre-trained models. We set $\alpha$ = 0.5.

3.5.1 Training GMF and MLP model
With `model.save`, we can set the `dir_name` in which to store the parameters of GMF and MLP. ###Code
model = NCF (
    n_users=data.n_users,
    n_items=data.n_items,
    model_type="GMF",
    n_factors=4,
    layer_sizes=[16,8,4],
    n_epochs=EPOCHS,
    batch_size=BATCH_SIZE,
    learning_rate=1e-3,
    verbose=10,
    seed=SEED
)

with Timer() as train_time:
    model.fit(data)

print("Took {} seconds for training.".format(train_time.interval))

model.save(dir_name=".pretrain/GMF")

model = NCF (
    n_users=data.n_users,
    n_items=data.n_items,
    model_type="MLP",
    n_factors=4,
    layer_sizes=[16,8,4],
    n_epochs=EPOCHS,
    batch_size=BATCH_SIZE,
    learning_rate=1e-3,
    verbose=10,
    seed=SEED
)

with Timer() as train_time:
    model.fit(data)

print("Took {} seconds for training.".format(train_time.interval))

model.save(dir_name=".pretrain/MLP")
###Output
Took 566.8783325639997 seconds for training.
###Markdown
3.5.2 Load pre-trained GMF and MLP model for NeuMF
With `model.load`, we can set `gmf_dir` and `mlp_dir` to load the stored GMF and MLP parameters into NeuMF. ###Code
model = NCF (
    n_users=data.n_users,
    n_items=data.n_items,
    model_type="NeuMF",
    n_factors=4,
    layer_sizes=[16,8,4],
    n_epochs=EPOCHS,
    batch_size=BATCH_SIZE,
    learning_rate=1e-3,
    verbose=10,
    seed=SEED
)

model.load(gmf_dir=".pretrain/GMF", mlp_dir=".pretrain/MLP", alpha=0.5)

with Timer() as train_time:
    model.fit(data)

print("Took {} seconds for training.".format(train_time.interval))
###Output
Took 655.1110815689999 seconds for training.
###Markdown
3.5.3 Compare with not pre-trained NeuMF
You can use the aforementioned evaluation methods to evaluate the pre-trained `NCF` model. Usually, we will find that the pre-trained NCF performs better than the one trained from scratch.
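Since the evaluation cell below repeats the ranking-evaluation block of section 3.4.2 verbatim, one convenient refactor is to wrap that block in a small helper and call it once per model. This is only a sketch: it assumes the `train`/`test` dataframes and the metric functions imported at the top of the notebook, and `ranking_metrics` is a name introduced here, not part of `reco_utils`. ###Code
def ranking_metrics(model, train, test, k=TOP_K):
    """Score all (user, item) pairs unseen in `train` with `model` and return ranking metrics at k."""
    users, items, preds = [], [], []
    item = list(train.itemID.unique())
    for user in train.userID.unique():
        user = [user] * len(item)
        users.extend(user)
        items.extend(item)
        preds.extend(list(model.predict(user, item, is_list=True)))

    all_predictions = pd.DataFrame(data={"userID": users, "itemID": items, "prediction": preds})

    # Anti-join against the training set so already-seen items are never recommended again
    merged = pd.merge(train, all_predictions, on=["userID", "itemID"], how="outer")
    all_predictions = merged[merged.rating.isnull()].drop('rating', axis=1)

    return {
        "MAP": map_at_k(test, all_predictions, col_prediction='prediction', k=k),
        "NDCG": ndcg_at_k(test, all_predictions, col_prediction='prediction', k=k),
        "Precision@K": precision_at_k(test, all_predictions, col_prediction='prediction', k=k),
        "Recall@K": recall_at_k(test, all_predictions, col_prediction='prediction', k=k),
    }
###Output
_____no_output_____
###Markdown
The cell below carries out the same computation inline for the pre-trained model.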
###Code with Timer() as test_time: users, items, preds = [], [], [] item = list(train.itemID.unique()) for user in train.userID.unique(): user = [user] * len(item) users.extend(user) items.extend(item) preds.extend(list(model.predict(user, item, is_list=True))) all_predictions = pd.DataFrame(data={"userID": users, "itemID":items, "prediction":preds}) merged = pd.merge(train, all_predictions, on=["userID", "itemID"], how="outer") all_predictions = merged[merged.rating.isnull()].drop('rating', axis=1) print("Took {} seconds for prediction.".format(test_time.interval)) eval_map2 = map_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K) eval_ndcg2 = ndcg_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K) eval_precision2 = precision_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K) eval_recall2 = recall_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K) print("MAP:\t%f" % eval_map2, "NDCG:\t%f" % eval_ndcg2, "Precision@K:\t%f" % eval_precision2, "Recall@K:\t%f" % eval_recall2, sep='\n') # Record results with papermill for tests sb.glue("map", eval_map) sb.glue("ndcg", eval_ndcg) sb.glue("precision", eval_precision) sb.glue("recall", eval_recall) sb.glue("map2", eval_map2) sb.glue("ndcg2", eval_ndcg2) sb.glue("precision2", eval_precision2) sb.glue("recall2", eval_recall2) ###Output _____no_output_____ ###Markdown 3.5.4 Delete pre-trained directory ###Code save_dir = ".pretrain" if os.path.exists(save_dir): shutil.rmtree(save_dir) print("Did \'%s\' exist?: %s" % (save_dir, os.path.exists(save_dir))) ###Output Did '.pretrain' exist?: False ###Markdown Copyright (c) Microsoft Corporation. All rights reserved.Licensed under the MIT License. Neural Collaborative Filtering (NCF)This notebook serves as an introduction to Neural Collaborative Filtering (NCF), which is an innovative algorithm based on deep neural networks to tackle the key problem in recommendation — collaborative filtering — on the basis of implicit feedback. 0 Global Settings and Imports ###Code import sys sys.path.append("../../") import os import shutil import papermill as pm import scrapbook as sb import pandas as pd import numpy as np import tensorflow as tf from reco_utils.common.timer import Timer from reco_utils.recommender.ncf.ncf_singlenode import NCF from reco_utils.recommender.ncf.dataset import Dataset as NCFDataset from reco_utils.dataset import movielens from reco_utils.dataset.python_splitters import python_chrono_split from reco_utils.evaluation.python_evaluation import (rmse, mae, rsquared, exp_var, map_at_k, ndcg_at_k, precision_at_k, recall_at_k, get_top_k_items) from reco_utils.common.constants import SEED as DEFAULT_SEED print("System version: {}".format(sys.version)) print("Pandas version: {}".format(pd.__version__)) print("Tensorflow version: {}".format(tf.__version__)) # top k items to recommend TOP_K = 10 # Select MovieLens data size: 100k, 1m, 10m, or 20m MOVIELENS_DATA_SIZE = '100k' # Model parameters EPOCHS = 100 BATCH_SIZE = 256 SEED = DEFAULT_SEED # Set None for non-deterministic results ###Output _____no_output_____ ###Markdown 1 Matrix factorization algorithmNCF is new neural matrix factorization model, which ensembles Generalized Matrix Factorization (GMF) and Multi-Layer Perceptron (MLP) to unify the strengths of linearity of MF and non-linearity of MLP for modelling the user–item latent structures. 
NCF can be demonstrated as a framework for GMF and MLP, which is illustrated as below: This figure shows how to utilize latent vectors of items and users, and then how to fuse outputs from GMF Layer (left) and MLP Layer (right). We will introduce this framework and show how to learn the model parameters in following sections. 1.1 The GMF modelIn ALS, the ratings are modeled as follows:$$\hat { r } _ { u , i } = q _ { i } ^ { T } p _ { u }$$GMF introduces neural CF layer as the output layer of standard MF. In this way, MF can be easily generalizedand extended. For example, if we allow the edge weights of this output layer to be learnt from data without the uniform constraint, it will result in a variant of MF that allows varying importance of latent dimensions. And if we use a non-linear function for activation, it will generalize MF to a non-linear setting which might be more expressive than the linear MF model. GMF can be shown as follows:$$\hat { r } _ { u , i } = a _ { o u t } \left( h ^ { T } \left( q _ { i } \odot p _ { u } \right) \right)$$where $\odot$ is element-wise product of vectors. Additionally, ${a}_{out}$ and ${h}$ denote the activation function and edge weights of the output layer respectively. MF can be interpreted as a special case of GMF. Intuitively, if we use an identity function for aout and enforce h to be a uniform vector of 1, we can exactly recover the MF model. 1.2 The MLP modelNCF adopts two pathways to model users and items: 1) element-wise product of vectors, 2) concatenation of vectors. To learn interactions after concatenating of users and items latent features, the standard MLP model is applied. In this sense, we can endow the model a large level of flexibility and non-linearity to learn the interactions between $p_{u}$ and $q_{i}$. The details of MLP model are:For the input layer, there is concatention of user and item vectors:$$z _ { 1 } = \phi _ { 1 } \left( p _ { u } , q _ { i } \right) = \left[ \begin{array} { c } { p _ { u } } \\ { q _ { i } } \end{array} \right]$$So for the hidden layers and output layer of MLP, the details are:$$\phi _ { l } \left( z _ { l } \right) = a _ { o u t } \left( W _ { l } ^ { T } z _ { l } + b _ { l } \right) , ( l = 2,3 , \ldots , L - 1 )$$and:$$\hat { r } _ { u , i } = \sigma \left( h ^ { T } \phi \left( z _ { L - 1 } \right) \right)$$where ${ W }_{ l }$, ${ b }_{ l }$, and ${ a }_{ out }$ denote the weight matrix, bias vector, and activation function for the $l$-th layer’s perceptron, respectively. For activation functions of MLP layers, one can freely choose sigmoid, hyperbolic tangent (tanh), and Rectifier (ReLU), among others. Because of binary data task, the activation function of the output layer is defined as sigmoid $\sigma(x)=\frac{1}{1+e^{-x}}$ to restrict the predicted score to be in (0,1). 1.3 Fusion of GMF and MLPTo provide more flexibility to the fused model, we allow GMF and MLP to learn separate embeddings, and combine the two models by concatenating their last hidden layer. 
We get $\phi^{GMF}$ from GMF:$$\phi _ { u , i } ^ { G M F } = p _ { u } ^ { G M F } \odot q _ { i } ^ { G M F }$$and obtain $\phi^{MLP}$ from MLP:$$\phi _ { u , i } ^ { M L P } = a _ { o u t } \left( W _ { L } ^ { T } \left( a _ { o u t } \left( \ldots a _ { o u t } \left( W _ { 2 } ^ { T } \left[ \begin{array} { c } { p _ { u } ^ { M L P } } \\ { q _ { i } ^ { M L P } } \end{array} \right] + b _ { 2 } \right) \ldots \right) \right) + b _ { L }\right.$$Lastly, we fuse output from GMF and MLP:$$\hat { r } _ { u , i } = \sigma \left( h ^ { T } \left[ \begin{array} { l } { \phi ^ { G M F } } \\ { \phi ^ { M L P } } \end{array} \right] \right)$$This model combines the linearity of MF and non-linearity of DNNs for modelling user–item latent structures. 1.4 Objective FunctionWe define the likelihood function as:$$P \left( \mathcal { R } , \mathcal { R } ^ { - } | \mathbf { P } , \mathbf { Q } , \Theta \right) = \prod _ { ( u , i ) \in \mathcal { R } } \hat { r } _ { u , i } \prod _ { ( u , j ) \in \mathcal { R } ^{ - } } \left( 1 - \hat { r } _ { u , j } \right)$$Where $\mathcal{R}$ denotes the set of observed interactions, and $\mathcal{ R } ^ { - }$ denotes the set of negative instances. $\mathbf{P}$ and $\mathbf{Q}$ denotes the latent factor matrix for users and items, respectively; and $\Theta$ denotes the model parameters. Taking the negative logarithm of the likelihood, we obatain the objective function to minimize for NCF method, which is known as [binary cross-entropy loss](https://en.wikipedia.org/wiki/Cross_entropy):$$L = - \sum _ { ( u , i ) \in \mathcal { R } \cup { \mathcal { R } } ^ { - } } r _ { u , i } \log \hat { r } _ { u , i } + \left( 1 - r _ { u , i } \right) \log \left( 1 - \hat { r } _ { u , i } \right)$$The optimization can be done by performing Stochastic Gradient Descent (SGD), which is described in the [Surprise SVD deep dive notebook](../02_model/surprise_svd_deep_dive.ipynb). Our SGD method is very similar to the SVD algorithm's. 2 TensorFlow implementation of NCFWe will use the MovieLens dataset, which is composed of integer ratings from 1 to 5.We convert MovieLens into implicit feedback, and evaluate under our *leave-one-out* evaluation protocol.You can check the details of implementation in `reco_utils/recommender/ncf` 3 TensorFlow NCF movie recommender 3.1 Load and split dataTo evaluate the performance of item recommendation, we adopted the leave-one-out evaluation.For each user, we held out his/her latest interaction as the test set and utilized the remaining data for training. We use `python_chrono_split` to achieve this. And since it is too time-consuming to rank all items for every user during evaluation, we followed the common strategy that randomly samples 100 items that are not interacted by the user, ranking the test item among the 100 items. Our test samples will be constructed by `NCFDataset`. ###Code df = movielens.load_pandas_df( size=MOVIELENS_DATA_SIZE, header=["userID", "itemID", "rating", "timestamp"] ) df.head() train, test = python_chrono_split(df, 0.75) ###Output _____no_output_____ ###Markdown 3.2 Functions of NCF Dataset Dataset Class for NCF, where important functions are:`negative_sampling()`, sample negative user & item pair for every positive instances, with parameter `n_neg`.`train_loader(batch_size, shuffle=True)`, generate training batch with `batch_size`, also we can set whether `shuffle` this training set.`test_loader()`, generate test batch by every positive test instance, (eg. 
\[1, 2, 1\] is a positive user & item pair in test set (\[userID, itemID, rating\] for this tuple). This function returns like \[\[1, 2, 1\], \[1, 3, 0\], \[1,6, 0\], ...\], ie. following our *leave-one-out* evaluation protocol. ###Code data = NCFDataset(train=train, test=test, seed=SEED) ###Output _____no_output_____ ###Markdown 3.3 Train NCF based on TensorFlowThe NCF has a lot of parameters. The most important ones are:`n_factors`, which controls the dimension of the latent space. Usually, the quality of the training set predictions grows with as n_factors gets higher.`layer_sizes`, sizes of input layer (and hidden layers) of MLP, input type is list.`n_epochs`, which defines the number of iteration of the SGD procedure.Note that both parameter also affect the training time.`model_type`, we can train single `"MLP"`, `"GMF"` or combined model `"NCF"` by changing the type of model.We will here set `n_factors` to `4`, `layer_sizes` to `[16,8,4]`, `n_epochs` to `100`, `batch_size` to 256. To train the model, we simply need to call the `fit()` method. ###Code model = NCF ( n_users=data.n_users, n_items=data.n_items, model_type="NeuMF", n_factors=4, layer_sizes=[16,8,4], n_epochs=EPOCHS, batch_size=BATCH_SIZE, learning_rate=1e-3, verbose=10, seed=SEED ) with Timer() as train_time: model.fit(data) print("Took {} seconds for training.".format(train_time.interval)) ###Output Took 663.2377220259996 seconds for training. ###Markdown 3.4 Prediction and Evaluation 3.4.1 PredictionNow that our model is fitted, we can call `predict` to get some `predictions`. `predict` returns an internal object Prediction which can be easily converted back to a dataframe: ###Code predictions = [[row.userID, row.itemID, model.predict(row.userID, row.itemID)] for (_, row) in test.iterrows()] predictions = pd.DataFrame(predictions, columns=['userID', 'itemID', 'prediction']) predictions.head() ###Output _____no_output_____ ###Markdown 3.4.2 Generic EvaluationWe remove rated movies in the top k recommendationsTo compute ranking metrics, we need predictions on all user, item pairs. We remove though the items already watched by the user, since we choose not to recommend them again. 
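The removal in the cell below is done with an outer merge followed by a null check on `rating`, which acts as an anti-join against the training set. On a toy frame (hypothetical IDs, not the MovieLens data) the filtering step works like this: ###Code
import pandas as pd

toy_train = pd.DataFrame({"userID": [1, 1], "itemID": [10, 11], "rating": [4.0, 3.0]})
toy_preds = pd.DataFrame({"userID": [1, 1, 1], "itemID": [10, 11, 12], "prediction": [0.9, 0.8, 0.7]})

merged = pd.merge(toy_train, toy_preds, on=["userID", "itemID"], how="outer")
unseen = merged[merged.rating.isnull()].drop("rating", axis=1)
print(unseen)   # only the pair (1, 12) survives: items 10 and 11 were already rated in training
###Output
_____no_output_____
###Markdown
The cell below applies exactly this pattern to the full prediction set.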
###Code with Timer() as test_time: users, items, preds = [], [], [] item = list(train.itemID.unique()) for user in train.userID.unique(): user = [user] * len(item) users.extend(user) items.extend(item) preds.extend(list(model.predict(user, item, is_list=True))) all_predictions = pd.DataFrame(data={"userID": users, "itemID":items, "prediction":preds}) merged = pd.merge(train, all_predictions, on=["userID", "itemID"], how="outer") all_predictions = merged[merged.rating.isnull()].drop('rating', axis=1) print("Took {} seconds for prediction.".format(test_time.interval)) eval_map = map_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K) eval_ndcg = ndcg_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K) eval_precision = precision_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K) eval_recall = recall_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K) print("MAP:\t%f" % eval_map, "NDCG:\t%f" % eval_ndcg, "Precision@K:\t%f" % eval_precision, "Recall@K:\t%f" % eval_recall, sep='\n') ###Output MAP: 0.046273 NDCG: 0.190750 Precision@K: 0.173277 Recall@K: 0.096688 ###Markdown 3.4.3 "Leave-one-out" EvaluationWe implement the functions to repoduce the leave-one-out evaluation protocol mentioned in original NCF paper.For each item in test data, we randomly samples 100 items that are not interacted by the user, ranking the test item among the 101 items (1 positive item and 100 negative items). The performance of a ranked list is judged by **Hit Ratio (HR)** and **Normalized Discounted Cumulative Gain (NDCG)**. Finally, we average the values of those ranked lists to obtain the overall HR and NDCG on test data.We truncated the ranked list at 10 for both metrics. As such, the HR intuitively measures whether the test item is present on the top-10 list, and the NDCG accounts for the position of the hit by assigning higher scores to hits at top ranks.**Note 1:** In exact leave-one-out evaluation protocol, we select only one of the latest items interacted with a user as test data for each user. But in this notebook, to compare with other algorithms, we select latest 25% dataset as test data. So this is an artificial "leave-one-out" evaluation only showing how to use `test_loader` and how to calculate metrics like the original paper. You can reproduce the real leave-one-out evaluation by changing the way of splitting data.**Note 2:** Because of sampling 100 negative items for each positive test item, ###Code k = TOP_K ndcgs = [] hit_ratio = [] for b in data.test_loader(): user_input, item_input, labels = b output = model.predict(user_input, item_input, is_list=True) output = np.squeeze(output) rank = sum(output >= output[0]) if rank <= k: ndcgs.append(1 / np.log(rank + 1)) hit_ratio.append(1) else: ndcgs.append(0) hit_ratio.append(0) eval_ndcg = np.mean(ndcgs) eval_hr = np.mean(hit_ratio) print("HR:\t%f" % eval_hr) print("NDCG:\t%f" % eval_ndcg) ###Output HR: 0.488564 NDCG: 0.383339 ###Markdown 3.5 Pre-trainingTo get better performance of NeuMF, we can adopt pre-training strategy. We first train GMF and MLP with random initializations until convergence. Then use their model parameters as the initialization for the corresponding parts of NeuMF’s parameters. 
Please pay attention to the output layer, where we concatenate weights of the two models with$$h ^ { N C F } \leftarrow \left[ \begin{array} { c } { \alpha h ^ { G M F } } \\ { ( 1 - \alpha ) h ^ { M L P } } \end{array} \right]$$where $h^{GMF}$ and $h^{MLP}$ denote the $h$ vector of the pretrained GMF and MLP model, respectively; and $\alpha$ is ahyper-parameter determining the trade-off between the two pre-trained models. We set $\alpha$ = 0.5. 3.5.1 Training GMF and MLP model`model.save`, we can set the `dir_name` to store the parameters of GMF and MLP ###Code model = NCF ( n_users=data.n_users, n_items=data.n_items, model_type="GMF", n_factors=4, layer_sizes=[16,8,4], n_epochs=EPOCHS, batch_size=BATCH_SIZE, learning_rate=1e-3, verbose=10, seed=SEED ) with Timer() as train_time: model.fit(data) print("Took {} seconds for training.".format(train_time.interval)) model.save(dir_name=".pretrain/GMF") model = NCF ( n_users=data.n_users, n_items=data.n_items, model_type="MLP", n_factors=4, layer_sizes=[16,8,4], n_epochs=EPOCHS, batch_size=BATCH_SIZE, learning_rate=1e-3, verbose=10, seed=SEED ) with Timer() as train_time: model.fit(data) print("Took {} seconds for training.".format(train_time.interval)) model.save(dir_name=".pretrain/MLP") ###Output Took 566.8783325639997 seconds for training. ###Markdown 3.5.2 Load pre-trained GMF and MLP model for NeuMF`model.load`, we can set the `gmf_dir` and `mlp_dir` to store the parameters for NeuMF. ###Code model = NCF ( n_users=data.n_users, n_items=data.n_items, model_type="NeuMF", n_factors=4, layer_sizes=[16,8,4], n_epochs=EPOCHS, batch_size=BATCH_SIZE, learning_rate=1e-3, verbose=10, seed=SEED ) model.load(gmf_dir=".pretrain/GMF", mlp_dir=".pretrain/MLP", alpha=0.5) with Timer() as train_time: model.fit(data) print("Took {} seconds for training.".format(train_time.interval)) ###Output Took 655.1110815689999 seconds for training. ###Markdown 3.5.3 Compare with not pre-trained NeuMFYou can use beforementioned evaluation methods to evaluate the pre-trained `NCF` Model. Usually, we will find the performance of pre-trained NCF is better than the not pre-trained. 
###Code with Timer() as test_time: users, items, preds = [], [], [] item = list(train.itemID.unique()) for user in train.userID.unique(): user = [user] * len(item) users.extend(user) items.extend(item) preds.extend(list(model.predict(user, item, is_list=True))) all_predictions = pd.DataFrame(data={"userID": users, "itemID":items, "prediction":preds}) merged = pd.merge(train, all_predictions, on=["userID", "itemID"], how="outer") all_predictions = merged[merged.rating.isnull()].drop('rating', axis=1) print("Took {} seconds for prediction.".format(test_time.interval)) eval_map2 = map_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K) eval_ndcg2 = ndcg_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K) eval_precision2 = precision_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K) eval_recall2 = recall_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K) print("MAP:\t%f" % eval_map2, "NDCG:\t%f" % eval_ndcg2, "Precision@K:\t%f" % eval_precision2, "Recall@K:\t%f" % eval_recall2, sep='\n') # Record results with papermill for tests sb.glue("map", eval_map) sb.glue("ndcg", eval_ndcg) sb.glue("precision", eval_precision) sb.glue("recall", eval_recall) sb.glue("map2", eval_map2) sb.glue("ndcg2", eval_ndcg2) sb.glue("precision2", eval_precision2) sb.glue("recall2", eval_recall2) ###Output _____no_output_____ ###Markdown 3.5.4 Delete pre-trained directory ###Code save_dir = ".pretrain" if os.path.exists(save_dir): shutil.rmtree(save_dir) print("Did \'%s\' exist?: %s" % (save_dir, os.path.exists(save_dir))) ###Output Did '.pretrain' exist?: False ###Markdown Copyright (c) Microsoft Corporation. All rights reserved.Licensed under the MIT License. Neural Collaborative Filtering (NCF)This notebook serves as an introduction to Neural Collaborative Filtering (NCF), which is an innovative algorithm based on deep neural networks to tackle the key problem in recommendation — collaborative filtering — on the basis of implicit feedback. 0 Global Settings and Imports ###Code import sys import os import shutil import papermill as pm import scrapbook as sb import pandas as pd import numpy as np import tensorflow as tf tf.get_logger().setLevel('ERROR') # only show error messages from recommenders.utils.timer import Timer from recommenders.models.ncf.ncf_singlenode import NCF from recommenders.models.ncf.dataset import Dataset as NCFDataset from recommenders.datasets import movielens from recommenders.datasets.python_splitters import python_chrono_split from recommenders.evaluation.python_evaluation import (rmse, mae, rsquared, exp_var, map_at_k, ndcg_at_k, precision_at_k, recall_at_k, get_top_k_items) from recommenders.utils.constants import SEED as DEFAULT_SEED print("System version: {}".format(sys.version)) print("Pandas version: {}".format(pd.__version__)) print("Tensorflow version: {}".format(tf.__version__)) # top k items to recommend TOP_K = 10 # Select MovieLens data size: 100k, 1m, 10m, or 20m MOVIELENS_DATA_SIZE = '100k' # Model parameters EPOCHS = 100 BATCH_SIZE = 256 SEED = DEFAULT_SEED # Set None for non-deterministic results ###Output _____no_output_____ ###Markdown 1 Matrix factorization algorithmNCF is new neural matrix factorization model, which ensembles Generalized Matrix Factorization (GMF) and Multi-Layer Perceptron (MLP) to unify the strengths of linearity of MF and non-linearity of MLP for modelling the user–item latent structures. 
NCF can be demonstrated as a framework for GMF and MLP, which is illustrated as below: This figure shows how to utilize latent vectors of items and users, and then how to fuse outputs from GMF Layer (left) and MLP Layer (right). We will introduce this framework and show how to learn the model parameters in following sections. 1.1 The GMF modelIn ALS, the ratings are modeled as follows:$$\hat { r } _ { u , i } = q _ { i } ^ { T } p _ { u }$$GMF introduces neural CF layer as the output layer of standard MF. In this way, MF can be easily generalizedand extended. For example, if we allow the edge weights of this output layer to be learnt from data without the uniform constraint, it will result in a variant of MF that allows varying importance of latent dimensions. And if we use a non-linear function for activation, it will generalize MF to a non-linear setting which might be more expressive than the linear MF model. GMF can be shown as follows:$$\hat { r } _ { u , i } = a _ { o u t } \left( h ^ { T } \left( q _ { i } \odot p _ { u } \right) \right)$$where $\odot$ is element-wise product of vectors. Additionally, ${a}_{out}$ and ${h}$ denote the activation function and edge weights of the output layer respectively. MF can be interpreted as a special case of GMF. Intuitively, if we use an identity function for aout and enforce h to be a uniform vector of 1, we can exactly recover the MF model. 1.2 The MLP modelNCF adopts two pathways to model users and items: 1) element-wise product of vectors, 2) concatenation of vectors. To learn interactions after concatenating of users and items latent features, the standard MLP model is applied. In this sense, we can endow the model a large level of flexibility and non-linearity to learn the interactions between $p_{u}$ and $q_{i}$. The details of MLP model are:For the input layer, there is concatention of user and item vectors:$$z _ { 1 } = \phi _ { 1 } \left( p _ { u } , q _ { i } \right) = \left[ \begin{array} { c } { p _ { u } } \\ { q _ { i } } \end{array} \right]$$So for the hidden layers and output layer of MLP, the details are:$$\phi _ { l } \left( z _ { l } \right) = a _ { o u t } \left( W _ { l } ^ { T } z _ { l } + b _ { l } \right) , ( l = 2,3 , \ldots , L - 1 )$$and:$$\hat { r } _ { u , i } = \sigma \left( h ^ { T } \phi \left( z _ { L - 1 } \right) \right)$$where ${ W }_{ l }$, ${ b }_{ l }$, and ${ a }_{ out }$ denote the weight matrix, bias vector, and activation function for the $l$-th layer’s perceptron, respectively. For activation functions of MLP layers, one can freely choose sigmoid, hyperbolic tangent (tanh), and Rectifier (ReLU), among others. Because of binary data task, the activation function of the output layer is defined as sigmoid $\sigma(x)=\frac{1}{1+e^{-x}}$ to restrict the predicted score to be in (0,1). 1.3 Fusion of GMF and MLPTo provide more flexibility to the fused model, we allow GMF and MLP to learn separate embeddings, and combine the two models by concatenating their last hidden layer. 
We get $\phi^{GMF}$ from GMF:$$\phi _ { u , i } ^ { G M F } = p _ { u } ^ { G M F } \odot q _ { i } ^ { G M F }$$and obtain $\phi^{MLP}$ from MLP:$$\phi _ { u , i } ^ { M L P } = a _ { o u t } \left( W _ { L } ^ { T } \left( a _ { o u t } \left( \ldots a _ { o u t } \left( W _ { 2 } ^ { T } \left[ \begin{array} { c } { p _ { u } ^ { M L P } } \\ { q _ { i } ^ { M L P } } \end{array} \right] + b _ { 2 } \right) \ldots \right) \right) + b _ { L }\right.$$Lastly, we fuse output from GMF and MLP:$$\hat { r } _ { u , i } = \sigma \left( h ^ { T } \left[ \begin{array} { l } { \phi ^ { G M F } } \\ { \phi ^ { M L P } } \end{array} \right] \right)$$This model combines the linearity of MF and non-linearity of DNNs for modelling user–item latent structures. 1.4 Objective FunctionWe define the likelihood function as:$$P \left( \mathcal { R } , \mathcal { R } ^ { - } | \mathbf { P } , \mathbf { Q } , \Theta \right) = \prod _ { ( u , i ) \in \mathcal { R } } \hat { r } _ { u , i } \prod _ { ( u , j ) \in \mathcal { R } ^{ - } } \left( 1 - \hat { r } _ { u , j } \right)$$Where $\mathcal{R}$ denotes the set of observed interactions, and $\mathcal{ R } ^ { - }$ denotes the set of negative instances. $\mathbf{P}$ and $\mathbf{Q}$ denotes the latent factor matrix for users and items, respectively; and $\Theta$ denotes the model parameters. Taking the negative logarithm of the likelihood, we obatain the objective function to minimize for NCF method, which is known as [binary cross-entropy loss](https://en.wikipedia.org/wiki/Cross_entropy):$$L = - \sum _ { ( u , i ) \in \mathcal { R } \cup { \mathcal { R } } ^ { - } } r _ { u , i } \log \hat { r } _ { u , i } + \left( 1 - r _ { u , i } \right) \log \left( 1 - \hat { r } _ { u , i } \right)$$The optimization can be done by performing Stochastic Gradient Descent (SGD), which is described in the [Surprise SVD deep dive notebook](../02_model/surprise_svd_deep_dive.ipynb). Our SGD method is very similar to the SVD algorithm's. 2 TensorFlow implementation of NCFWe will use the MovieLens dataset, which is composed of integer ratings from 1 to 5.We convert MovieLens into implicit feedback, and evaluate under our *leave-one-out* evaluation protocol.You can check the details of implementation in `recommenders/models/ncf` 3 TensorFlow NCF movie recommender 3.1 Load and split dataTo evaluate the performance of item recommendation, we adopted the leave-one-out evaluation.For each user, we held out his/her latest interaction as the test set and utilized the remaining data for training. We use `python_chrono_split` to achieve this. And since it is too time-consuming to rank all items for every user during evaluation, we followed the common strategy that randomly samples 100 items that are not interacted by the user, ranking the test item among the 100 items. Our test samples will be constructed by `NCFDataset`. ###Code df = movielens.load_pandas_df( size=MOVIELENS_DATA_SIZE, header=["userID", "itemID", "rating", "timestamp"] ) df.head() train, test = python_chrono_split(df, 0.75) ###Output _____no_output_____ ###Markdown 3.2 Functions of NCF Dataset Dataset Class for NCF, where important functions are:`negative_sampling()`, sample negative user & item pair for every positive instances, with parameter `n_neg`.`train_loader(batch_size, shuffle=True)`, generate training batch with `batch_size`, also we can set whether `shuffle` this training set.`test_loader()`, generate test batch by every positive test instance, (eg. 
\[1, 2, 1\] is a positive user & item pair in test set (\[userID, itemID, rating\] for this tuple). This function returns like \[\[1, 2, 1\], \[1, 3, 0\], \[1,6, 0\], ...\], ie. following our *leave-one-out* evaluation protocol. ###Code data = NCFDataset(train=train, test=test, seed=SEED) ###Output _____no_output_____ ###Markdown 3.3 Train NCF based on TensorFlowThe NCF has a lot of parameters. The most important ones are:`n_factors`, which controls the dimension of the latent space. Usually, the quality of the training set predictions grows with as n_factors gets higher.`layer_sizes`, sizes of input layer (and hidden layers) of MLP, input type is list.`n_epochs`, which defines the number of iteration of the SGD procedure.Note that both parameter also affect the training time.`model_type`, we can train single `"MLP"`, `"GMF"` or combined model `"NCF"` by changing the type of model.We will here set `n_factors` to `4`, `layer_sizes` to `[16,8,4]`, `n_epochs` to `100`, `batch_size` to 256. To train the model, we simply need to call the `fit()` method. ###Code model = NCF ( n_users=data.n_users, n_items=data.n_items, model_type="NeuMF", n_factors=4, layer_sizes=[16,8,4], n_epochs=EPOCHS, batch_size=BATCH_SIZE, learning_rate=1e-3, verbose=10, seed=SEED ) with Timer() as train_time: model.fit(data) print("Took {} seconds for training.".format(train_time.interval)) ###Output Took 663.2377220259996 seconds for training. ###Markdown 3.4 Prediction and Evaluation 3.4.1 PredictionNow that our model is fitted, we can call `predict` to get some `predictions`. `predict` returns an internal object Prediction which can be easily converted back to a dataframe: ###Code predictions = [[row.userID, row.itemID, model.predict(row.userID, row.itemID)] for (_, row) in test.iterrows()] predictions = pd.DataFrame(predictions, columns=['userID', 'itemID', 'prediction']) predictions.head() ###Output _____no_output_____ ###Markdown Copyright (c) Microsoft Corporation. All rights reserved.Licensed under the MIT License. Neural Collaborative Filtering (NCF)This notebook serves as an introduction to Neural Collaborative Filtering (NCF), which is an innovative algorithm based on deep neural networks to tackle the key problem in recommendation — collaborative filtering — on the basis of implicit feedback. 
0 Global Settings and Imports ###Code import sys import os import shutil import papermill as pm import scrapbook as sb import pandas as pd import numpy as np import tensorflow as tf tf.get_logger().setLevel('ERROR') # only show error messages from recommenders.utils.timer import Timer from recommenders.models.ncf.ncf_singlenode import NCF from recommenders.models.ncf.dataset import Dataset as NCFDataset from recommenders.datasets import movielens from recommenders.datasets.python_splitters import python_chrono_split from recommenders.evaluation.python_evaluation import (rmse, mae, rsquared, exp_var, map_at_k, ndcg_at_k, precision_at_k, recall_at_k, get_top_k_items) from recommenders.utils.constants import SEED as DEFAULT_SEED print("System version: {}".format(sys.version)) print("Pandas version: {}".format(pd.__version__)) print("Tensorflow version: {}".format(tf.__version__)) # top k items to recommend TOP_K = 10 # Select MovieLens data size: 100k, 1m, 10m, or 20m MOVIELENS_DATA_SIZE = '100k' # Model parameters EPOCHS = 100 BATCH_SIZE = 256 SEED = DEFAULT_SEED # Set None for non-deterministic results ###Output _____no_output_____ ###Markdown 1 Matrix factorization algorithmNCF is new neural matrix factorization model, which ensembles Generalized Matrix Factorization (GMF) and Multi-Layer Perceptron (MLP) to unify the strengths of linearity of MF and non-linearity of MLP for modelling the user–item latent structures. NCF can be demonstrated as a framework for GMF and MLP, which is illustrated as below: This figure shows how to utilize latent vectors of items and users, and then how to fuse outputs from GMF Layer (left) and MLP Layer (right). We will introduce this framework and show how to learn the model parameters in following sections. 1.1 The GMF modelIn ALS, the ratings are modeled as follows:$$\hat { r } _ { u , i } = q _ { i } ^ { T } p _ { u }$$GMF introduces neural CF layer as the output layer of standard MF. In this way, MF can be easily generalizedand extended. For example, if we allow the edge weights of this output layer to be learnt from data without the uniform constraint, it will result in a variant of MF that allows varying importance of latent dimensions. And if we use a non-linear function for activation, it will generalize MF to a non-linear setting which might be more expressive than the linear MF model. GMF can be shown as follows:$$\hat { r } _ { u , i } = a _ { o u t } \left( h ^ { T } \left( q _ { i } \odot p _ { u } \right) \right)$$where $\odot$ is element-wise product of vectors. Additionally, ${a}_{out}$ and ${h}$ denote the activation function and edge weights of the output layer respectively. MF can be interpreted as a special case of GMF. Intuitively, if we use an identity function for aout and enforce h to be a uniform vector of 1, we can exactly recover the MF model. 1.2 The MLP modelNCF adopts two pathways to model users and items: 1) element-wise product of vectors, 2) concatenation of vectors. To learn interactions after concatenating of users and items latent features, the standard MLP model is applied. In this sense, we can endow the model a large level of flexibility and non-linearity to learn the interactions between $p_{u}$ and $q_{i}$. 
The details of MLP model are:For the input layer, there is concatention of user and item vectors:$$z _ { 1 } = \phi _ { 1 } \left( p _ { u } , q _ { i } \right) = \left[ \begin{array} { c } { p _ { u } } \\ { q _ { i } } \end{array} \right]$$So for the hidden layers and output layer of MLP, the details are:$$\phi _ { l } \left( z _ { l } \right) = a _ { o u t } \left( W _ { l } ^ { T } z _ { l } + b _ { l } \right) , ( l = 2,3 , \ldots , L - 1 )$$and:$$\hat { r } _ { u , i } = \sigma \left( h ^ { T } \phi \left( z _ { L - 1 } \right) \right)$$where ${ W }_{ l }$, ${ b }_{ l }$, and ${ a }_{ out }$ denote the weight matrix, bias vector, and activation function for the $l$-th layer’s perceptron, respectively. For activation functions of MLP layers, one can freely choose sigmoid, hyperbolic tangent (tanh), and Rectifier (ReLU), among others. Because of binary data task, the activation function of the output layer is defined as sigmoid $\sigma(x)=\frac{1}{1+e^{-x}}$ to restrict the predicted score to be in (0,1). 1.3 Fusion of GMF and MLPTo provide more flexibility to the fused model, we allow GMF and MLP to learn separate embeddings, and combine the two models by concatenating their last hidden layer. We get $\phi^{GMF}$ from GMF:$$\phi _ { u , i } ^ { G M F } = p _ { u } ^ { G M F } \odot q _ { i } ^ { G M F }$$and obtain $\phi^{MLP}$ from MLP:$$\phi _ { u , i } ^ { M L P } = a _ { o u t } \left( W _ { L } ^ { T } \left( a _ { o u t } \left( \ldots a _ { o u t } \left( W _ { 2 } ^ { T } \left[ \begin{array} { c } { p _ { u } ^ { M L P } } \\ { q _ { i } ^ { M L P } } \end{array} \right] + b _ { 2 } \right) \ldots \right) \right) + b _ { L }\right.$$Lastly, we fuse output from GMF and MLP:$$\hat { r } _ { u , i } = \sigma \left( h ^ { T } \left[ \begin{array} { l } { \phi ^ { G M F } } \\ { \phi ^ { M L P } } \end{array} \right] \right)$$This model combines the linearity of MF and non-linearity of DNNs for modelling user–item latent structures. 1.4 Objective FunctionWe define the likelihood function as:$$P \left( \mathcal { R } , \mathcal { R } ^ { - } | \mathbf { P } , \mathbf { Q } , \Theta \right) = \prod _ { ( u , i ) \in \mathcal { R } } \hat { r } _ { u , i } \prod _ { ( u , j ) \in \mathcal { R } ^{ - } } \left( 1 - \hat { r } _ { u , j } \right)$$Where $\mathcal{R}$ denotes the set of observed interactions, and $\mathcal{ R } ^ { - }$ denotes the set of negative instances. $\mathbf{P}$ and $\mathbf{Q}$ denotes the latent factor matrix for users and items, respectively; and $\Theta$ denotes the model parameters. Taking the negative logarithm of the likelihood, we obatain the objective function to minimize for NCF method, which is known as [binary cross-entropy loss](https://en.wikipedia.org/wiki/Cross_entropy):$$L = - \sum _ { ( u , i ) \in \mathcal { R } \cup { \mathcal { R } } ^ { - } } r _ { u , i } \log \hat { r } _ { u , i } + \left( 1 - r _ { u , i } \right) \log \left( 1 - \hat { r } _ { u , i } \right)$$The optimization can be done by performing Stochastic Gradient Descent (SGD), which is described in the [Surprise SVD deep dive notebook](../02_model/surprise_svd_deep_dive.ipynb). Our SGD method is very similar to the SVD algorithm's. 
2 TensorFlow implementation of NCFWe will use the MovieLens dataset, which is composed of integer ratings from 1 to 5.We convert MovieLens into implicit feedback, and evaluate under our *leave-one-out* evaluation protocol.You can check the details of implementation in `recommenders/models/ncf` 3 TensorFlow NCF movie recommender 3.1 Load and split dataTo evaluate the performance of item recommendation, we adopted the leave-one-out evaluation.For each user, we held out his/her latest interaction as the test set and utilized the remaining data for training. We use `python_chrono_split` to achieve this. And since it is too time-consuming to rank all items for every user during evaluation, we followed the common strategy that randomly samples 100 items that are not interacted by the user, ranking the test item among the 100 items. Our test samples will be constructed by `NCFDataset`. ###Code df = movielens.load_pandas_df( size=MOVIELENS_DATA_SIZE, header=["userID", "itemID", "rating", "timestamp"] ) df.head() train, test = python_chrono_split(df, 0.75) ###Output _____no_output_____ ###Markdown 3.2 Functions of NCF Dataset Dataset Class for NCF, where important functions are:`negative_sampling()`, sample negative user & item pair for every positive instances, with parameter `n_neg`.`train_loader(batch_size, shuffle=True)`, generate training batch with `batch_size`, also we can set whether `shuffle` this training set.`test_loader()`, generate test batch by every positive test instance, (eg. \[1, 2, 1\] is a positive user & item pair in test set (\[userID, itemID, rating\] for this tuple). This function returns like \[\[1, 2, 1\], \[1, 3, 0\], \[1,6, 0\], ...\], ie. following our *leave-one-out* evaluation protocol. ###Code data = NCFDataset(train=train, test=test, seed=SEED) ###Output _____no_output_____ ###Markdown 3.3 Train NCF based on TensorFlowThe NCF has a lot of parameters. The most important ones are:`n_factors`, which controls the dimension of the latent space. Usually, the quality of the training set predictions grows with as n_factors gets higher.`layer_sizes`, sizes of input layer (and hidden layers) of MLP, input type is list.`n_epochs`, which defines the number of iteration of the SGD procedure.Note that both parameter also affect the training time.`model_type`, we can train single `"MLP"`, `"GMF"` or combined model `"NCF"` by changing the type of model.We will here set `n_factors` to `4`, `layer_sizes` to `[16,8,4]`, `n_epochs` to `100`, `batch_size` to 256. To train the model, we simply need to call the `fit()` method. ###Code model = NCF ( n_users=data.n_users, n_items=data.n_items, model_type="NeuMF", n_factors=4, layer_sizes=[16,8,4], n_epochs=EPOCHS, batch_size=BATCH_SIZE, learning_rate=1e-3, verbose=10, seed=SEED ) with Timer() as train_time: model.fit(data) print("Took {} seconds for training.".format(train_time.interval)) ###Output Took 663.2377220259996 seconds for training. ###Markdown 3.4 Prediction and Evaluation 3.4.1 PredictionNow that our model is fitted, we can call `predict` to get some `predictions`. 
`predict` returns an internal object Prediction which can be easily converted back to a dataframe: ###Code predictions = [[row.userID, row.itemID, model.predict(row.userID, row.itemID)] for (_, row) in test.iterrows()] predictions = pd.DataFrame(predictions, columns=['userID', 'itemID', 'prediction']) predictions.head() ###Output _____no_output_____ ###Markdown 3.4.2 Generic EvaluationWe remove rated movies in the top k recommendationsTo compute ranking metrics, we need predictions on all user, item pairs. We remove though the items already watched by the user, since we choose not to recommend them again. ###Code with Timer() as test_time: users, items, preds = [], [], [] item = list(train.itemID.unique()) for user in train.userID.unique(): user = [user] * len(item) users.extend(user) items.extend(item) preds.extend(list(model.predict(user, item, is_list=True))) all_predictions = pd.DataFrame(data={"userID": users, "itemID":items, "prediction":preds}) merged = pd.merge(train, all_predictions, on=["userID", "itemID"], how="outer") all_predictions = merged[merged.rating.isnull()].drop('rating', axis=1) print("Took {} seconds for prediction.".format(test_time.interval)) eval_map = map_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K) eval_ndcg = ndcg_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K) eval_precision = precision_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K) eval_recall = recall_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K) print("MAP:\t%f" % eval_map, "NDCG:\t%f" % eval_ndcg, "Precision@K:\t%f" % eval_precision, "Recall@K:\t%f" % eval_recall, sep='\n') ###Output MAP: 0.046273 NDCG: 0.190750 Precision@K: 0.173277 Recall@K: 0.096688 ###Markdown 3.4.3 "Leave-one-out" EvaluationWe implement the functions to repoduce the leave-one-out evaluation protocol mentioned in original NCF paper.For each item in test data, we randomly samples 100 items that are not interacted by the user, ranking the test item among the 101 items (1 positive item and 100 negative items). The performance of a ranked list is judged by **Hit Ratio (HR)** and **Normalized Discounted Cumulative Gain (NDCG)**. Finally, we average the values of those ranked lists to obtain the overall HR and NDCG on test data.We truncated the ranked list at 10 for both metrics. As such, the HR intuitively measures whether the test item is present on the top-10 list, and the NDCG accounts for the position of the hit by assigning higher scores to hits at top ranks.**Note 1:** In exact leave-one-out evaluation protocol, we select only one of the latest items interacted with a user as test data for each user. But in this notebook, to compare with other algorithms, we select latest 25% dataset as test data. So this is an artificial "leave-one-out" evaluation only showing how to use `test_loader` and how to calculate metrics like the original paper. 
You can reproduce the real leave-one-out evaluation by changing the way of splitting data.**Note 2:** Because of sampling 100 negative items for each positive test item, ###Code k = TOP_K ndcgs = [] hit_ratio = [] for b in data.test_loader(): user_input, item_input, labels = b output = model.predict(user_input, item_input, is_list=True) output = np.squeeze(output) rank = sum(output >= output[0]) if rank <= k: ndcgs.append(1 / np.log(rank + 1)) hit_ratio.append(1) else: ndcgs.append(0) hit_ratio.append(0) eval_ndcg = np.mean(ndcgs) eval_hr = np.mean(hit_ratio) print("HR:\t%f" % eval_hr) print("NDCG:\t%f" % eval_ndcg) ###Output HR: 0.488564 NDCG: 0.383339 ###Markdown 3.5 Pre-trainingTo get better performance of NeuMF, we can adopt pre-training strategy. We first train GMF and MLP with random initializations until convergence. Then use their model parameters as the initialization for the corresponding parts of NeuMF’s parameters. Please pay attention to the output layer, where we concatenate weights of the two models with$$h ^ { N C F } \leftarrow \left[ \begin{array} { c } { \alpha h ^ { G M F } } \\ { ( 1 - \alpha ) h ^ { M L P } } \end{array} \right]$$where $h^{GMF}$ and $h^{MLP}$ denote the $h$ vector of the pretrained GMF and MLP model, respectively; and $\alpha$ is ahyper-parameter determining the trade-off between the two pre-trained models. We set $\alpha$ = 0.5. 3.5.1 Training GMF and MLP model`model.save`, we can set the `dir_name` to store the parameters of GMF and MLP ###Code model = NCF ( n_users=data.n_users, n_items=data.n_items, model_type="GMF", n_factors=4, layer_sizes=[16,8,4], n_epochs=EPOCHS, batch_size=BATCH_SIZE, learning_rate=1e-3, verbose=10, seed=SEED ) with Timer() as train_time: model.fit(data) print("Took {} seconds for training.".format(train_time.interval)) model.save(dir_name=".pretrain/GMF") model = NCF ( n_users=data.n_users, n_items=data.n_items, model_type="MLP", n_factors=4, layer_sizes=[16,8,4], n_epochs=EPOCHS, batch_size=BATCH_SIZE, learning_rate=1e-3, verbose=10, seed=SEED ) with Timer() as train_time: model.fit(data) print("Took {} seconds for training.".format(train_time.interval)) model.save(dir_name=".pretrain/MLP") ###Output Took 566.8783325639997 seconds for training. ###Markdown 3.5.2 Load pre-trained GMF and MLP model for NeuMF`model.load`, we can set the `gmf_dir` and `mlp_dir` to store the parameters for NeuMF. ###Code model = NCF ( n_users=data.n_users, n_items=data.n_items, model_type="NeuMF", n_factors=4, layer_sizes=[16,8,4], n_epochs=EPOCHS, batch_size=BATCH_SIZE, learning_rate=1e-3, verbose=10, seed=SEED ) model.load(gmf_dir=".pretrain/GMF", mlp_dir=".pretrain/MLP", alpha=0.5) with Timer() as train_time: model.fit(data) print("Took {} seconds for training.".format(train_time.interval)) ###Output Took 655.1110815689999 seconds for training. ###Markdown 3.5.3 Compare with not pre-trained NeuMFYou can use beforementioned evaluation methods to evaluate the pre-trained `NCF` Model. Usually, we will find the performance of pre-trained NCF is better than the not pre-trained. 
###Code with Timer() as test_time: users, items, preds = [], [], [] item = list(train.itemID.unique()) for user in train.userID.unique(): user = [user] * len(item) users.extend(user) items.extend(item) preds.extend(list(model.predict(user, item, is_list=True))) all_predictions = pd.DataFrame(data={"userID": users, "itemID":items, "prediction":preds}) merged = pd.merge(train, all_predictions, on=["userID", "itemID"], how="outer") all_predictions = merged[merged.rating.isnull()].drop('rating', axis=1) print("Took {} seconds for prediction.".format(test_time.interval)) eval_map2 = map_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K) eval_ndcg2 = ndcg_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K) eval_precision2 = precision_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K) eval_recall2 = recall_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K) print("MAP:\t%f" % eval_map2, "NDCG:\t%f" % eval_ndcg2, "Precision@K:\t%f" % eval_precision2, "Recall@K:\t%f" % eval_recall2, sep='\n') # Record results with papermill for tests sb.glue("map", eval_map) sb.glue("ndcg", eval_ndcg) sb.glue("precision", eval_precision) sb.glue("recall", eval_recall) sb.glue("map2", eval_map2) sb.glue("ndcg2", eval_ndcg2) sb.glue("precision2", eval_precision2) sb.glue("recall2", eval_recall2) ###Output _____no_output_____ ###Markdown 3.5.4 Delete pre-trained directory ###Code save_dir = ".pretrain" if os.path.exists(save_dir): shutil.rmtree(save_dir) print("Did \'%s\' exist?: %s" % (save_dir, os.path.exists(save_dir))) ###Output Did '.pretrain' exist?: False ###Markdown Copyright (c) Microsoft Corporation. All rights reserved.Licensed under the MIT License. Neural Collaborative Filtering (NCF)This notebook serves as an introduction to Neural Collaborative Filtering (NCF), which is an innovative algorithm based on deep neural networks to tackle the key problem in recommendation — collaborative filtering — on the basis of implicit feedback. 0 Global Settings and Imports ###Code import sys sys.path.append("../../") import os import shutil import papermill as pm import scrapbook as sb import pandas as pd import numpy as np import tensorflow as tf tf.get_logger().setLevel('ERROR') # only show error messages from reco_utils.common.timer import Timer from reco_utils.recommender.ncf.ncf_singlenode import NCF from reco_utils.recommender.ncf.dataset import Dataset as NCFDataset from reco_utils.dataset import movielens from reco_utils.dataset.python_splitters import python_chrono_split from reco_utils.evaluation.python_evaluation import (rmse, mae, rsquared, exp_var, map_at_k, ndcg_at_k, precision_at_k, recall_at_k, get_top_k_items) from reco_utils.common.constants import SEED as DEFAULT_SEED print("System version: {}".format(sys.version)) print("Pandas version: {}".format(pd.__version__)) print("Tensorflow version: {}".format(tf.__version__)) # top k items to recommend TOP_K = 10 # Select MovieLens data size: 100k, 1m, 10m, or 20m MOVIELENS_DATA_SIZE = '100k' # Model parameters EPOCHS = 100 BATCH_SIZE = 256 SEED = DEFAULT_SEED # Set None for non-deterministic results ###Output _____no_output_____ ###Markdown 1 Matrix factorization algorithmNCF is new neural matrix factorization model, which ensembles Generalized Matrix Factorization (GMF) and Multi-Layer Perceptron (MLP) to unify the strengths of linearity of MF and non-linearity of MLP for modelling the user–item latent structures. 
NCF can be demonstrated as a framework for GMF and MLP, which is illustrated as below: This figure shows how to utilize latent vectors of items and users, and then how to fuse outputs from GMF Layer (left) and MLP Layer (right). We will introduce this framework and show how to learn the model parameters in following sections. 1.1 The GMF modelIn ALS, the ratings are modeled as follows:$$\hat { r } _ { u , i } = q _ { i } ^ { T } p _ { u }$$GMF introduces neural CF layer as the output layer of standard MF. In this way, MF can be easily generalizedand extended. For example, if we allow the edge weights of this output layer to be learnt from data without the uniform constraint, it will result in a variant of MF that allows varying importance of latent dimensions. And if we use a non-linear function for activation, it will generalize MF to a non-linear setting which might be more expressive than the linear MF model. GMF can be shown as follows:$$\hat { r } _ { u , i } = a _ { o u t } \left( h ^ { T } \left( q _ { i } \odot p _ { u } \right) \right)$$where $\odot$ is element-wise product of vectors. Additionally, ${a}_{out}$ and ${h}$ denote the activation function and edge weights of the output layer respectively. MF can be interpreted as a special case of GMF. Intuitively, if we use an identity function for aout and enforce h to be a uniform vector of 1, we can exactly recover the MF model. 1.2 The MLP modelNCF adopts two pathways to model users and items: 1) element-wise product of vectors, 2) concatenation of vectors. To learn interactions after concatenating of users and items latent features, the standard MLP model is applied. In this sense, we can endow the model a large level of flexibility and non-linearity to learn the interactions between $p_{u}$ and $q_{i}$. The details of MLP model are:For the input layer, there is concatention of user and item vectors:$$z _ { 1 } = \phi _ { 1 } \left( p _ { u } , q _ { i } \right) = \left[ \begin{array} { c } { p _ { u } } \\ { q _ { i } } \end{array} \right]$$So for the hidden layers and output layer of MLP, the details are:$$\phi _ { l } \left( z _ { l } \right) = a _ { o u t } \left( W _ { l } ^ { T } z _ { l } + b _ { l } \right) , ( l = 2,3 , \ldots , L - 1 )$$and:$$\hat { r } _ { u , i } = \sigma \left( h ^ { T } \phi \left( z _ { L - 1 } \right) \right)$$where ${ W }_{ l }$, ${ b }_{ l }$, and ${ a }_{ out }$ denote the weight matrix, bias vector, and activation function for the $l$-th layer’s perceptron, respectively. For activation functions of MLP layers, one can freely choose sigmoid, hyperbolic tangent (tanh), and Rectifier (ReLU), among others. Because of binary data task, the activation function of the output layer is defined as sigmoid $\sigma(x)=\frac{1}{1+e^{-x}}$ to restrict the predicted score to be in (0,1). 1.3 Fusion of GMF and MLPTo provide more flexibility to the fused model, we allow GMF and MLP to learn separate embeddings, and combine the two models by concatenating their last hidden layer. 
We get $\phi^{GMF}$ from GMF:$$\phi _ { u , i } ^ { G M F } = p _ { u } ^ { G M F } \odot q _ { i } ^ { G M F }$$and obtain $\phi^{MLP}$ from MLP:$$\phi _ { u , i } ^ { M L P } = a _ { o u t } \left( W _ { L } ^ { T } \left( a _ { o u t } \left( \ldots a _ { o u t } \left( W _ { 2 } ^ { T } \left[ \begin{array} { c } { p _ { u } ^ { M L P } } \\ { q _ { i } ^ { M L P } } \end{array} \right] + b _ { 2 } \right) \ldots \right) \right) + b _ { L }\right.$$Lastly, we fuse output from GMF and MLP:$$\hat { r } _ { u , i } = \sigma \left( h ^ { T } \left[ \begin{array} { l } { \phi ^ { G M F } } \\ { \phi ^ { M L P } } \end{array} \right] \right)$$This model combines the linearity of MF and non-linearity of DNNs for modelling user–item latent structures. 1.4 Objective FunctionWe define the likelihood function as:$$P \left( \mathcal { R } , \mathcal { R } ^ { - } | \mathbf { P } , \mathbf { Q } , \Theta \right) = \prod _ { ( u , i ) \in \mathcal { R } } \hat { r } _ { u , i } \prod _ { ( u , j ) \in \mathcal { R } ^{ - } } \left( 1 - \hat { r } _ { u , j } \right)$$Where $\mathcal{R}$ denotes the set of observed interactions, and $\mathcal{ R } ^ { - }$ denotes the set of negative instances. $\mathbf{P}$ and $\mathbf{Q}$ denotes the latent factor matrix for users and items, respectively; and $\Theta$ denotes the model parameters. Taking the negative logarithm of the likelihood, we obatain the objective function to minimize for NCF method, which is known as [binary cross-entropy loss](https://en.wikipedia.org/wiki/Cross_entropy):$$L = - \sum _ { ( u , i ) \in \mathcal { R } \cup { \mathcal { R } } ^ { - } } r _ { u , i } \log \hat { r } _ { u , i } + \left( 1 - r _ { u , i } \right) \log \left( 1 - \hat { r } _ { u , i } \right)$$The optimization can be done by performing Stochastic Gradient Descent (SGD), which is described in the [Surprise SVD deep dive notebook](../02_model/surprise_svd_deep_dive.ipynb). Our SGD method is very similar to the SVD algorithm's. 2 TensorFlow implementation of NCFWe will use the MovieLens dataset, which is composed of integer ratings from 1 to 5.We convert MovieLens into implicit feedback, and evaluate under our *leave-one-out* evaluation protocol.You can check the details of implementation in `reco_utils/recommender/ncf` 3 TensorFlow NCF movie recommender 3.1 Load and split dataTo evaluate the performance of item recommendation, we adopted the leave-one-out evaluation.For each user, we held out his/her latest interaction as the test set and utilized the remaining data for training. We use `python_chrono_split` to achieve this. And since it is too time-consuming to rank all items for every user during evaluation, we followed the common strategy that randomly samples 100 items that are not interacted by the user, ranking the test item among the 100 items. Our test samples will be constructed by `NCFDataset`. ###Code df = movielens.load_pandas_df( size=MOVIELENS_DATA_SIZE, header=["userID", "itemID", "rating", "timestamp"] ) df.head() train, test = python_chrono_split(df, 0.75) ###Output _____no_output_____ ###Markdown 3.2 Functions of NCF Dataset Dataset Class for NCF, where important functions are:`negative_sampling()`, sample negative user & item pair for every positive instances, with parameter `n_neg`.`train_loader(batch_size, shuffle=True)`, generate training batch with `batch_size`, also we can set whether `shuffle` this training set.`test_loader()`, generate test batch by every positive test instance, (eg. 
\[1, 2, 1\] is a positive user & item pair in test set (\[userID, itemID, rating\] for this tuple). This function returns like \[\[1, 2, 1\], \[1, 3, 0\], \[1,6, 0\], ...\], ie. following our *leave-one-out* evaluation protocol. ###Code data = NCFDataset(train=train, test=test, seed=SEED) ###Output _____no_output_____ ###Markdown 3.3 Train NCF based on TensorFlowThe NCF has a lot of parameters. The most important ones are:`n_factors`, which controls the dimension of the latent space. Usually, the quality of the training set predictions grows with as n_factors gets higher.`layer_sizes`, sizes of input layer (and hidden layers) of MLP, input type is list.`n_epochs`, which defines the number of iteration of the SGD procedure.Note that both parameter also affect the training time.`model_type`, we can train single `"MLP"`, `"GMF"` or combined model `"NCF"` by changing the type of model.We will here set `n_factors` to `4`, `layer_sizes` to `[16,8,4]`, `n_epochs` to `100`, `batch_size` to 256. To train the model, we simply need to call the `fit()` method. ###Code model = NCF ( n_users=data.n_users, n_items=data.n_items, model_type="NeuMF", n_factors=4, layer_sizes=[16,8,4], n_epochs=EPOCHS, batch_size=BATCH_SIZE, learning_rate=1e-3, verbose=10, seed=SEED ) with Timer() as train_time: model.fit(data) print("Took {} seconds for training.".format(train_time.interval)) ###Output Took 663.2377220259996 seconds for training. ###Markdown 3.4 Prediction and Evaluation 3.4.1 PredictionNow that our model is fitted, we can call `predict` to get some `predictions`. `predict` returns an internal object Prediction which can be easily converted back to a dataframe: ###Code predictions = [[row.userID, row.itemID, model.predict(row.userID, row.itemID)] for (_, row) in test.iterrows()] predictions = pd.DataFrame(predictions, columns=['userID', 'itemID', 'prediction']) predictions.head() ###Output _____no_output_____ ###Markdown 3.4.2 Generic EvaluationWe remove rated movies in the top k recommendationsTo compute ranking metrics, we need predictions on all user, item pairs. We remove though the items already watched by the user, since we choose not to recommend them again. 
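###Markdown Before running the full evaluation cell below, it can help to see the seen-item filtering on its own. The following is a minimal sketch on made-up toy frames (`train_toy` and `scored` are illustrative names, not part of the original notebook); the outer-merge-then-`isnull` pattern is the same one applied to `train` and `all_predictions` in the next cell. ###Code
import pandas as pd

# Toy data: what the user has already rated, and model scores for all candidate items
train_toy = pd.DataFrame({"userID": [1, 1], "itemID": [10, 20], "rating": [5, 4]})
scored = pd.DataFrame({"userID": [1, 1, 1], "itemID": [10, 20, 30], "prediction": [0.9, 0.8, 0.7]})

# Outer merge, then keep only rows with no rating, i.e. items the user has not interacted with
merged = pd.merge(train_toy, scored, on=["userID", "itemID"], how="outer")
unseen = merged[merged.rating.isnull()].drop("rating", axis=1)
print(unseen)  # only itemID 30 remains
###Output _____no_output_____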
###Code with Timer() as test_time: users, items, preds = [], [], [] item = list(train.itemID.unique()) for user in train.userID.unique(): user = [user] * len(item) users.extend(user) items.extend(item) preds.extend(list(model.predict(user, item, is_list=True))) all_predictions = pd.DataFrame(data={"userID": users, "itemID":items, "prediction":preds}) merged = pd.merge(train, all_predictions, on=["userID", "itemID"], how="outer") all_predictions = merged[merged.rating.isnull()].drop('rating', axis=1) print("Took {} seconds for prediction.".format(test_time.interval)) eval_map = map_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K) eval_ndcg = ndcg_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K) eval_precision = precision_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K) eval_recall = recall_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K) print("MAP:\t%f" % eval_map, "NDCG:\t%f" % eval_ndcg, "Precision@K:\t%f" % eval_precision, "Recall@K:\t%f" % eval_recall, sep='\n') ###Output MAP: 0.046273 NDCG: 0.190750 Precision@K: 0.173277 Recall@K: 0.096688 ###Markdown 3.4.3 "Leave-one-out" EvaluationWe implement the functions to repoduce the leave-one-out evaluation protocol mentioned in original NCF paper.For each item in test data, we randomly samples 100 items that are not interacted by the user, ranking the test item among the 101 items (1 positive item and 100 negative items). The performance of a ranked list is judged by **Hit Ratio (HR)** and **Normalized Discounted Cumulative Gain (NDCG)**. Finally, we average the values of those ranked lists to obtain the overall HR and NDCG on test data.We truncated the ranked list at 10 for both metrics. As such, the HR intuitively measures whether the test item is present on the top-10 list, and the NDCG accounts for the position of the hit by assigning higher scores to hits at top ranks.**Note 1:** In exact leave-one-out evaluation protocol, we select only one of the latest items interacted with a user as test data for each user. But in this notebook, to compare with other algorithms, we select latest 25% dataset as test data. So this is an artificial "leave-one-out" evaluation only showing how to use `test_loader` and how to calculate metrics like the original paper. You can reproduce the real leave-one-out evaluation by changing the way of splitting data.**Note 2:** Because of sampling 100 negative items for each positive test item, ###Code k = TOP_K ndcgs = [] hit_ratio = [] for b in data.test_loader(): user_input, item_input, labels = b output = model.predict(user_input, item_input, is_list=True) output = np.squeeze(output) rank = sum(output >= output[0]) if rank <= k: ndcgs.append(1 / np.log(rank + 1)) hit_ratio.append(1) else: ndcgs.append(0) hit_ratio.append(0) eval_ndcg = np.mean(ndcgs) eval_hr = np.mean(hit_ratio) print("HR:\t%f" % eval_hr) print("NDCG:\t%f" % eval_ndcg) ###Output HR: 0.488564 NDCG: 0.383339 ###Markdown 3.5 Pre-trainingTo get better performance of NeuMF, we can adopt pre-training strategy. We first train GMF and MLP with random initializations until convergence. Then use their model parameters as the initialization for the corresponding parts of NeuMF’s parameters. 
Please pay attention to the output layer, where we concatenate weights of the two models with$$h ^ { N C F } \leftarrow \left[ \begin{array} { c } { \alpha h ^ { G M F } } \\ { ( 1 - \alpha ) h ^ { M L P } } \end{array} \right]$$where $h^{GMF}$ and $h^{MLP}$ denote the $h$ vector of the pretrained GMF and MLP model, respectively; and $\alpha$ is ahyper-parameter determining the trade-off between the two pre-trained models. We set $\alpha$ = 0.5. 3.5.1 Training GMF and MLP model`model.save`, we can set the `dir_name` to store the parameters of GMF and MLP ###Code model = NCF ( n_users=data.n_users, n_items=data.n_items, model_type="GMF", n_factors=4, layer_sizes=[16,8,4], n_epochs=EPOCHS, batch_size=BATCH_SIZE, learning_rate=1e-3, verbose=10, seed=SEED ) with Timer() as train_time: model.fit(data) print("Took {} seconds for training.".format(train_time.interval)) model.save(dir_name=".pretrain/GMF") model = NCF ( n_users=data.n_users, n_items=data.n_items, model_type="MLP", n_factors=4, layer_sizes=[16,8,4], n_epochs=EPOCHS, batch_size=BATCH_SIZE, learning_rate=1e-3, verbose=10, seed=SEED ) with Timer() as train_time: model.fit(data) print("Took {} seconds for training.".format(train_time.interval)) model.save(dir_name=".pretrain/MLP") ###Output Took 566.8783325639997 seconds for training. ###Markdown 3.5.2 Load pre-trained GMF and MLP model for NeuMF`model.load`, we can set the `gmf_dir` and `mlp_dir` to store the parameters for NeuMF. ###Code model = NCF ( n_users=data.n_users, n_items=data.n_items, model_type="NeuMF", n_factors=4, layer_sizes=[16,8,4], n_epochs=EPOCHS, batch_size=BATCH_SIZE, learning_rate=1e-3, verbose=10, seed=SEED ) model.load(gmf_dir=".pretrain/GMF", mlp_dir=".pretrain/MLP", alpha=0.5) with Timer() as train_time: model.fit(data) print("Took {} seconds for training.".format(train_time.interval)) ###Output Took 655.1110815689999 seconds for training. ###Markdown 3.5.3 Compare with not pre-trained NeuMFYou can use beforementioned evaluation methods to evaluate the pre-trained `NCF` Model. Usually, we will find the performance of pre-trained NCF is better than the not pre-trained. 
###Code with Timer() as test_time: users, items, preds = [], [], [] item = list(train.itemID.unique()) for user in train.userID.unique(): user = [user] * len(item) users.extend(user) items.extend(item) preds.extend(list(model.predict(user, item, is_list=True))) all_predictions = pd.DataFrame(data={"userID": users, "itemID":items, "prediction":preds}) merged = pd.merge(train, all_predictions, on=["userID", "itemID"], how="outer") all_predictions = merged[merged.rating.isnull()].drop('rating', axis=1) print("Took {} seconds for prediction.".format(test_time.interval)) eval_map2 = map_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K) eval_ndcg2 = ndcg_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K) eval_precision2 = precision_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K) eval_recall2 = recall_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K) print("MAP:\t%f" % eval_map2, "NDCG:\t%f" % eval_ndcg2, "Precision@K:\t%f" % eval_precision2, "Recall@K:\t%f" % eval_recall2, sep='\n') # Record results with papermill for tests sb.glue("map", eval_map) sb.glue("ndcg", eval_ndcg) sb.glue("precision", eval_precision) sb.glue("recall", eval_recall) sb.glue("map2", eval_map2) sb.glue("ndcg2", eval_ndcg2) sb.glue("precision2", eval_precision2) sb.glue("recall2", eval_recall2) ###Output _____no_output_____ ###Markdown 3.5.4 Delete pre-trained directory ###Code save_dir = ".pretrain" if os.path.exists(save_dir): shutil.rmtree(save_dir) print("Did \'%s\' exist?: %s" % (save_dir, os.path.exists(save_dir))) ###Output Did '.pretrain' exist?: False
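###Markdown As a closing illustration, here is a minimal NumPy sketch of the GMF/MLP fusion described in section 1.3. All dimensions, weights and activations below are toy assumptions chosen for readability; they are not the parameters of the `NCF` class used above. ###Code
import numpy as np

np.random.seed(42)

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

# Toy embeddings for a single (user, item) pair
p_gmf, q_gmf = np.random.randn(4), np.random.randn(4)   # GMF user / item factors
p_mlp, q_mlp = np.random.randn(8), np.random.randn(8)   # MLP user / item factors

# GMF pathway: element-wise product of user and item factors
phi_gmf = p_gmf * q_gmf

# MLP pathway: concatenate the factors, then two small ReLU layers (16 -> 8 -> 4)
z = np.concatenate([p_mlp, q_mlp])
W1, b1 = np.random.randn(16, 8), np.zeros(8)
W2, b2 = np.random.randn(8, 4), np.zeros(4)
phi_mlp = np.maximum(0, np.maximum(0, z @ W1 + b1) @ W2 + b2)

# NeuMF fusion: concatenate both pathways, apply the edge weights h and a sigmoid
h = np.random.randn(phi_gmf.size + phi_mlp.size)
r_hat = sigmoid(h @ np.concatenate([phi_gmf, phi_mlp]))
print(r_hat)  # a predicted interaction score in (0, 1)
###Output _____no_output_____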
frameworks/tensorflow/Onnx_Tensorflow.ipynb
###Markdown Referecne * API Tutorial: https://github.com/onnx/tutorials* Convert TensorFlow models to ONNX: https://github.com/onnx/tensorflow-onnx* Tensorflow Backend and Frontend for ONNX: https://github.com/onnx/onnx-tensorflow Onnx Preparation ###Code !pip install onnx-tf !pip install tf2onnx import onnx import onnx_tf import tf2onnx import os import sys import tensorflow as tf import cv2 import numpy as np import json import codecs from collections import OrderedDict import matplotlib.pyplot as plt from PIL import Image !pip list | grep 'onnx' !pip list | grep 'onnx-tf' !pip list | grep 'tf2onnx' !pip list | grep 'tensorflow' !pip list | grep 'opencv-python' !pip list | grep 'numpy' !pip list | grep 'matplotlib' ###Output onnx 1.3.0 onnx-tf 1.2.0 tf2onnx 0.3.1 You are using pip version 18.0, however version 18.1 is available. You should consider upgrading via the 'pip install --upgrade pip' command. onnx-tf 1.2.0 You are using pip version 18.0, however version 18.1 is available. You should consider upgrading via the 'pip install --upgrade pip' command. tf2onnx 0.3.1 You are using pip version 18.0, however version 18.1 is available. You should consider upgrading via the 'pip install --upgrade pip' command. tensorflow 1.11.0 You are using pip version 18.0, however version 18.1 is available. You should consider upgrading via the 'pip install --upgrade pip' command. opencv-python 3.4.3.18 You are using pip version 18.0, however version 18.1 is available. You should consider upgrading via the 'pip install --upgrade pip' command. numpy 1.15.2 You are using pip version 18.0, however version 18.1 is available. You should consider upgrading via the 'pip install --upgrade pip' command. matplotlib 3.0.0 You are using pip version 18.0, however version 18.1 is available. You should consider upgrading via the 'pip install --upgrade pip' command. ###Markdown TensorflowHere we use a classification model as the example (https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_1.0_224_frozen.tgz). About how to generate a frozen model, please refer to `freeze_graph.py`. ```shpython freeze_graph.py \ --input_graph output/graph.pbtxt \ --input_checkpoint ./output-16640 \ --output_graph /tmp/out \ --output_node_names ``` ###Code frozen_model_path = '/notebooks/Google_Drive_Devops_Sync/sophia/tmp/mobilenet_v1_1.0_224/frozen_graph.pb' label_path = '/notebooks/Google_Drive_Devops_Sync/sophia/tmp/mobilenet_v1_1.0_224/labels.txt' assert os.path.exists(frozen_model_path), "Tensorflow frozen model does not exist." assert os.path.exists(label_path), "Label file does not exist." ###Output _____no_output_____ ###Markdown Load the graph ###Code detection_graph = tf.Graph() with detection_graph.as_default(): od_graph_def = tf.GraphDef() with tf.gfile.GFile(frozen_model_path, 'rb') as fid: serialized_graph = fid.read() od_graph_def.ParseFromString(serialized_graph) tf.import_graph_def(od_graph_def, name='') ###Output _____no_output_____ ###Markdown Show all operations ###Code def show_operation_names(graph, count=10): with graph.as_default(): with tf.Session() as sess: opts = tf.get_default_graph().get_operations() for opt in opts[:count]: for output in opt.outputs: print(output.name) print("...") for opt in opts[-count:]: for output in opt.outputs: print(output.name) show_operation_names(detection_graph, 5) ###Output input:0 MobilenetV1/Conv2d_0/weights:0 MobilenetV1/Conv2d_0/weights/read:0 MobilenetV1/MobilenetV1/Conv2d_0/convolution:0 MobilenetV1/Conv2d_0/BatchNorm/beta:0 ... 
MobilenetV1/Predictions/Reshape/shape:0 MobilenetV1/Predictions/Reshape:0 MobilenetV1/Predictions/Softmax:0 MobilenetV1/Predictions/Shape:0 MobilenetV1/Predictions/Reshape_1:0 ###Markdown Load Label ###Code def loadLabel(labelPath): tmp = [] labels = OrderedDict() with codecs.open(labelPath,"r","utf-8") as fin: for line in fin: tmp = line.strip().split(':') labels[tmp[0]] = tmp[1] return labels labels = loadLabel(label_path) [(str(label) + ":" + labels[str(label)]) for label in range(10)] ###Output _____no_output_____ ###Markdown Inference ###Code def inference_single_image(image, graph): with graph.as_default(): with tf.Session() as sess: # handle input and output tensor opts = tf.get_default_graph().get_operations() all_tensorflow_names = { output.name for opt in opts for output in opt.outputs } tensor_dict = {} for key in ['MobilenetV1/Predictions/Reshape_1']: tensor_name = key + ':0' if tensor_name in all_tensorflow_names: tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(tensor_name) # run for single image # input image_tensor = tf.get_default_graph().get_tensor_by_name('input:0') # inference output_dict = sess.run(tensor_dict, feed_dict={image_tensor: np.expand_dims(image, 0)}) # convert data type float32 to appropriate output_dict['MobilenetV1/Predictions/Reshape_1'] = output_dict['MobilenetV1/Predictions/Reshape_1'] return output_dict def single_image(imagePath): image_path = imagePath if not os.path.exists(image_path): raise FileNotFoundError("{} not found.".format(image_path)) image = cv2.imread(image_path) image = cv2.resize(image, (224, 224), interpolation=cv2.INTER_CUBIC) image = image[:,:,::-1] plt.imshow(image) output_dict = inference_single_image(image, detection_graph) return output_dict #image_path = '/Users/jiankaiwang/devops/Fruit_Recognition/eval/qnap_fruit_val_00003.JPEG' image_path = '/notebooks/Google_Drive_Devops_Sync/sophia/tmp/mobilenet_v1_1.0_224/test.jpg' output_dict = single_image(image_path) print(output_dict) cls_idx = int(np.argmax(output_dict['MobilenetV1/Predictions/Reshape_1'], axis=1)) print(cls_idx, output_dict['MobilenetV1/Predictions/Reshape_1'][0][cls_idx], labels[str(cls_idx)]) r, c = np.where(output_dict['MobilenetV1/Predictions/Reshape_1'] > 0.05) for idx in range(len(r)): print(output_dict['MobilenetV1/Predictions/Reshape_1'][r[idx]][c[idx]], labels[str(c[idx])]) ###Output 0.16483758 digital clock 0.27690268 fire screen, fireguard 0.39241496 shower curtain ###Markdown Tensorflow to Onnx ###Code from onnx_tf.frontend import tensorflow_graph_to_onnx_model with tf.gfile.GFile(frozen_model_path, "rb") as f: graph_def = tf.GraphDef() graph_def.ParseFromString(f.read()) # ignore_unimplemented: onnx did't implementation the whole tensorflow operations onnx_model = tensorflow_graph_to_onnx_model(graph_def, \ "MobilenetV1/Predictions/Softmax", \ ignore_unimplemented=True, \ opset=0) with open("/notebooks/Google_Drive_Devops_Sync/sophia/tmp/mobilenet_v1_1.0_224/mobilenet.onnx", "wb") as fout: fout.write(onnx_model.SerializeToString()) ###Output /usr/local/lib/python3.5/dist-packages/onnx_tf/handlers/handler.py:35: UserWarning: Pack doesn't have ONNX_OP. Please use Handler.onnx_op decorator to register ONNX_OP. cls.__name__)) /usr/local/lib/python3.5/dist-packages/onnx_tf/common/handler_helper.py:38: UserWarning: Unknown op Undefined in domain `ai.onnx`. Can't check specification by ONNX. Please set should_check flag to False when call make_node method in handler. 
"ai.onnx")) /usr/local/lib/python3.5/dist-packages/onnx_tf/handlers/handler.py:35: UserWarning: Unpack doesn't have ONNX_OP. Please use Handler.onnx_op decorator to register ONNX_OP. cls.__name__)) /usr/local/lib/python3.5/dist-packages/onnx_tf/common/exception.py:15: UserWarning: Rsqrt is not implemented. self._func(self.get_message(*args, **kwargs)) /usr/local/lib/python3.5/dist-packages/onnx_tf/handlers/frontend_handler.py:106: UserWarning: Skipped check for Rsqrt. warnings.warn("Skipped check for {}.".format(node.op_type)) /usr/local/lib/python3.5/dist-packages/onnx_tf/common/exception.py:15: UserWarning: Relu6 is not implemented. self._func(self.get_message(*args, **kwargs)) /usr/local/lib/python3.5/dist-packages/onnx_tf/handlers/frontend_handler.py:106: UserWarning: Skipped check for Relu6. warnings.warn("Skipped check for {}.".format(node.op_type)) /usr/local/lib/python3.5/dist-packages/onnx_tf/common/exception.py:15: UserWarning: DepthwiseConv2dNative is not implemented. self._func(self.get_message(*args, **kwargs)) /usr/local/lib/python3.5/dist-packages/onnx_tf/handlers/frontend_handler.py:106: UserWarning: Skipped check for DepthwiseConv2dNative. warnings.warn("Skipped check for {}.".format(node.op_type)) ###Markdown In the above warning message, opts `Rsqrt`, `Relu6`, `DepthwiseConv2dNative`, ... are not implemented by onnx so that the onnx model would loss the corresponding operations even if it is transformed. In onnx community, they don't plan to implement all of them because of too many operations in tensorflow. Inference from ONNX model Here we use the onnx official model downloaded from https://github.com/onnx/models/tree/master/models/image_classification/mobilenet. Convert it and infer a image. ###Code from onnx_tf.backend import prepare onnx_model_path = '/notebooks/Google_Drive_Devops_Sync/sophia/tmp/mobilenetv2-1.0/mobilenetv2-1.0.onnx' assert os.path.exists(onnx_model_path), "ONNX model is not found." model = onnx.load(onnx_model_path) # Load the ONNX file tf_rep = prepare(model) # Import the ONNX model to Tensorflow ###Output /usr/local/lib/python3.5/dist-packages/onnx_tf/common/handler_helper.py:74: UserWarning: Unknown op ConstantLike in domain `ai.onnx`. handler.ONNX_OP, handler.DOMAIN or "ai.onnx")) /usr/local/lib/python3.5/dist-packages/onnx_tf/common/handler_helper.py:71: UserWarning: Fail to get since_version of Expand in domain `` with max_inclusive_version=7. Set to 1. handler.ONNX_OP, handler.DOMAIN, version)) ###Markdown Show all operations ###Code print(tf_rep.inputs) # Input nodes to the model print('-----') print(tf_rep.outputs) # Output nodes from the model print('-----') print(tf_rep.tensor_dict) # All nodes in the model from IPython.display import display test_img = "/notebooks/Google_Drive_Devops_Sync/sophia/tmp/mobilenetv2-1.0/test.png" assert os.path.exists(test_img), "Test image is not found." 
img = Image.open(test_img).resize((224, 224)) display(img) img = np.asarray(img) img = np.swapaxes(img, 1, 2) img = np.swapaxes(img, 0, 1) img = img.reshape(1, 3, 224, 224) # in order to meet the requirement of input's shape print(img.shape) output_cls = tf_rep.run(img) print(output_cls["mobilenetv20_output_flatten0_reshape0"].shape) output_cls_idx = np.argmax(output_cls["mobilenetv20_output_flatten0_reshape0"], axis=1) print(output_cls_idx) print(output_cls["mobilenetv20_output_flatten0_reshape0"][0][output_cls_idx]) ###Output [470] [185.9536] ###Markdown Onnx to Tensorflow ###Code import onnx from onnx_tf.backend import prepare def onnx2pb(onnx_input_path, pb_output_path): onnx_model = onnx.load(onnx_input_path) # load onnx model tf_exp = prepare(onnx_model) # prepare tf representation tf_exp.export_graph(pb_output_path) # export the model onnx_input_path = '/notebooks/Google_Drive_Devops_Sync/sophia/tmp/mobilenetv2-1.0/mobilenetv2-1.0.onnx' pb_output_path = '/notebooks/Google_Drive_Devops_Sync/sophia/tmp/mobilenetv2-1.0/mobilenetv2-1.0.pb' onnx2pb(onnx_input_path, pb_output_path) ###Output /usr/local/lib/python3.5/dist-packages/onnx_tf/common/handler_helper.py:74: UserWarning: Unknown op ConstantLike in domain `ai.onnx`. handler.ONNX_OP, handler.DOMAIN or "ai.onnx")) /usr/local/lib/python3.5/dist-packages/onnx_tf/common/handler_helper.py:71: UserWarning: Fail to get since_version of Expand in domain `` with max_inclusive_version=7. Set to 1. handler.ONNX_OP, handler.DOMAIN, version)) ###Markdown Test PB model from onnx ###Code onnx_pb_graph = tf.Graph() with onnx_pb_graph.as_default(): onnx_pb_graph_def = tf.GraphDef() with tf.gfile.GFile(pb_output_path, 'rb') as fid: serialized_graph = fid.read() onnx_pb_graph_def.ParseFromString(serialized_graph) tf.import_graph_def(onnx_pb_graph_def, name='') show_operation_names(onnx_pb_graph, 5) def general_inference_single_image(image, graph, input_layer="input:0", output_layer=['MobilenetV1/Predictions/Reshape_1']): with graph.as_default(): with tf.Session() as sess: # handle input and output tensor opts = tf.get_default_graph().get_operations() all_tensorflow_names = { output.name for opt in opts for output in opt.outputs } tensor_dict = {} for key in output_layer: tensor_name = key + ':0' if tensor_name in all_tensorflow_names: tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(tensor_name) assert len(tensor_dict.keys()) > 0, "No output layer is found." 
# run for single image # input image_tensor = tf.get_default_graph().get_tensor_by_name(input_layer) # inference output_dict = sess.run(tensor_dict, feed_dict={image_tensor: np.expand_dims(image, 0)}) return output_dict def single_image(imagePath, used_graph, input_layer, output_layer): image_path = imagePath if not os.path.exists(image_path): raise FileNotFoundError("{} not found.".format(image_path)) image = cv2.imread(image_path) image = cv2.resize(image, (224, 224), interpolation=cv2.INTER_CUBIC) image = image[:,:,::-1] plt.imshow(image) print(image.shape) image = np.swapaxes(image, 1, 2) image = np.swapaxes(image, 0, 1) image = image.reshape(3, 224, 224) print(image.shape) output_dict = general_inference_single_image(image, used_graph, input_layer, output_layer) return output_dict onnx_pb_output_dict = single_image(test_img, onnx_pb_graph, "data:0", ["mobilenetv20_output_flatten0_reshape0"]) print(onnx_pb_output_dict["mobilenetv20_output_flatten0_reshape0"].shape) onnx_pb_output_dict_idx = np.argmax(onnx_pb_output_dict["mobilenetv20_output_flatten0_reshape0"], axis=1) print(onnx_pb_output_dict_idx) print(onnx_pb_output_dict["mobilenetv20_output_flatten0_reshape0"][0][onnx_pb_output_dict_idx]) ###Output (1, 1000) [470] [185.9536]
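###Markdown A sanity check that may be useful after either conversion direction: validate the ONNX graph and print it in a readable form. This sketch simply reuses the MobileNetV2 path from the cells above; point `onnx_model_path` at whichever `.onnx` file you want to inspect. ###Code
import onnx
from onnx import checker, helper

onnx_model_path = '/notebooks/Google_Drive_Devops_Sync/sophia/tmp/mobilenetv2-1.0/mobilenetv2-1.0.onnx'

model = onnx.load(onnx_model_path)
checker.check_model(model)                   # raises if the graph is structurally invalid
print(helper.printable_graph(model.graph))   # readable dump of nodes, inputs and outputs
###Output _____no_output_____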
P_menindee_lakes.ipynb
###Markdown Curlew Sandpipers at the Menindee lakes
The Menindee Lakes is a chain of shallow ephemeral freshwater lakes connected to the Darling River to form a storage system. The lakes lie in the far west region of New South Wales, Australia, near the town of Menindee. In the past few years, the Darling River System along with the Menindee Lakes have been severely impacted by extreme drought conditions. This has resulted in particularly low and prolonged storage inflows, lower storage releases, and high evaporation rates. [Source: MDBA](https://www.mdba.gov.au/river-murray-system/running-river-murray/menindee-lakes-facts)
The Menindee Lakes are an integral breeding ground for the hundreds of bird, fish and reptile species in the Murray-Darling Basin, one of the richest ecologies in the country. Lake Menindee has completely dried up several times in the past few years. (Source: ABC News) Your task:
You are an ecologist working for Wildaroo, a not-for-profit organisation that protects endangered species across Australia. You have been tasked with drafting an action plan to protect the Curlew Sandpipers, a species of birds commonly found in the Menindee lakes. The number of birds spotted has been declining in recent years due to the lack of water in the region. Wildaroo wants to start an intervention to increase the numbers of Sandpipers in the area.
Your mission is to identify the areas that are most affected by drought and decide the best locations where your organisation should deploy birdfeeders to support the existing population. The ideal place should be close to wet areas, which is the natural habitat of Curlew Sandpipers. Load packages
You start by loading the usual Python libraries to start working on this project. ###Code %matplotlib inline

import datacube
import matplotlib.pyplot as plt
import numpy as np
import xarray as xr ###Output _____no_output_____ ###Markdown Load data
You request Water Observations from Space (WOfS) data for the first 6 months of the year. ###Code dc_menindee = datacube.Datacube(app="Menindee_Lakes")

query = {'lat': (-32.55, -32.25),
         'lon': (142.15, 142.45),
         'time':('2019-01-01', '2019-06-01')}

menindee_19 = dc_menindee.load(product='wofs_albers', **query)
menindee_19 ###Output _____no_output_____ ###Markdown Interpreting WOfS
You want to understand what values are contained in this dataset, so you use `unique`, a numpy function that returns the unique values contained in an array: ###Code np.unique(menindee_19.water.data) ###Output _____no_output_____ ###Markdown There is a special function in DEA that displays the interpretation of the mask bitflags in a collection, so you import the required library from the Datacube. ###Code from datacube.utils import masking

masking.describe_variable_flags(menindee_19, with_pandas=True) ###Output _____no_output_____ ###Markdown Creating the mask and computing the median water content
Wet areas are represented using the value `128`, as opposed to dry areas, which are represented with the value `0`. You compute and plot the median water content over the first 6 months. 
###Code m2019 = menindee_19.water.where(menindee_19.water == 128).median(dim='time')
m2019.plot() ###Output _____no_output_____ ###Markdown Replicate for 2001 ###Code query = {'lat': (-32.55, -32.25),
         'lon': (142.15, 142.45),
         'time':('2001-01-01', '2001-06-01')}

menindee_01 = dc_menindee.load(product='wofs_albers', **query)
menindee_01
m2001 = menindee_01.water.where(menindee_01.water == 128).median(dim='time')
m2001.plot() ###Output _____no_output_____ ###Markdown Representing changes in water using a Semaphore plot
You want to create a plot to represent changes in the water between the years 2001 and 2019. The plot maps the intersection of the water areas of both years with the following mapping:
* If a pixel contains water in both years -> Blue
* If a pixel had water before but doesn't have it now -> Red (water lost)
* If a pixel didn't have water but has water now -> Green (new water)
* If a pixel never had water -> Transparent (RGB supports a 4th channel called Alpha for transparency)
_Hint: This is an example of what a semaphore plot looks like for the years 2016-2019 at this location:_ Coding the semaphore plot
To create this plot you'll need to:
1. Transform `m2001` and `m2019` to boolean types (water/dry) using a threshold.
2. Use numpy logical functions to compute the boolean values of the different components Red, Green, Blue, Alpha. _Hint: use `np.logical_not()` and `np.logical_and()` to compute the components._
3. Stack the components along a 3rd dimension making use of `np.dstack`.
4. Convert the resulting array into `float64` type.
5. Plot using matplotlib.

A possible solution sketch is shown after the empty cell below. ###Code ### Your code goes here ###Output _____no_output_____
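###Markdown One possible solution sketch for the semaphore plot, assuming `m2001` and `m2019` from the earlier cells are still in memory. The `> 0` threshold and the figure size are arbitrary choices; the colour logic follows the mapping listed above. ###Code
# Step 1: boolean water masks (the WOfS wet value 128 becomes True, NaN/0 becomes False)
wet_2001 = m2001.values > 0
wet_2019 = m2019.values > 0

# Step 2: colour components built with numpy logical functions
blue = np.logical_and(wet_2001, wet_2019)                   # water in both years
red = np.logical_and(wet_2001, np.logical_not(wet_2019))    # water lost
green = np.logical_and(np.logical_not(wet_2001), wet_2019)  # new water
alpha = np.logical_or(wet_2001, wet_2019)                   # opaque only where water ever occurred

# Steps 3-5: stack into an RGBA image, cast to float64 and plot
rgba = np.dstack([red, green, blue, alpha]).astype(np.float64)
plt.figure(figsize=(8, 8))
plt.imshow(rgba)
plt.title('Water change between 2001 and 2019')
plt.show()
###Output _____no_output_____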
notebooks/Dataset Statistics.ipynb
###Markdown Dataset Statistics Check dataset existence ###Code # check data existence from pathlib import Path data_folder = Path.cwd().parent.joinpath("data/processed") def check_data_existence(folder): file_count = len(list(folder.glob("e*_ann.json"))) if file_count == 0: raise Exception("Processed Data does not exist.") else: print("{} files exist.".format(file_count)) check_data_existence(data_folder) ###Output 230 files exist. ###Markdown Read data to DataFrame ###Code import json import pandas as pd companies = [] sentences = [] entities = [] for f in data_folder.glob("e*_ann.json"): with f.open(encoding="utf-8") as j: d = json.load(j) # company infos company_info = d["header"] companies.append(company_info) # sentences company_code = company_info["document_id"] for s in d["sentences"]: line = { "company": company_code, "sentence": s["sentence"], "entities": len(s["opinions"]) } sentences.append(line) # entities for o in s["opinions"]: entities.append(o) companies = pd.DataFrame(companies) sentences = pd.DataFrame(sentences) entities = pd.DataFrame(entities) companies.head(5) sentences.head(5) entities.head(5) ###Output _____no_output_____ ###Markdown Show Statistics ###Code %matplotlib inline ###Output _____no_output_____ ###Markdown Data distribution ###Code translation = """ 水産・農林業 Fishery, Agriculture & Forestry 鉱業 Mining 建設業 Construction 食料品 Foods 繊維製品 Textiles and Apparels パルプ・紙 Pulp and Paper 化学 Chemicals 医薬品 Pharmaceutical 石油・石炭製品 Oil and Coal Products ゴム製品 Rubber Products ガラス・土石製品 Glass and Ceramics Products 鉄鋼 Iron and Steel 非鉄金属 Nonferrous Metals 金属製品 Metal Products 機械 Machinery 電気機器 Electric Appliances 輸送用機器 Transportation Equipment 精密機器 Precision Instruments その他製品 Other Products 電気・ガス業 Electric Power and Gas 陸運業 Land Transportation 海運業 Marine Transportation 空運業 Air Transportation 倉庫・運輸関連業 Warehousing and Harbor Transportation 情報・通信業 Information & Communication 卸売業 Wholesale Trade 小売業 Retail Trade 銀行業 Banks 証券、商品先物取引業 Securities and Commodities Futures 保険業 Insurance その他金融業 Other Financing Business 不動産業 Real Estate サービス業 Services """ translation_list = [t.split("\t") for t in translation.split("\n") if t] translation_list = dict(translation_list ) companies["category33_en"] = companies["category33"].apply(lambda c: translation_list[c]) companies.groupby(["category33"]).count()["edi_id"].sort_values(ascending=False).plot(kind="bar", figsize=(15,5)) companies.groupby(["category33_en"]).count()["edi_id"].sort_values(ascending=False).plot(kind="bar", figsize=(15,5)) ###Output _____no_output_____ ###Markdown Label distribution ###Code print("{} entities are annotated.".format(len(entities))) entities.groupby(["category"]).count()["target"].sort_values(ascending=False).plot(kind="bar") (entities.groupby(["category"]).count()["target"].sort_values(ascending=False).cumsum() * 100 / len(entities)).plot.line(secondary_y=True, style="g", rot=90) entities.groupby(["polarity"]).count()["target"].plot.bar() entities.groupby(["polarity", "category"]).count()["target"].divide(entities.groupby(["category"]).count()["target"]).unstack("polarity").plot.bar(stacked=True) ###Output _____no_output_____ ###Markdown Sentence distribution ###Code print("The sentences that have entities are {}.".format(len(sentences[sentences["entities"] > 0]))) print("The number of sentences are {}.".format(len(sentences))) sentences[sentences["entities"] > 0].groupby(["entities"]).count()["company"].plot.bar() ###Output _____no_output_____
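###Markdown As a small numeric complement to the plots above, here is a sketch that only assumes the `sentences` and `entities` frames built earlier in this notebook: ###Code
# Distribution of annotations per sentence
print(sentences["entities"].describe())

# Polarity counts per category as a plain table rather than a stacked bar chart
print(pd.crosstab(entities["category"], entities["polarity"]))

# Share of sentences that carry at least one annotation
annotated_ratio = (sentences["entities"] > 0).mean()
print("Annotated sentence ratio: {:.1%}".format(annotated_ratio))
###Output _____no_output_____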
ML_pandas/ML_pandas2.ipynb
###Markdown Starting with this code, loading in a CSV to a dataframe can be as simple as: ###Code import pandas as pd import matplotlib.pyplot as plt df = pd.read_csv('ZILLOW-C252_MSPFCO.csv') print(df.head()) ###Output Date Value 0 2016-07-31 807.9414 1 2016-06-30 797.5535 2 2016-05-31 809.5157 3 2016-04-30 818.5681 4 2016-03-31 800.9157 ###Markdown Notice that we have no decent index again. We can fix that like we did before doing: ###Code df=df.set_index('Date') ###Output _____no_output_____ ###Markdown Now, let's say we want to send this back to a CSV, we can do: ###Code df.to_csv('newcsv2.csv') ###Output _____no_output_____ ###Markdown With pandas we can quickly make a plot of the data. ###Code df.plot() plt.show() ###Output _____no_output_____ ###Markdown We notice that it is ploting from most recent data to the oldest data. This is not how we would like such data to be presented. We can quickly flip the dataset using panda indexing.Lets start by looking at the 25 most recent data point ###Code print(df[:25]) ###Output Value Date 2016-07-31 807.9414 2016-06-30 797.5535 2016-05-31 809.5157 2016-04-30 818.5681 2016-03-31 800.9157 2016-02-29 771.4921 2016-01-31 747.5905 2015-12-31 745.8960 2015-11-30 786.6922 2015-10-31 744.3815 2015-09-30 719.1234 2015-08-31 736.4989 2015-07-31 736.1203 2015-06-30 734.6554 2015-05-31 761.6679 2015-04-30 730.1186 2015-03-31 762.7666 2015-02-28 704.9493 2014-12-31 654.5523 2014-11-30 648.7748 2014-10-31 673.1492 2014-09-30 662.6267 2014-08-31 630.2345 2014-07-31 629.4594 2014-06-30 651.3413 ###Markdown We can notice that this is the value reported at the end of each month. Let's say we only care about the value reported at the end of every third month. We could use indexing like this: ###Code print(df[:25:3]) ###Output Value Date 2016-07-31 807.9414 2016-04-30 818.5681 2016-01-31 747.5905 2015-10-31 744.3815 2015-07-31 736.1203 2015-04-30 730.1186 2014-12-31 654.5523 2014-09-30 662.6267 2014-06-30 651.3413 ###Markdown Alternatively, we could look at the data provided with year spacing. ###Code print(df[::12]) ###Output Value Date 2016-07-31 807.9414 2015-07-31 736.1203 2014-06-30 651.3413 2013-06-30 615.2628 2012-06-30 541.8987 2011-06-30 484.4020 2010-06-30 543.6748 2009-06-30 572.6300 2008-06-30 590.9401 2007-06-30 594.4301 2006-06-30 652.5614 2005-06-30 624.5646 2004-06-30 549.3914 2003-06-30 400.8241 2002-06-30 360.6274 2001-06-30 321.8108 2000-06-30 270.7787 1999-06-30 244.5099 1998-06-30 217.9388 1997-05-31 192.2811 1996-05-31 174.5560 ###Markdown Finally, we could use this indexing to flip our data and created a sensable plot. ###Code df[::-1].plot() plt.show() ###Output _____no_output_____ ###Markdown We can extract this data in a variety of ways. This is a Pandas tutorial, so if we can use Pandas, we shall. Let's check out the read_html from Pandas. It's not being called "experimental" anymore, but I would still label this as expirimental. The standard and quality of the other IO modules is very high and dependable. This read_html is not quite up to par, but I still say it's very impressive and useful code, and just plain cool. The way it works is you just simply feed in a URL, and Pandas will extract the dataframe worthy date from tables into a dataframe. This means, unlike the other typical methods you will usually use, read_html ends up reading into a list of dataframes. This isn't the only one that is different, but it is different. First, in order to use read_html, we need html5lib. 
Open up cmd.exe or your terminal and do: pip install html5lib. Now, we can make our first attempt by doing: ###Code fiddy_states = pd.read_html('https://simple.wikipedia.org/wiki/List_of_U.S._states') print(fiddy_states) ###Output [ 0 1 2 3 4 0 Sl no. Abbreviations State Name Capital Became a State 1 1 AL Alabama Montgomery December 14, 1819 2 2 AK Alaska Juneau January 3, 1959 3 3 AZ Arizona Phoenix February 14, 1912 4 4 AR Arkansas Little Rock June 15, 1836 5 5 CA California Sacramento September 9, 1850 6 6 CO Colorado Denver August 1, 1876 7 7 CT Connecticut Hartford January 9, 1788 8 8 DE Delaware Dover December 7, 1787 9 9 FL Florida Tallahassee March 3, 1845 10 10 GA Georgia Atlanta January 2, 1788 11 11 HI Hawaii Honolulu August 21, 1959 12 12 ID Idaho Boise July 3, 1890 13 13 IL Illinois Springfield December 3, 1818 14 14 IN Indiana Indianapolis December 11, 1816 15 15 IA Iowa Des Moines December 28, 1846 16 16 KS Kansas Topeka January 29, 1861 17 17 KY Kentucky Frankfort June 1, 1792 18 18 LA Louisiana Baton Rouge April 30, 1812 19 19 ME Maine Augusta March 15, 1820 20 20 MD Maryland Annapolis April 28, 1788 21 21 MA Massachusetts Boston February 6, 1788 22 22 MI Michigan Lansing January 26, 1837 23 23 MN Minnesota Saint Paul May 11, 1858 24 24 MS Mississippi Jackson December 10, 1817 25 25 MO Missouri Jefferson City August 10, 1821 26 26 MT Montana Helena November 8, 1889 27 27 NE Nebraska Lincoln March 1, 1867 28 28 NV Nevada Carson City October 31, 1864 29 29 NH New Hampshire Concord June 21, 1788 30 30 NJ New Jersey Trenton December 18, 1787 31 31 NM New Mexico Santa Fe January 6, 1912 32 32 NY New York Albany July 26, 1788 33 33 NC North Carolina Raleigh November 21, 1789 34 34 ND North Dakota Bismarck November 2, 1889 35 35 OH Ohio Columbus March 1, 1803 36 36 OK Oklahoma Oklahoma City November 16, 1907 37 37 OR Oregon Salem February 14, 1859 38 38 PA Pennsylvania Harrisburg December 12, 1787 39 39 RI Rhode Island Providence May 19, 1790 40 40 SC South Carolina Columbia May 23, 1788 41 41 SD South Dakota Pierre November 2, 1889 42 42 TN Tennessee Nashville June 1, 1796 43 43 TX Texas Austin December 29, 1845 44 44 UT Utah Salt Lake City January 4, 1896 45 45 VT Vermont Montpelier March 4, 1791 46 46 VA Virginia Richmond June 25, 1788 47 47 WA Washington Olympia November 11, 1889 48 48 WV West Virginia Charleston June 20, 1863 49 49 WI Wisconsin Madison May 29, 1848 50 50 WY Wyoming Cheyenne July 10, 1890, 0 \ 0 vtePolitical divisions of the United States 1 States 2 Federal district 3 Insular areas 4 Outlying islands 1 0 NaN 1 Alabama Alaska Arizona Arkansas California Col... 2 Washington, D.C. 3 American Samoa Guam Northern Mariana Islands P... 4 Baker Island Howland Island Jarvis Island John... ] ###Markdown That's more output than I am going to post here, but you get the idea. At least some of this data is what we want, and it looks like the first dataframe is off to a good start. So let's do: ###Code print(fiddy_states[0]) ###Output 0 1 2 3 4 0 Sl no. 
Abbreviations State Name Capital Became a State 1 1 AL Alabama Montgomery December 14, 1819 2 2 AK Alaska Juneau January 3, 1959 3 3 AZ Arizona Phoenix February 14, 1912 4 4 AR Arkansas Little Rock June 15, 1836 5 5 CA California Sacramento September 9, 1850 6 6 CO Colorado Denver August 1, 1876 7 7 CT Connecticut Hartford January 9, 1788 8 8 DE Delaware Dover December 7, 1787 9 9 FL Florida Tallahassee March 3, 1845 10 10 GA Georgia Atlanta January 2, 1788 11 11 HI Hawaii Honolulu August 21, 1959 12 12 ID Idaho Boise July 3, 1890 13 13 IL Illinois Springfield December 3, 1818 14 14 IN Indiana Indianapolis December 11, 1816 15 15 IA Iowa Des Moines December 28, 1846 16 16 KS Kansas Topeka January 29, 1861 17 17 KY Kentucky Frankfort June 1, 1792 18 18 LA Louisiana Baton Rouge April 30, 1812 19 19 ME Maine Augusta March 15, 1820 20 20 MD Maryland Annapolis April 28, 1788 21 21 MA Massachusetts Boston February 6, 1788 22 22 MI Michigan Lansing January 26, 1837 23 23 MN Minnesota Saint Paul May 11, 1858 24 24 MS Mississippi Jackson December 10, 1817 25 25 MO Missouri Jefferson City August 10, 1821 26 26 MT Montana Helena November 8, 1889 27 27 NE Nebraska Lincoln March 1, 1867 28 28 NV Nevada Carson City October 31, 1864 29 29 NH New Hampshire Concord June 21, 1788 30 30 NJ New Jersey Trenton December 18, 1787 31 31 NM New Mexico Santa Fe January 6, 1912 32 32 NY New York Albany July 26, 1788 33 33 NC North Carolina Raleigh November 21, 1789 34 34 ND North Dakota Bismarck November 2, 1889 35 35 OH Ohio Columbus March 1, 1803 36 36 OK Oklahoma Oklahoma City November 16, 1907 37 37 OR Oregon Salem February 14, 1859 38 38 PA Pennsylvania Harrisburg December 12, 1787 39 39 RI Rhode Island Providence May 19, 1790 40 40 SC South Carolina Columbia May 23, 1788 41 41 SD South Dakota Pierre November 2, 1889 42 42 TN Tennessee Nashville June 1, 1796 43 43 TX Texas Austin December 29, 1845 44 44 UT Utah Salt Lake City January 4, 1896 45 45 VT Vermont Montpelier March 4, 1791 46 46 VA Virginia Richmond June 25, 1788 47 47 WA Washington Olympia November 11, 1889 48 48 WV West Virginia Charleston June 20, 1863 49 49 WI Wisconsin Madison May 29, 1848 50 50 WY Wyoming Cheyenne July 10, 1890 ###Markdown Yep, that's looking good, we want column 0. So, we want to iterate through column 0 of fiddy_states[0]. Remember, right now, fiddy_states is a list of dataframes, and fiddy_states[0] is the first dataframe. To reference column 0 then, we do fiddy_states[0][0]. One is a list index, which returns a dataframe. The other is a column within the dataframe. Next, we notice the first item in column 0 is the word "abbreviation," which we don't want. We can correct the header and indexing as so: ###Code ##Use only the first Table fiddy_states=fiddy_states[0] ###Correct the header fiddy_states.columns = fiddy_states.iloc[0] fiddy_states=fiddy_states.drop(fiddy_states.index[0]) ###Corrrect the indexing fiddy_states=fiddy_states.set_index('Sl no.') print(fiddy_states) ###Output 0 Abbreviations State Name Capital Became a State Sl no. 
1 AL Alabama Montgomery December 14, 1819 2 AK Alaska Juneau January 3, 1959 3 AZ Arizona Phoenix February 14, 1912 4 AR Arkansas Little Rock June 15, 1836 5 CA California Sacramento September 9, 1850 6 CO Colorado Denver August 1, 1876 7 CT Connecticut Hartford January 9, 1788 8 DE Delaware Dover December 7, 1787 9 FL Florida Tallahassee March 3, 1845 10 GA Georgia Atlanta January 2, 1788 11 HI Hawaii Honolulu August 21, 1959 12 ID Idaho Boise July 3, 1890 13 IL Illinois Springfield December 3, 1818 14 IN Indiana Indianapolis December 11, 1816 15 IA Iowa Des Moines December 28, 1846 16 KS Kansas Topeka January 29, 1861 17 KY Kentucky Frankfort June 1, 1792 18 LA Louisiana Baton Rouge April 30, 1812 19 ME Maine Augusta March 15, 1820 20 MD Maryland Annapolis April 28, 1788 21 MA Massachusetts Boston February 6, 1788 22 MI Michigan Lansing January 26, 1837 23 MN Minnesota Saint Paul May 11, 1858 24 MS Mississippi Jackson December 10, 1817 25 MO Missouri Jefferson City August 10, 1821 26 MT Montana Helena November 8, 1889 27 NE Nebraska Lincoln March 1, 1867 28 NV Nevada Carson City October 31, 1864 29 NH New Hampshire Concord June 21, 1788 30 NJ New Jersey Trenton December 18, 1787 31 NM New Mexico Santa Fe January 6, 1912 32 NY New York Albany July 26, 1788 33 NC North Carolina Raleigh November 21, 1789 34 ND North Dakota Bismarck November 2, 1889 35 OH Ohio Columbus March 1, 1803 36 OK Oklahoma Oklahoma City November 16, 1907 37 OR Oregon Salem February 14, 1859 38 PA Pennsylvania Harrisburg December 12, 1787 39 RI Rhode Island Providence May 19, 1790 40 SC South Carolina Columbia May 23, 1788 41 SD South Dakota Pierre November 2, 1889 42 TN Tennessee Nashville June 1, 1796 43 TX Texas Austin December 29, 1845 44 UT Utah Salt Lake City January 4, 1896 45 VT Vermont Montpelier March 4, 1791 46 VA Virginia Richmond June 25, 1788 47 WA Washington Olympia November 11, 1889 48 WV West Virginia Charleston June 20, 1863 49 WI Wisconsin Madison May 29, 1848 50 WY Wyoming Cheyenne July 10, 1890 ###Markdown Now we can issolate any part of this table. We are interested in the Abbrevations and can look at them using the header. ###Code print(fiddy_states["Abbreviations"]) ###Output Sl no. 1 AL 2 AK 3 AZ 4 AR 5 CA 6 CO 7 CT 8 DE 9 FL 10 GA 11 HI 12 ID 13 IL 14 IN 15 IA 16 KS 17 KY 18 LA 19 ME 20 MD 21 MA 22 MI 23 MN 24 MS 25 MO 26 MT 27 NE 28 NV 29 NH 30 NJ 31 NM 32 NY 33 NC 34 ND 35 OH 36 OK 37 OR 38 PA 39 RI 40 SC 41 SD 42 TN 43 TX 44 UT 45 VT 46 VA 47 WA 48 WV 49 WI 50 WY Name: Abbreviations, dtype: object
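###Markdown To close the loop, the cleaned table can be reused directly, for example as an abbreviation-to-name lookup, or written back out with the same `to_csv` pattern used for the Zillow data at the start. A short sketch (the output file name is arbitrary): ###Code
# Build a lookup from state abbreviation to full name using the cleaned columns
abbrev_to_state = dict(zip(fiddy_states["Abbreviations"], fiddy_states["State Name"]))
print(abbrev_to_state["CA"])   # California

# Persist the cleaned table, mirroring the earlier to_csv example
fiddy_states.to_csv('fifty_states.csv')
###Output _____no_output_____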
src/awesome_python/algorithm/staic_learn/第15章 奇异值分解/15.SVD.ipynb
###Markdown 第15章 奇异值分解 1.矩阵的奇异值分解是指将$m \times n$实矩阵$A$表示为以下三个实矩阵乘积形式的运算$$A = U \Sigma V ^ { T }$$其中$U$是$m$阶正交矩阵,$V$是$n$阶正交矩阵,$\Sigma$是$m \times n$矩形对角矩阵$$\Sigma = \operatorname { diag } ( \sigma _ { 1 } , \sigma _ { 2 } , \cdots , \sigma _ { p } ) , \quad p = \operatorname { min } \{ m , n \}$$其对角线元素非负,且满足$\sigma _ { 1 } \geq \sigma _ { 2 } \geq \cdots \geq \sigma _ { p } \geq 0$2.任意给定一个实矩阵,其奇异值分解一定存在,但并不唯一。3.奇异值分解包括紧奇异值分解和截断奇异值分解。紧奇异值分解是与原始矩阵等秩的奇异值分解,截断奇异值分解是比原始矩阵低秩的奇异值分解。4.奇异值分解有明确的几何解释。奇异值分解对应三个连续的线性变换:一个旋转变换,一个缩放变换和另一个旋转变换第一个和第三个旋转变换分别基于空间的标准正交基进行。5.设矩阵$A$的奇异值分解为$A = U \Sigma V ^ { T }$,则有$$\left. \begin{array} { l } { A ^ { T } A = V ( \Sigma ^ { T } \Sigma ) V ^ { T } } \\ { A A ^ { T } = U ( \Sigma \Sigma ^ { T } ) U ^ { T } } \end{array} \right.$$即对称矩阵$A^TA$和$AA^T$的特征分解可以由矩阵$A$的奇异值分解矩阵表示。6.矩阵$A$的奇异值分解可以通过求矩阵$A^TA$的特征值和特征向量得到:$A^TA$的特征向量构成正交矩阵$V$的列;从$A^TA$的特征值$\lambda _ { j }$的平方根得到奇异值$\sigma _ { i } $,即$$\sigma _ { j } = \sqrt { \lambda _ { j } } , \quad j = 1,2 , \cdots , n$$对其由大到小排列,作为对角线元素,构成对角矩阵$\Sigma$;求正奇异值对应的左奇异向量,再求扩充的$A^T$的标准正交基,构成正交矩阵$U$的列。7.矩阵$A = [ a _ { i j } ] _ { m \times n }$的弗罗贝尼乌斯范数定义为$$\| A \| _ { F } = ( \sum _ { i = 1 } ^ { m } \sum _ { j = 1 } ^ { n } ( a _ { i j } ) ^ { 2 } ) ^ { \frac { 1 } { 2 } }$$在秩不超过$k$的$m \times n$矩阵的集合中,存在矩阵$A$的弗罗贝尼乌斯范数意义下的最优近似矩阵$X$。秩为$k$的截断奇异值分解得到的矩阵$A_k$能够达到这个最优值。奇异值分解是弗罗贝尼乌斯范数意义下,也就是平方损失意义下的矩阵最优近似。8.任意一个实矩阵$A$可以由其外积展开式表示$$A = \sigma _ { 1 } u _ { 1 } v _ { 1 } ^ { T } + \sigma _ { 2 } u _ { 2 } v _ { 2 } ^ { T } + \cdots + \sigma _ { n } u _ { n } v _ { n } ^ { T }$$其中$u _ { k } v _ { k } ^ { T }$为$m \times n$矩阵,是列向量$u _ { k }$和行向量$v _ { k } ^ { T }$的外积,$\sigma _ { k }$为奇异值,$u _ { k } , v _ { k } ^ { T } , \sigma _ { k }$通过矩阵$A$的奇异值分解得到。 ---任意一个$m$ x $n$ 矩阵,都可以表示为三个矩阵的乘积(因子分解)形式,分别是$m$阶**正交矩阵**,由**降序**排列的**非负**的对角线元素组成的$m$ x $n$ 矩形对角矩阵,和$n$阶**正交矩阵**,称为该矩阵的奇异值分解。矩阵的奇异值分解一定存在,但不唯一。 奇异值分解可以看作是矩阵数据压缩的一种方法,即用因子分解的方式近似地表示原始矩阵,这种近似是在平方损失意义下的最优近似。 矩阵的奇异值分解是指,将一个非零的$m$ x $n$ **实矩阵**$A, A\in R^{m\times n}$表示为一下三个实矩阵乘积形式的运算: $A = U\Sigma V^{T}$, 其中 $U$ 是 $m$ 阶正交矩阵, $V$ 是 $n$ 阶正交矩阵,$\Sigma$ 是由降序排列的非负的对角线元素组成的$m$ x $n$矩形对角矩阵。称为$A$ 的奇异值分解。 $U$的列向量称为左奇异向量, $V$的列向量称为右奇异向量。 奇异值分解不要求矩阵$A$ 是方阵,事实上矩阵的奇异值分解可以看作方阵的对角化的推广。 **紧奇奇异值分解**是与原始矩阵等秩的奇异值分解, **截断奇异值分解**是比原始矩阵低秩的奇异值分解。 --------------------------------------------------------------------------------------------------------------------------------- ###Code # 实现奇异值分解, 输入一个numpy矩阵,输出 U, sigma, V # https://zhuanlan.zhihu.com/p/54693391 import numpy as np #基于矩阵分解的结果,复原矩阵 def rebuildMatrix(U, sigma, V): a = np.dot(U, sigma) a = np.dot(a, np.transpose(V)) return a #基于特征值的大小,对特征值以及特征向量进行排序。倒序排列 def sortByEigenValue(Eigenvalues, EigenVectors): index = np.argsort(-1 * Eigenvalues) Eigenvalues = Eigenvalues[index] EigenVectors = EigenVectors[:, index] return Eigenvalues, EigenVectors #对一个矩阵进行奇异值分解 def SVD(matrixA, NumOfLeft=None): #NumOfLeft是要保留的奇异值的个数,也就是中间那个方阵的宽度 #首先求transpose(A)*A matrixAT_matrixA = np.dot(np.transpose(matrixA), matrixA) #然后求右奇异向量 lambda_V, X_V = np.linalg.eig(matrixAT_matrixA) lambda_V, X_V = sortByEigenValue(lambda_V, X_V) #求奇异值 sigmas = lambda_V sigmas = list(map(lambda x: np.sqrt(x) if x > 0 else 0, sigmas)) #python里很小的数有时候是负数 sigmas = np.array(sigmas) sigmasMatrix = np.diag(sigmas) if NumOfLeft == None: rankOfSigmasMatrix = len(list(filter(lambda x: x > 0, sigmas))) #大于0的特征值的个数 else: rankOfSigmasMatrix = NumOfLeft sigmasMatrix = sigmasMatrix[0:rankOfSigmasMatrix, :] #特征值为0的奇异值就不要了 #计算右奇异向量 X_U = np.zeros( (matrixA.shape[0], rankOfSigmasMatrix)) #初始化一个右奇异向量矩阵,这里直接进行裁剪 for i in 
range(rankOfSigmasMatrix): X_U[:, i] = np.transpose(np.dot(matrixA, X_V[:, i]) / sigmas[i]) #对右奇异向量和奇异值矩阵进行裁剪 X_V = X_V[:, 0:NumOfLeft] sigmasMatrix = sigmasMatrix[0:rankOfSigmasMatrix, 0:rankOfSigmasMatrix] #print(rebuildMatrix(X_U, sigmasMatrix, X_V)) return X_U, sigmasMatrix, X_V A = np.array([[1, 1, 1, 2, 2], [0, 0, 0, 3, 3], [0, 0, 0, 1, 1], [1, 1, 1, 0, 0], [2, 2, 2, 0, 0], [5, 5, 5, 0, 0], [1, 1, 1, 0, 0]]) A X_U, sigmasMatrix, X_V = SVD(A, NumOfLeft=3) X_U sigmasMatrix X_V # rebuild from U, sigma, V rebuildMatrix(X_U, sigmasMatrix, X_V) ###Output _____no_output_____ ###Markdown same as A. ###Code from PIL import Image import requests from io import BytesIO url = 'https://images.mulberry.com/i/mulberrygroup/RL5792_000N651_L/small-hampstead-deep-amber-small-classic-grain-ayers/small-hampstead-deep-amber-small-classic-grain-ayers?v=3&w=304' response = requests.get(url) img = Image.open(BytesIO(response.content)) img ###Output _____no_output_____
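###Markdown A natural continuation of the image cell above is a low-rank reconstruction, which is the typical use of truncated SVD. The sketch below assumes the `img` download succeeded; it uses `np.linalg.svd` for convenience rather than the hand-written `SVD` function, and `k` is an arbitrary choice. ###Code
import numpy as np
import matplotlib.pyplot as plt

gray = np.asarray(img.convert('L'), dtype=np.float64)   # grayscale pixel matrix
U, s, Vt = np.linalg.svd(gray, full_matrices=False)

k = 20                                                  # number of singular values to keep
approx = U[:, :k] @ np.diag(s[:k]) @ Vt[:k, :]          # rank-k reconstruction

fig, axes = plt.subplots(1, 2, figsize=(8, 4))
axes[0].imshow(gray, cmap='gray')
axes[0].set_title('original')
axes[1].imshow(approx, cmap='gray')
axes[1].set_title('rank-{} approximation'.format(k))
plt.show()
###Output _____no_output_____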
nbs/12a_examples.glue-benchmark-sweeps.ipynb
###Markdown GLUE: hyperparameter search ###Code from transformers import AutoModelForSequenceClassification from fastai.text.all import * from fastai.callback.wandb import * from fasthugs.learner import TransLearner from fasthugs.data import TransformersTextBlock, TextGetter, get_splits from datasets import load_dataset, concatenate_datasets import wandb import gc %env WANDB_ENTITY=fastai_community %env WANDB_PROJECT=glue-benchmark ###Output env: WANDB_ENTITY=fastai_community env: WANDB_PROJECT=glue-benchmark ###Markdown Setup Let's define main settings for the run in one place: ###Code ds_name = 'glue' model_name = "distilroberta-base" max_len = 512 bs = 32 val_bs = bs*2 n_epoch = 4 lr = 2e-5 opt_func = Adam diff_lr_decay_factor = 0 GLUE_TASKS = ["cola", "mnli", "mnli-mm", "mrpc", "qnli", "qqp", "rte", "sst2", "stsb", "wnli"] def validate_task(): assert task in GLUE_TASKS from fastai.metrics import MatthewsCorrCoef, F1Score, PearsonCorrCoef, SpearmanCorrCoef glue_metrics = { 'cola':[MatthewsCorrCoef()], 'sst2':[accuracy], 'mrpc':[F1Score(), accuracy], 'stsb':[PearsonCorrCoef(), SpearmanCorrCoef()], 'qqp' :[F1Score(), accuracy], 'mnli':[accuracy], 'qnli':[accuracy], 'rte' :[accuracy], 'wnli':[accuracy], } glue_textfields = { 'cola':['sentence', None], 'sst2':['sentence', None], 'mrpc':['sentence1', 'sentence2'], 'stsb':['sentence1', 'sentence2'], 'qqp' :['question1', 'question2'], 'mnli':['premise', 'hypothesis'], 'qnli':['question', 'sentence'], 'rte' :['sentence1', 'sentence2'], 'wnli':['sentence1', 'sentence2'], } glue_num_labels = {'mnli':3, 'stsb':1} #collapse_input def layerwise_splitter(model): emb = L(model.base_model.embeddings) layers = L(model.base_model.encoder.layer.children()) clf = L(m for m in list(model.children())[1:] if params(m)) groups = emb + layers + clf return groups.map(params) ###Output _____no_output_____ ###Markdown Running a GLUE task ###Code task = 'sst2' validate_task() ds = load_dataset(ds_name, task) valid_ = 'validation-matched' if task=='mnli' else 'validation' len(ds['train']), len(ds[valid_]) train_idx, valid_idx = get_splits(ds, valid=valid_) train_ds = concatenate_datasets([ds['train'], ds[valid_]]) train_ds[0] ###Output _____no_output_____ ###Markdown Here I use number of characters a proxy for length of tokenized text to speed up `dls` creation. 
###Code lens = train_ds.map(lambda s: {'len': sum([len(s[i]) for i in glue_textfields[task] if i])}, remove_columns=train_ds.column_names, num_proc=2, keep_in_memory=True) train_lens = lens.select(train_idx)['len'] valid_lens = lens.select(valid_idx)['len'] dblock = DataBlock(blocks = [TransformersTextBlock(pretrained_model_name=model_name), CategoryBlock()], get_x=TextGetter(*glue_textfields[task]), get_y=ItemGetter('label'), splitter=IndexSplitter(valid_idx)) %%time dl_kwargs=[{'res':train_lens}, {'val_res':valid_lens}] dls = dblock.dataloaders(train_ds, bs=bs, val_bs=val_bs, dl_kwargs=dl_kwargs) dls.show_batch(max_n=4) ###Output _____no_output_____ ###Markdown Single run ###Code WANDB_NAME = f'{ds_name}-{task}-{model_name}' GROUP = f'{ds_name}-{task}-{model_name}-{lr:.0e}' if diff_lr_decay_factor: GROUP += f"diff_lr_{diff_lr_decay_factor}" NOTES = f'finetuning {model_name} with {opt_func.__name__} lr={lr:.0e}' TAGS =[model_name, ds_name, opt_func.__name__] #hide_output wandb.init(reinit=True, project="fasthugs", entity="fastai_community", name=WANDB_NAME, group=GROUP, notes=NOTES, tags=TAGS); #hide_output model = AutoModelForSequenceClassification.from_pretrained(model_name, num_labels=glue_num_labels.get(task, 2)) metrics = glue_metrics[task] learn = TransLearner(dls, model, metrics=metrics, opt_func=opt_func, splitter=layerwise_splitter).to_fp16() # learn.summary() if diff_lr_decay_factor != 0: k = len(layerwise_splitter(model)) lr = slice(lr*diff_lr_decay_factor**k,lr) metric_to_monitor = metrics[0].name if isinstance(metrics[0], Metric) else metrics[0].__name__ cbs = [WandbCallback(log_preds=False, log_model=False), SaveModelCallback(monitor=metric_to_monitor)] learn.fit_one_cycle(4, lr, cbs=cbs) learn.show_results() # test_dl = dls.test_dl(ds['test']) # preds = learn.get_preds(dl=test_dl) del learn gc.collect() torch.cuda.empty_cache() ###Output _____no_output_____ ###Markdown Sweeps ###Code wandb.login() def train(): with wandb.init() as run: cfg = run.config model = AutoModelForSequenceClassification.from_pretrained(model_name, num_labels=glue_num_labels.get(task, 2)) metrics = glue_metrics[task] k = len(layerwise_splitter(model)) if cfg.diff_lr_decay_factor: lr = slice(cfg.lr*cfg.diff_lr_decay_factor**k,cfg.lr) learn = TransLearner(dls, model, metrics=metrics, opt_func=Adam, splitter=layerwise_splitter).to_fp16() learn.fit_one_cycle(n_epoch, cfg.lr, wd=cfg.wd, cbs=[WandbCallback(log_preds=False, log_model=False)]) del learn gc.collect() torch.cuda.empty_cache() torch.cuda.synchronize() metrics = glue_metrics[task] metric_to_monitor = metrics[0].name if isinstance(metrics[0], Metric) else metrics[0].__name__ sweep_name = f"glue-{task}-sweep" sweep_config = { "project":"glue-benchmark", "entity": "fastai_cimmunity", "name": sweep_name, "method": "random", "parameters": { "lr": {"values":[1e-5,2e-5,3e-5,5e-5, 1e-4, 3e-4]}, "wd": {"values":[0.,1e-2,5e-2]}, "diff_lr_decay_factor":{"values":[0., 0.9, 0.8, 0.7, 0.6]} }, "metric":{"goal": "maximise", "name": metric_to_monitor}, "early_terminate": {"type": "hyperband", "s": 2, "eta": 3, "max_iter": 40} } sweep_id = wandb.sweep(sweep_config, project='glue-benchmark', entity="fastai_community") wandb.agent(sweep_id, function=train) wandb.finish() ###Output _____no_output_____ ###Markdown Another task example: MultiNLI ###Code task = 'mnli' validate_task() ds = load_dataset(ds_name, task) train_idx, valid_idx = get_splits(ds, valid='validation_matched') train_ds = concatenate_datasets([ds['train'], ds['validation_matched']]) train_ds[0] 
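# (Added aside, not in the original notebook.) Before building the DataBlock it can be
# useful to glance at the MNLI label distribution; a hedged one-liner with the standard
# library could look like:
#   from collections import Counter; Counter(ds['train']['label'])
# MNLI is a three-way task (entailment / neutral / contradiction), which is why
# glue_num_labels maps 'mnli' to 3 further up.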
lens = train_ds.map(lambda s: {'len': len(s['premise'])+len(s['hypothesis'])}, remove_columns=train_ds.column_names, num_proc=4, keep_in_memory=True) train_lens = lens.select(train_idx)['len'] valid_lens = lens.select(valid_idx)['len'] dblock = DataBlock(blocks = [TransformersTextBlock(pretrained_model_name=model_name), CategoryBlock()], get_x=TextGetter('premise', 'hypothesis'), get_y=ItemGetter('label'), splitter=IndexSplitter(valid_idx)) %%time dl_kwargs=[{'res':train_lens}, {'val_res':valid_lens}] dls = dblock.dataloaders(train_ds, bs=bs, val_bs=val_bs, dl_kwargs=dl_kwargs, num_workers=4) dls.show_batch(max_n=4) lr=5e-5 wd=0.01 WANDB_NAME = f'{ds_name}-{task}-{model_name}' GROUP = f'{ds_name}-{task}-{model_name}-{lr:.0e}' NOTES = f'finetuning {model_name} with Adam lr={lr:.0e}' TAGS =[model_name, ds_name, 'adam'] #hide_output wandb.init(reinit=True, project="glue-benchmark", entity="fastai_community", name=WANDB_NAME, group=GROUP, notes=NOTES, tags=TAGS); #hide_output model = AutoModelForSequenceClassification.from_pretrained(model_name, num_labels=3) metrics = glue_metrics[task] learn = TransLearner(dls, model, metrics=metrics).to_fp16() metric_to_monitor = metrics[0].name if isinstance(metrics[0], Metric) else metrics[0].__name__ cbs = [WandbCallback(log_preds=False, log_model=False)] # cbs += [SaveModelCallback(monitor=metric_to_monitor)] learn.fit_one_cycle(4, lr, wd=wd, cbs=cbs) learn.show_results() valid_mm_dl = dls.test_dl(ds['validation_mismatched'], with_labels=True) learn.validate(dl=valid_mm_dl) ###Output _____no_output_____
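###Markdown For completeness, here is a sketch (untested, and not part of the original run) of how predictions for the unlabelled GLUE test split could be produced with the fitted learner, mirroring the commented-out `test_dl` lines in the single-run section above. The split names `test_matched`/`test_mismatched` are the ones exposed by the `glue`/`mnli` dataset.
###Code
# Hedged sketch: inference on the unlabelled MNLI test split
test_dl = dls.test_dl(ds['test_matched'])
preds, _ = learn.get_preds(dl=test_dl)
pred_labels = preds.argmax(dim=-1)
pred_labels[:10]
###Output _____no_output_____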
estar/examples/ESTAR_synth_wave.ipynb
###Markdown We define a function that evaluates the values of the equation terms. This implementation uses tensors of token values on the grid and obtains the value of a term by element-wise multiplication of those tensors. The resulting tensor is then flattened into a vector so it can be used as a feature in the regression.
###Code
def derivative_evaluator(term, normalize, eval_params):
    '''

    Example of the evaluator of token values, appropriate for case of derivatives with pre-calculated values, defined on grid, that take form of tensors

    Parameters
    ----------
    term : term.Term, or numpy.ndarray
        Object for term of the equation, or its gene, for which the evaluation is done; necessary for the evaluation.
    eval_params : dict
        Dictionary, containing parameters of the evaluator: in this example, they are
        'token matrices' : list/numpy.martix of token (derivatives) values on the grid,
        'parameter_indexes' : dictionary of orders of token parameters during the encoding.
        In simplest case of only power parameter: 'parameter_indexes':{'power':0}.

    Returns
    ----------
    value : numpy.ndarray
        Vector of the evaluation of the token values, that shall be used as target, or feature during the LASSO regression.

    '''
    assert 'token_matrices' in eval_params and 'parameter_indexes' in eval_params
    if type(term) == Term:
        term = term.gene
    token_matrices = eval_params['token_matrices']
    value = np.copy(token_matrices[0])
    for var_idx in np.arange(term.shape[0]):
        power = (term[var_idx + eval_params['parameter_indexes']['power']])
        value *= eval_params['token_matrices'][int(var_idx / (float(eval_params['parameter_indexes']['power']+1)))] ** int(power)
    if normalize:
        value = normalize_ts(value)
    value = value.reshape(np.prod(value.shape))
    return value
###Output _____no_output_____
###Markdown Next we preprocess the data and compute the derivative values on the grid. The computations are fairly heavy, so they are parallelized, but they can still take a noticeable amount of time, especially on a desktop PC. The preprocessing results are saved to a separate file so they can be reused on repeated runs of the main algorithm, which saves time. The example uses a solution of the wave equation.

In this example we consider the problem of discovering an equation from synthetic data obtained from the solution of the wave equation: $\frac{\partial^2 u}{\partial t^2} = \frac{\partial^2 u}{\partial x_1^2} + \frac{\partial^2 u}{\partial x_2^2}$, which describes the evolution of some quantity $u$ in a two-dimensional domain. The data for this experiment can be downloaded from: https://drive.google.com/open?id=1joW0zTwkSGLJVpyWxDqoSMzTvRItX24J
###Code
op_file_name = path + '/Preprocessing/Derivatives.npy'
filename = path + '/Preprocessing/wave_HP.npy'

poolsize = 4

if 'npy' in filename:
    field = np.load(filename)
else:
    shape = (201, 201, 201)
    field = np.loadtxt(filename)
    field = field.reshape(shape)
field = np.transpose(field, (2, 0, 1))
Preprocess_derivatives(field, op_file_name, mp_poolsize=poolsize)
###Output Executing on grid with uniform nodes:
Start: 2020-03-03 18:33:33.448178 ; Finish: 2020-03-03 19:19:32.216328
Preprocessing runtime: 0:45:58.768150
###Markdown We load the grid-node values of the original function and of its derivatives, and assemble them into a single tensor for further use.
We also set the boundaries of the domain over which the equation will be discovered (in this example we cut off the beginning and the end of the time series, plus 15 elements from each spatial boundary, so that only the better-quality derivative estimates are used).
###Code
u_initial = np.load('Preprocessing/Wave_HP/wave_HP.npy')   # Set the path to the file with the original field
u_initial = np.transpose(u_initial, (2, 0, 1))
print(u_initial.shape)

derivatives = np.load('Preprocessing/Wave_HP/Derivatives.npy')   # Set the path to the file with the derivatives
variables = np.ones((2 + derivatives.shape[1], ) + u_initial.shape)
variables[1, :] = u_initial
for i_outer in range(0, derivatives.shape[1]):
    variables[i_outer+2] = derivatives[:, i_outer].reshape(variables[i_outer+2].shape)

skipped_elems = 15
timeslice = (skipped_elems, -skipped_elems)

variables = variables[:, timeslice[0]:timeslice[1], skipped_elems:-skipped_elems, skipped_elems:-skipped_elems]
###Output (101, 101, 101)
###Markdown We obtain the token names for the derivatives with the **Define_Derivatives()** function. It produces the token names in the order: 1, u, $\frac{\partial u}{\partial x_1}$, $\frac{\partial^2 u}{\partial x_1^2}$, ... , $\frac{\partial u}{\partial x_2}$, $\frac{\partial^2 u}{\partial x_2^2}$, ...

Next we set the parameters of the tokens: in this example the only parameter is the power to which a token is raised inside a term. For example, if 'power' = 2 for the token $\frac{\partial u}{\partial x_1}$, the term will contain $(\frac{\partial u}{\partial x_1})^2$. We also set the terms that will be present in every equation: the constant and the original function.
###Code
token_names = Define_Derivatives(u_initial.ndim, max_order = 2)
print(token_names)
token_parameters = collections.OrderedDict([('power', (0, 3))])
basic_terms = [{'1':{'power':1}}, {'1':{'power':1}, 'u':{'power':1}}]
###Output ('1', 'u', 'du/dx1', 'd^2u/dx1^2', 'du/dx2', 'd^2u/dx2^2', 'du/dx3', 'd^2u/dx3^2')
###Markdown We create the object that will train (discover) the equation:
###Code
Trainer = Equation_Trainer(tokens = token_names, token_params = token_parameters, evaluator = derivative_evaluator, evaluator_params = {'token_matrices':variables, 'parameter_indexes':{'power':0}}, basic_terms = basic_terms)
###Output _____no_output_____
###Markdown Since we do not know in advance which value of the parameter $\alpha$ (the regularization coefficient in LASSO) yields the correct structure of the equation, we run the search over a grid of the model's hyperparameters. In this example the grid is built over a single parameter ($\alpha$), but in general grids over several parameters can be specified at once. For the remaining hyperparameters we set fixed values. If every parameter is given a single value, the values may be passed directly to the Train method.
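###Markdown As a standalone illustration of why the grid over $\alpha$ matters (an added aside using scikit-learn, which is an assumption here and not part of the ESTAR pipeline): in LASSO, a larger $\alpha$ drives more coefficients exactly to zero, so too small a value leaves spurious terms in the equation, while too large a value erases genuine ones.
###Code
# Hedged toy example: the effect of the LASSO regularization strength on sparsity
from sklearn.linear_model import Lasso

rng = np.random.default_rng(0)
X_toy = rng.normal(size=(200, 5))
y_toy = 1.0 * X_toy[:, 0] + 1.0 * X_toy[:, 1] + 0.5 * rng.normal(size=200)  # only two informative features

for alpha_demo in (0.001, 0.1, 2.0):
    coefs = Lasso(alpha=alpha_demo).fit(X_toy, y_toy).coef_
    # very small alpha keeps small spurious weights, moderate alpha recovers the
    # sparse structure, very large alpha zeroes out everything
    print(alpha_demo, np.round(coefs, 3))
###Output _____no_output_____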
###Code
Trainer.Parameters_grid(('alpha', 'a_proc', 'r_crossover', 'r_param_mutation', 'r_mutation', 'mut_chance', 'pop_size', 'eq_len', 'max_factors'),
                        ((0.01, 0.16, 4), 0.2, 0.6, 0.8, 0.5, 0.8, 20, 6, 2))
###Output _____no_output_____
###Markdown We launch the training and obtain the discovered equation in symbolic form
###Code
Trainer.Train(epochs = 50)
###Output Using parameters from grid
Achieved best fitness: 0.0050783488205190215 with alpha = 0.01
Discovered equation:
- { d^2u/dx1^2 : {'power': 1.0}} + -0.0001963014668564108 * { u : {'power': 1.0}} + 1.003430266231036 * { d^2u/dx2^2 : {'power': 1.0}} + 0.000465301995916656 * { du/dx1 : {'power': 1.0}} + 1.003430266231037 * { d^2u/dx3^2 : {'power': 1.0}} = 0
Achieved best fitness: 0.004485088661452463 with alpha = 0.06
Discovered equation:
- { d^2u/dx1^2 : {'power': 1.0}} + 1.0013594947213769 * { d^2u/dx3^2 : {'power': 1.0}} + -0.00020581754275531233 * { u : {'power': 1.0}} + 1.001359494721376 * { d^2u/dx2^2 : {'power': 1.0}} = 0
Achieved best fitness: 0.0038249581919527357 with alpha = 0.10999999999999999
Discovered equation:
- { d^2u/dx1^2 : {'power': 1.0}} + 1.0935985825740213 * { d^2u/dx2^2 : {'power': 1.0}} + 1.0935985825740218 * { d^2u/dx3^2 : {'power': 1.0}} = 0
Achieved best fitness: 0.003611457117138705 with alpha = 0.16
Discovered equation:
- { d^2u/dx1^2 : {'power': 1.0}} + 1.0935985825740213 * { d^2u/dx2^2 : {'power': 1.0}} + 1.0935985825740218 * { d^2u/dx3^2 : {'power': 1.0}} = 0
###Markdown For most of the examined interval of $\alpha$ we obtain equation structures of the form $\frac{\partial^2 u}{\partial t^2} = a_1 \frac{\partial^2 u}{\partial x_1^2} + a_2 \frac{\partial^2 u}{\partial x_2^2}$, where the coefficients $a_1$ and $a_2$ deviate from the original values of 1 because of errors, mostly related to the numerical solution of the original differential equation and to the errors of the derivative computation. For too small values of $\alpha$ the coefficient vector of the equation is not sparse enough, and the equation contains extra terms, although with negligible weights.
###Code
Trainer.Parameters_grid(('alpha', 'a_proc', 'r_crossover', 'r_param_mutation', 'r_mutation', 'mut_chance', 'pop_size', 'eq_len', 'max_factors'),
                        ((0.3, 0.4, 2), 0.2, 0.6, 0.8, 0.5, 0.8, 20, 6, 2))
Trainer.Train(epochs = 50)
###Output Using parameters from grid
Achieved best fitness: 0.00227045231823162 with alpha = 0.3
Discovered equation:
- { du/dx1 : {'power': 1.0} d^2u/dx1^2 : {'power': 1.0}} + -0.0004731879609027912 * { u : {'power': 1.0}} + -0.6650816595841387 * { d^2u/dx2^2 : {'power': 1.0}} + -0.6650816595841381 * { d^2u/dx3^2 : {'power': 1.0}} = 0
Achieved best fitness: 0.002505082208075159 with alpha = 0.4
Discovered equation:
- { d^2u/dx1^2 : {'power': 1.0}} + 1.0935985825740213 * { d^2u/dx2^2 : {'power': 1.0}} + 1.0935985825740218 * { d^2u/dx3^2 : {'power': 1.0}} = 0
###Markdown Note that, because of the stochastic nature of the construction of candidate equations and of the evolutionary process, the algorithm sometimes fails to converge to the best solution and stays in a local optimum of the fitness function. This is the case for the run at $\alpha = 0.3$, which produces an incorrect equation. An indicator of this is a comparatively low value of the fitness function, which reflects the "quality" of the equation and is exceeded by the "correct" structures even at larger values of the regularization coefficient. The details of the fitness function definition are given in the wiki section of the project's GitHub page and in the corresponding papers.
To avoid such local optima, it is recommended to run the algorithm several times on the same data and to pick the result with the highest value of the fitness function.
###Code
Trainer.Parameters_grid(('alpha', 'a_proc', 'r_crossover', 'r_param_mutation', 'r_mutation', 'mut_chance', 'pop_size', 'eq_len', 'max_factors'),
                        ((2, 3, 2), 0.2, 0.6, 0.8, 0.5, 0.8, 20, 6, 2))
Trainer.Train(epochs = 50)
###Output Using parameters from grid
Achieved best fitness: 0.0006686093848979459 with alpha = 2.0
Discovered equation:
- { du/dx1 : {'power': 1.0}} = 0
Achieved best fitness: 0.0006686093848979459 with alpha = 3.0
Discovered equation:
- { du/dx1 : {'power': 1.0}} = 0
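###Markdown A quick way to read the numbers above (an added note): in the token names, dx1 is the time axis, so the best structures found at moderate $\alpha$ correspond to $$\frac{\partial^2 u}{\partial t^2} \approx 1.094\left(\frac{\partial^2 u}{\partial x_1^2} + \frac{\partial^2 u}{\partial x_2^2}\right),$$ i.e. a wave speed of $\sqrt{1.094} \approx 1.046$ instead of the true value of 1 — roughly a 9% error in the coefficient, or about 4.6% in the speed. At $\alpha = 2$ and $\alpha = 3$, by contrast, the regularization is so strong that everything except a single first-derivative term is zeroed out, leaving the degenerate equation $\partial u / \partial t = 0$.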
Atividades/Regression/Ex-Regression.ipynb
###Markdown Loading Data ###Code # https://developers.google.com/machine-learning/crash-course/california-housing-data-description dataset_path = "../datasets/housing/housing.csv" housing = pd.read_csv(dataset_path) housing.head() ###Output _____no_output_____ ###Markdown Prepare the data for ML Shuffle and Split dataset into training & test ###Code from sklearn.model_selection import StratifiedShuffleSplit housing["income_cat"] = np.ceil(housing["median_income"] / 1.5) housing["income_cat"].where(housing["income_cat"] < 5, 5.0, inplace=True) split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42) for train_index, test_index in split.split(housing, housing["income_cat"]): strat_train_set = housing.loc[train_index] strat_test_set = housing.loc[test_index] for set_ in (strat_train_set, strat_test_set): set_.drop("income_cat", axis=1, inplace=True) print(len(strat_train_set), "train +", len(strat_test_set), "test") ###Output 16512 train + 4128 test ###Markdown Create Train set and Labels ###Code # train housing = strat_train_set.drop("median_house_value", axis=1) # drop labels for training set housing_labels = strat_train_set["median_house_value"].copy() ###Output _____no_output_____ ###Markdown Create pipelines ###Code from sklearn.pipeline import Pipeline from sklearn.preprocessing import StandardScaler from sklearn.preprocessing import Imputer from combined_attributes import CombinedAttributesAdder num_pipeline = Pipeline([ ('imputer', Imputer(strategy="median")), ('attribs_adder', CombinedAttributesAdder()), ('std_scaler', StandardScaler()), ]) from future_encoders import ColumnTransformer from future_encoders import OneHotEncoder num_attribs = list(housing.drop('ocean_proximity', axis=1)) cat_attribs = ["ocean_proximity"] full_pipeline = ColumnTransformer([ ("num", num_pipeline, num_attribs), ("cat", OneHotEncoder(), cat_attribs), ]) ###Output _____no_output_____ ###Markdown Prepare data ###Code # prepared train housing_prepared = full_pipeline.fit_transform(housing) housing_prepared.shape housing_labels.shape housing_prepared from sklearn.linear_model import LinearRegression lin_reg = LinearRegression() lin_reg.fit(housing_prepared, housing_labels) some_data = housing.iloc[:5] some_labels = housing_labels.iloc[:5] some_data_prepared = full_pipeline.transform(some_data) pred = lin_reg.predict(some_data_prepared) print("Predictions: ", pred) print("Labels: ", list(some_labels)) from sklearn.metrics import mean_squared_error housing_predictions = lin_reg.predict(housing_prepared) lin_mse = mean_squared_error(housing_labels, housing_predictions) lin_rmse = np.sqrt(lin_mse) lin_rmse from sklearn.metrics import mean_absolute_error lin_mae = mean_absolute_error(housing_labels, housing_predictions) lin_mae from sklearn.tree import DecisionTreeRegressor tree_reg = DecisionTreeRegressor(random_state = 42) tree_reg.fit(housing_prepared, housing_labels) housing_predictions = tree_reg.predict(housing_prepared) tree_mse = mean_squared_error(housing_labels, housing_predictions) tree_rmse = np.sqrt(tree_mse) tree_rmse def displayscores(scores): print("Score: ", scores) print("Mean: ", scores.mean()) print("Standard: ", scores.std()) from sklearn.model_selection import cross_val_score lin_scores = cross_val_score(lin_reg, housing_prepared, housing_labels, scoring="neg_mean_squared_error", cv=10) lin_rmse_scores = np.sqrt(-lin_scores) displayscores(lin_rmse_scores) tree_scores = cross_val_score(tree_reg, housing_prepared, housing_labels, scoring="neg_mean_squared_error", cv=10) 
tree_rmse_scores = np.sqrt(-tree_scores) displayscores(tree_rmse_scores) from sklearn.ensemble import RandomForestRegressor forest_reg = RandomForestRegressor(random_state=42) forest_reg.fit(housing_prepared, housing_labels) housing_predictions = forest_reg.predict(housing_prepared) forest_mse = mean_squared_error(housing_labels, housing_predictions) forest_rmse = np.sqrt(forest_mse) forest_rmse forest_scores = cross_val_score(forest_reg, housing_prepared, housing_labels, scoring="neg_mean_squared_error", cv=10) forest_rmse_scores = np.sqrt(-forest_scores) displayscores(forest_rmse_scores) from sklearn.model_selection import GridSearchCV param_grid = [ {'n_estimators': [3, 10, 30], 'max_features': [2, 4, 6, 8]}, {'bootstrap': [False], 'n_estimators': [3, 10, 30], 'max_features': [2, 4, 6, 8]} ] forest_reg = RandomForestRegressor(random_state=42) grid_search = GridSearchCV(forest_reg, param_grid, cv=5, scoring="neg_mean_squared_error", return_train_score=True) grid_search.fit(housing_prepared, housing_labels) grid_search.best_params_ grid_search.best_estimator_ cvres = grid_search.cv_results_ for mean_score, params in zip(cvres['mean_test_score'], cvres['params']): print(np.sqrt(-mean_score), params) X_test = strat_test_set.drop("median_house_value", axis=1) y_test = strat_test_set["median_house_value"].copy() X_test_prepared = full_pipeline.transform(X_test) final_model = grid_search.best_estimator_ final_predictions = final_model.predict(X_test_prepared) final_predictions final_mse = mean_squared_error(y_test, final_predictions) final_rmse = np.sqrt(final_mse) final_rmse ###Output _____no_output_____
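###Markdown As an optional follow-up (not part of the original exercise), one way to gauge how precise this test-set RMSE estimate is would be a 95% confidence interval computed from the squared errors, for instance with `scipy.stats`:
###Code
# Hedged sketch: 95% confidence interval for the generalization RMSE
from scipy import stats

confidence = 0.95
squared_errors = (final_predictions - y_test) ** 2
mean_se = squared_errors.mean()
ci_low, ci_high = np.sqrt(stats.t.interval(confidence, len(squared_errors) - 1,
                                           loc=mean_se,
                                           scale=stats.sem(squared_errors)))
print(ci_low, ci_high)
###Output _____no_output_____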
workshop/notebooks/02a_image_manipulation_nibabel.ipynb
###Markdown Using Python for neuroimaging dataThe primary goal of this section is to become familiar with loading, modifying, saving, and visualizing neuroimages in Python. A secondary goal is to develop a conceptual understanding of the data structures involved, to facilitate diagnosing problems in data or analysis pipelines.To these ends, we'll be exploring two libraries: [nibabel](http://nipy.org/nibabel/) and [nilearn](https://nilearn.github.io/). Each of these projects has excellent documentation. While this should get you started, it is well worth your time to look through these sites.This notebook only covers nibabel, see the notebook [`02b_image_manipulation_nilearn.ipynb`](02b_image_manipulation_nilearn.ipynb) for more information about nilearn. NibabelNibabel is a low-level Python library that gives access to a variety of imaging formats, with a particular focus on providing a common interface to the various **volumetric** formats produced by scanners and used in common neuroimaging toolkits. - NIfTI-1 - NIfTI-2 - SPM Analyze - FreeSurfer .mgh/.mgz files - Philips PAR/REC - Siemens ECAT - DICOM (limited support)It also supports **surface** file formats - GIFTI - FreeSurfer surfaces, labels and annotations**Connectivity** - CIFTI-2**Tractography** - TrackViz .trk filesAnd a number of related formats.**Note:** Almost all of these can be loaded through the `nibabel.load` interface. Setup ###Code # Image settings from nilearn import plotting import pylab as plt %matplotlib inline import numpy as np import nibabel as nb ###Output _____no_output_____ ###Markdown Loading and inspecting images in `nibabel` ###Code # Load a functional image of subject 01 img = nb.load('/data/ds000114/sub-01/ses-test/func/sub-01_ses-test_task-fingerfootlips_bold.nii.gz') # Let's look at the header of this file print(img) ###Output _____no_output_____ ###Markdown This data-affine-header structure is common to volumetric formats in nibabel, though the details of the header will vary from format to format. Access specific parametersIf you're interested in specific parameters, you can access them very easily, as the following examples show. ###Code data = img.get_fdata() data.shape affine = img.affine affine header = img.header['pixdim'] header ###Output _____no_output_____ ###Markdown Note that in the `'pixdim'` above contains the voxel resolution (`4., 4., 3.999`), as well as the TR (`2.5`). AsideWhy not just `img.data`? Working with neuroimages can use a lot of memory, so nibabel works hard to be memory efficient. If it can read some data while leaving the rest on disk, it will. `img.get_fdata()` reflects that it's doing some work behind the scenes. Quirk - `img.get_fdata_dtype()` shows the type of the data on disk - `img.get_fdata().dtype` shows the type of the data that you're working withThese are not always the same, and not being clear on this [has caused problems](https://github.com/nipy/nibabel/issues/490). Further, modifying one does not update the other. This is especially important to keep in mind later when saving files. ###Code print((data.dtype, img.get_data_dtype())) ###Output _____no_output_____ ###Markdown DataThe data is a simple numpy array. It has a shape, it can be sliced and generally manipulated as you would any array. ###Code plt.imshow(data[:, :, data.shape[2] // 2, 0].T, cmap='Greys_r') print(data.shape) ###Output _____no_output_____ ###Markdown Exercise 1:Load the T1 data from subject 1. Plot the image using the same volume indexing as before. Also, print the shape of the data. 
###Code # Work on solution here t1 = nb.load('/data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz') data = t1.get_fdata() plt.imshow(data[:, :, data.shape[2] // 2].T, cmap='Greys_r') print(data.shape) ###Output _____no_output_____ ###Markdown `img.orthoview()`Nibabel has its own viewer, which can be accessed through **`img.orthoview()`**. This viewer scales voxels to reflect their size, and labels orientations.**Warning:** `img.orthoview()` may not work properly on OS X. Sidenote to plotting with `orthoview()`As with other figures, f you initiated `matplotlib` with `%matplotlib inline`, the output figure will be static. If you use `orthoview()` in a normal IPython console, it will create an interactive window, and you can click to select different slices, similar to `mricron`. To get a similar experience in a jupyter notebook, use `%matplotlib notebook`. But don't forget to close figures afterward again or use `%matplotlib inline` again, otherwise, you cannot plot any other figures. ###Code %matplotlib notebook img.orthoview() ###Output _____no_output_____ ###Markdown AffineThe affine is a 4 x 4 numpy array. This describes the transformation from the voxel space (indices [i, j, k]) to the reference space (distance in mm (x, y, z)).It can be used, for instance, to discover the voxel that contains the origin of the image: ###Code x, y, z, _ = np.linalg.pinv(affine).dot(np.array([0, 0, 0, 1])).astype(int) print("Affine:") print(affine) print print("Center: ({:d}, {:d}, {:d})".format(x, y, z)) ###Output _____no_output_____ ###Markdown The affine also encodes the axis orientation and voxel sizes: ###Code nb.aff2axcodes(affine) nb.affines.voxel_sizes(affine) nb.aff2axcodes(affine) nb.affines.voxel_sizes(affine) t1.orthoview() ###Output _____no_output_____ ###Markdown HeaderThe header is a nibabel structure that stores all of the metadata of the image. You can query it directly, if necessary: ###Code t1.header['descrip'] ###Output _____no_output_____ ###Markdown But it also provides interfaces for the more common information, such as `get_zooms`, `get_xyzt_units`, `get_qform`, `get_sform`). ###Code t1.header.get_zooms() t1.header.get_xyzt_units() t1.header.get_qform() t1.header.get_sform() ###Output _____no_output_____ ###Markdown Normally, we're not particularly interested in the header or the affine. But it's important to know they're there. And especially, to remember to copy them when making new images, so that derivatives stay aligned with the original image. `nib-ls`Nibabel comes packaged with a command-line tool to print common metadata about any (volumetric) neuroimaging format nibabel supports. By default, it shows (on-disk) data type, dimensions and voxel sizes. ###Code !nib-ls /data/ds000114/sub-01/ses-test/func/sub-01_ses-test_task-fingerfootlips_bold.nii.gz ###Output _____no_output_____ ###Markdown We can also inspect header fields by name, for instance, `descrip`: ###Code !nib-ls -H descrip /data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz ###Output _____no_output_____ ###Markdown Creating and saving imagesSuppose we want to save space by rescaling our image to a smaller datatype, such as an unsigned byte. To do this, we first need to take the data, change its datatype and save this new data in a new NIfTI image with the same header and affine as the original image. 
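###Markdown Before moving on to saving the modified image, here is a small added check (not part of the original tutorial) that makes the voxel-to-world mapping from the affine section above a bit more concrete, using nibabel's `apply_affine` helper:
###Code
# Map the central voxel of the T1 image to world (mm) coordinates and back again
from nibabel.affines import apply_affine

center_voxel = np.array(t1.shape) // 2
world_mm = apply_affine(t1.affine, center_voxel)                   # voxel indices -> scanner/world mm
back_to_voxel = apply_affine(np.linalg.inv(t1.affine), world_mm)   # and back with the inverse affine
print(world_mm, np.round(back_to_voxel).astype(int))
###Output _____no_output_____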
###Code
# First, we need to load the image and get the data
img = nb.load('/data/ds000114/sub-01/ses-test/func/sub-01_ses-test_task-fingerfootlips_bold.nii.gz')
data = img.get_fdata()

# Now we force the values to be between 0 and 255
# and change the datatype to unsigned 8-bit
rescaled = ((data - data.min()) * 255. / (data.max() - data.min())).astype(np.uint8)

# Now we can save the changed data into a new NIfTI file
new_img = nb.Nifti1Image(rescaled, affine=img.affine, header=img.header)
nb.save(new_img, '/tmp/rescaled_image.nii.gz')
###Output _____no_output_____
###Markdown Let's look at the datatypes of the data array, as well as of the nifti image:
###Code
print((new_img.get_fdata().dtype, new_img.get_data_dtype()))
###Output _____no_output_____
###Markdown That's not optimal. Our data array has the correct type, but the on-disk format is determined by the header, so saving it with `img.header` will not do what we want. Also, let's take a look at the size of the original and new file.
###Code
orig_filename = img.get_filename()
!du -hL /tmp/rescaled_image.nii.gz $orig_filename
###Output _____no_output_____
###Markdown So, let's correct the header issue with the `set_data_dtype()` function:
###Code
img.set_data_dtype(np.uint8)
# Save image again
new_img = nb.Nifti1Image(rescaled, affine=img.affine, header=img.header)
nb.save(new_img, '/tmp/rescaled_image.nii.gz')
print((new_img.get_fdata().dtype, new_img.get_data_dtype()))
###Output _____no_output_____
###Markdown Perfect! Now the data types are correct. And if we look at the size of the image we even see that it got a bit smaller.
###Code
!du -hL /tmp/rescaled_image.nii.gz
###Output _____no_output_____
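###Markdown As a final added sanity check (not part of the original tutorial), the rescaled file can be reloaded to confirm that both the on-disk datatype and the value range are what we intended:
###Code
check_img = nb.load('/tmp/rescaled_image.nii.gz')
print(check_img.get_data_dtype())                                 # should report uint8
print(check_img.get_fdata().min(), check_img.get_fdata().max())   # values forced into 0..255
###Output _____no_output_____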
ExerciciosDeDecisao - Feito.ipynb
###Markdown Write a program that asks for a value and shows on the screen whether the value is positive or negative.
###Code
num1 = input("Enter a value ")
if int(num1) > 0:
    print("The number {0} is positive".format(num1))
else :
    print("The number {0} is negative".format(num1))
###Output _____no_output_____
###Markdown Write a program that checks whether a typed letter is "F" or "M". According to the letter, print: F - Female, M - Male, or Invalid sex.
###Code
letra = input("Enter the sex: F-Female M-Male ").upper()
if letra =="F":
    print("The sex is Female")
elif letra == "M":
    print("The sex is Male")
else :
    print("The sex is invalid")
###Output _____no_output_____
###Markdown Write a program that checks whether a typed letter is a vowel or a consonant.
###Code
vogal = ('A','E','I','O','U')
print(vogal)
letra = input("Enter a letter ").upper()
if len(letra) != 1:
    print("You typed a word")
else:
    if letra in vogal:
        print("The letter is a vowel")
    else:
        print("The letter is a consonant")
###Output _____no_output_____
###Markdown Write a program that reads a student's two partial grades. The program must compute the student's average and show:
* The message "Approved" if the average is greater than or equal to seven;
* The message "Failed" if the average is below seven;
* The message "Approved with Distinction" if the average is equal to ten.
###Code
var1 = input("Enter your grade for test 1 ")
var2 = input("Enter your grade for test 2 ")
media = (float(var1) + float(var2)) / 2
if media >= 7 and media < 10:
    print("You were Approved")
elif media == 10:
    print("You were Approved with Distinction, congratulations on the top grade")
elif media < 7:
    print("You Failed")
else:
    print("You typed some number wrong: your average was {0}, the numbers entered were {1} {2}".format(media,var1,var2))
###Output _____no_output_____
###Markdown Write a program that reads three numbers and shows the largest of them.
###Code
var1 = float(input("Enter a number "))
var2 = float(input("Enter any value "))
var3 = float(input("Enter any value "))
if (var1 > var2) and (var1 > var3):
    print("The value {0} is the largest number".format(var1))
elif var2 > var3:
    print("The value {0} is the largest number".format(var2))
else:
    print("The value {0} is the largest number".format(var3))
###Output _____no_output_____
###Markdown Write a program that reads three numbers and shows the largest and the smallest of them.
###Code
var1 = float(input("Enter a number "))
var2 = float(input("Enter any value "))
var3 = float(input("Enter any value "))
if (var1 > var2) and (var1 > var3):
    print("The value {0} is the largest number".format(var1))
    if var2 > var3:
        print("The value {0} is the smallest number".format(var3))
    else:
        print("The value {0} is the smallest number".format(var2))
elif var2 > var3:
    print("The value {0} is the largest number".format(var2))
    if var1 > var3:
        print("The value {0} is the smallest number".format(var3))
    else:
        print("The value {0} is the smallest number".format(var1))
else:
    print("The value {0} is the largest number".format(var3))
    if var1 < var2:
        print("The value {0} is the smallest number".format(var1))
    else:
        print("The value {0} is the smallest number".format(var2))
###Output _____no_output_____
###Markdown Write a program that asks for the price of three products and tells which product you should buy, knowing that the decision is always for the cheapest one.
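###Markdown The next cell solves this with chained if/elif comparisons; as an added aside (not part of the original exercise set), a more compact alternative would use Python's built-in `min()` after converting the inputs to numbers:
###Code
# Hedged alternative sketch for the "cheapest product" exercise
precos = [float(input("Enter the price of product {0} ".format(i + 1))) for i in range(3)]
print("The value {0} is the cheapest".format(min(precos)))
###Output _____no_output_____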
###Code var1 = input("Informe o valor do produto ") var2 = input("Informe o valor de outro produto ") var3 = input("Informe o valor de outro produto ") if (var1 < var2) and (var1 < var3): print("O valor {0} é o mais barato".format(var1)) elif var2 < var3: print("O valor {0} é o mais barato".format(var2)) else: print("O valor {0} é o mais barato".format(var3)) ###Output _____no_output_____ ###Markdown Faça um Programa que leia três números e mostre-os em ordem decrescente. ###Code var1 = input("Digite um numero ") var2 = input("Digite qualquer valor ") var3 = input("Digite qualquer valor ") if (var1 > var2) and (var1 > var3): print("O valor {0} é o maior".format(var1)) if var2 > var3: print("O valor {0} é o medio".format(var2)) print("O valor {0} é o menor".format(var3)) else: print("O valor {0} é o medio".format(var3)) print("O valor {0} é o menor".format(var2)) elif var2 > var3: print("O valor {0} é o maior".format(var2)) if var3 > var1: print("O valor {0} é o medio".format(var3)) print("O valor {0} é o menor".format(var1)) else: print("O valor {0} é o medio".format(var1)) print("O valor {0} é o menor".format(var3)) else: print("O valor {0} é o maior".format(var3)) if var2 > var1: print("O valor {0} é o medio".format(var2)) print("O valor {0} é o menor".format(var1)) else: print("O valor {0} é o medio".format(var1)) print("O valor {0} é o menor".format(var2)) ###Output _____no_output_____ ###Markdown Faça um Programa que pergunte em que turno você estuda. Peça para digitar M-matutino ou V-Vespertino ou N- Noturno. Imprima a mensagem "Bom Dia!", "Boa Tarde!" ou "Boa Noite!" ou "Valor Inválido!", conforme o caso. ###Code letra = input("Digite o periodo da sua turma M-matutino V-vespertino ou N-noturno ").upper() if letra =="M": print("Bom Dia!") elif letra == "V": print("Boa Tarde!") elif letra == "N": print("Boa Noite") else : print("Valor invalido") ###Output _____no_output_____ ###Markdown As Organizações Tabajara resolveram dar um aumento de salário aos seus colaboradores e lhe contraram para desenvolver o programa que calculará os reajustes. Faça um programa que recebe o salário de um colaborador e o reajuste segundo o seguinte critério, baseado no salário atual: * Salários até R$ 280,00 (incluindo) : aumento de 20%; * Salários entre R$ 280,00 e R$ 700,00 : aumento de 15%; * Salários entre R$ 700,00 e R$ 1500,00 : aumento de 10%; * Salários de R$ 1500,00 em diante : aumento de 5% Após o aumento ser realizado, informe na tela: * O salário antes do reajuste; * O percentual de aumento aplicado; * O valor do aumento; * O novo salário, após o aumento. 
###Code sal = float(input("Digite seu salario, obs utilize ponto para marca as casas apos a virgula ")) if 280.00 >= sal : aum = sal * 0.20 print("O seu salario era {0} o percentual aplicado é 20% o valor do aumento referente ao seu salario é {1}, o novo salario é {2}".format(sal, aum, sal + aum)) elif 280.00 > sal == 700.00 or sal < 700.00: aum = sal * 0,15 print("O seu salario era {0} o percentual aplicado é 15% o valor do aumento referente ao seu salario é {1}, o novo salario é {2}".format(sal, aum, sal + aum)) elif 700.00 > sal == 1500.00 or sal < 1500.00: aum = sal * 0.10 print("O seu salario era {0} o percentual aplicado é 10% o valor do aumento referente ao seu salario é {1}, o novo salario é {2}".format(sal, aum, sal + aum)) else : aum = sal * 0.05 print("O seu salario era {0} o percentual aplicado é 5% o valor do aumento referente ao seu salario é {1}, o novo salario é {2}".format(sal, aum, sal + aum)) ###Output _____no_output_____ ###Markdown Faça um programa para o cálculo de uma folha de pagamento, sabendo que os descontos são do Imposto de Renda, que depende do salário bruto (conforme tabela abaixo) e 3% para o Sindicato e que o FGTS corresponde a 11% do Salário Bruto, mas não é descontado (é a empresa que deposita). O Salário Líquido corresponde ao Salário Bruto menos os descontos. O programa deverá pedir ao usuário o valor da sua hora e a quantidade de horas trabalhadas no mês. Desconto do IR: * Salário Bruto até 900 (inclusive) - isento * Salário Bruto até 1500 (inclusive) - desconto de 5% * Salário Bruto até 2500 (inclusive) - desconto de 10% * Salário Bruto acima de 2500 - desconto de 20% Imprima na tela as informações, dispostas conforme o exemplo abaixo. No exemplo o valor da hora é 5 e a quantidade de hora é 220. ![image.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAzEAAACCCAYAAAB2IEbGAAAgAElEQVR4Ae2dC7LqKhBF35wc1XUkKScSB2JlEqfKMTgDXkECNND54ye6btUtPcYQ2KzG3kD0P8M/FEABFEABFEABFEABFEABFDiQAv8dqK5UFQVQAAVQAAVQAAVQAAVQAAUMJgYIUAAFUAAFUAAFUAAFUAAFDqUAJuZQ3UVlUQAFUAAFUAAFUAAFUAAFMDEwgAIogAIogAIogAIogAIocCgFMDGH6i4qiwIogAIogAIogAIogAIogImBARRAARRAARRAARRAARRAgUMpgIk5VHdRWRRAARRAARRAARRAARRAAUwMDKAACqAACqAACqAACqAAChxKAUzMobqLyqIACqAACqAACqAACqAACmBiYAAFUAAFUAAFUAAFUAAFUOBQCmBiDtVdVBYFUAAFUAAFUAAFUAAFUKC6iXk8Hob/aAADMAADMAADMAADMAADv8PAq20VJgbThemEARiAARiAARiAARiAgV0MYGIAaBdAzHj8zowHfU1fwwAMwAAMwAAMfAoDmBhMTDAx9+vZNDeC81OCk3rAIgzAAAzAAAzAAAzoDGBijmRibo05nU79/0tr2ktr7kvqH85rTDfyfmtgTpcuGJrFAXNrzPl6X3+eWo/ONL59/vHfwjaq5enQL25bzTJvTWoQQ5/0/bnaPOblLanrX2vOY7pOHXs8jOPDnZszdDftv/y1D9J9iS5veY/VbYhl3ydq/N2Xx3lohy/7bNq/si98X+pxG2NQO95dhjqvjEt/zdNJYUWwtzoOQpvLdr4lzqlPpc8C+hN+YQAG5hnAxBzlQ8d+0IvEwSUF4u8lsHcXJYH4tPb/taaRpqiaSepMoyaJ80GyRNvp9yiJ/hYTYvvqrzOdTUz9+f7v2X7M6mBNVNBj6pi9ZmvO/r3yub9mtT56RV980jVSJm1Mp0l8NBR28kIzFRp3vpz7tSlNjO+/PM6Gvuwu0fjI5+46sp/lc8/B2KO/pj0un7v3S/bk80/qJ+qiccZrcAEDMPBuBjAxYx+8n/Z68eGfBk+c6TyZ04i50U1MmiglSZRfLbAJrHzutInnqcmVra+fYT7FxGgW+Dy58sm6vaasg3z+GOoyJNpei1Av/95Qn5NI4K2OsS02WUw0kG0d0XVJm4IJ8FzJdvnXFj2mdVVntpeW441J8f48uW6TFbySo840W7Uprp1yPavtp5w/MBaYW1SvVGeb4EsDX5iIRWVG/VQT48vIrtXr3JlWTiBk7+muclV0+QrR/TrBz19rWrmFdXNcxHYfhhnfFzyyegMDMAADmxjAxBwIHJ+cuy1ledJ462KiOTJLWiaf9oP/brqb3w6mzYQOSfNwvSIxyhIdnww1ybYRW8bCVaDE/GiGbKw+WUJYaJAdD/1u2yxNVv631Si95tokyc+MJ+dlxqo0TtNJmU1wzzu2cal1GjTJj+XcFAw8NM2m659oEfriwOdUMDF2q5bkoNR5nT6T52txW7wmY6Y0LTkXY32avy+pV25aijqsa/NYHXgdHWEABmDg+xjAxBw1gbJJk5xJz5JibUY4Tyb6gLYJqNybn5sNmcgoAaAlHXliYjXWXtO0z8uz7UoM21h9steXmpj8erZO2mtaXRe+lien5UBq+yDXXdF6uJ41Ga5/izaOnyOvac9P2BHt0I7l3CRJ6HDu3lUDWb/feT6Y42GFsIzZ9Lg0OEs00vopnKcxXrwmYwoTE7QT8cJr
y8YcdEInGICBZzCAiTnsB5JMKrLtPEUy0gdPnoxaoOZm3d0qhDRLuV7atTTDor2Wl2X/VspL6y0TKzkoZK8XCX523F9buZ5Whz3BN29iHiZto2xX3efuxuyR/hw7NrkdaNARE7OlnySTWQzb1b9F5n38uqtNzIPtZHvinHPHWUQbtIEBGHgGA5gYn8x++GOYfQ/1FAlQdr+MTUbLWV09UU6ST7eVK18RENcJ1xbBqJkAm4BlifLiJL0ob2kyJ9/Xz2CnGsg6ye1P0gz27SqTv9rbyWx5Ygub1T1JWIW+muabXutX3KImst1Tx3pjGe9bkDr6eko9/Ws/9lhhO5m75yvETboKc5K8LOz/kmPRJ0Wc9cfkfS/F+bc2flGAcr4zwdq20eS+l5yfu2mv8VsRF48TCzV4xocmZQqO6IdN+/hhCIZgoA4DmJiDDMI2oTgn275EEvywBiVuCTtfGndTfb/9pE9Qw1czD1tXwtaU5B6UxjS2nHD/yzl+pbM7T1wz276W36fjtiWJG+lj8jwFbp642TbFa+ZlymN2QIganE177b+OOrQzOZ7eexC+MMDXNySSvq77TIxd2Slu7Pf32eS6PovHpJ+zr8mdOubqIxmK/REHYWkgvWY/9rjaxEhN48SBYzjhb722k3Gi9HUSmzKuk3rY/pTxGevcc9C3JykrsCzbWvIj66uf/2MsBd1odxxj0AItYAAGSgYwMXxgMJP0dAZsEpcnfWUwHnaAKrbufVHbns7GnFbrTcxbOHLm6IsZfzsHc5xw/C3cwwX5Awy8lQFMDAC+FcCf+eBZek/Q4Xj8coN2uP54UzJrV3CKlZs31YU+Y0yHARiAgZ9gABMD6D8B+s+YJXiGZxiAARiAARiAgR9gABPzA51MAs+MMAzAAAzAAAzAAAzAwDcxgInBxDBbAQMwAAMwAAMwAAMwAAOHYgATA7CHAvabZhBoCzNiMAADMAADMAADMLCNAUwMJgYTAwMwAAMwAAMwAAMwAAOHYgATA7CHApbZim2zFeiGbjAAAzAAAzAAA9/EACYGE4OJgQEYgAEYgAEYgAEYgIFDMYCJAdhDAftNMwi0hRkxGIABGIABGIABGNjGACbmoCamu6z4dWz7a9pbfojO/Qr3yZxO8f/5ejePh/2Bw/iaO76lfKv9X2saV6YHOCv7X2vuoY/EMeV63eVs2j9fDo8Mip/MgGDZx5fCtIu1i4yB57apu8i4zsYY+4OWvq6nk2luy+tyv56Hc7MyhzHgPJS7pkz4Xq4/WqEVDMDANzKAiQkJ8oEA32BKdiX4hdGwWnWmEUmXTVK2JCBlve6mHUvaRD1UE7dBl28Matp0lFiei6HONMI09BMIz22bGld+jLw1m2LcTlSESRT53JVrzZw3NvL5c9tJjKAvDMAADByfAUyM/4A+0OMmw3BrzOYkSJiHGPRpAlauqCwJjs40yUqLPWfCxMgVIGGgYp1IgqIWS/TnPdX0GlYp1sXYdAyVBv/5/fUME3O/tqYT42tyjb/WtHJFZ6tREuVX61PKZKsxDMAADHw0A5iYwwG6MVG3M6CFYViYFC0wMXYbyuqVGNVY2faJLS1qneMMdZ40bqrH4RhY2G+063WDbwUTk7N7vzYv3x75jO1kiWl5PEzSrty0qGMNvGPMYAAGYAAGSgYwMYdL9GwC77dfDB1qDYrYdtLvW8/vD9FWPUog1CBRE4toJOz1cjOhlpNpbVeUZs+TRifsyc/bFtuxaZUqq9eSuvOeqDlabNViLobS46snCfZyXWz9ku1cPpmCiZG68ZzxAgZgAAZqMYCJ2ftB//LzFROzqA5PMDFhS9e2sheZGMVA2fN6o5aZOTfLu+3enFoBRTkMzssZkNvJ8hhS/g7x9iqNZf3Ka+bmZKzdbCcrtRvTitfRCgZgAAaWM4CJWWQAlgv6fPiWz4AmdXnydrKHXSVZm2TJVZahH5xBEeXkW2ySe2byrSiPhynf/0l9R10SJg8XexP9V2E7WRpD6SrM6TS++lhNU9sGsX0znWSw9RF1UMaTfitaObFg75eL973kxuhu2msXtv0tNUbV2vxNDNKWwBF8TIxVcAInX8QAJuaAnWmTi9VbSxTDMDvQK9vU+u1f8r6VmLS4JEYYkNny7TeciaTJvz+utJxUYxSPx2v35240eAdkwGvF44d8WK82MUtjSI+Rp/X70A630lnEpjRWwtC4+Onbo28PlW3Nz7P3yPiV1W3bUp+mBeMCyR4MwAAMfDQDmJgjAjq5V11P6t7xTUdLkouq9dqgy5I68h6dKXR5hS4vNjFbx0M34ZFPKrxCH65BHMIADMDArzKAidn6of3m81Ztu/jk5F6552VrMFY1RG/u360acB4fZm9hwK7grFqFpZ/e0k+Max89qwwTjAswsI4BTAyDOoM6DMAADMAADMAADMAADByKAUwMwB4KWGYp1s1SoBd6wQAMwAAMwAAMfCMDmBhMDCYGBmAABmAABmAABmAABg7FACYGYA8F7DfOJNAmZshgAAZgAAZgAAZgYB0DmBhMDCYGBmAABmAABmAABmAABg7FACYGYA8FLLMU62Yp0Au9YAAGYAAGYAAGvpEBTAwmBhMDAzAAAzAAAzAAAzAAA4diABMDsIcC9htnEmgTM2QwAAMwAAMwAAMwsI4BTMxBTcxLfuzS/Qr3yZxO8f/5ejcP/7r/cTv7Q3fDe9xxp2lnmnBeY9prY9o/D+fdtP98mWd3rLn5Y8sfV2nw8PXRf1X8fj0PbciP+/NOJrYt1pEf2IxaMPiu0ULGwBALPp6SMelu2ktr7slra65TvtezrvH88LGs1uVhustQ13/r6uSveTrl8fWI48npZLaMA3BX9jGaoAkMwMAvMICJqZgcvAwYayJGkoyxOuxKtv9a01jzIrVyyc5ZGJPONKFONkETx5zpiX/bRCgmK71JiH8vHHhWauDbrxofWZZ8btt7a4a6jiST+fulRjxPmUGPTA8ZMw9jE/00DqKBtpMEqulYq6nnVYtpa/SdOUnrFeL+1sQ6yOdzdfDXtO+Tz915dqzwxkY+XzgOzF2b4xlz6Bp4hg3YgIHDM4CJOSDEZbKz4INpTdKRa6IlPK68dkh67PVl4jOdjHhDsefDZJMGDzuT7BOmqNn92ppOtDl9T0wk9SRyuq172si5sY8+XothBUNnZKwdMmb6BF9OFtSIk1HdtJgOMZDVa3i9u8rVlxFTH8qIbZ6Mr7/WtHIVNkwaxPNH26Bci/eiGwzAAAz8DgOYmMN9EG5Mmu0M6MotIGEg0BKewRRFM5ElPvZ6YTtZXIXpy4zGwM4wp7PPS4JvowYjJiY1LXZGXG59G+oj2pPXN11ZWlJ/3hPYOlz8jfRdBROTc6RyWEsvLaZD2Vksu9dL05LHzVif5u9L2pWblsl6jWgf6s3xsT7gddiAARj4RgYwMYf7ALQGIFtNEAm2vzfldFKMwxNMjFuBmdqC4vRV6hx0t4Ykr+vcYDNV3vS5eUJlgzp/TSZZ1qT1mmaah/pr24Cm6/CNAwlt2tLnqZkvV3HS47l53qX5pFnAxOz
SVowNlLMlLjgHbmAABpYxgIk53AfO1gTe73dfBkYSQFrCI7enudlULfGJ15LGICnb6p/Pxs72yVYNSsNi6zK53WWoy9QNzXE1Kra3aONsmzj39zSTMZPHp/J3uOesAitaTAdGZb3itdhOFrX4PVZpO30OAzDweQxgYsIH9+d1jh4wG7dSPWk7WV9Hu9XkHL9sQLlWXO0oDcikwVH7Z6MGyqqLq3+yL19L4OJrWl3zbUB6vx2FL+q5qf8qbCdz3wwWjEq6ClOurO7spw0m5nFr4xd5KOf3Rl9ZsZyMr7tpr124uTSOEzvbp44blLmJbbQMfKIfMQQDn8UAJuaAA/SmmX+5crK0zco2NbflRb7uky6bxPnnNsH557dh9V/JGrfK2Bnms7hf5hTPW1ovt3qSf5PTdGCFlZRwn45MtqwpEl/5HL4KOpYZzi+25G03VAyGUd/Da7HaxEjmIouOMx9HLh7yFZmdmsnYnfxadO1rn6WxinXu+65vT4xzWU/Z1nLraNyyWekb2FaMI4fnjrZiMGAABn6YAUzMETvfJiJJoiMTBv35U7/p6B0abtDgKQnLp9TjHX3ANV/w4VnZxDyrz5w5yo2NPhY9JQ6f1S7KfQHjcEJMwAAMbGMAE3PQD6lV2y6+NNFepcGT+vnrzOGTdGKA3jZAH0Y3uRILQyT+MAADMAADL2AAE/MCkQ+TiKAFgw4MwAAMwAAMwAAMwMABGMDEHKCTMEFfPosNg3xYwAAMwAAMwAAMwMAqBjAxALMKGAwVhgoGYAAGYAAGYAAGYODdDGBiMDGYGBiAARiAARiAARiAARg4FAOYGIA9FLDvdv1cn5knGIABGIABGIABGHg/A5gYTAwmBgZgAAZgAAZgAAZgAAYOxQAmBmAPBSwzH++f+aAP6AMYgAEYgAEYgIF3M4CJwcRgYmAABmAABmAABmAABmDgUAxgYo4CrP0xudOp+H++3gfgOtOE443pbo1pbg/zmD3vYe7Xcyj3fG1Nc+lWQ1z+8KSvj/Yr3nfT/rNtOZv2L5/J8OedTGxbfA8/Lhm1ePcMCNff0xc+BkRMq3F3N+2lNfeK45SPdy2+/DE71pTHfWxqMT2tRSxXOdf+GO8wdrkxq2JbYXS6X9AHfWAABo7MACbmQB+Y92uTJv1/rWmcibEJkTAEQ1LgTUyfjNj3DAnErYkJSvZL293lZE5qMjUR6PZ62TnebJTmpjdNtm5Fe2xfePP1GEnelGsdOQCp+wRXB4rNbf3YJRMGNtFPk3hvGnqjU5qKDdr5+AljhyjDHxt0t2OBrM9UTE+2X5Yrn7vriHHJxrwfo76+74XutHX1pNkkb+iJnjDwMwxgYg4Ee0j6pQmx9Q+J/9QH40iCkJe1QY8y+Yr10EyM/wAK7UmuGRM3PWkbaUdSRry+vxaPaPJUBoYVT53ZMe1TE/PIjIU3DU+pd3Yt9Roj48pUTGvl3K+t6UR8Juf/taa1K8b++Mg1w3H/Ph6jZmiBFjAAAz/KACbmQB0ft2SkWz2mTET88B9P/t3qi9+Klq2oxPNFopFoNl6uPTdJWJLzRlZi/HvsjO3IFpN8hni+jmN153W0q8RABROTc62b/Er1nTUx43E9FdMaT/n7k3blpmW2XpXa78cZHkn+YAAGYOCwDGBiDgRv+PDPVk+kiZFG5/RP7qUfT0pk4mHPXz2bfFL2uQ+65glMeq1se9xD3p8zXqZsryyP5yR4x2IgrjpO34PSbyeTW7t2t3PSLNixIt1KJq83FdPyff55/v4wjtkxAhNz2OTB9y+PjLswAAPvYgATc0QTk9c5TwQGM5AmPctMzOORbXHJr1X8bROxccORJzAS9CSZycoNq0OJEesHCkwMA6bk6LjPZax1pklYV/5evUo6wcmoiemNVTp2pOVMxbTWF2wnS/XTNOI1NIIBGICB9QxgYrLk+ZMhGk/684Qn3jwf26ObmHwLS74vP54/Bpderj9vKuGZbM+QsGnvKep8oD70uvA4xtNBX6+wncx9k2AwKukqjf5Nfju00kyM28KZfkFI/8Uh6XXGYrqfeFAmNJL7XqRxs+XeTXuN34Y4VjbxkvYBeqAHDMAADDxe7WHMf7Wv+DOdOCRJ/dcsi0TDJ/DJ8ewbxrJjcruYvXn47L7u2H/Vq5KE+GuMPGorI2Elxd9rI1Zr7PvTr4su2xPOT2anh6SHbzBiC8oIi28bD1abmH7LVh8HMeYc98HIWN7LCYpdbRT3mvkY9ONBGZfpvXchJpWYdmbkX/r+WE/Z1jLW5XV9XeK5JCpoAQMwAAMwoDNQ21PMlYeJ+bTkq0Z9iq9N1WGrEoSvvFYNbSgDw7WLgcomZlddJuLamaNoxqrE+rPqSrnEJAzAAAx8BQNzpqP2cUzMlwbOq7aBPPVrZ7+0b0goJ5Jv+rzOB5ldiUpWkNCcuIMBGIABGHguA7VNylx5mBiSpjpJEzqiIwzAAAzAAAzAAAz8LANzpqP2cUwMwfazwcaMzHNnZNAXfWEABmAABmDgdxiobVLmysPEYGIwMTAAAzAAAzAAAzAAAzCwi4E501H7OCYGYHcBywzL78yw0Nf0NQzAAAzAAAzAwBgDtU3KXHmYGEwMJgYGYAAGYAAGYAAGYAAGdjEwZzpqH8fEAOwuYMfcOK8zUwMDMAADMAADMAADv8NAbZMyVx4mBhODiYEBGIABGIABGIABGICBXQzMmY7axzExALsLWGZYfmeGhb6mr2EABmAABmAABsYYqG1S5srDxGwyMXfT/juZ0yn//+xfyI7XPV/vm81Hd7H1rl3XzjRWD35gb3O/jA0KvP6KD4wYWyGuVZbvpr205r5p3NDbcb+e3VgyHtNDbNn4+ieubX/QUoxBzU0vX+PHX1MdB/5acx7KXVOmdh1eW94naIVWMAADR2dgznTUPo6J2ZSM2IRnMAG3xvTJh3httsydidBfa5odJsYGSXepbWLs4NOZRk38PmVg+vT6fYpOv1qPlA+b6KdJvDATp9MQ9zu1sobBxsxYTDtDcTbtn3KdW5PVT3mPNhb5a9pj8rl7rxzH5POFZWvX4zUmNmAABmDgJxiobVLmysPE7A2sYGKyD3mXfPiVGpGEJK8Px+XsqjUCc7OrYwnPVFvkrO2lS02MPHYSdZ0qbzjWr+r07WhuMgm0CZBvv7JCk+jQmCYxVem5zTU1bfKaceVnOOfSRP2krkkbh3pJw5XUJ9dA9snZ1VVNKhfodfRZlsPUf+jv8dWNLF5d30l++wRfThZ0l5wLrYyNr43E9ORkw0YTc7+2phOsJtf4a00rV3Q2XuMwnAgdqPNGdtHwJ5JT4oP4WMLAnOmofRwTs3cAVk2MTXrlSkf+99RKzN10N79VbGQmdCThGQXMJugioe+3kvj6daYRx9xqSlL38cC15cQk0ZuIzg3o+Sx2+t5sJcgZCF8fe+wkZpj7cv118nLSv63OMdHM6zC+UpT3T/r3/dqImfD0GqOa7+WK8/clBhVMTMrhw6QcjMfFJibUmO7HidZt/7TGO8aIu0ZmzNNVo/H6JablkbUrNy1qvcbL3tR2WN
/HOvqhHwzAwIcwUNukzJWHidnb8ZqJyRMBe43ktWkTk6xg5ImLLWtlYlEmX8IcJSsQceVkPiFS2mDLGlY3ktUSv7IkzdJoApbNhif9o1zzIV/Lzi36Jjvuy076ZkjQ5GuZRt5QkbB9WzJrDWqMgbKf0+PzMbJCHzWmh+v5FUMRXyV7IqY91yOPmJgV/TKiYak/ZaIJDMAADMyZjtrHMTF7P6SKRDk3LAPUMilOEu8U+nz1IE843CChJjxpOXIwmTMxcsuMPG/6uTQPw7VFkqXWe1RruRo0YjTcuco1Ey2zc4u+yY77+iR9o/WX1HZ5sjitnyyT55+hleRDMmn7R/nbmwvP0Z5HNaatiZGrLxr/kZ2lMcd2sqjZZ3BHfegHGICB72CgtkmZKw8TsyfxsOcWibIFUSZDPZhpgpEmI3LbSrLv3q0AyCRmgFxNeCYCQJgLN1C4VRBf7vakvDRc4t4XVRdfRyUhFKs0iQZuu8s5fOtZYcgSLTLdizrI47bdfuuZfD3vr1yf/G/fJh4/6kOownYyG9vxnqt0FeYkti1WaXfCcWSp2C4ZjJOtj+d3uEFfxJCtU78a6uM8lmlXcuN9Lzn7d9Ne+y2hfRnK+XvHTM5n6wsMwAAMfCUDc6aj9nFMzOZAssls3HqS71e3yYf8+tN8a4o8nhxzxsWXa294F1+rmm3BcuVnictYQiWvZ7+m1e2z9+cm17TXFsnRpD6pBmd7A7741qZiS1mSgPk2atfLEsZwnk3EsmNhpjrWxekp2iS3/cg6ydcTfUQbHnalJ+lneb+OSAwndeJ9Y1w+7fXVJkb2c0zcHS85fz5uavS54NSPF8l4EMzIXJzkMdu3Jy+r11u2NT/P3iMTxy79fHh+Grc1mKKMr0wOYY5x5wgM1DYpc+VhYhjwGfBhAAYWM5CvIn7oB6szR9GMHeHDjzp+KEuLY4P6wzAM/DoDc6aj9nFMDAM0CSwMwMC3MZBshSOx+PXEgvYTAzAAA69goLZJmSsPE/NtyQvtISGHARiAARiAARiAARh4MQNzpqP2cUzMizv4FU6YazDjAgMwAAMwAAMwAAMw8EoGapuUufIwMZgYZipgAAZgAAZgAAZgAAZgYBcDc6aj9nFMDMDuAvaVDp9rMaMEAzAAAzAAAzAAA5/JQG2TMlceJgYTg4mBARiAARiAARiAARiAgV0MzJmO2scxMQC7C1hmQz5zNoR+oV9gAAZgAAZgAAZeyUBtkzJXHiYGE4OJgQEYgAEYgAEYgAEYgIFdDMyZjtrHMTEfDKz95Wz5q/KvdNNci9kbGIABGIABGIABGICBpQzUNilz5WFi9pgY+4Nyp1P//9Ka9tKa+5Lywnnjv6htDczp0q13xLfGnK/39eep9e5M49tnH/+NtK/qNf1gcTftv3F9XEC5XyU/VWyvvzaPSwes73mf5W2IZc+8Gn/35XGuxpTCVhgPNJZFDOb1EefZcWjNhIcbX1w7lRgb4mptmd/DgtJHS/uS91X67KEPiCcYOCIDc6aj9nFMzNYPHftBL5J6lxSIv5fA112UBGJrfZ543kevCP21pqlm2hg0l3D7ve/pTCOMQsm9MBQnzXBs4aczjRg3usvZtH++HHu9+HdRn1uzyriEfrNjl2+nfO7GEDl5IJ/7OvEYdHzimMs14AwGYOCIDNQ2KXPlYWK2fhAVH/5pwDlT42d0RZIiodRNTJooJbOrfubVJiDyuWtDPE9dibH19fURiZGsz9jzInkKmk1fs7v4mW2biPXvtXXrXx8MnK+X0Chqp5k8OWN+Nu0tNTHx3ImVo1D/tM/G2s/rB9RpiA81Fkb7PzUxj8wgpwajkibZNfJrSvaKONxoYu7X1nRCg2Qc+mtNexNt23gNWW+eCz2F7uiCLjAAA9/GwJzpqH0cE7PjQ2UyYb51MVEY2W6VJA+hHnfT3fx2MG0mdDAOQ9J/vzZi5vZh9CTIniMNQf739EBSJE+hrsN5eSJmjydtHursZ38fD5O2PZ2N9kGdvqe/ljVA0dj15YZE1Sau4hrO6Mm/83rz9/du/ahgYlLWHqaItSr8pOw7g58xG8aZ7PUwkTFMTsS4mI7nPK6SduWmRYvtKu2erqMfA4bGqqkAAAQtSURBVHhEJxiAARg4DgO1TcpceZiYWh/IWgIdVj70rSd5MtEHqlxpsCsZ0nxYkLPZ4rz+WtKRJyb2HO21vKzh7y0mpmhbtnKVHk8TOT9gpe8ZabswS+X7c7N0nIHAa8DjK/tsMNtD3AZzHOIiPb7UNMz24WC43P11lybZ0ibPnY5DbcJD1y6PE0yMrpPUnudoBAMwAAPzDMyZjtrHMTEhQZnvnGmA5Q2/WVKuGYtiNaK/fp6o5AnHJ5iYok5K+7T3hH34RdszvYY+KcrQDBwm5ntXU6rF5tLYlhMEOZPK3/nKSI36KrEUxx1Zv7JNZbyU77FlsZ1M1yXqzHG0gAEYgIEtDNQ2KXPlYWI2Jh7WbKQztSLBKFYdlq/EJPvu3f0iFVZilOR/acJjIZ41VlridWuTbW75NhnZTndM3BPjA0erozzv8ehXrUI/FKtLok829rOvC48HG9ArbCdLtyOmqzCnlfeVLeMnW03JVnfTMcfWJ970b7eRyi8asddzcVWs5PZbTuN9L3mM3E17jd+KqMXgsrYcjBfGByZDYAAGYGA3A3Omo/ZxTMxGaO0WjHPylawioQgJRH9j+/nSuJvq++0n+Xax/j1ha4ozLv6GeLu1JN6gbpOY8JXObsuLuKbckuK3sQljkJ8bEv/J9ueJW6yXuzF48prpuedrm26TEee6Y7bObmZ7Rh9ryHz77NfKXvsvLNC1FfpMtpOE6+sS04GvZZzb/pfcxYmD3Hy7lVARV/t1y3iWN9UPEwgh5ouVH3luznrfHr39sq35ef2khb+mfj7xsr/f0RANYQAGvo+B2iZlrjxMDMntbue9bCDKZ3y/L3iX6UC7j61Tvq3sQ/tTXcX90LoyBr9oDKb/jz320H/03+czMGc6ah/HxPAB+vQP0H5by7AqxW+6PF1vBvrPH+if3kd2JapYuUGXp+vO5wnjGwzAwA8zUNukzJWHiflh2PhAJ6mDARiAARiAARiAARiowcCc6ah9HBODiWHWBAZgAAZgAAZgAAZgAAZ2MVDbpMyVh4kB2F3A1nDulMEMEAzAAAzAAAzAAAwcm4E501H7OCYGE4OJgQEYgAEYgAEYgAEYgIFdDNQ2KXPlVTcxcxfkOAqgAAqgAAqgAAqgAAqgAArsUQATs0c9zkUBFEABFEABFEABFEABFHi5ApiYl0vOBVEABVAABVAABVAABVAABfYogInZox7nogAKoAAKoAAKoAAKoAAKvFwBTMzLJeeCKIACKIACKIACKIACKIACexTAxOxRj3NRAAVQAAVQAAVQAAVQAAVergAm5uWSc0EUQAEUQAEUQAEUQAEUQIE9CmBi9qjHuSiAAiiAAiiAAiiAAiiAAi9XABPzcsm5IAqgAAqgAAqgAAqgAAqgwB4FMDF71ONcFEABFEABFEABFEABFECBlyvwPwh/sJXx+
Vx5AAAAAElFTkSuQmCC)
###Code
# Descontos sobre o salário bruto: INSS 10%, FGTS 11% (pago pela empresa) e sindicato 3%;
# o imposto de renda varia conforme a faixa salarial.
hora_trab = float(input("Digite suas horas trabalhadas no mes em questão "))
valor_hora = float(input("Digite o valor da sua hora trabalhada "))
sal_brut = hora_trab * valor_hora
if sal_brut <= 900:
    inss = sal_brut * 0.10
    fgts = sal_brut * 0.11
    sindicato = sal_brut * 0.03
    desc = inss + sindicato
    sal_liq = sal_brut - desc
    print("Seu salário bruto é {0}".format(sal_brut))
    print("Você é isento de imposto de renda pois sua renda é inferior a R$900,00")
    print("Você pagará ao INSS um total de R${0}".format(inss))
    print("Sua empresa pagará um valor de R${0} de FGTS".format(fgts))
    print("Você pagará ao sindicato um valor de {0}".format(sindicato))
    print("Seus descontos somados são {0}".format(desc))
    print("O seu salário líquido é {0}".format(sal_liq))
elif sal_brut <= 1500:
    ir = sal_brut * 0.05
    inss = sal_brut * 0.10
    fgts = sal_brut * 0.11
    sindicato = sal_brut * 0.03
    desc = ir + inss + sindicato
    sal_liq = sal_brut - desc
    print("Seu salário bruto é {0}".format(sal_brut))
    print("Você pagará de imposto de renda {0}".format(ir))
    print("Você pagará ao INSS um total de R${0}".format(inss))
    print("Sua empresa pagará um valor de R${0} de FGTS".format(fgts))
    print("Você pagará ao sindicato um valor de {0}".format(sindicato))
    print("Seus descontos somados são {0}".format(desc))
    print("O seu salário líquido é {0}".format(sal_liq))
elif sal_brut <= 2500:
    ir = sal_brut * 0.10
    inss = sal_brut * 0.10
    fgts = sal_brut * 0.11
    sindicato = sal_brut * 0.03
    desc = ir + inss + sindicato
    sal_liq = sal_brut - desc
    print("Seu salário bruto é {0}".format(sal_brut))
    print("Você pagará de imposto de renda {0}".format(ir))
    print("Você pagará ao INSS um total de R${0}".format(inss))
    print("Sua empresa pagará um valor de R${0} de FGTS".format(fgts))
    print("Você pagará ao sindicato um valor de {0}".format(sindicato))
    print("Seus descontos somados são {0}".format(desc))
    print("O seu salário líquido é {0}".format(sal_liq))
else:
    ir = sal_brut * 0.05
    inss = sal_brut * 0.10
    fgts = sal_brut * 0.11
    sindicato = sal_brut * 0.03
    desc = ir + inss + sindicato
    sal_liq = sal_brut - desc
    print("Seu salário bruto é {0}".format(sal_brut))
    print("Você pagará de imposto de renda {0}".format(ir))
    print("Você pagará ao INSS um total de R${0}".format(inss))
    print("Sua empresa pagará um valor de R${0} de FGTS".format(fgts))
    print("Você pagará ao sindicato um valor de {0}".format(sindicato))
    print("Seus descontos somados são {0}".format(desc))
    print("O seu salário líquido é {0}".format(sal_liq))
###Output
_____no_output_____
###Markdown
Faça um Programa que leia um número e exiba o dia correspondente da semana (1 - Domingo, 2 - Segunda, etc.); se for digitado outro valor, deve aparecer "valor inválido".
###Code
dia = int(input("Digite de 1 a 7 para os dias da semana (domingo = 1, sábado = 7) "))
if dia == 1:
    print("Bem Vindo ao Domingo")
elif dia == 2:
    print("Bem Vindo a Segunda-Feira")
elif dia == 3:
    print("Bem Vindo a Terça-Feira")
elif dia == 4:
    print("Bem Vindo a Quarta-Feira")
elif dia == 5:
    print("Bem Vindo a Quinta-Feira")
elif dia == 6:
    print("Bem Vindo a Sexta-Feira")
elif dia == 7:
    print("Bem Vindo ao Sábado")
else:
    print("Você digitou um valor inválido")
###Output
_____no_output_____
###Markdown
Faça um programa que leia as duas notas parciais obtidas por um aluno numa disciplina ao longo de um semestre e calcule a sua média.
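Só como referência, segue um esboço hipotético (não faz parte do notebook original) de como as mesmas faixas de conceito usadas na solução abaixo poderiam ficar numa lista, evitando a repetição de vários elif; os valores de p1 e p2 aqui são apenas um exemplo fixo.
###Code
# Esboço hipotético: faixas de conceito guardadas em uma lista
# (mesmos intervalos usados na solução logo abaixo).
faixas = [
    (9.0, "A", "Aprovado"),
    (7.5, "B", "Aprovado"),
    (6.0, "C", "Aprovado"),
    (4.0, "D", "Reprovado"),
    (0.0, "E", "Reprovado"),
]

def conceito(media):
    # devolve (conceito, situação) para uma média entre 0 e 10
    for minimo, letra, situacao in faixas:
        if media >= minimo:
            return letra, situacao
    return "inválido", "média fora do intervalo esperado"

p1, p2 = 7.0, 8.0          # valores de exemplo
media = (p1 + p2) / 2
letra, situacao = conceito(media)
print("Média {0}: conceito {1}, {2}".format(media, letra, situacao))
###Output
_____no_output_____
###Markdown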
A atribuição de conceitos obedece à tabela abaixo: ![image.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAyYAAACpCAYAAADeOSmiAAAgAElEQVR4Ae2dTW7jutKGvz15VR3g9DqMTIKzCmfYvYfA00aPA3h+gTPoHehDkSyyWCRlyR3Hjvxc4Fzrhz9Vbz2Sq0S5838T/0MBFEABFEABFEABFEABFECBGyvwfzeen+lRAAVQAAVQAAVQAAVQAAVQYKIwAQIUQAEUQAEUQAEUQAEUQIGbK0BhcvMQYAAKoAAKoAAKoAAKoAAKoACFCQygAAqgAAqgAAqgAAqgAArcXAEKk5uHAANQAAVQAAVQAAVQAAVQAAUoTGAABVAABVAABVAABVAABVDg5gpQmNw8BBiAAiiAAiiAAiiAAiiAAihAYQIDKIACKIACKIACKIACKIACN1eAwuTmIcAAFEABFEABFEABFEABFECBDy9M/vz5M/EfGsAADMAADMAADMAADMDA4zDwEWUVhQmFFIUkDMAADMAADMAADMAADPwVAxQmAPRXAPEU43GeYhBrYg0DMAADMAADMHBNBh63MHnbT0+vJ5JyCjMYgAEYgAEYgAEYgAEYuAMGtlWYvO2n3W7nCo7jtN/tpt3zsQAn7b4dptNMAI7Pu2m320/Hqs1pOnzzxz6qcu7YWc29fJ5ou9jv/L5wvGtWxnlsCsXCp49T4jrE89thOr7up8P7ch6yxn7cv9knXuN4/Y2u9EVXGIABGICBB2ZgW4XJnz/T8flpenouRcfpdT89XVhMHJ+vVYSMksrjtLcF1F+BGcf6fB9Gvl16/CM1udQG2++T7Xk/TE+2iA5FytPtC5Mhm5+sz9AOGzO2r1Kcoj3JEwzAAAzAwAczsMHCZD8d3/bT/k2SkeO0/3aYDrnAkBWPtJLQW02wT6afj5NN6k+vT3EFollFSfPIeOm/OPeyZMiubuzf6qSuzNlbvTkzvmqgnwKOJLlhRekQV5GCvab4Uv+lOLLbCl3qH/3U5NhqGscqdmubtBqU9KlfoSvn6uN/ig1GW7vyVebZmRWwZM/zPq+UqcYlLmVO8aXM6/rKvE1RUOIcdLCFZFefM3FSbQeflsFucjmYM2qzn/Zh5U9s1lioPTZuu2n/epj2+dXGkT7St5wruqUxlZlBvJS/mh+1h89ufAdc0BZeYAAGYAAGtsjANgsTSZ5Sci3JqCZ3kqyV5PTPJPs5uZIEzyShmtjVr3KVsWoYTtPxTX+vIgmfSfZnEotq/j+aFKdXzsQen/Qa++r524tTfQ6JpB0nJI8mSXV+58QzzSUrTvG1IUlIrV92325HW6xvsoplXz2SQsHGIfjybhNj609drFV+vx3Lq3bVq0ViT5yz2FHGmben9JW5pH9taxmnsiUk7CN9rD9rts+x5HWv90Xnim/DQR2DyJ62ndcn2b86XrVtkTOr1xpdaFuzhx7oAQMwAAMwsA0GNlqYSAFRVhk0SY/H3BPvJgHXwPaTQh2rvgBSUZGfFC9JuE7TwbxyFsazxUjv6XPz1Ftt9Z918lzZLOOaBFXmLcWHjFP3zX7alRcttsyxUgCkMXIR47VJ+jsb5Gl6eWJv/RnYIzY4jTSxtj4U33Scc/Zou2RDVfBcrk/WUbVb9NlnMI9l9O8dq+Iucc2FrfOxsuWcPkmXtfE6Y2u2v7LFcsA2GsEADMAADMDA1hnYbGEiyekxvM5VVjnqRK2GuySweryfFPbG8E/Ve21akM4XJvWTerVrwadL2OXVmTxWk2h/TGFin4DXRUrHz17yuTbRrRLt+JpaKWxK4l3iqsfO2aPtks6NXu68+nKlxNuvXlQcnZmz5nBFYeILZvXRfq6N1xlbK7/sPGzz/jIMwAAMwAAMPAwDGy5MSgKfE7QmySxtwvvv9il+SO7blY88lrlIquRRVj3yaoEZ37TXJKwtaOy/omUTyflxdDz9bG00yXTjl0/UTdvK5va4nycWJPL7lVo376faWX0uSnSlWEyvhdnVpfCPHpjXlsyqT1uY9F7Psvo6Pxtm7Hljj5lT/fL66PFVn87PP+GVP9XX2hJ9sHPa7VA45hUTKdbr1+skRrqSdpV4XUufilEbR7ZXcYaOD/OlDxfcG2AABu6ZgU0VJiG5Sq9TxRUCea89vTqUkrLmdS5TjNj+8qPng7wOFvr1X2/JqxChGNFXxNIPjk0SOAagHvdJfoBsf4zdrHxoQjq+qIp/JoG3Gkii/bwPP4KPP0IuqymV/6FPJ3nVsaydmtQkHcorVWpn7afMm9s0Pqrm2ldfy4v6Zs1TMaI+qE/7tzKXzBF9Et0iC3He0ib3Dz/8LsdDOxPX0bz2uNcv+6j6XPjpx10yZ+7j+c28m2tDYpqPi+5Fh1qf9vW5cN6xXhgsbMk1kG1KDH2UPuPrqzBEG7SAARiAARiAgftnYFOFCcAtAK5ZAVjQ58KEmnigLQzAAAzAAAzAAAzAwFIGKEweKek2KwDtH4/koll60dAOVmAABmAABmAABmDg4xmgMHmkwgRfeY8cBmAABmAABmAABmDgThmgMLnTwFCFf3wVjqZoCgMwAAMwAAMwAAP3ywCFCYUJTw1gAAZgAAZgAAZgAAZg4OYMUJgA4c0h5MnF/T65IDbEBgZgAAZgAAZg4LMYoDChMKEwgQEYgAEYgAEYgAEYgIGbM0BhAoQ3h/CzqnDm4YkPDMAADMAADMAADNwvAxQmFCYUJjAAAzAAAzAAAzAAAzBwcwYoTIDw5hDy5OJ+n1wQG2IDAzAAAzAAAzDwWQxsrDA5TYdvu2m3c/89H+83+bZ/9HClnafXp+Trfjp+SoF1nPZB2/58l9uj4+6mp9fT/cbqShofnw2vKxn4rBsF8/ClBAMwAAMwAAMwcG0GNlaYCDDHaV8ld35/Dqo1befGWXpOCqmS5Etiv39b2FcKGvXTbl8peRYQj89P0+FdPovNGVBrg91eYI+Oa+fI4y7ov422kb2utg+jwUL20ePhivdtXOPwTRxhAAZg4BwDGy5MTtPh+TCdchKTVlOe9+mp/27afTPn3/YzKy26EiOJud1WwMoTf1mtWf7U3xdCfl/Hbz9Pr4dqlWR5Qqv2p6f0Wtxkndq5PES9uS635zgd7CrJ+2Ha2/0Vdnk7v9T+2z4Wpfr5KH7jJ0UGDMAADMAADMBAYmCbhYm+ymULj+CwFBDxqb8kre0KxXxhoK8qxVWN0tY+8Zdx5dWcZSsfUiSU1YcwfmNzv1DwxcHpdR9WM84l495n2V9eSEVb/NzR5+JH1HaZPX+aQqToes6XLZ0vmj6m/1uKJb7071nogi4wAAMwAAPnGNhmYRJWAfyKicDgkr63vUvK3XlXwfeTf7cCoUXR0pUIee1J+3zbT/tqlWcMcElkY5u+bW3/6vcMeV6zcuR87gHk55Y2/thSeyhMWi69lr0YcKxlG03QBAZgAAZgAAa+NgMbLkx6gXGFx0cVJguLifMXi7Nvpki49NWpj0h6e2Ncao8Uiw//KlfnNcJlK
249xjl2/jpDIzSCARiAARiAgXtk4AEKE/u6lEv8ZwsT6Vde+5LgjVYB/OtRlwbavxIW54z/8laTqL4fpkP+obzza6ag+dP4vP7C7BUmsvIxb0/6HU7nVbXja1mxGWl8qaZfoV+r54p4zsWac7yzCwMwAAMwAAMw8IUY2FhhMnitaie/fyjnwm8qzCtUNum3rzqV4/WP28M/R1y9qlXG1n+qeNnvNup+vT7hdye73m9WbN+6gDqXjFsfW1/GhUrTL+iq7c/ZMy5MpFhS3XaVrjr2dj+LpimGhsvqH2f4QjeVc/xxfrs8E1tiCwMwAAMw8DcMbKwwAYa/gYG+8AMDMAADMAADMAADMHArBihMeBLNEicMwAAMwAAMwAAMwAAM3JwBChMgvDmEt6rKmZcnQjAAAzAAAzAAAzBwPwxQmFCYUJjAAAzAAAzAAAzAAAzAwM0ZoDABwptDyJOK+3lSQSyIBQzAAAzAAAzAwK0YoDChMKEwgQEYgAEYgAEYgAEYgIGbM0BhAoQ3h/BWVTnz8kQIBmAABmAABmAABu6HAQoTChMKExiAARiAARiAARiAARi4OQMUJkB4cwh5UnE/TyqIBbGAARiAARiAARi4FQMUJhQmFCYwAAMwAAMwAAMwAAMwcHMGNlaYnKbDt92027n/no83F7pbeb7tG1v3bwur9PfD9GT8fHo9fYKPVt/9dFxzARt7F/u4Zvwv2/Y47U0cA7vfDtPpy/qzkF/8+4TrlVh077uwB3swAAMwcLcMbKwwkS/i47SvChG/P/dlvabt3DgLz73tpypJ9/tzF877Ydp/SjFSfDm9PhV7pdCodC7t2mRAChotZOz2XJ8HOudj+bafPqfQfCCN564lzt3tF1R7L4FZNIEBGICBLTOw4cLkNB2e7ZPn9LT/eV+eUNsn053Vi11OvHWl4Gk6vNttvTjqp96XJpXHZ03eddyZT5/MLk6u1P60qpR9nJkrje3t8/vDC+X9MB3sStCaAmyxX+ftH9p36zl8LNGHRPnWTDI/DMIADMAADNyAgW0WJvpqjC08grhSQEhxEZPYagVAz88k6tJeXrWJqxxldeX4XMaU5Pf4rG3WJMtlvEUJtHk1SmxaWgx5n2X/or5h/trvod0+0faJ+A3AH9p6C1tcLHcNt2s4ou1dxfYWPDEnyQQMwAAMwMAXZWCbhUkoLvyKiSRsLvlvXplx511QT6/7XNSU5MetQGhRNFPglL4mifTJu5u728e08cXRqL0UTc1vcBYnwtbXp2n/3NPD+KT2ed8oTOobptdDVu8Wx6Sjt+rOZ60zeqAHDMAADMAADNw1AxsuTHoJmys8PqowqV4Z6817/tji16IGF1S/aGrn/dt5bMGzeCxe5Zq/CfjCJKy6rXitb8CEjRXb7bWAJmgCAzAAAzAAA/fFwAMUJvKUX5O8NYWJ9KtfVRol//71qPWQH6f94Al5/fqYwhNXLsoP5+UVNfVR2ww+m2Js0O5cstsbJ/1Op3017DQdXsu/jLa4oDlnw1bON4XJmIf1bF0Y361oix/zRTH6oA8MwAAMwMAdMbCxwsS+amRfWZKkvZwLibN5r78k+PH3IfqqUzle/7g9nK9e1Spja982OZ9JEJvEtLTtFyZyvp6z2Fr6jpLY5nWuypdxf7Ul+NgrpIaFyZ/J9l2lzR1dLCM9/+54hy3zO6i/G3scS8ZFGxiAARiAARiAgXtjYGOFCYDdG2DYA5MwAAMwAAMwAAMwAANLGKAw2fwTeS6EJRcCbeAEBmAABmAABmAABm7LAIUJhQnvVsIADMAADMAADMAADMDAzRmgMAHCm0PI04nbPp1Af/SHARiAARiAARi4BwYoTChMKExgAAZgAAZgAAZgAAZg4OYMUJgA4c0hvIcKHRt4UgQDMAADMAADMAADt2WAwoTChMIEBmAABmAABmAABmAABm7OAIUJEN4cQp5O3PbpBPqjPwzAAAzAAAzAwD0wQGFCYUJhAgMwAAMwAAMwAAMwAAM3Z4DCBAhvDuE9VOjYwJMiGIABGIABGIABGLgtAxsrTE7T4dtu2u3cf8/HO0++j9Nebf52mE4Li6XT61PydT8dF/a5/IIzNqqti3V1cVnh4+X23vbCWmt3ieVu2j0fp+PzZ8T0a2m0VlPaE18YgAEYgAEY+FoMbKwwEfGP075KmP3+XIDWtJ0bZ8W598P0tHuaDu8r+kgRIv3UT7t9tQLFa+P35+w/TYfn5QXXw91E3vahGFG/Y5FCYaJ68Dl3bXEOPmAABmAABrbDwIYLE58Mp6f2z/v+6oQkh7oSoJ+a+P/RJ/5SQNhtBaFeTXh6PS1eobn0yfjp9VCtkiwfR+1Pq0rZR/Vl4efbftq/LWwr+lGYDJhAG75Qll5HtIMVGIABGICBbTOwzcJEC4vmlSEpIMrqhDyZrpPr+VUAfd0m9iltj89lTLlgjs87N+4IopiUHp711bPlT8l9IXJ63S9adfE+y/6aQkpvCH5+Pd7/dMVQE5eRPo9w/Djt0WNQtD1C/PGxf89AF3SBARiAgUdkYJuFSVgF6D2JLsVECPbb3iXl7rx7Laqf/LukW4uiRSsRaaVF2654JcsXBn3b2otaiqZmZWh1Yjyv09kLqdG9tfPsGC42X7c9hcnXjd0jc4vvcAsDMAADMPDxDGy4MOmJ5RLqJkF2513y20/+ewVQb+7eMSlM7CrJ8rEufZXLFzQXXVSrXuPq+P1+mPYrXne7yEYXu/sdQwpby0BHry/jC7bfL2fEhtjAAAzAAAzcPwMPUJjYxM8VHrOFifSrX9HqFyZ/Jv961Brw61epnH1/4tiywlG/chZ//H7Iv/Fo+w1taHxeD6l/dS3PlX6n418NEx/lX5rSdstfdVtvm87xpT6bmLCK8qXiR+GYr23i9iD3LJiHeRiAgSsxsLHCZPBaVViVKOdC4hz+Naz4WpNN+u2rTuV4/eP28CqUSbT/5B/Hl9ekfHI+94Vd5qwLIekTkvpeYVLN2fZbNl+yt/Ll3BfrzKrOoDCxfrTanZvvMc4XBiQm6+I5F2vOPQY/xJk4wwAMwAAMbIGBjRUmQLkFKPEBjmEABmAABmAABmDg8RigMLnSUhQX0+NdTMScmMMADMAADMAADMDA5QxQmFCY8J4kDMAADMAADMAADMAADNycAQoTILw5hDxZuPzJAtqhHQzAAAzAAAzAwFYYoDChMKEwgQEYgAEYgAEYgAEYgIGbM0BhAoQ3h3ArVT5+8MQKBmAABmAABmAABi5ngMKEwoTCBAZgAAZgAAZgAAZgAAZuzgCFCRDeHEKeLFz+ZAHt0A4GYAAGYAAGYGArDFCYUJhQmMAADMAADMAADMAADMDAzRmgMAHCm0O4lSofP3hiBQMwAAMwAAMwAAOXM7CxwuQ0Hb7tpt3O/fd8vPvk+/i8m3Yr7Ty9PiVf99PxEwusYGvQ+Gk6vC+E7/0wPaW47N8W9vlEn253E+kw++0wnR7Cdzi4HXdoj/YwAAMwAAP3x8DGChMR+DjtqwTf788FYU3buXFWnnvbT0+vB2f3mTEk0Vc/7fZVE9qY
RD+9nlYWetJPiye7fcbHq/pyT3M77t72q4tUbq73FE9sgUcYgAEYgAEYuISBDRcmp+nwbJ88pyfTz/tprysq9sm0JIN6XD818f+T+u5khcBuK3THMuZuN61L3NVOl5yeScpPr4dqleT4rIm/2jT6VPvTqlL2cdTeHH/bTxetdrwfpoNdJbl0nDOaXHIB3EefNvbL42nis1l98PE+OCUOxAEGYAAGYOC6DGyzMNHCwhYeIWmTAqK8fiSvQtWJdpsgWgD11anYp7Q9Ppcxpb286lSPOw5isaGMZ+ccbfvE9fS6X/RaVZkv2iT7SwupMMdrKeCW+vjHFyLvh2m/etVlrOFIo69zvI390nh+HR+3HD98g0MYgAEYgAEY+AgGtlmYhFUAXYmwoLgEMLxCZV9LcufdE+h+suhWILQoWrQSYeez29bm/valhUn5fYj5HU5TwI3mlD66MiN+63a/fQaUwuTMq29t7PusndHZ8Zr15/gZ/dEVVmAABmAABmDgHhjYcGHSA8wlgB9VmFSvjPXmHRwzPwjX18iWr15c9iqXL2jWQOhXghYnz7zKdSYxdlyGVbeFRR9FxxltB9ceuqEbDMAADMAADNwdAw9QmNgn+y4BnC1MpF/9itYoEfevR61J9ktbZ1u6WGRsKVqa16aqZL/ft4xtkrPGZ3Pu3AUqhZRZXWmKnPQ7nba4Ok2H1/IvozX9zs27+fMufp/2jxmsiP3mY4AW3fsFcb+7L23ixLUKAzCwZQY2VpgMXqsKrx+VcyFxNqsVNum3rzqV4/WP28PqRvWqVhl77cpHgMv88N4n9cPCJP8gX16vqguoc8BaH1tfzlzwM7bKb0lkPO+D2KN+jM6fs3m751t2yutyZ2JB0kjSCAMwAAMwAAMwsCEGNlaYkMhtN4EntsQWBmAABmAABmAABrbMAIXJhqrMLYOKb9yIYQAGYAAGYAAGYGDbDFCYUJiwBAoDMAADMAADMAADMAADN2eAwgQIbw4hTz+2/fSD+BJfGIABGIABGICBJQxQmFCYUJjAAAzAAAzAAAzAAAzAwM0ZoDABwptDuKSCpg1PWmAABmAABmAABmBg2wxQmFCYUJjAAAzAAAzAAAzAAAzAwM0ZoDABwptDyNOPbT/9IL7EFwZgAAZgAAZgYAkDFCYUJhQmMAADMAADMAADMAADMHBzBihMgPDmEC6poGnDkxYYgAEYgAEYgAEY2DYDGytMTtPh227a7dx/z8e7T76Pz7tpt9LO0+tT8nU/HSmw7j7G/Zvpcdp7Xne76en19EX92fYNsx9DfEYXGIABGIABGPgIBjZWmAgUx2lfJfh+fw6cNW3nxll57m0/Pb0enN1nxng/TE/qp92mQPl6Cf37YdrbQsTvE9OvF1NiRsxgAAZgAAZgYDUDGy5MTtPh+TCdMhRpNeV5X55QfzPn3/YzKy26EvM0Hd7tthYP9VPvdU+71c51RdHp9VCtkhyfl66aqP1pVUmLm6yT+tR+hlUd+3R/qJ/oVPrryo7oYre1stZjYaXLjrnAJh3jS3/mQmQdA1/a50eJLX6u/lKC63LvRAu0gAEYeDQGtlmYaPLcJLlSQJSkWRLi/ZuFfj4x1AQ69iltj89lTAFIEvh6XDtHvV1sKOMtgdAXIqfXfVUMjMYo80U7ZH9pIWXnrMc5TvtKa9HZFUqyqiNxSYVQHksKQlsc+f1HSOxUG6PPKH4cr68f9EAPGIABGIABGNgOA9ssTEKiqysRNlgu+Q+vUNl3+d15lxT3k3+3AqFFkU223TjlArLz2W1rc387J/Zp7L5tbd9m1UPsrYqKtk+xN53zxYNNrNX/nSvO8qpAPb73Q+bqHWtsGGpaj/8l+mVt1jHwJXzbUpzwhdUPGIABGIABGLgqAxsuTHoJqkv8PqowqV4Z6807ONZJ6JeuXlz6KtdfJ/1SlPhCJifWAz/lIh606dnTO7bpJHygzaZ95sZ+1Rs77Mzci2AP9mAABmDgbhl4gMJEVjT0taI1hYn0q1/RGq1K1K81XfqF6GxLF42MLb+9aF4Nez9Mh/waWr9vNzlpirEV9rqipBQQVuPBeKPk+23vfFvhy1ZuLI02/tW4gaZb8R8/7vYLonsPIV7ECwZgAAZg4EoMbKwwGbxWFX7vUM6FVQmzWmGTfvuqUzle/7g9/Ei7elWrjK3/VPHSlY/wxS8Jf3oFyvcbFiZ/7Jx1AXUumbA+tr6MkmA7n/5zzFrwxRWR8BuS/CpXsamZr/r9iR+39DvnxzbOd9i65PW6K90gtqHxiGmOE18YgAEYgAEYuCcGNlaYANc9wYUt8AgDMAADMAADMAADMLCUAQoTnjSzHAkDMAADMAADMAADMAADN2eAwgQIbw7h0iqadjxxgQEYgAEYgAEYgIHtMkBhQmFCYQIDMAADMAADMAADMAADN2eAwgQIbw4hTz62++SD2BJbGIABGIABGICBpQxQmFCYUJjAAAzAAAzAAAzAAAzAwM0ZuMvC5COMYgwUQAEUQAEUQAEUQAEUQIHHUuD/HstdvEUBFEABFEABFEABFEABFLhHBShM7jEq2IQCKIACKIACKIACKIACD6YAhcmDBRx3UQAFUAAFUAAFUAAFUOAeFaAwuceoYBMKoAAKoAAKoAAKoAAKPJgCFCYPFnDcRQEUQAEUQAEUQAEUQIF7VIDC5B6jgk0ogAIogAIogAIogAIo8GAKUJg8WMBxFwVQAAVQAAVQAAVQAAXuUQEKk3uMCjahAAqgAAqgAAqgAAqgwIMpQGHyYAHHXRRAARRAARRAARRAARS4RwUoTO4xKtiEAiiAAiiAAiiAAiiAAg+mAIXJgwUcd1EABVAABVAABVAABVDgHhWgMLnHqGATCqAACqAACqAACqAACjyYAhQmDxZw3EUBFEABFEABFEABFECBe1SAwuQeo4JNKIACKIACKIACKIACKPBgClCYPFjAcRcFPl2B//2Yvu++Tz/+9+kzMyEKoAAKoAAKoMAXUuB6hUlIRnbTbpf++/f3hbL8N/34Zze9/Lqwe9PNjvd7evnUhEnm+0hfGuc+5cDvf3fT95//fcpcl0yyzD7LwSWzfH6f/35+n3YXX0efb6/OKPE4d/0G3/75MQlVy+Kno3/9z+LvZ9+PvpZ2lpGLLA/fSS/Tpd9EF81Jp7ECv36EhxW///Uxiffm5l7nc4qUW+TvosH5Zpx0j8m5icsB5Hoc9jH339Au3bOKk/E7vowd85/u/e/XS8mPdvV3qt7rZY5u3zLhg2zZ7+u/uE9mzT/mQVmfgTMh6XLqr4EzY/zF6cCW5uV3mo9epzAJwa8Df1EAg/gWyL+IRrfrXwDeHe/cwXjT+uo3Goll/jI45/INzi+z75pcXcdp/bK6zui3HfWvk87bmv9Bs3/2/eiDzP6kYf6aEQqTT4rUsmliQfLf9OOnKxUlf/j3ZXrZuWQtxK/OK6bJXDPD8za57xQ9KVHM38shf3Fzh3nMONLnn5fp5Z+ePaadSOHHn6YpJod2Dm/X7+lFiqBfLxQmDU4m5s25+QOi+4flLkMG5m2IPNTchPzYFL1
nRrj4dMvdfeakVyhMRgnf6HjROIqmqywaONcvV7y76eXfl2lnn1iYc+V47P/9n+/h6cTLLx0vfsYnGzJXOv5TXjuJNgjAAZiw37mJpHb5hlZcMVtmnnCzNTetdMOKNuj4al8Zokq0uz6WtnlL2v3zY/ohT3/UfjNfdXGOxrTH05eEjVEYI8zzPWqWYmHb7PLTqOhXiUO2tGyEseRmrzYrA9IkXkD5SdTgIrZzZx+tH5kXp7Ntc+4pwrCtibV7Alac1C+ql+klx8YwIQ3t+GqvPZZ8t74WnXX8noaVFe2OnWNOg8DRy/RDVnACWxInEx8bG8Pczicaeb7v08u/3/O1POR9zqbWG3Ptni+krZaZG/dkNR9P/g/jZ33W+I3imleI3sPKcNEzOlTuPy7ox94AABR5SURBVNfw4fv0PVxrL9PvYLPZl+mtH1XsBpxLPP09x8Sl68s5LQeMyLDd8aZ0Xct3Q7o/x6ffhs/sS+3H4vv4WQ7tXP2n71GWZOvffOcEzQf3zCp+9l5a+12+K4OqYVU/aBe+r0y/HItdvlY1Dt9//jD9fqfEW+5Bpn90Ov+/xO/ll2hVt4nHOyunidH6ldCkobxR0T3vxkmMNuv91fEYv4qH4Lt+R8fCQu4H4b5h73e+gEnehnb5XtD6HJuZ46kgaVeTsnz190RmWs7X8a38MN3j5pq29TVXxh2Mce7atnxmbQbff3pdp/ytvk8O5ne+2nt8XhHrMh3Hm81VUnHZZ8BN7Hd7nFb8+Q7Ktn6nFw5DrmC0a3m0Y3W4ltPn5p7RqDBg2LVTXrj98YVJgtE9AwnmhS+S6iI2Vgfni+ClbYQkCmCFTTBqUBLksV06F+ay2zKfH09vim68FIyYjMRzmpgE23Te0E7HMP6kzbatJqHWl3TBpzFruEzAK6C9X27uZH+lh968rNZD3cy85iKUWcQn1SIm0cZ/O7YmD904OHtlt2ez6ZvnrHRox6nsq9pazeJ20Kdqk55maXz98DNt27gVnqthwhhFw4qRdK6KW7pmqvHP6Bz7J01T/8oGvzPjl2+qCavGI9ivyUU1zphxHaPyM2me41eNdSYuzsiglfJeaeoayq5omeNduB+Okcar/O/0l6H7vlgOTRv79Dddc7mYu5IPmZNm/HHsKg6DzYnzdP1Wulh2e/GY07KyKWmWdA42qOYz7ar7U2hXrsnArV4bwXZzH3OYLG8b7VQNRslyHL72Se9/sW89znD+pHl1HQWfYv8cX2mXfK3Gcom0PRf5T5oE7VSfZLcdz8W2P5cTtbsr3KUYyZwaY2lb2ZA622N2O4/dcpxjk9u0Y1sdpJnsl37iv2ph7A3j1fPlKYJt5jqxfuVGdczz4e6GzKM2lEJJmla2L+FabTnTtrrujT9hvt4YoU3RrWrn7nVZ3yqGlrO4HXmufa/GXe2D5mPtXMpwV/6QR6r+noF+j3y08jEezf7nRnYjMqX8Nf6q9vqdofc0O4RsB23K/c+f7u6nGJ69v7h4dsdacfBTC5MK7DNGlrYGSBHWBKFKKHrnwg0u9tegnitMcrsQkBLEAo6xJ/jgx7eO9duGIHtI7Hz2nPEraDLy304r23YMf2P1c/kxg271xW+HL1q089h2su3jmPX1DWXf2Vz6+sZj26SltW+smY+NmcNobo72N03bsb2ua9Bfb2rJb42BGS/0MprMjV/OzfjlzJjd9XbYxs7+Mrc0MrExtofu57hLGtj42Wmr67060e74MWR/9EXj2+po/ngew/lf2TXQLWikMZYJTLsyj9HOcSxd8vxqoPksY5iDc2N4H/z+TOzqeJv5Zvp4+7IvvXlVJ6NRmCXvt/fcMr4/ZzQNc+k9XY5rUiKjz103a9oaPcKmmd+fSnPme2Jln72PzczvNC+xmfPHGmLbeVvL/jl+y7VV+sgsxR4758y2+JMTK7HN3CeDPvrUuHzm4mtwPuvbuR6KJbXd9vqs7mnSQeZRRtOY2QZX6OXxbWwzx/ls3igc50ODDWdvbiXHl3JtYy8DxH2rVx52eH3MjBF8NvGzftvtMknkxWhb4mDnsb7b4+d8cDx6G2S/mzMaA3VzlgFtNPgMuhR+w+qP9dl3s+zIOaur82H2enNt/TTdfd/HaVRzb2LdHWz5wTsqTCJgefldluHDDaqAN3dzbM7lYJb+URa7PwN47h97lRuGv/DtF4gX3rc1c4cAOzjzE5BiV5k3XVT6ekL+1C9aN7cDyo4TwY79xrrpBZBsNBdONZabR29uc3F0lpZdN1Z1kTV6jS8Ca18YI2uleovvJhaa9Nl2xt9iYNyS8Sv/TFt7rly0bgTHVrn5phib8ZpYVV/azo58LnIXbRzw4UySXWt76GvtsO2d/VWcmsLE2ZgYb7gzsbfxW2yTta/zBVvbaBtHDtov4/Z4HsP5Pxu/NFXoa/kK2zE2xd9y3et1ZO3K81vzw3Zra2zSHs9jeB/8fnO9SRzLNWfjkjk3MQzz5zFX2GHGCLZaBvO5OF51DYqe7vsiamA0zfbIGXuNFEat3rH/2rb6gKWMaXUrY8pW9CPrV9lnv1dmbM2axJFzfMOu7WfuA2Eea19KZMPxEmObkIdxz/Irkxq9LyhMLFca3xyTxr7oc/5/f1600dWX1Khca7mXnqlWIKwfnsOuFu7em2Oq01hbXMy0iXyO7bOt0raNY75ObMxLjLOG1TCxrbV1PH/bNg7VHs9jBPsMd8Zvr6ma1dXWFAvRVplTOZ2ZXwc1n2H8FKvGhmyvuy5Nf93s2pkZ0FaDT8+pNLN8+G7NOeO/0VS6Wf/8MLNzNI3jgeUaGZsGY605/PGFib/ZZmvmg+0FKAKbfi4INiGotmXOHEzTP9hi962Y9rgm5eWiyhdb41/s17/w3Zi2r/cl6xQ34ny/qydGRRPXuLfrxi/2O99cu6JbPaiduxrL9V8Ux3rostcbK1zs/uZj41a665a1z9qt5+OniY3Ma7/AnB1Vv8VtZ2zMN8A0sp3PbstpM5/1ZaxzZW31ZN6dqXfNPOGEt8O2dvZbu+wXenNN2jH8+GY/x2+NTXbszhe8jFmexNaN83z14SZJyGM4/ys/jR92uFoje8YmIzUz3q48f9097Pm22sQfz2N4H/z+wA8dt3wam3vxStfVYjvsvHZbJsz75tothqQtf87YV/lojjdj+ANr29on1nN9na2VfWMuKuuyJvHokLPcLs5Zvq+sDd7Wsj8ct7nWSh+xaK5f5YfsBP814UxnrSa983aQzvnAe07aLUO2Y++46iTfwT6edl/GsT7Ltj/vHzjZ9taO0XHbpr9ddF4zho29jKs+N7/AyedsERMtmRnDxk4aZwbdtnGp+GEOhk07j/XRHpeGcz44Hq090lX2myLI2yH7Mr+PsbWp18cc63BaM2TayqbX0fZ3PuT7uxsi7vbsTuPba8T2deMPNbI22f4Xbl+hMNEA1zeY5gbhDA5AZnGigO0TMCtsBDC/2hWEUVjSuZDU9sFtK2/XzsFgv1grXwLMta/Wtcqv0FZttL6kC8Ynx/KEKmuigJa5KjvspLLtgLL2V6CPdHP+ix/6ZV
aN5eap/NWnkt04eINbm8NYncIkHM9PTNpxKvuCHz3NTLxDXLQITexY3e0UM22reYPvOqYdQONozlkNR/FwX/JjnYWr4m/gIGjobPC7M375phU/zq76BjvHuD1Xa551XGOTMzIykjSuNHUN1f4c72LXcAx3bVTXWo830b93PM2Z/a0SHHdPuLYP3if35Wu1KPaKlqJX0jnES+9vMbHWYtD2j1qkdn5eey1UNtSMhPFczOL9yVzXIdRiX7oe3Fzih9pX2dQiUr9GNxuLwo8ME/0212M1trO1Y5+953ZtrfSyiZfxW+aUduY+rOOG4ybJsppUtgfbih+hXcOvTFTPG8ZYcv9RrXJMVSijp7NBW+TP7nnTPzRMHFmbQr/CrY4X/ZdVB3evtvtmzKipn0/v9/X4cWwzbkqoc4zViNFnsLn0l/E0pjaGi7hWzcP1W2Lsp27HjW0tC5GnNIaz8e/uk/ZaqRkbzu8dUMY09lXcLRd2rs4gQaeifWwR+2gMOr3KoTCv07k7pnaJTOnYY39juzmGWu7qsXXG/HlGI50rjut8yoOs37hOYSJ2JId0OVYdGJuYRNVXHCRQ4YJxkIQAys3i+/Tjp7ZJo+ZzNqF3/e2qhd4MwheXaxfsL/AJDAqGVuXqW/sUwXoZxw1t079Yk9tXGvmgDoCxPjY3SDNv1i8eq+x3vumXk9qoz0sibGk5WG9eenHrvzrl5olfTLqE/DL9zuedvsbUvJnbxiNh/nQTsbZ8/+mfYuURwoa2zfGScfNrCBpTa4+JkbDwS/5lNm1Xj13H3re1DNdfRNUoPf2NvqN45ONBEzuX1VkfDJgYpMmtnpU9YWeFBs7+elyxy7A8x7iJy4v8y0RJg8LqjE3OhtaflBinuGcWeg0lnTKv59m23eN+bsdtjpN/sGD8tclO62/Rrzv/NXzwPskcw9hZ9gznQQfzL0RZpkca+3lntLSMiHlWm/L9Yq/r0MrwqHbrtW35svf3nsDL2+r9R+458/cqZ6vTonAh9gzmd3pV1+KAt4rPf3+7lUHVSGyXf2mrsFj1M/fH2s76+q/s6cmaj0X/7LWnp0KchaWgj7FHG+jn4HyMh8Y8Nq7YsT7qWPIZxjPFqzKnia1pW+Yo+pXvHHONmD61nuf4sx3jdpyz/X4estIO0XCVc5NVbWs28xiO5+CvvSdYPkfHM2f2WtH5lAXdj1rk+Ts+NDx2bbBztYMEds4yoLa1/TNXOScRu2fayxDKYugzYjn9S5kd26wVFTeax9kGfrurUW1Tc6/wY6zcv15hstKQS5qPALlkLPqgwPYVSP82/kYcnf2nNDfi45dyQ77AbILxpYzH2EaBkJDUSVDThgMogAKNAv/9fAl/PLQ5wYFFCnytwqSqGu2qyCJfaYQCj63Apv5Y17aKrE2ASWHyxcNYP3k++xT3i3uL+ShwHQX+m378+2PSN0+uM8e2R/1ahcm2Y4F3KIACKIACKIACKIACKPCwClCYPGzocRwFUAAFUAAFUAAFUAAF7kcBCpP7iQWWoAAKoAAKoAAKoAAKoMDDKkBh8rChx3EUQAEUQAEUQAEUQAEUuB8FKEzuJxZYggIogAIogAIogAIogAIPqwCFycOGHsdRAAVQAAVQAAVQAAVQ4H4UoDC5n1hgCQqgAAqgAAqgAAqgAAo8rAIfX5j878f049c0lT8w0/9LqNUf4vJ/nyT/Rcz0x50G5/NfiR2cL38NWOPr/532+o9Hjf5gY308jeH/umbXhv5f8wzjZR/rNnIu/rXg+rh68Bif5i+vBl3rOC3SIP+10st19H8hNfwl3yv/AbmaDf3r7ev/KvAijWiEAiiAAiiAAiiAAnekwIcXJlqQ/P6pf2AmFiYvv6zXMfGsC4uZBDIkp/68jJuODc/vpjJvtCPPKeaE5NWM2/1Lt85+afPvy/Syc8lyzwY//tQpakI/Y+f/fkwvP/8zhZ3V7QG3gz5O6wUySFFRxXpBH98kFCauEAmFgzvm+/3NfhjfF71/MyB9UQAFUAAFUAAFUOCLKHCFwkQSwt/Tj3802XaJfRImJH2agPWSeitg97x/qm4KjNQ3rj7Ev79ZzWfGro93bHXFiowpxY4dOwzXtTG2yys3MlYvqTXHY2E3/suhIXHV1RbVTwwI8+sT9rlEPhVHaYxSuBlR0qadq7Qb9A8+vEwv/6gNdTyCzmlOWzDYOcpxjW2MR1ipyIXgYH5jvp1LtZ+b5/s/3yeZo/gYBwvjNPHqMGLmnpunjC9j1ProEKG/jaue4BMFUAAFUAAFUAAFNq7AhxcmrV69RC4mlzkRHST1eazeeXvMbudOdl5NdPPJstEpPDSZlUaSKGY7JxkzJf0yp01auzakVZnUrh6rmBCLin6ialrFFZ48p/ikfayvqRjK7aoRgj+5OAq+6xh1u6pgC75Fv0PirGPb/mFbk/tUPGiCLee0T9Awzlkl/mGOun9I5M3cYuFw/tr8ydoftm3cchHi7OyNke0uJ0dxPDcPhUnRkC0UQAEUQAEUQAEU8Ap8WmESn3rr0/TdZJP/+ml/p01KWv0YOdEbnC8FxUxh4hLf8HpXTkYl4TeJuyTYmmyH17LMuTCO2VelTVI+Smgnk6xrt+6nGas6L8c18ZYT3qfc2OsQ94tO2tC3Gx03/Z0NtjDo+236puFLOzN/5Ys5Hvq0Y2RLf37PsSrjxrOyH+M47i8tgw+ZBR3ZF6vj436ezOtMvEMfXRHLnx2uyrRsoQAKoAAKoAAKoMAmFPi0wiQnZb0EvnfMyrv2vEuSp/TbjmxDM7Z99akUIz4x7SWNOakf2GjHkP65vbVhJlGtmmmy7F+JCv6agi6c7yWz9cqKjN23qW0X7WiP5/5ig0niS2EySv7jcV9s2oIhxCvoqvGZmd8JNTe/P9flQrU2PsUp5v2x8R3PI3704pNWhHLx65xiFwVQAAVQAAVQAAU2rMDnFyaa8DVP+PuJWtB+kPTnuHTOhyLCJJUlScy9wkZ7XBNP+zsZXYVwNtqkuWODTBDs0ETTJe/ZktHx3KC3Ee0MSfXi/qZPGFJ9jb/DKbP4dnrGHzf9nQ1W11y86DBm7n5RYOaxGjcFppm/GjutdiTd/fwlJmYe1192gw+GodikLY606+J5BqzIOMU2HZVPFEABFEABFEABFHgMBW5SmOgKRn66PJOohTBcdN4nkHE/zykDSzLdeXIdEtKw6qBP6hckqT0bm/FjIlxeB9OCR39bMQ+dTfajhloo1b5G+4vtdtSQ+Gqy3dhXWlYJsvFt2H+mMKkT/GJr73iMjykYwtzFl+H8xfSwZbWq9Ajjqd5mHtdfdmv7YoNqftfn3Dwa99hOY1cPUulen2IPBVAABVAABVAABTatwI0KE1cUpGSxea1HiwaTGHejMThfJYqhYyoM8rv7JeGtxlV7dKUjPamviprUISeq2iePLa9WzSSfuV2/TWVP3qntr+yp5p8bsx6jv2IhE47aDY7PFCYyWtAp+Wzttsc1cde5o22xkCm/oRnMnzWKGyH2OX6j+eNYIw0iP+4VOS3q3Hy6a/2xftrfUX3/+WP+V
a7Mhpl7sPqj8/KJAiiAAiiAAiiAAl9dgU8oTL66RNiPAiiAAiiAAiiAAiiAAihwbQUoTK6tMOOjAAqgAAqgAAqgAAqgAAqcVYDC5KxENEABFEABFEABFEABFEABFLi2AhQm11aY8VEABVAABVAABVAABVAABc4qQGFyViIaoAAKoAAKoAAKoAAKoAAKXFsBCpNrK8z4KIACKIACKIACKIACKIACZxWgMDkrEQ1QAAVQAAVQAAVQAAVQAAWurQCFybUVZnwUQAEUQAEUQAEUQAEUQIGzCvw/7ad3caY5r4MAAAAASUVORK5CYII=) ###Code p1 = float(input("Digite o valor da sua P1 ")) p2 = float(input("Digite o valor da sua P2 ")) media = (p1 + p2) / 2 if 9.0 <= media <= 10: print("Sua media foi {0} seu conceito é A você está Aprovado".format(media)) elif 7.5 <= media < 9.0: print("Sua media foi {0} seu conceito é B você está Aprovado".format(media)) elif 6.0 <= media < 7.5: print("Sua media foi {0} seu conceito é C você está Aprovado".format(media)) elif 4.0 <= media < 6.0: print("Sua media foi {0} seu conceito é D você está Reprovado".format(media)) elif 0 <= media < 4.0: print("Sua media foi {0} seu conceito é E você está Reprovado".format(media)) else: print("Você digitou algum numero errado") ###Output _____no_output_____ ###Markdown ![image.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAA4oAAACOCAYAAAB39PZzAAAgAElEQVR4Ae2d63EjuQ6Fb05KY1Ox4th/KsewCciRTDmMqXJNCrwFgCAP0GSr9fJI1tmqWfWDxOMD2E0029L/Cv+7ksBnOex25fALxXyV49uu7D++7ODvY9nv9uX4G9vA9ug8HsPt1g31mr5oQ23461B2u0P5rLuf77uye/e9UmS/2VlEZm0rOt+OpXpQytCGUorIr+2irGbovC800U2QVYr45MzQV7PZdWYR6p/brb67jNjy62PfOahv5ve0v8ryOBvvxjHYLbaaTtXhtqiO2F/jBbrFwqn+aH5B+3Ub49byMdk5kuH2wblZHE/p6fnXGYBY3VT/IP/yee6TAAmQAAmQAAmQAAk8BoH/PYYZz2yFFTG73a6EfzgZrkVCOC/tvc3kfJt4T873Am+lUEyFCBZ2RQtDKKSk4HGbQqFW5sUeFEmzAmOhZxZukBWayHEvhORE9qk1zhxsv3Pyhrnd7Dj0TzZgoTb2G/pW8b0d6A++wHHts5TRLIVCt8u1s7JvcZz3l5bqwxmF4ik9LV9zXrnRXgjnsbL2EAX6cpMESIAESIAESIAESOD7CLBQvJp1XO0arrxpMQAFWdZ57vlUtNjqm69WJeGhEJFzfbUnFwpaYKRJfCuyJjaijFxIdEu6zn5svKXyqg1Nt/qbCvFhcZFiUQuTJqepXLazU8vjzSexAYoqtVOL6lkxZsfHDwfs3HJFcUV/s9021vTnc72Ai0K0HfhUJcfV8NZl6edcj/gxznfNsfYwognnBgmQAAmQAAmQAAmQwIMRYKF4dUCWk3udQC9WwMYTZ1U/KcKaaYPzOuGGSX6ftLdeurE87hP+Ty0IWhEx0BFW7kbnfYXIJ/6pmGqWzI63BqMNKKY294c+5v1q0dN8b+pX+icbkGsrJpsc2ciy8CScU67+ajAc1+a2vyx062pg5Z7192Isy0MbZiuKy3z2Xpv1THJF5HTbXCo/SYAESIAESIAESIAEHpEAC8WrozKaWKcJ/srEWdVfdD7rtf1QVEhxM1jZsUJWVui8QNlQNIxsXMg3v/vrq/6a6GS1M7HH4mvtbxTN/m47itFCxAvohX29ZShYwLdp/5VCUe1xnbpia/6Ojlt8oIBT3d2Xqf5uum4hq8BD5Tlv0JP6y260zxoE/anPKT0ed2s3fjASuCf53CUBEiABEiABEiABEngcAiwUr45FLtiqQCxS6uR98RqiF3FQqAzNmZwPE3ftWAu19vpoL0CCXLfHVwLr6lcoMmuHVjh4nyZbCs2VYqC1G7cJ9rSdaH+wJ+hfkxllLFcNXdms3eT4SqEoEpVTfmU2HfdCKq42Wv70on2i382unxr7Fr+ZfpM1Y2D5k17pbQVvUlh3Z37a6rPJ2n8c1189bbkBuiero2MreJQESIAESIAESIAESODeBFgo3psw5ZMACZAACZAACZAACZAACZDAkxFgofhkAaO5JEACJEACJEACJEACJEACJHBvAiwU702Y8kmABEiABEiABEiABEiABEjgyQiwUHyygNFcEiABEiABEiABEiABEiABErg3ARaK9yZM+SRAAiRAAiRAAiRAAiRAAiTwZARYKD5ZwGguCZAACZAACZAACZAACZAACdybAAvFexOmfBIgARIgARIgARIgARIgARJ4MgIsFJ8sYDSXBEiABEiABEiABEiABEiABO5NgIXivQlTPgmQwF8g8FWOb7uyezuWr7+gnSqfnMDvY9nv9uX4+8n9oPkk8AMIfL7vym53KJ8/wBe6QALPRuCmheLXx77s3uNQtgEug9z+7T9OTds+y6G29T76+awTPp1wdP/dp9Mcni2VaO+MgI4Lz+k0PkIfzZVzb4Y2Xg6/gqSb7ajtDzL25FoyGzeLc78Oei1aHL8ZmVLKRfGaGCD2eo7s5n5Oev/sw8rm3HFxHRLJm9UxJTZtGRctriw6r4tI733XMd3V3GfrL+TyfRy5Tuportgk5rEl19m3Y/kczC9bn2s3bnItl3vxBeMccyL7fq1fP76/PRBevVYPGDzSvGZg3sMdulmh2CbDYSJ8SRDvO/H99gjoBShfPC68oHy78VR4NYF6k7PHIzYeZsXOZYXHfcfLI11Qz5kgfv3uD6Rw++p4ooCbTC5KsWsnFkJ1NTRcS1Hxi23jROrJXJfYTsf7k/nyKOaecx14FJubHU+cy82HG2zoNW/r9e33V38rBLdvYEcTcZNr+YXzOuZEC8P5G5fUGPWeu+VB3/kG/cgeNykU5cItT1eXT3wuGTinJr52vj19DxcbPAcTLx2IfVVv+vQhXyykX00muzkd+2rn+2ed4IncXAhCrqjMfB6SW3Xsy15WEzxx0V4/piLrBFLbHsrhrT71rjr28qqdv56BMnbwdLz6dNRXOWp77W98cFJjE1g7vupj8cmutN2X44dzAz/V/pQPoLfZDejaprY7lKM8U
dRVl8hT86+uxqD90h/P9bhjnuwWq+BNr24Ac+RY4vGsN8roe6s3yOqnrclH+S03sk3vB83J5tuMaTge+XXr6lbLnX05vO97Xiaec59X4j6MJcQjjOduGeai6NXx+Gb50Pbb2wogT/LCZVbdh5b7MC4wnm8wturxxrdADld5/g4F5tqcTffJtkBeODU7Lo0q3w95PTKNW7VpX8K1QI/5OIbrYhq3GuvKKvMt6rfLWDKNYxP4O3sxu+WVyelMg+O2AzYf3mW1tds95Fyva+3xQIhNHEtN74jVVhtBn7J6P7RY+FjFnPUcHNpe47mv+Xz4NY5v79tZTOOi9qX7SsLc5UE8pQ2wR+6p+yJOoW2QMbve5OuEXa997AR+cI9V3m2sT2I78h9ji/fVTbbW/H07lnDvBCjI033Q0yA/53IYE2CT+XjpfMPHaeQ+ts/49dwDh+rmiLeNoXPyFORiHOT6kGOl+/4nAzG+Prb8oer4Wh7nI4vrWsod9UVjBOMKbYS4gBeDTbxmVzZ67arxwGvhLCeC73gdPeqfUkxtRRsvsj2Ne5A3G4fxOrEl1wbIVuYUmK9+/VxKqJz9jSr0PczX8P6T5zUxx9r9Idk2t2Fp1U87cpNC0aFoQk0Gg07wIfm8z/LTBgcGq7exgLaLsA42T9CYMM2W0ObEk4TRxaLabElbLyQ+yKuveg797gbXm67bWE+gTZrYcL7KNv9rAlfZ6pMzrAOiXzhg0ovyfTI46tcmx9UvlTnY9gGzycdqs+qLMbFJjfsa46wM3UbkJ9uJN3LAbW/nudNyoMkQ38ymcQ5lxbXQdLsgVkG2TqIrt6UIOJJ0wxndVD9NTsypFVaYB3Uy7751pikO0mc1lp5L1s9vzmusoytJ36K46q9Wqo0+CUx5G2XGSWT3zVrJvvmdGKPMmkdLPum6EJie8sXipWy8mKl6PA+zH2FfdHl+hRPJj8G5NjEf+Nh1W+6MfLbxEsej50Xkm2wZ6AvyR/HEPvmaFHyTnYHNle08B6WP+1LjWXNcffF819jWdjlO59gIcVP5k9jjdWKeI8bX2fuDgJYXNR+X+b0SF/RzwbcXPf1NB2e3cq1ZyBnEaXTdF1ucf5KBfML9Qe3v11SMoWyHfPPxgz7jtugMsUXett3Gy4qtXtQF3dWveWwHjEKuOHe0yR9wVv9rnjpDZBFxzn1R+5xTyPuoN8qzPeTtuWm8al+Xq8w9NnbOWWW5Ie45VrJfZUZfITerDy5f27kdIdbWB9l5n+CL9om8g4+T/I1+ia4UT7cp+LiSEzPftX+9L6OtYgD0med5tHS5h7bDvVb1jsbhiVwLOe7ziaXW6bhBn3TO5lyzDLAjxD3d0/Wc2xHzNuSO+lt1bbYh2/Tz9u9bKIYkq8E5OeDqwK5Pym0FqSdqDAEkdx48sWHfC8Hvh3Ury4C2ywtWT9xw0UsivXhpK6DVr3Bj8kElfUGnimoMYUDoCdhPAySbEGQ2edZK/GoXzuw/CFrzUc/5BVH6NB/ARpUF8Up2GKdJnBf+uRyT3+yvBa3tZ93gTNh0WeFg3ckyur41HiNJykRjP/FROk35ox24LZ1gP/dv3KDN0Dg42GJXj7V9kzFmDf11M+sDxs0m6xM5QrssMsS2TqTgWhLyOPQFmUn3PE/RftwWwVmexTPrj9eLYFDcaXzjYdWEYzOcXolF9lH3IefgfGS/UlwF3WpZnxCBPDkTZQKrLGPF78U4aD6s+B0eAGHMxAafIKiF6cl8v45nE3t+LM7ANS7nIuqOPOY5kv1K+81/syPL6dYBb+GL95XeyLZm/HO/pDuIyed0X3hGBqFP3kF9M5tSXnX/sx7ghnK9/6Z7VDYQ9pNMzNNuk7WXfS1OhoxsPOpYGdqUcwri6r7Ata9bmHn4GeBSD3V7l+e8l3/2tnIEdaS+ydfYz6XZZ7hOZK7TPADdLdeqXOgTZCde0aYsr14nQZZKz/ZFV2AP45TYTO4b2hm5Nd2nrltwTW99UnEkwuEcGLrc3NiuswV2SVpknHM5Np623WhPzMcoO/ie5bX97AfErbVJcl9w976FYgYq4NduXto+D5AkRGX4qxXyCdX/RLYkYyjU8OKM4nHAynFIlJjQeEGIkwEUp9v5gpYbgA45tbh5NJsyF0jw1qYLn/qc9AW/ghyTH7gNb04D/5sOsFFNA27SJjwMgFh2N2wr2CWHXM7ARpGpdmZeIHShezZZXMpAXsi4Ff6gZripuuEij43QT92OeWs6sk3AeCHbOYkS62fMJ/pH+ScydbyssUYnZBts0lNgB/ro+lpeQbssclAoTovWWXyT7j6+jU2PIdqP22IU2NjkWRu0p99QB47gocYXD9o25lo8m22ym7HqbzbVHis5ofnb2MdxvNC9kWn0G1jV+IUxP7sOZ5ubT+Z3kNHGO9jf2gsDi23uM2R1ro3V/sgqxqbzWMuR2GcxfoI/EGtxbxYXOT7jW1NDbavX4Ja7C3kr1+UcJxwbgfv8eoPjacQxxK3mam+Xxy2wSf6jr12m24U54scqJPxIMnuhuBLbzAhiuWZT91EMiOOo5xQa59sjX8y+7ne9ryjPnHsup39GW7A9bi8fdMZ+XZ5sBR8yV9xXXoP7IHBUydBH9PoKYtYVbQL7QZ7ahmMHzkUv8h7GCWRrMzi3khPxnoTzEpCX7QHf13IqW4v7C5/bSdMbcqfdM+a51q4nOdZNrmyY7Flb9AXbBBFprqGxx3lljePCv8bMfOj3friGuO35GhkNeIm97y8UcQAOES8D15vlc7JfB1MePN4pD8qWIN4APpMMTK54gQG9nkxt8IA82VSZOODT+WzPaF8LYLhQqAjYT3bb5AFueCgTt+vEqA1CkIO+izrdn/iY2/aLHdiYWSQ7EpW4mxm2/SQ/9JqdW8mh0F92sgzbb7xa+5gP7fBoo9k+ONn4Zz1oB26LDNhv/avsma419vlc2wc9A9PjodQW7Ug2xrxa54hjELdFd99fiW/SPc1TZBq203gGeV2/kZB9nKxEPrg383l2XPoa356HsA82qZbhvl2PIvs4xqM/25lGmeCD5BE+yGt5hSzq9tBmuZ6lvMpdvZ/IbtcqsGHW3o+fYyPYH1lFG5FHbGc5a3bGPh7fNnlxv6qdXc5KXMA+d2/+CfrP6Zfsmt7rTsg0fz7L8a3fJ5UbzBXGHMFudc72dVwkndh/ziE+HF60E5k5h+t+j4n1kn2N7ZCR3ZvXbIryYg6v9Qs2NwaZE7ZaO9d96dcazLnUN/kafUCd8VrTr8O1TbK76wZ9SRfKyHxwP9oEvqC8ph/swbhHV2AP4wS2ags4h7rkHO433WCb9gd52N7nZlgQtWsfmHZqs+mNDZXdZByGltA/MsbrXOihO9vagu8LEXBObMA4gU2YHyqinYP+esL2e865wtzOj7/G510LRRygfvNbBiCDzgMEz8dzKt8LxfoE02+uLcFD8liwp09adQD6a0qxbUxoGPQ+UGeDU2X2GyB6o9stYeuZkQ1VdvNJmqpf1dZ04bBzXihGP/KACX6BnKDLnw6v+jjiVnWj/WvxwkGOoCoT
zx20Dbd99cDbiW9twqgyJA5rOYRKbVtl+IVSmVssAzfl47yTjBTfaG9q2/gbN/cjxNrzLdjk7M0379dtj/mq8maxDOMo5k60PeqKnqzEvfloPVRmsyXZGYVCMYiFoTXq8TC7wnXAcy7pxrEQfMOxVYsTzyNtN5Bnx2sOqB6PSXJisBv66vnIb9klxiVM0LOPNZ7LnPAJil+bjJv72XmK9u1M1ZdRPJWpj5Fk/8JBOx9srtcGle+5n3zze4w8/fb4q/WL60A9n1mdYyOM68jKbHf9yCPEOeRI7ON+uIwwkVx5KGLyazzBvgVev4a0OIn+mAeuO9i8EGR5EeKksZHjLq/eq5quhZB2L8P7supNcR7lZr/G+T1x4r/y7jb1fmfYqvnRc0tlVL8Cp0FsAyO/z01tyte3aKPqGvKM7fA6P+JpNuXcW8ans3LGziD1VX98jGcfotzgQ87Vtm/ynV24DyZdeC0P18N6jcDcaXmG8UR5uh199P7Ri7yH/BMbtcPzL/qlfD0nmu+1wPIxsLA12eft1HbXk2Rkc3F/1M+/sNFlB5boaxzj87GACm171jbkh96Du09RCnBWRp5/drzFutpu17V4bpnfpmu7DdGin7h310JRgNkgwFcdDKMcbxeAQFYS0AdBOKE7llgmb/8hTyGhbR3gtkyeEkaXj/fl+Eu+KdDPrcnHb+/MF704SGJCJZlpAKaz4fXWdq5eFNSPNkjlbE1w8eV95VuwsJ3csNFnuBCJxBAHtdXZWBway9Sv2eobYLN+s5vbDTHZf8i3uMGAh3PtFWKXh5/Vrv4NZ26jNZrlWOAFObWaQ6hXt4E5yPDJs/GBHFz0r09P2+sQ0fbQHPkDT7lJhThhfOu38PnEzm6S/qoO6EJ5K/mv9kDbg3yzpsdyZTwHP2QHYhvijj4uJqxxXGWZHje5bkQeMY+9ncQmXCOS7jC5QKY4ts7wBfMQr23Z1uyX7gNzs7t9f+egueWkfYOixTrGH+Ke7A9PXJ2/5mb9VuE6Ac02b2Wq7dokFuOJ4yhdkwYeLvOn+4Sc8wTO7OxtTTTqhvtOzgfMgXzdzDZKvOq4iKxqbOq38EUe8X7YcyT28evWLKaobxoXsC+bvspETsLYXb0uL9oC95DPcHxojN1rOg9pNL//oP/OanEdHvk/s2l2PNuqMu0bkVUfXBfVYnkgUa/zwRfgaddC4DHRHX3EcZRW40Y2Tu41ItPt6+Mm514WqJ7Bt73DvKOOly15upDqfsu1Qrn6t5zWosPZejvxCe+DeewmGX1cxOtayCu8zo/kOUe3xa+X7fqWvcI4Za54Lo6xkBPBD5NhuYbfxI1zijhPVYuQGdzrY05l2/tDgJjb83HohbvlFOR0mieEsTBQi3nZ24Lvq78rjJyxz+AeA1zivAb74VwuHu+2DZz44YduWig+BKvfx3JoX3/8EBbdxwi9sEHRdR8tl0kNF7vLRIRe+SIeTp7a+SrHd7gJnWrO8yTwyGNLo4M3x1uFy2S+8s3wViQp54cSuPV97Ydieiy3fsp17QbX/FeZGz9WAv4Ia35eoXjyqc/zxg2fvMhTnPYk79FcuvUN9apC0Z/exSdej4aM9vxdAk8zthTTDSYNIgeesOpT4emT8r8bG2ongYcgcOv72kM49QON+JHXteuv+V8fh3L8/QPjTZfuTuCHFYo2mB66iLp7SKkACbRXUDgJRizcJgESIAESIAESIAESIIFVAj+sUFz1lSdJgARIgARIgARIgARIgARIgAQ2EGChuAESm5AACZAACZAACZAACZAACZDAKxFgofhK0aavJEACJEACJEACJEACJEACJLCBAAvFDZDYhARIgARIgARIgARIgARIgAReiQALxVeKNn0lARIgARIgARIgARIgARIggQ0EWChugPTQTfSnI+BHpB/aWBqXCejPMjzqN7I+/O8JVpr6dej8+ZOcW9wnARIgARIgARIggWsIPGSh2H7T7O0Vfyj9sxx2+8nv3Sx/S0dYHX6t9bkmPWpf/n7UDSAOREgh9jdzHOIqeZR/bN1yC+y+9vcsQdTJzc26ZEzMxstJLRsaLMfchk7P3QTy4rkdofUkQAIkQAIkQALXEHjAQvHeE79rcH1H33OKvq/y1X5AFbdvbCcnjjcG+njiRoXiwsrNxdui5xMfYKH4xMGj6SRAAiRAAiRAAlcQeLBC0SZlu92uyL/DL/FsdKyUopPWfdm/SdtD+ayT2OPHXvvudFVOii6TtcPX+7RtPS59HWAtiI7vcA7a5hUX7Vb1ZhlfcvJseeirrJLY/v7NfDr8sn3jUuW7f42X6z2Ug7IRX3DFBZkc+urlBj/Up1k8nOHwE/1KK1fCyH1YW13b0u4c3jN5EG/kZoXUMeTTV8i17ri0dZ+GOVObjtudeFAwsxvj8nYsksOqe2NcQ6GIOlpeQd74mAms8jjal73E1WOKMv1YR7bcCnZfkD/BNsz/UnrcYi6O47Ey5tCPFX3BuQmHwL/Gso3zIKCUsZ35egDxKLFPl4sx3ZV2jRQbwbexvsrlHcYvXmMnfiZXuEsCJEACJEACJPDABB6sUBRScWKmkxSftOjko0766sSsTXrqvk/MbXKDbX2yaJMj77eUnwvUOuFS3XHypXFVvXAcJ1l1smS6fLJ7Ql7BQqH2aRMwYKN63ac6+Q2ckh9Vhvpbt23CjIxW/Kiyl7y6DbM8Vz3NB/Gv6gk+ZF9BWo1t4NjkQbutvKd6ga+IFXnILdjdJ9ZLpuifxwHslCyXItPjFfyL7cLe1O6xvIsKxaBjJNdz5NQ4grwI/q3EGZ3VPqZrPX+cL8qdx9Ef3thDjz7W5vEAWYHNRn0Ln0b2WiHn1658DUQRczu7L9Je2rm8wK9xNfu9jT94O8obCnANm+ur/nsO69jDa8nYT/SF2yRAAiRAAiRAAo9N4MELRZikKUeY3IRJW19h1ImOT8RbMQGTKJ3Q+GTX+9X9dE4KgDiRgn4e1zbxqgdgkqUTLi8u6lP9k/IGhWLrk4poN0E/V/T2iSJw0E6wf8oPnRCuxCMYE3e6/sFxn2jKKfQBm+bjsg9cW9N0fBa/MPkNerN/TbKt4ozyKeVa0OkrOa2fy4M8rodyP2+Jn6fs7nkChcemuEJ7VCjbyB5lJdZWaIzHUZDhMkfxQ92ga5Y/c7nrcUROpnItHl3WKf7+8AndCNvIUk4Awxj/rjP0r+Mf7e/9YCyHTjNZoZFkan+7oNl5mku3ZdS/6gA/s1bukwAJkAAJkAAJPC6BBy8UZfLhT6YNYpsYwURSz6T9OLlMkxh/1bF91ifhbYKUdMlukm8tBsdRBm5fUSj2CWic9AkLf8VRP73oSnobC/UBVnpwcpj9QxlteyUeDch4A211f9SuFgP3ZVmMLybo2VZX2ey0Ay1XZBf6rOs1H41rtyXIQm6hUFxOrBt7t1E/rV2InXBYFJShU3hlsvcVG2NeSK9mL/it0pARbLf2XtxiXDyvUJb0xTa6PR5Hm+OH7qKuZFPIH7dN+oY+ozgu42Mq1+LR2V6SN+jSGgfkP19RXLPT/a/jqHFZjtl
m0yKGOX5r+joXkyd6rP+an003N0iABEiABEiABB6ewIMXinkyYvv6FDtMCn2S1Cf2OllpE+8+iVmsQmCIYOIsh8PkLevzful4mCRdIi8UIWP/daKsk7zub/Ar6e0sgIPaD/ub/Bjb01cVHMraZ9fZ7Vprn1a1pGn23bsnv2fxu0RvkBViVF/PrLkW21kOLQvAzNEdWP+c2w3joopodmyKK+R6ZotMURYez2bnc6P9M1YUo/iePyHnpZHoGckF/Y1LELoWj35uzj8Ii6uweArs0MNgb7RLfIwPyExMtwXFjra7rbM+WceI66yvaMznRv2rZeDnyFYeIwESIAESIAESeEwCD14o1om2Px3XCUd96o2TVmGb9vtESU7CJEa3+yRM2/nkMk3kwuQtyW/h1OMuzyZP7W/PLpEXbM2TMdgPk691vchCfPLCxXxHnqf90P6jeDQgy43AUf2rBa6yq/p91chlo5gR4/YQABpu5T3Vi3lSC49hARjbId+QT8FusNNXIZuvIg9ec45N+97U7lqseh5Xvf2Byra4avu1vFK5/nDCbA6re64/xcHGZrJhFL/uaRjP6/kzkhvjowVk1afxSdzFh9FxewACY27Kf64PXVrjsBxX7leQMLczxCb+jaLKdt7Nh1H86liE+G3ioiYCA9Xh9tdrk+uP7nCPBEiABEiABEjggQk8fKHoT679NTefmObCMO/rBKdNTmASI8GoExmT2QuVvEIRJqhpIoYxVV3+6t0HfGMgTLik/TZ5dWKlr3HBJFUV4r63k1fN9uX461j2k4n6koW9nrb/kG/x7P5v8wP1+mRQvQuykI8V6v5qKfbxVSA/50VI7K17WsDUdm2in9qdwxvlOTcRNzkeYheK+biiKCJ0Yl5fy1xbbcV2XrzHhxrJvxX7ot5DOcDf126Ja/cP45vyqj5kaat2G8eReoFcIX4xN8HfMN6sqFlcAzIPkDuLY+QUi/NxPHDMzfNjTR94FfML7W1s5UHOsRzf0jgBIWM7/WHBaIxgTLvcnhfC4bPrHIwjZ9/zNHFJYyLwAD+n8Qb/uEkCJEACJEACJPAYBB6wUHwMMC9hhU7eV4qzMyF8fRyKf5nQmV3Z/MYEeuF3Y8E3F/dZDu2Bzs2FUyAJkAAJkAAJkAAJkMCFBFgoXgjuObvFlQX8ncDr/fkqx/djsZ8duF4aJVxH4GkKxV+H+nup1/nL3iRAAiRAAiRAAiRAArclwELxtjwpjQRIgARIgARIgARIgARIgASengALxacPIR0gARIgARIgARIgARIgARIggdsSYKF4W56URgIkQAIkQAIkQAIkQAIkQAJPT4CF4tOHkA6QAAmQAAmQAAmQAAmQAAmQwG0JsFC8LU9KIwESIAESIAESILt4nuMAACAASURBVAESIAESIIGnJ8BC8elDSAdIgARIgARIgARIgARIgARI4LYEWCjelielkQAJkAAJkAAJkAAJkAAJkMDTE2CheHYIP8thtyu79O/wCwTJD9m/8TcFgQg3SYAESIAESIAESIAESIAEnogAC8Wzg2WFYigMfx/LfrfjD4efzZIdSIAESIAESIAESIAESIAEHpEAC8WzozIoFEspXx/7voqYVhQ/3/sKZC8w08rk+2ezBNvv2nFpvy/H360ZN0iABEiABEiABEiABEiABEjgLgRYKJ6NdVwoFl1VPBQt96BQ1ALSi73W5qsc33Zl//Fl2vV4LQKhbynSjsXh2SFiBxIgARIgARIgARIgARIggasIsFA8G985haIVhH0VcaYMVgtDoThrz+MkQAIkQAIkQAIkQAIkQAIkcD8CLBTPZjspFKXA2+UVxUlb0ant+yupO3itVFch65fltFXHs+1kBxIgARIgARIgARIgARIgARK4jAALxbO5jYu/8d8ozlYUswzZH71iOut/ttHsQAIkQAIkQAIkQAIkQAIkQAKbCbBQ3IzKG+YirxT7+0T41lN4fVS/mCb8jaIUhFGGrSBaoRj+ppF/o+jQ+UkCJEACJEACJEACJEACJPCNBFgong3birztv6Noq4Le3v9eMb5e+qlfbmPnYvv+6uls1fFsB9iBBEiABEiABEiABEiABEiABFYJsFBcxcOTJEACJEACJEACJEACJEACJPB6BFgovl7M6TEJkAAJkAAJkAAJkAAJkAAJrBJgobiKhydJgARIgARIgARIgARIgARI4PUIsFB8vZjTYxIgARIgARIgARIgARIgARJYJcBCcRUPT5IACZAACZAACZAACZAACZDA6xFgofh6MafHJEACJEACJEACJEACJEACJLBKgIXiKh6eJAESIAESIAESIAESIAESIIHXI8BC8fViTo9JgARIgARIgARIgARIgARIYJUAC8VVPDc6+ftY9rtd2X983UggxTwkAcb5IcNCo0iABEiABEiABEiABM4nwELxTGaf77uy243+HcrnUNZXOb7tyuHXZzns9uX4e9hofFALj5nccZdSRI/om50/73jz9+1YXq/MXYuZx7XzFFYXxbmLOLklOp7mgcOvQ9ntzs3fkwgetMFtx91FTl50vbhI02WdNubDuTl+bnsx/utjX3a3uqbVB0RyX7jVdfcSwJdwuETPvftobN7Hd9Pb6l5ew28rn9JIgARI4PkJsFC8OIbfMDG8aOJ3S7vkRnpmcXsxz0fsuFYoZnu/yld7CIDbud11+081GdxYGFxH5FF633LcPYpPN7bjTvlwyZi4aaEofn1LYXPjeDyouO8rFB8UAM0iARIggQciwELx4mAMJoZa2O3L/k1WHOtKCjxtbsdEZzg+KcZCoWhPP9tqZngaDufeD3FFMeiB1Z1wfKQfZLYn5aNj7gv4Xe0+ylN7XX0V+cZL93FSpZPHvkLrT+Rt8ndUX7yPTiCavEHgAq9SisiunM6Xh76K/ba/fzOfDr9s3+1VXbDS7MdV7/tBXz1WP0LckInErcbhpB++tos2rq1moJ5jXeH2uEFOAC+h23lLfDxHqt/vslpY44bxhLw6aJsk3/sEDjGWY72xTSnR97DKijnV9FS7P+w1cLFd+kh8zA+ws67KD/0LZoANW8dd6F9ztDI5qG3VjlEsGmfQK33dx5A3sQ3y6Xz35fiRx0jMLc/jeL0CVmrn3vLb7UAfL8gHG6tmR4/PblqMYfvZOFSTWl7sy+E9riiiHmSFx4fFYJMJcRgdK0Vzza8fqKPZ9nYsR8xHYIfte/xwXFb5/ucN0Dfcd07FC2zfno+m28cL2oqpINvIc9gO9DfeeAxyTONer8cuC9n4sbneek2Qt29GXCZ6VUcbi9lD7pMACZDAzyLwsoXinz9/yn///af/ZPv8/2aFIk7YYxu9SeqNDm5QolhuSKMbD0z8tG9rM5PbJ542wbN2fsM8W3+djPtksfd3PVjYgN91khL0eqGh57CfFyDxdTDVlYvt6n9kAZEDXnpUb/72yuxF8rRgcPvqxLvFAGKIPnmBVSc0I72BZ5VnExzkkifj3Y/A1SdOOqlxW4GJT87c7jr5URtWeNmEu9vQmVcOQ72DfEsxNN8zS7BX7RvphTbO2H
3SONU+6pPnIuoZ2e2v8do544rbXkyvcA0cXO/K+AxuDNo5L8hd6YKT0x4LOQMyIJ7Y3togH/cnMhG5nlteiFu8QIfnU/Db5QXnmm0ucz4Wqh01ns2OwEDajPW09ivj0AvdkH/VBxt7yKfGcaN+KzLqq/nT/KtFknPLqDTvPX8qD8yF0bbHInOrORG4u17VM+YYcsllo16XkfJR+fm54H90cso5Ngu5bnFze5d50h6SiIwQL8lZ6ze3z+RpTmQuwY+oN5nLXRIgARL40QReslCUwvCff/4p//77r/6T7fOLxTh50ixJE5U82babnkxI4Aa1ll4qr05gQjvsj9vSCPZz/2YftAly8w62w+2uRycjTW7tn/b1Rh0m9X7jT/rgRq+TykmfKA9kZH+vlTcoFH3yFTiDCbo51YsM+0TGusP+CT/MBpQlEmy/2+dG5TyFfit6vLd/duZZz4rdKB+YqEzZ90moKxl8dr3x5Ox4nCz6A40+5hoftK1Oitu5oAr8C8eBox6H/exb0tXErLVLvKb+Tsb7rL0e90m9GAJ6WsF1jj/Zh+acF9lw/UIOoFe7gJxmR26DsmG7tYdjTab7mmXBfu4v+/rgDtpk0WEf2+G2NJL9mudNbuhcd6CdHAk2IbfUF+Pc+uT2ul+vuUlPEJfPoRw55yxDoZivB8l2UNDsq8dmPNCnU/mKD1izfFOzZt/KmE3+YhzBJW6SAAmQwI8n8JKFoqwkSpHo/8m2HDvvvzwBH0yM9Mbrr7b5pxdJ1n/52htYgTdq3XYZ9jl62u8FQ39KChO1UPhs0I+TUFy5qCa2GzPaKefSPt747am1M4ivIimLOhlpslVXnKxHeRNechhu9hfJC7xgUqEq477I91ev5n5AH2XUOQQuid/Yj2X+RR+dS2QX8mNFj7cLPmnhDj6oCpCv+Q75BvIXEz4455bap8lf6o2tZA+Z21iIq9Lao+lJdrfjJjewW4xbjJPbkfmD/EV/yY2BDMjPaGvMXTkXcl5tj/mm/g98co6BT3sAE/UEBjj21/zJPjge+dR+5+cD2qF+11dzx4V8LEwwJ3AcLvKv2W1xQ9nIeot+HJ8LPRAT9Asx6Xazx86EtiBjPi6BQ+aO17GkJ9iRz6HedK4zGoxXiRfmmCpZ54x2dNk172v8PZex8O5xW8o3mWv22bl2r8yFMOznexray20SIAES+MkEWCiWokXj3QpFvNnMMindhFuzdqPON0G4weGETjvCuda/StT9DRPWZgDImujRG/VQT58g4o0/FETiN64qAYcwUcKJTp40N1snBeo1hWfQiyxEKexv9gP6BNkiDwquxBMnn50Lyur29ImTg1kpaFb0oE6VLn9veqpQTPLC5Apiq5ZlZtXcuV73Z/QJ7KZ6Eq9ka+eaeYHsoDrJy/mwddxj/qNNyQ+xD/n3OIMd2D/Y2n3IfLHI6QykM3BItgTRa+eyPbif+8k+rLx1/1wb+OmH6mezG2ToKdSB23IS9lt/kDcrdLzgrk3tA2ShXD0p51b9qpJQxsqKYo6f7teirPmBnEW87tfrftJTtdsH2KoHUE7qJ7owH4dcgnAoZOvxLiM2RJ9wO7YayzsnbxbXcByzyV+NK47VbAz3SYAESOCHEnjJQvG+r572AilMtrzA0ZtNn7hpXslNafEE1m/wIs8mSe0mqDd0/3uWtIoSztlkz/vpjVlvhhv14+TXV3D8Zqp66uQDJxTiUNqPN3vQrTKcl/norze1SY8Cgj7OccrLudxAHhZvicViktEmEWt67ZxPqnCipIx81Un5nfajx9NXbwYPAYZxq7JX9Kg9HutaNIwnhhgb8y/km3MZ6RrEcK43XoGX+VHzaKonss852uWJP87eVzTGXIOtg3HncbbYep6jH6ZryAvH14C/97EJLMbT9HR/RJ/oWeFT47zMJ+cwYlLliZ0tT9A32b4sH9x25dZyRGSN4+Dt42TedHfb0Id4LsQH8mer/lAcQn/336/tzc6MSfYTx9BWZRpvtanxNp+W8u2454jIahySnmjKoJ/nzTQf0/2n5qrrRvkzzthGtgN39b3HHX0JjLxfYiNjcMTM7LM80HGauazEMdvLfRIgARL4yQReslCUgEqxeJ8vs6kTKM+aesOx12b6Dc8neHY89Ql9YULmr+C8f8a/YakTMn/VSr45zyepNiH219RAj974B8ddt37CjRT2/RWgqANkw8RGuoUbv04knEOdsKlf+3L8Jd9IaXLiJEAmMN4nywsGmy6XN/1Gx63y3D7RPWZhDLxdfcVw6keWYRMz4bn/kG94TT6e9AP1QswjkjZht/w4lIP+rqc1sslbtRt4+UMOi/WhfLaJ1MiHbjfmm/kEeYE51yZ02djORFdimt61dsn3oZ5kd8pRzLfOROLy2b8lNpuwedwBnywDrg97/XbczksnxZoDh6LfIOxFE/qH14LgE3Kc89Fvpm2xgD7v8O24YjPYGV6jncanOgr9tuZDj0XM71HxIVrG7eP1RK0BbvqNns1vk+HXta5nm/5c5IVre9LRZVc+/pE4dp+cvecFxEiuldBv2WdwfYf2rjp8Yry25qM/jNJc9ZXGILXtiI1Lzu20bXicRvnuhWuIe+8/k4/HvbD2Qn5YKIpIt0P8gjjG+1nXzS0SIAES+IkEXrZQ/InBpE9PTEAnJT4ZvKcfqWC6pyrKPo9AKPTO63px61OFw8WC2fHpCfyNfHwKaJ/l4EXsU9hLI0mABEjgcgIsFC9nx54kcAWBuFoRVmmukHq6KwvF04z+Uou/MTFnofiXgv0Eav9GPj4BFllpbG/TPIO9tJEESIAEriDAQvEKeOxKAiRAAiRAAiRAAiRAAiRAAj+RAAvFnxhV+kQCJEACJEACJEACJEACJEACVxBgoXgFPHYlARIgARIgARIgARIgARIggZ9IgIXiT4wqfSIBEiABEiABEiABEiABEiCBKwiwULwCHruSAAmQAAmQAAmQAAmQAAmQwE8kwELxJ0aVPpEACZAACZAACZAACZAACZDAFQRYKF4B75Ku9sO/3/F7eZdY9719lMUj/x6Vfj28/Oj61/eCoTYSIAESIAESIAESIAES+MsEWCieGQAr9HZlt8v/BsVf/h0q2X87ls+PfdndrUC65HfyPsth4Y/4ty/H32cCas3BjsxB2lQWWIIJ2+8qyr5OxsDtFzZnchj527jMNiwGN/l9rov0z+xKx/m7ewkId0mABEiABEiABEjgZxJgoXhxXC+Y2P/+Kq0wwu2LbRh19AJndG527AJfZqJGxzcWLo9VKI4c2Xhso79R2p1jEJVxjwRIgARIgARIgARIgARWCbBQXMWzdnIwsdcCYV/2b7IadyifoWCwAq6tRL4da9FYC7v3Q1+lxNVGWcGpq30HaVP7xaIKi0Pcrit3bbVwsOqpLg58ya6jHR/Hshf/Simb7NjAQVf4qp2+qiiy3Xc/Vor5t3/b6zlbgbNj3ravyolfg9VA8EVXdnPcxHc95vqBWzg+kN36ep9om8fP8MK594Ou6jbbgx6XJb2gz26yAqt9e5/Odl8O776abXKav
pJYIaPdrrR2ctxzN7Tp+mJOmKf8PwmQAAmQAAmQAAmQwHMReNlC8c+fP+W///7Tf7J9/n+D4qpO7tukGibsWvS0AhD71ol/mHx7ATJvFyfjOOnHbevvRZba4HqCw6gnnKg78bzKubBQnHOIRacWN25r4Fp5NZbWLxYvzm/kix0Lr54G+XJ+4K/agmxLKVIogR1N26a4j+z2gmwet2C32tkLtJF+K3idh8k1m5MvWCiq/d6nlBCLViiKrNjG86zZwQ0SIAESIAESIAESIIGnJfCShaIUhv/880/5999/9Z9sn18sxmJCMyBNsG2SPpjI11UhKyhtwt4n2TABb5Pyml+wv6lQhIJlaF9L21pAtJXHupLmhZrorYVhl2N+XWRH0xuLlS4rM8EiMp+LMnzFrfNsysJGKLhy3Kb+Zl1BZN/J3NsZ7I/b0gD2c3+wL9jd5KYN6J/b933Qp90h75I4LYgxF3R7pX3uz30SIAESIAESIAESIIGnI/CShaKsJEqR6P/Jthw77z8rrtrqoXSGCbrKwn3d9lcZ7RMLxS6nT8B1Uu8TdBF4bqGYCx5cNQrODnzB86A3+9WLOzmDxQdsb+KwLAb9VdL2qat3IFeNWdoebUJH+nYvmAZxU24xVv2LfUyf2TR6CJDkTeOe7Qa/TsRN/HMmPW+6b5iH2hZWPbvfoK9x7CuEqEN1eR5iLqBvfh7M4CYJkAAJkAAJkAAJkMDzEmChWIoWjfctFG1S3le5cJKO25JIUkDUCTtOyuUU7MdiCIsOkIcFmvTX/V4M9LTF/v1o28qFC8g9z441DstCcVgEhWJULAR/1eCso3kRNnrB5Fyg6APOoVPembVrfLItaCtui2DYb/2rwtW4DeIJ/YOfoqV92yvoEzWoI8cb/cRt4NHlwkFukgAJkAAJkAAJkAAJPC2BlywU7/vqKRQcbcJuk/JWKOpE3P8eLU3YsVDU7djO/xZPV3x8FWcqzwpA1xv6hJQ9UShWO4Ic+BtFt0kL2fbFJ+DXJg5YKKa/iwv6QW71IfilLAbFU/AXCyYvkiBugXttq/4KJ5AtumC1rqnY6K8WV8MYzuO2LMzR7mpB0+++uc0mF/9G0e1XW9w3ZehyjXeIsdiMOmoB6vnROHCDBEiABEiABEiABEjgaQm8ZKEo0ZJi8T5fZuMTbJ+k1/1azOlrfO+f8G2hufAZFCP6t4P7cvyAb5ysxYzJO5bjWywo22qcTuj9VUWwLaRsLSDy3yi2os99MTn790P71lP/4pdVO7ComHLwgqx/k6cWgG5TK8gyL3GkFjO1bfNdGXmRFBy21VlpL3LRPm8WuIEMtB//btP7ySfKw/Yh7tIQ7H47luO7x9BljOIWY9V9BQNQv68iKptDObYVxahj/3GEIhjskuLxV/+WW1zVtuKy2ugF7+KbcMEubpIACZAACZAACZAACTwNgZctFJ8mQmCoFk6tYIIT372ZCpHvVk99lxKwAvDuK3+/j+Xw0X4x9FJj2Y8ESIAESIAESIAESOAvEmCh+Bfhn1QdVrV2/ffrTna8cwMWincGfEPxuKLpK6g3FD8S9fVxKMffozM8RgIkQAIkQAIkQAIk8CwEWCg+S6RoJwmQAAmQAAmQAAmQAAmQAAl8EwEWit8EmmpIgARIgARIgARIgARIgARI4FkIsFB8lkjRThIgARIgARIgARIgARIgARL4JgIsFL8JNNWQAAmQAAmQAAmQAAmQAAmQwLMQYKH4LJGinSRAAiRAAiRAAiRAAiRAAiTwTQRYKH4TaKohARIgARIgARIgARIgARIggWchwELxmyNlPyI/++H7bzbmInXf9Ft8F9nGTiRAAiRAAiRAAiRAAiRAArcgwELxTIpW6O3KTn6TLvwbFH/59wZl/+1YPj/2Zff+eabmrc2tkDv82tpe2n2Ww26/7bfv5Hf57mZ7KcL37j8Ifw6aWVvh8HYsj/qz8l+SY6fsy/k589WPn9te+0lu7cp5+egK0+dF+pOM2e6Dx3NmNo+TAAmQAAmQAAmQwL0IsFC8mOwFE+DfX72wwO2LbRh1vHOhOFJ5w2NPUyje0Od7iNpUKJ6r+KJC7YJxcq5dbE8CJEACJEACJEACJHBzAiwUL0Y6mADrRHpf9m+y2ngon2FibQVcW4Vsqz21sHs/9BVKXLGTlY66cnmQNrVfLKiwOMTtUora4Kufg1VP9V98gRVF0Kl+NEbRB1wlCiutaL+uVg70T+wa+dX8h1XSsb7kR7NbNsD2t2M5tpXLxCuvrgYWsDKGK1ChDTK2HFH734/l+Fb7h7wopaAsMRXlpTzZv+01H5B9c7P125fDO64ogu+4uhfsiG2Gq7qhfSlajLZVdcgfZP1+iCuKk7iH+Owmq8pT/dVfzbv7xTPmZqPODRIgARIgARIgARL4kQRetlD88+dP+e+///SfbJ//36xQhGICJrZa2LQCCvvWCboXBDrZ90n3vF2ctOLkGLetv0/61QbXExyWdqjTt60YGPZHO2W7yRX93h9tqYWFMkC/7HVT749+BWab9AWnwk5YYavFivkVbQyv4Wo796Xa7342n5HdgJfHXO3fUCgGnWabveqL28E126k+WQFZ21ZbQ9yRo/axwlb5uK1aLGPBW/VBeytmexuM1VKfj4l5Pp6tP3AyucipF9IQn9Dn/HgOqPMQCZAACZAACZAACfxYAi9ZKEph+M8//5R///1X/8n2+cWiTU77hNRX73phYat5fTLdswiLE9v2YiwUKq0YqT1hXybjvc9SntqFE3sRkSbK3R6YTOcVtdYIdchBsBvsas1lI+v3k1qsABdo1/3KfEH/TJ/LX3yCrfVc1wNy9RyySIJQb9uetV+xH/xVDU1WKl7kZDu39CFY19rVo20/+wdywI5YqAXJfQfa94O21fuP9Z3Kx94/S4Z90J/b9/2sfxYfZIvbK+3BFG6SAAmQAAmQAAmQwE8n8JKFoqwkSpHo/8m2HDvvv1wIDAojmNjGV0DtVUwrMucTW538+gqWGNcm//lLX1AGbEt7eQW2OTabBKfjand9XbTpX/rbi61a4NTXEFsBu9BfDdHj/jqqf1qB3WWaPn/t1D9dtrLJ+pqfeQOY1FNdTz4XWUg7162fzgNiEWLr5xcFN+jBvBB7QBb61fVKDKF/dk/K9vzlNU3mStySHehreADi+kJ7s6fbuKtfcpT1gd0a93k+nqNf27YV0Oq/7oM+tftW8XQI/CQBEiABEiABEiCB1yDAQrEULRrvWyja5NWLHF+NO1UoYgGh6dgm/7lQxMk5TJTDxN4LWVjxbDkeJ9PtsBcgwwl49sl7rej3JuCHH/JPKQCM09wmb2ufoC+egL2lrV1P6q/MKqNc2KDduI2a2jfaYkykAehJccEiT7ehAOqioX8/2LeyPW0/97N9ZZzs6MIm7KE92iz9ut1jfadWFLtu2dqoHzhN9d8sntFC7pEACZAACZAACZDATyfwkoXifV89hRWTNrGGyblklEzi25eK5Ik1TpKx2LB2+Ld8vj2XZ/29QNVVmLbihakNOpvNdl4m4MP+6oMVVH2SLn3E
Ti9G0X5c9RocryufvYCrf7voxYDaZX/rNteHPsVt7eOrq1WW+VW5Vj3WblQoRv6tiD+LF/6Nov/dXpKr8pwf/v2mtRuu9KmryDTKDHGHuOGrwcjdCjXIY0cJviqnlkum2382JZwLuW7tRvl0rn6z3Tmh/ur7HeLpGPhJAiRAAiRAAiRAAq9A4CULRQmsFIv3+TIbmGDDxNqLOX1V7/0Tfi8wFwAy6fUJcC8qd3LsA780pk6O5fVL/EZNXLkSR9UGf30SbAvZHXVasVT7tGJAOtRJeH3lsxct8bgXAqpipj8c7/7GgmEmd3Y8+hFclHWq9hrpoRzaymVktP84An/Usy/HX8ey92JTCqDKZhOvt0M5+Lee+gqccsxxxZhLDDxmZktnnr2L/Q4f9pud9juP6IcXqO63y4d8ag8xkg7MZ131g7wCHiFP6jfMNrtD3F236DlXf33woAwP5dhWct0vs+2W8Yy5mdhwlwRIgARIgARIgAR+GIGXLRSfMY5a6PgK2zM68EA2f/+kf0Oh90B8nssUYxseUNzDgd/Hcviw0vse4imTBEiABEiABEiABB6JAAvFR4pGtiWsvuzaClZuxv3zCbBQPJ/ZQ/Wor7S2L9P5hgcoXx+Hcvz9UBRoDAmQAAmQAAmQAAncjQALxbuhpWASIAESIAESIAESIAESIAESeE4CLBSfM260mgRIgARIgARIgARIgARIgATuRoCF4t3QUjAJkAAJkAAJkAAJkAAJkAAJPCcBForPGTdaTQIkQAIkQAIkQAIkQAIkQAJ3I8BC8W5oKZgESIAESIAESIAESIAESIAEnpMAC8XnjButJgESIAESIAESIAESIAESIIG7EWCheDe0Y8H6W4jtR9THbXj0bxCoP0r/diz8pby/wZ86SYAESIAESIAESIAEHokAC8Uzo2GF3q6032/b+fahfGZZ+juIcFz2347l82Nfdnf73bdLftj9sxyaH+6PfO5v/rtx3//7hRIU8W/dl68ak/Ptu4R3KarvLkWpxfLwq5QS8q8WwhLnu+WeD4DTvL0lP0mABEiABEiABEiABB6TAAvFi+MCE/KtMn5/9dUq3N7af1O7SwqXC3zZZMuy0fmF2FLG+UfuWbhcwvubCsUAShjAQ4tw7tY79+R9a1spjwRIgARIgARIgARIYESAheKIyqZjg+JKV3D2Zf8mq3GH8jlb0ZFVnbaaVAuN90NfpcQVn1/9+EHa1H6x4MJiBbd9VclXCWeFwsCXxED0tVVUtE9X6/wcyAe7pZ+ucMna3vuu7D/85U6z1eV6G18JO4DOdq7K8D5dVpbtDqAOWVW0/f3bXv0xudim2yoS5n5H+c2+id/aup3bl8P7vsWylGv0i2To/37Q1WG1p+WfxdeY1ZVVPTeJ29u+7DFHm91b8hZsaau4eGwLXxaanl38JAESIAESIAESIIG/ReBlC8U/f/6U//77T//J9vn/DYqrOvluRUObqNeCoxVY2LdOor1w1Em5vyY5bzcquLDosW3r78WUFj2uJziMesIJ2xGbWj+x1+0z291ff33TCj1vE1fP0G61x5mg35XjyG7V4StjmffAdDsk/rk9lbfr9WLQ99GOqd+oCBioPa4n+m1MvEiqNkDR3/ierb/mlsdH+1c9kH/x9dsY75AXqF/cpEZpQgAABX9JREFUDD4hu+hDCf2Qd8p9bCfbbrcWu50dEuY2CZAACZAACZAACZDA9xN4yUJRCsN//vmn/Pvvv/pPts8vFuNkW0MXJtU+yYZVthZfKC7qapAXRWFCHybSpehkHIqL3mcpT4u3UCi4PaPJuPniq3Tt0yfx2Q73I8v34/kT+vdCMfMDHzLHYX9TEorNrLftY+Fiejq7FTtAbxO12AC78znsj9vSru3n/mBfa5MF4/64/zL+wEDkerEtojCO6dzibymbTWCnmgPyw9+EXssXfeU2CZAACZAACZAACZDAdxF4yUJRVhKlSPT/ZFuOnfdfngCnCbcIwwm4bvurfvZpK3F5ot8n3PNJuq3S9GIHZcB2mvSHIjQ4O/AlnK+rY/ULb5rehfzeSQu42l4Lz0WBazpbUYqykZuInBYn1S5fDezq01Zn6q9p+iqoMYlxEZvcR1vBtPN+LAoH3r46OfB7Hssl+15MT7gHA3J/sCdwBAYat+xzfYDQWJsS9L/HSh5+gB5tCvIHhWLvG1mi/DHf4Cx3SIAESIAESIAESIAEvokAC8VStGi8b6Fok+o+EcZJNm5L1GHCnSbtvWDKhSIWCyAvFApeuM5XFHvxtJZ9K/K9mxYisJIKfvQiCPz0fv6Z7R72t8aXrih2X1fscHv0E/yeHV/xG2On3ZtPWa7t91xxZbnd7Di0CxzBz6bbZcBnOqeF3LAQBz3aHeRjDodt0LPYzPIWDXiABEiABEiABEiABEjgGwm8ZKF431dPoUBqE3WbBLfJvxYU8e/VxoWLTL5jO/+bLi2Q8NXQ1M7kWX/XG/qEJEM94YTuxGJBfPFiM/bTdmKT+ucczHe0O9jjRYiyGv1tHa4o+gpblY19lmbDEbHTbV4WJKHYBJlzv0E0rqyt+O0rlxaXJRPnowVltXWb/spklAst/8ReZDCIm7+KmgrF+DeK+PeQmWOW77zT3yiezRdZc5sESIAESIAESIAESOC7CLxkoShwpVi8z5fZeIHkK3h1vxaH+gre+yd8++fahLsWSfoq474cP/DLP2yyb/KO5fgWC8pWeNaJub36B7aFDANZ+NpkKj799UEv9FTEUH4thNzuX8eyr4WIFGW9P7aD46HAiYWi6NTCrtrZZeVVVnTQ9UjxknlLOz8fX4ucH1/KDgXgwG/tATlw+LDf1LTvf436W+ymdqH+ZP/bsRzfRwU3FnKem/76aS/qFiufIh7s7n/bmDmifPfH5fr+Vr4oK/vKfRIgARIgARIgARIgge8g8LKF4nfAvbWOsPJ1a+GURwIPRODr41COvx/IIJpCAiRAAiRAAiRAAi9GgIXiIwc8rNbhb9g9stG0jQSuJfBVju/H4r+2ea009icBEiABEiABEiABEjifAAvF85mxBwmQAAmQAAmQAAmQAAmQAAn8aAIsFH90eOkcCZAACZAACZAACZAACZAACZxPgIXi+czYgwRIgARIgARIgARIgARIgAR+NAEWij86vHSOBEiABEiABEiABEiABEiABM4nwELxfGbsQQIkQAIkQAIkQAIkQAIkQAI/mgALxR8dXjpHAiRAAiRAAiRAAiRAAiRAAucTYKF4PjP2IIG/RMB+uH7/wR+O+EsBmKr9+tiX3W5XDr9Kkd87lU/+RwIkQAIkcAWBX4ey2x3K5xUi2JUESOA6AjctFHWy9J6GdPgtwC0D/rMcdjuddMnEq/174++qXRfqn99bJugXF1Gap1vy8y9ylJtmHl/3Noc36g2EpYCX3LFC/jYTG7sOPn3B+ar5A/e954uh53G9/97i3vsM11cd6cl3Fint+qfzu1vkQpN4akNisS/H36faXXPe4m3zBtt+2PEq19Jv5T/iirxG52977Ko53W1NeWlp/we5/5OZlCG
MXwAAAABJRU5ErkJggg==) ###Code var1 = float(input("Digite o valor do lado do triangulo ")) var2 = float(input("Digite o valor do lado do triangulo ")) var3 = float(input("Digite o valor do lado do triangulo ")) if var1 == 0 or var2 == 0 or var3 == 0: print("Você digitou algo errado pois não se forma um triangulo") else: if var1 == var2 == var3: print("O Triangulo e Equilatero todos os seus lados são iguais") elif var1 == var2 or var2 == var3 or var1 == var3: print("O Triangulo e Isósceles tem pelo menos dois lados iguais") else: print("Os triangulo e Escaleno nenhum lado e igual") ###Output _____no_output_____ ###Markdown ![image.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAA1QAAACGCAYAAADaf8vkAAAgAElEQVR4Ae1dDW7jOg/87tRzba4S9CbpSRY9xgP2Dv5AUhSHtKSkbdrmZx6wL7ItkcMhJZG2m/xv43+/z8B/p+3w8rK9+L8/p+3f76MiAjLwzQz8206vjPVvJpniyQAZIANkgAyQgW9m4H/fLJ/iyQAZIANkgAyQATJABsgAGSADD8sAC6qHdS0NIwNkgAyQATJABsgAGSADZOC7GWBB9d0MUz4ZIANkgAyQATJABsgAGSADD8sAC6qHdS0NIwNkgAyQATJABsgAGSADZOC7GWBB9d0MUz4ZIANkgAyQATJABsgAGSADD8sAC6qHdS0NIwNkgAyQATJABsgAGSADZOC7GWBB9d0MUz4Z+DAD/7bTn5ft8MYvz/8wdd884N/bQX/e4Ph3295fXzb55H9kgAyQATLwBQb+HreXl+P2/gURHEoGfpuBTxVUmlS8ltBPv6V0ycR4347+u0v4yd9g+u2YuHn9ksh+utjQOL0kPn+RBtlc6vz6bjjc0C5gWApdiR0reK+TANg6ePeF2bPGD+x79+dDj+P2G4jX2HvvYX3VmV5sZzLf1z/N764RC13iuYb44rCd/jvX7yvXzd+WN1j7ZuerrKU/yv+IV+RrdP26576U010Xypekfbig8ju0OeHLScGw4NrBzGN2l3mCDEwY+NLku5sNf2L8d51+1oT4u/i8WO6DrIPPGj9i90/f/Lg4ttYdZZ+OG1NXSqDuYn21ORe2b9um8fvdSf3aH7dy9ecLqp+w/Erx/RNQJRZZUP0E01fX8aGCShJZcfS7vPaCm8inNpVziYRdf/GnV6hvw2vwtEEXxXa37WX+Ok5OyPFuRWu/nbZD0yuLrtqtx6ALXdE2kVN7HejlBRZmnRwHk9cmScirT1rArtfjdnQ5Kv+wHf6IbQ3DzFbVd9pO4ivHrOPtGDeRXhxrP8CMtkkbdB2VG8AAE78W0nM7qwKw2+0resUWv6OU/WevXnmcWB/0qQgS+c2+suFfjBE4QCxbf1qx5xetVMx/7HUx84FhdNy4gEaig7yAP5vgKfaEdRKzIgPi4vhaXrlAGeBjtMltd7+MeM5zAuyB+XxxHBauQ+9nYxTwyBwATGbLnnOcCzkOMgb0Z+ZMjsD3Os8jtpP8Ke9FF84ZlA1zRjGAT9XfLl/Oe1vQ9fW9zSONjcYFcvTl+BFU4ANc8xRTXjdncZLXgzr39+zbmTO2AVc7P7tI7OP8jc6JlfJUHea/YT7FWxqv78b7bi2GWJFrrscx4Cfq7r6vfAjf47Ve+V3JV18t1hPB0tbXPO/jfH+vRbAudVkc4n6FpmIb18G0JmAnaEd8w8kU9/m8HI38Zb2MX/ct7j++tiOmiOHDdnyNPKrGR1ojkt+bPy/KUWBu7da3YmOPnYar+ybHn9tifMQr6YlTWBd6viLq1OeXzemMbmxz79Ox7+dHj40/x+3YX6Nv8uR17QGmLtcbM/l4Pq3BPtDjZsJTGl/2gM5/zm/6fFCOS044473sCV1GwOytMV9yeRwHfSA0IsZlzxivNdBdmxZPeT2sffrx1M7eozcQy9zuy23rgieNDxVULiNNnrYQHd6AjElw+Xj7tMnuEzRfMwM7AS147JEwTAZcBFOflhRAUKL8vBigvEasj2sBbzgKJhToDm7JhvKTZERQmYPbptTGOQcazCjDg7H0s02ryEz6fHI2e9wfag8UQ36+bRg5oXQDs59swoEM14u+8LZfq/hdtH4i/5DM6Zixjei/FIs6RrBlmaNEXzb25KcVxgWWpH+RcChvzseO78yxyOyxD1yhrjl2kZV5G8nyRNavJb8me1sMYSLdMZ3jOW4YmPyGC+VjTO546Yq0oTIch45zOzN/yRbpB7wHh4bd7c9zKtvVxyDuZfysOGsbpGNSO9p8Vfll7rq9iYq5jy/jqOFDDN52u1TvoF+PLePc+UucX2xH46LZqDy7/ORfLyDbulPiRHQ7Dt/4fU1NtKWDhW0rPycZnpCdNk2ZFnYrP8DxiC9ff/Va4wTbPmeHti0xi6+MO+F4OL5wWs2045Azvu6FU9ycUJ+63eLTHlfhy5ksGRt+HfdS+Y0rL+Z60TYcYn4fcqAxN8Zl/hqtNy2OHIPziDZ7TCcf2fxJPvcxLqPLtL6GeRS3Hv92zTjDtvvF8RdiRnHbsKT4wzkp7Y7XdBk+xDpa6wBD4TvpShCbzZ6vII/Y9qS/8aaxkcZknhQv2pR0toMRNypf7AxbprE64ynhLrkIjBnZoLgTLsE65z3NkUWOMtLl8y/5ZsWZXos5lMaN+G3ntF/nM9uSh+VrOq7HYe5pxXJbm9Xu8Bf2TBhXtuGgSftqBRVWogngRLEHgN/Fsc9wRB4GwauBNOsHoyAo4aw2BZ8HSt6AR4tQ6MrjQGqZHGZbc94gwEI3JhRgo6GMCbuTD7qlibau9C24y5MO5Bd5adNCvYNErNqJx13DAlPvU2wMP+BCjr3reeC26yu+bpvYECOKLlimvJUx6zmR8YrMikP19IVjhR1sLRjSYeehnYXjrKvEVxKScae4V3mxgGWe5hhzP1QmY7zYkPOg+1MxirKlDZiAi9orHWP8Y1s6VUx9IODWc3B8sQzA2uVK4wxHPX6KT4ve8IFhi1gEvZUjPC7y5lyAPLUDjqf8mcGB0dbQwAh8Jm7qwcK22rXag9fxGralD9hQ538+BrvTOoqKpH2pbcW/juWP3NH2BKPIBqzlSrPDn9bGZ3AOI8q8T/PK11jBMf3CHbMx5wWiM/bh0PYBPvqgxRiM4d7fGtlfcOOv+STsqfLtWK5jzIpUPK7ys3qUGfK0T8EscgILSskxhldS/iAXehzLmMmai35GDDWOVt
cSiMxHvlRs9jhyTjGmO/a6LuAx8FnxZsXARbvQ+y/4RBkznrCPtAE3tqs/e5ygXB+PcwR4xziravF4pMtiaREHKGDQ/ohuv7kgYqbjOv9NGdhZ1Vd76nU7/rxtI3nXK6j63ZS8kYyUjg0pPZW4WLx7wVYJhWEabPrKRBuHE630i0UHJlfdsIqzpg4q/dImghNlt/hC4KiMSECTjJ38VoiNbE36cBHxu1S+MZndaeNCHzpfRd5HCqoke/bKwSf8GX6ok8FBo0/lHCx+ncuB/TOMbQFP9kBsYdzZXTrHEZ+BuZ1THBjfsXHJYhLxOZpPZ7CjbMAZaAYyOy8tJjG2tO1xg1Iu4dn65wUS/NHmROJ2FIetWEj9Xlry8KkYdQ7QB+ObIGgx+lqxNH7VPuQa+MTxFovha0ySL5fhc7muczYfRhztZCNn2E6b2cK/dd6CvTtdcC1xoecna17B5Dwl21qc5LlVMSeNcFD7YUwu1leQoE3AubI7YyzrMq5Pif/i5zYnV2tM4gfjse5taIf6Ev2AF7EtHI3WAeiz83XmNe0fMGzUFD7TOrjrZPE+42PXXU9Uv0OvHfa4Vv2nvtb4q/L2mHysfM6SRu/TNSoWXJt8zSj6CuYkR/2KMsY+3sVtj2mzJcWUr7kQT8HFaE0V/bCuDmIyyR+u/cXmWlAN9yobg/ET3IC8bmtnPjV23CDf6KNkF4oIXYmnVU7RMe1t6DIQh6jb+Rp4L7rG82WvK/haxQHaKm2Tc96neVzosvPdztztrJ3RfW9PXMPWR2zDceP2VQoqrKhVjTr3zMK7u5uKAOuiJMdtUtZA8mFVZw9K7xCf2Xmoy5zQA67oyuNCnm0SsFjpuPEiUmXIsS2yYKOKhuOCwyYP8Iu2YhsWHhUJcupCsQ5g0AUyqt/DlsIjULVrojy8uPBncDjTU86rjho/pQ/qru0FltwVfJYvtHfw/V1q0x2LfcYivujXhvzk/kVVOpz6tcqF4+mYJFkOCo4hzzYoywye9DxsRrkfKowxeFbb1T9gyzxGRZ4nKSIF5ON4VFb1yLFjx7aMqX27nMIZcnixjC5MG8EZ2JC75Dugcg11Yds332myWOdRU4ScFXlzLipeOC4yVnES64FgERno10qEH1c/FN1YOBQsLkE/8Rq25aIcNzkZ46UFlWHsawHGSgKRde1wtSLNXsuHtdyLN7S1yk3HwlEeny7LAa4Bu2Ox57Cd3vzVv93odEJ8HranS+2g+nDUZ38u5ku+NjsvvWJvszFxXDGMj8WOKh+Pc3yYjLAdZWLb+Q6fhJw6DyC+s9l5LZBrPY4XYzx2Xt+Vm54z9bFVCcq1a2q/r58gbz9yzMeIUxwbXNjZOAYOV3hlWL0ux4M5gL5EDNL2a6I/8YRyUA+0A3PYoPkirrcjnBVEP577dKTLYnA+pottjct9mkeKbrzZUI97b+Cmn5s0qj3jbpfbNh6fz16noMJkxCvU4Z0GVC6GQIDhpbIpqpO8oBpdk0mZAt0mTE92kuy2OPpE1nGOAyaajClBO3WQ9otH7SmoSgCYLW0BbON8kmEQJZsLDtyoPantthZ9CTPISRgbpxjQQZn5yRd3xegLgXLnhaT1cxkj+S4jZEsrx0Efp7J9o8j+RJuQs9jMW/8WgzMuuy4FlO1MGC/EYrY45iRhXVCpfI9BW4CVK/WX85vlTbGDj2WE9Bvzbhz5teTXolev+XxJMC7j2XF4bGDxMrIj+iVlOalRjM5Z9l2yZRqjg7j7yhqT8GReshVtg3U+0feXytB+EWfo4/18yBzZWtPwJQweZ8aL+cD6+fqEfvN1Zx4/rnfNBeLVWHAfCC+Oz5OSfowYV+t5ZR6PF7apT5zfwhWKkDbiXPhP7HSuZFg+Fnuc/0jAKse27juvBcgKs+By7qSf781pTJH32cPGgduK87vGacTVZ5WVQkd1B49zqRY/jlH7KRfzsRqnvu+lPavG0Sgem9yEbx/Dgcdk9mPF5n4v+lSmxyrGlcl3jtPc2hGDfXO84/y0/dVxeH4kT0JC/3A/9+tih8fhmTmdIWZMsc87hvCb4nUdGN8pLoHDginrdfluc8Mh86fwjrE9k5F4QmyeMyPu1ja/NX6bDerTon/F+36tQX8BWsSU+BrNM+cExn/Ip3ncfH7lfis7a0/lzjlNczb3XMZ47nr26EoFVQSePurrRuAEr1hwEtdrbVNpj3IPb+/6Q6e+OPjEtseKHhwt2HXMYTv9lW/q82tVflvMpO/rCWTDRJMhJWhzYILM1u+I36znlwcT1oLHHsX3RVP7By67m4gLMdqysLXoS5iTPaFLJ3oZ5/D1s00u4fvweky8hi3HTb/ZyTdrv6vnj+PhfJItByA/Fp25jckmX4yanlGMrLgM/PkOScY4x+IT3B9xd/1ZQEmgWiIG3KBNfXHWBc7ixOX31ycW/Noi3MbBXCyQEu+7u9dJN8ZekQK+W/GsmHoMSOz5JviBOCy+TnMHcVwYo8jTVdYY5GzFO9rxx76Rs8fNhTIQOyYpnoR7vCSOuuz2lAAwxjzAeVzWQ92U3G953i7jB/SU6Ok3VHRt0S82avIFaxq3ihO4ltbz1f6zsm0134sFFWfnOH/jGM5vkZCPcT5gQbVeJzKSGeYsG3WHz2GNSZxnDRcd6TxsMaTrW1s7Kk81li4SPuqEdkeSl/k9Py72nVHf5i9dV+q+XeNIxo8x6ZX+TcA4z2o8rPxe9DW+/Ys40G5cI3brWzUT4la/ybfHQbYlrSduZ1/Xm1BYi3G/SjcftCvMW8nVdjHiIJvN8G2jfb2ULoC9+jFi3Pg2/MDhVKfrLvI7LzlHzWsVjNVm4zDxhLyWfLVgChvgpkzxu6qZ8d6KCd8TEncFauhCvqQT4gUcZXzOh7JPcw6QB1rcxhfb5TjLfXO+CPtR6SaHYc8K86W2DRSUU58qqIqM3zn877Qdp3/Y+sOQRsH9VQi6SCwS2a/K/+z477D1s1g4jgyMGLhWjN7SGjOy8wvndKNJG/wXhF1r6HeseQ/sw2vRTjnnGbCEz1/ZPt//fA9L4paJ43khT9LDuFoVAk9CxJ2b+b4db23PuTKj91tQ+ePFW3DQVRK4XCWnOztXdvqXxF3F1i8h4GAysGbgijG6uqu2BnFjV5WTKz6JuIp537/m/Xs7bvZzG1cBTCFPysBVCiq9YQBz8BZyl7vwJwuqu3DTOZB/j/E3ZOf63un1Oy2oYiPmXYs7jTzCJgM3zQDXmJt2D8GRATJABsgAGbghBu60oLohBgmFDJABMkAGyAAZIANkgAyQgadlgAXV07qehpMBMkAGyAAZIANkgAyQATLwVQZYUH2VQY4nA2SADJABMkAGyAAZIANk4GkZYEH1tK6n4WSADJABMkAGyAAZIANkgAx8lQEWVF9lkOPJABkgA2SADJABMkAGyAAZeFoGWFA9lOvbN5PBj889lHk05nsY0K/UXv9A3vcoptSbYsC/Wl2+zlm+4plf63xT7iEYMkAGyAAZuF0GfrWg6r9izAKgRMglv7sgvzKek2D/zZwv/2aGJFPf4ZML5d5+XJh/H
uVHGYXv5c8PXOi3EsR3fnjJHNw2n3Pfaizw/536RLbEgep4ORMT32owhZMBMkAGyAAZuC8GfrGgkoQlFwT3Rd1vo90XVL+N6Dr67yEuHquguo7fnlPKdxY4I0Z/Wt8IA8+RATJABsgAGSADmYFfKqgsIX15sV8Nt7vjUiDsf0Xc75Z6X/nsTwb8FRUdd9ze3Ta9o3vYDnJ+9KRFroOuuDufcXU9Lrd/IlbQu8Rz3I5/3D4oJBMWl2U4HFd/YiOY9TUcxOmy8BzcXVZMx+346rrhmtiDmJ0ruCO+bVku8rnH1QkaN0CujD28Hs1H3U9Z18j+7hPFfdgOyulxe292nt4OzbfCC/gJX19KnAMfim/ip23bur1/rE/G4vy6DwcUzPRKV7zmfqgiFN9p+9fOp+Qa/QhPLpXntz5iO/0p9o7mgeqB+VP0dh5wLhasY/96J/BLj2m5lv3f+fVh/olcvSDfebzHj0qGuDi+HvrrbHN+TJbKUPsHcYE4PL7wHPpx4h83yT4z/m6/6j9t/1C26PPzTUiKh8Ilzlvpjj4MnrL+OL+Iz4ldGUu2kkdkgAyQATJABh6NgV8qqIRGSFhauycQukl7oYCUSyLm5y0p801fEwRPYDTx8H443guIuKYbfxuXkwCRj8may0Hc+MqP4XEb9ng8kW1JiyZgaI/JsvGgIyVNct6x57GqLyV1rV9LeIa4tr0M7Qc6k1zt3+yAPuZLx+U8DT5hjMp1fhtG8yXYLlGiiXDzA/bDtqga2emxoteQj8CK/veiBnH435GMcBinC78jBYjB7fJ4TbZgfKCAltT6GJehPs+c4d+/CM/u+zTnVnjET86dQAC/jXjwOYhoz/m3YwIcKttjWGNtNP9yzMoYl5ViFW0AHVuLYffrlB9cn1TWaP7i/PcY9NhCP879g5xN7a/8O0dwXuTg+MQFztvSz+aN8axjPL6m/H3cLrSRbTJABsgAGSADj8jAjRRUldqcNNlVS1x78qYbPiRcmjS143qtisdjSEowIcEuqY168EI9j0lcwRN6RnaKUEjAAB+qs8TQk7fCDY5HHCIA5WEbhc/Oo9xpHxRU2jAmJ3xgL+pod9I9YRZpfVy1qxwHxzoKCvE5JiuoIqZQhuitOPRY9cYYS1DdL0UXHgIXySfSR655sTkbkxJj5A8HGF+Be94vYaj6AeuIBy9OUHP3k55c6NVk3/hCvlFWbs/mzHwOVLl4nO1BnNAufOD4XdsLEgHdeQNZ2Zh0hLLShS4nF00h33pPx6c5NcNSz9uxxI7K/YJdyRYekAEyQAbIABl4QAZup6DSpMVfm5JPTEpjc+8+2PWHMZCA9P7Q0GQPXnXC12HwWi/eYOwy2U1JMCR+BU9KfDQhb3b3pCUnN9q/4Y3kGOT7XXe0yV/Hqgk/YNklSm4n9PEnP6NXJMe4XMjgE+RelMi2RDBshoSy2lWOE8eQtAsq9LHa5bwDPukXMvbx1/HLmJnfCwUzvTs/FFu6mCk+tQpemY0Cr+NUITmuZnhqoh7Hex6Co45SGyu9NofGcx0xDeefSFd+6pyxggrjVNoSOyrTn+okv64KTuBqwTvar+0yByM2EF/4J7OWY7PbD/pRX/jFpKRryFHDZPIMR5fdAezPuw+vYVdXwwYZIANkgAyQgQdk4EYKqrqZy3EUVL6xJ/4hyUjn5eDcNUyAp30zhq5jluzW83rcbCg6UuLTBe8T+H3SA0leKhImWEV2xYVYsA04gr+aQKN+HDA7j32yX7JPcTy2a8LbEk5Jjqtd5ThzDPyIzTP/Fz5QRsYLuIpewxWx2xn4gF4rOAZJd8EnmEZPh8J/gFOBCA/wyuaFPMzlgT+6odbIfKFPAUPHNOArxXcRDofhI/AxXJdm9LELeJxxIjbAXHjH8bN2gZAPi7x80Y/AHuiP+tAvMiriwbDHjQiwJT2tcl3yiX3iWGQknTiktgFnvcRjMkAGyAAZIAOPzMBNFlS6gbeCKpKE6gZMflrS5AniamOXa96vJRH+hGqfXA2S2vY0yIsdxapPOAyPJzGK+9yTj5KMiywbH8lNTmbkvCefkHClZMqLqJY4Fx0pCdNrLg8Sss6f4XCbdGxLyOe4qp/guMutiX7YWxM71eP+UrwTu4qdGR9wtfB/4qYm4jiu4TBeFn4H0407jyez1+POn7hYTLVr8ESli1EM7i/TawUV2CedpV8bv4vDYUFV8Mh4j12X146n/uggrZHnEvrXcKf5A3O9x5rOM+cLhA/87GPUVudtFysj3lrMu63Kb/5bKcVZ+MDYwnYtpoP7uX/AMi2I3BZ7pbfZD/qTPsU7ssv47rKSXTDPRbnyZDICb4shv6kFfWRI9LvMLrSRbTJABsgAGSADj8jAjRRUXhDZazyHt/f2bWQt0auv0ZSkyV7z8cQiPwnZOw1lHrbT39N28IS9FUv+2pAnfTsZLVmzfpD0zc5DQiSyMCnSttvniV26W4x48e94/Lzb7cfOYftmN8UEGAsWL5LUFtePfVoyptdf3yHpm+hbPVkAuZKU9YRvYC9yrwlc46iPqXaVY+Q4/70Z4i7+B3zVT3IcOI6bfJtgxuKvsAHXKXAWeqUf8ux+SOPtoGLwwimN7/EsY6yAMf+d4Fv+FngKDyobMAUG9GEGK306P8m/s7lesHrhl8Xq0XjOyCW0CfWjTvNd5+0MP+cKqs67r0noR/TD7HyyD3yF9qM/XA4WzDo3il3eT66lebvnKeZa5i/Ol/i8wK48/5KRPCADZIAMkAEy8HAM/GJB9XBc0qDGwL+343b6j3SQgVtkwIqGKPZuEeNPYPq3nV7jK/h/QiN1kAEyQAbIABl4VAZYUD2qZ3/NLiZqv0Y9FY8ZwKc17YnNuOOTnS1PdZ/MeppLBsgAGSADZOBqDLCguhqVFEQGyAAZuA8G+muT/qrifcAmSjJABsgAGSADN8kAC6qbdAtBkQEyQAbIABkgA2SADJABMnAPDLCgugcvESMZIANkgAyQATJABsgAGSADN8kAC6qbdAtBkQEyQAbIABkgA2SADJABMnAPDLCgugcvESMZIANkgAyQATJABsgAGSADN8kAC6qbdAtBkQEyQAbIABkgA2SADJABMnAPDLCgugcvfRWjfm307Adnvyqc48kAGfhVBvTrz+0HfPWHhq/8zX3+jYDyQ7/yg83pB38/YjjXoY+wxb73yMA3z8V7pISYycCzMMCCaunp9+0ov1vT/91jUSI/ZHpY/tCuJEkf/aHTy8bYj6gqf1dO8pZu+82L3/DbPprQ/vncj7CKn15WY78Bb6dfEuiV7t6Rja8wIPEhRQ4WPl+Rl8fKHJZ1z+fyZ9fA8+tQ1rttWhy+fFbfTtqvnfjK/P010FT8KQauPxctB/n0TYyLrfD53W7MXDzuFjoCR9+5ny1NNf4+mkctRU4uXpZ7TQZfePondFwI5Vu6fdeazIJq6i6bpDhBLGG5/w1+avLVLwiHT8bXNyzon578guXPcTueKaiv7nYXyILKmeDnZxh4kCdan56/n+GMYx6MASgWvtWye96rf4qjlQN+rqBaobjWNRZU
n2PyaQoqK4b8adP6iY1SqYlx7VcmrvZxmavCAe7+vMxfmclBbGP6nSlNLqqu0mcTfIF5ZrPq+XPQJ29SMI70+lO5rr/EVx/TCoijPAlpT/JsjHFl5xqmGV+aeB+2g4yXJxotET91mcftHcbui1zXHbZXGwv8bQN5L7Oir+H454Obre96jPbBXb3Ux16Rcl4Cd1t8mw+GHHd/H7bj6yE96RHb9jIdZHyK/0WnxsHsCWHCC3GqhZjH6jrO7ElCYOr2VP4CWnv6MBgDfXL8Wt/O4cx/qhNiSeR1Ls88rQPdm86lwPcy42+b+ViEQYy8nrbTn8Zn4rzh60/ywAc+HwAX+r7zjHpkDGKd8QQyt/b06fh67HGVZCB/Zf1CPN03SXY7QBnd1kFHwGt4YF29UAZiSnaA7DrnI9bafFMOF3HffLhf96q9df5m/4YPKxfQbzAXD2ntgDhD/zeMtl7VOFvoKz6uPZHfqc/FV39OW1rDVZDZlfGDrVU3+FzjwWNH5ed5Hj6Ueet7gck+vp1sf3mxtzHCBoiv1TwCEmTs4fXY5eWn8BNfyPhF/IH4zeejrvGvR31LpscJ8NH1Vj+rHW7/an1yrYi5jUM94JPgLdbGJba2Prq/d/HSsJ/eLB8JvzVsiMN9r5cgZpAj5GIQI26xfYIMsFGuJTtxPc0Col+bo2HfRLZiKvMC4iLGxxsHKYdyPt9aVoL8DPOYFv9/A7jGbxs/njNmf8cC+NK6OeIX8aC/koyIzUA1aKGsZNuE2+b771uTBxjLqecoqNQxsXDqZFlMEuMonNYXjESeLUJ+TWViAEHfdE2xjAMKA90XVZMvumKMTAIL9jpZoN/C5oSnTNB07RKsbaL45Evj08JufA37VT16nJP5PpHRLmz7Itj8mnGAM7S5wJK6Ap+yzcmiD4mW22IbZfNPm9SSxNhi1eKu8WT+bLE1i8FR3xZbKtPjLPVLwNum7DEjdkT8p8n/t2sAACAASURBVJ4Vr8se+MBj3QoFtNf1NJtRhrdRqeqcjMF+qY2+MP85nuRrxR2yPYm5iPeuz/wz9G/vY42VPxSX+xj5BM5VilxrPKUxLcFzOyP+PDkTn66wGk9uR+Ip2dHi0X2FHC58NY/vJLwlku6TVewP8HrcKo6yJji3qA64tDXU9S5iJtlo/XCeO//7uI9XpRO3I6zo3xHPaIOvHanfzPaF/xUHzPvETVaY4g79n7st1rTSUWUUfmDtxEI3cZd0o88WMSqqdVzYGvaMxjku5A7bPr88drJtKnsRlz7f0r7Q5rJfSzZn8Zakj3yf4rTZBZz2OBUu2vjV+pTVCtfN3qSnrOk4COMrjUFsrThxe3C8tHVc3ATa43UfLGS2WFP7EVOKpaq44MK+wF9eQ7KM0fqX/OvrU5Xdi7dmk8eS9msxjO1BbmN6wGdtzXD9iFRxOhbMyS7W4fNlwVn35chfZmeKz44HkWJ7blvMbZ/3GLcTrC3ODEPjvcVkmovoK4RzYfs5CqpCRg6wcrEeKsGzuzGxgNvCAMddTgmmmgD1fuWugN811jsLObhiSJU964fFQJsUENASUDYRqzw7Hk3SPiYtpPUuKODBhU4MwHFlYo82x46hygkyoODZ2wjdmm7wFWJJHdH+yg12HNvZOWpddeIq7ygX5bS28IEbUD/ej6s6ujSxCWRIv76g9U7uB+Gi2ofH2JbBYC/KknbHWtq1Hx7jGDzf26IP8Et/34SkD8ZEvVZl1+tdx6oxs3flj4IZ+UW8orZi7FCQd2z3DoMGYK169Ng3PBxa7QAZ2E3agLXGXsR3HpSSpCIj9RzibfMU9OqYmR9rP1dQ+4Ouuh/EceUceKlcol5sJ3urvMq7gx33w2Sgr4c+pH9WjLDOVVxpDMwvjNXexxqX+ryu4TFHq81jW9W+ihePpY1rQMFZfdj5Ar/LkGpPiAEe46S2cpxX/NgZZBS9af/DITvuQ/5qHiGmsKly/Tl7cc53qCX+L8XWx3ujyMG9ZS4zODExcIw8L2MExqgQ4ArjzHEOPoNnuxjH4vfJfCqYYkzZx4q+iGf0IcRX6Z8OUefCtqEO5FOEor9Qbr0JJH27rsp1Qjc5mNm24BaxJf2IpambYoM4mCBbnX6SgspI8tek7FF6fxFixU++hg7TYIpCqz6WjYE1AHBCRC9ppclVF1bV3fT1RLkGKgbh3OasB/V+Aqvimm3agKdMPlw4Y+I1Pnqw23HCm/RdbmNieoUldYRCNOltE1Rer+n/8C5JFCh9I/cFBwqqYYHj/bqPcTEY2Iuv+AB2XRw7toYTiujetdtVfY+xhW0ZCX5tcRs8wGt1xY9d52oMdtK26UYeLVlD7qXd+C86d5tyt3enKJ/QGEEd80Ik2d79kTlKT5wrBsSs11Cvb8zVPwB3hlXPw9wsfgsJn/Hv3i+4IYfsNodqLI6S4YoXePqIHzH2e9zsOIqYkfUFn5iEHQteAJvaKvLbnN1h7df2PkxrWyet9kMc2G4DdrbVtQj64bpS9NU47tz1fpf7fLemd74q/mpr7EdzHnFNdHAmN9kwWms7DhuX+J/x6CraZxpT9+mZDD1/yVysfARfGNdhZ5Mp8tW3Mt7XqgEnfX0qRpW1QecEztkUNxXjeo5nvore4g/cW+b2Vv3BURTuoxhB3VVGxJ30Qt37eaA99BVuvBZ2muzwka3n2rf7ybDEGC9WPEYGvmv7935M2y+Sj6qtFhNpbIvdhLPqWMVtsQU5C5luD3Li5xDjoK2xUW1DOe1ae403+V7EAT7Fhvz0a+s4GKBannqKgqqSqcej5BKpEsJ3fWDidofgoFEbxuhlO8aJ6KNysO8d7f0Cf5GtAWgTZ2Vz1oMLSZHXJtwSa10QEy9iw2RzB6wY+GpjkoH48qLzERudO/2smBFL6gj6UjxU34ztHPE8fpWoKC32Bz/VP2VcP6z45AJg7P3AvpoYpOOiF/kSrJgcI3Zso87VGOzXCq9d/M3kyth6bXSMeIs+O6z8TbhLHFVBVQZwWOIv4tj6hL0wZqqr6gGsRU+6u5jgoh65ADIWvprHdxKenhznK+VoiLcmjG1MxVVE2SHYVeMA+sd62kaVV3v7jQ+M+4oV5WNbRPZjwKOq7Dj87aDG/fAJVcekvvKiW8aD7wrGiDPX458wxk9NPi/1udqM80w40OOVbaIUOOm8NTB4jG1PgCFhCp8WfYWTsEc4mPBYuIgxgdf8sZBR9F4+FwN/2FQA6WHz4V98MyHGjkbkcxAD3VetR+J6LHOFLfOVte54UJ4gh9nlYDK+YoBj5DnhLnonMs7PxZBT7Ypj4DK6W6tgijG4F7eCbhjPJR8C+ed98J6+8Vn7n9OBfIou8E+sawZipR9gwnqYzi4PQvaC24oVuca2aOrHEDuKwI73cbCE1y8+YUElDsl3JDsbqWH9YuNqTvACoWxk6nDcQECWTBq/e2kbTSsyoI809/3aAl8CRXSZw835XvgZBliM+mTJNqdJXJLWPYY5VsVQsEWgqkVRUDW+PFB3ejpWDHYjKOEFfWpvH7e2MVNtfYdYcse+eMsdl4iFHBvIuy0
4lgTa+ZYQKm6XUSdxVYrym4+bnSOb3Y4uRRaLXSxOFooZnyoj4x3FmcWz33HKWHMsdHS2mHV8ZQx00xgZbqjIj99NnCTeA97dDlBVmiP543mw8scuxj2GRpjUv8VHyQdtfXA+VIZgWmG1ax4fCU+y2PTm+G72pljKvprHdxKeN+G6zqWu2X7F63Ey4sy5ABmKqZ8Xee63EU84N3M/vPHh8WL2tn6Kx+O+rlmoK3OWfKDcul4wohYIKQ5MXvaVz1OfC4jRr2UcWdsotnxc7nmxzxNmlF/xj/Y952TOY11bFNdwLyj6it/EHzY/UFfhMVOweJNkJcOunZ+LJZlGHvucN0Aplnxe+d36hnnEi2PIZgm+xrvq9NjGuLH2cPwCW3CcNeqRjnMfFNsXMpNdO44me0FRn/hTGWa/yh6uIUUA8lTsUNkuo13TOStjepxa7Hc+tZ9hT/a13MnXoc4n9BdkMqbLKlBtn4Y8tK4xMx0fydsUj89dnNcQW4JLOHBuKk4/Xtg25baMyWsEzk2MacRZc3wHc/nnUxRUdtfOHw8et3cI6h6cQ87MCfH4MoJFu6sDXW65luQ1B7ZH6LEZpk6yJFqxpwUffCuYB78/gocJaQm8YTi8nWJRRFmSlCxszhxchrWPORvEwEviyxfsmozsj7suoSvpA77O2FiZRt72xUfurYubJ3btkp1z3uXOT0tAEr42WZvfYrEzjudx4BPb5Os3VIHPhY8ek4OFSa8Pzo/syHyC7/Fb6Trvbi/GGYyRDVnukDpXEHOFUeXLbChjekeUO7A3xRLE2EinnBvMHeVjwJNAmPq344vG3B9gQ/qmNpR/2E5vsMki1tf3YfLmtnj8LLEmnmDOBXyxNuJXz8u8ck7BhupfSOIEU8R3Em4HaJfHx6AbzktbzwAzyoD5kMUg3oIpceH22ejg8LjpN455XMCYtL7q+YINMQHWPH8zPvdhtkGOoF+ai3YexwV2sRfWohTHJc52CkHfGV9ivE99Lva3mNd47dzs8Sdb/aaD4+s8FvwqH3+bb7YXFH3Fb2KL27Di0eHIJ45x7O6PpQyIpfWeA75o35To8ntirOsZxJ8AU65yXDteXzPmiewlcx45jjXZ+Zthy3whk7GfxzezjWxyXXhtwhH6dxcjRTfOsRR3IPviuWDrRueiyO7nC6bEDWK/MH/DeMNCrVrquWXHoR3Qn4sccRa3xRYVqTE48NfkvOL3tbaAnts28U/ib59HYnx+bk0uAAeHT1JQDSznKTJABuYM6OK035znA+7xyvt2nCzm17fGNoGeGH1awb/t9IqJ5KcFceCQAfNTTjyGHX/u5D3OxVGy9UXGJPmcFwRfFM7hv8NATYJ/BwW1/hoDP7kHf7+RLKi+n2NqIAN3wYAmLP4kJ92xuwv4Hwf59wivcX58+MdGXKugiru6n/hanY9Bfpbe6e7pbSTtdz8Xr1FQabLtd7vz60rPEpoPbycLqod38dLAH92Dl0iucpEF1VVopBAyQAbIwPcz0F+D+LEna99vEzWQATJABsgAGbh3BlhQ3bsHiZ8MkAEyQAbIABkgA2SADJCBX2OABdWvUU/FZIAMkAEyQAbIABkgA2SADNw7Ayyo7t2DxE8GyAAZIANkgAyQATJABsjArzHAgurXqKdiMkAGyAAZIANkgAyQATJABu6dARZU9+5B4icDZIAMkAEyQAbIABkgA2Tg1xhgQfVr1FMxGSADZIAMkAEyQAbIABkgA/fOAAuqe/cg4NevVO6/SA8X2CQDZIAMkAEyQAbIABkgA2TgWxhgQfUttP6OUBZUv8M7tZIBMkAGyAAZIANkgAw8LwMsqFa+T7/Uftzeva/+CvxxO/7xX3E/bKf//OK24a/cH//6+X/bqfd/2fp51XHYDnrtuL3XYxku+l5cF4xN1w7b8fWwvfQnVBN92/t2fMl4HSE/yQAZIANkgAyQATJABsgAGfgYAyyopnxJ4RHFixZJXqy0AseKola4vFq5pU+JWnvT4sgKMR3v53V8K2pa0ZYLrNBrMqIASk+h0tiGo2Hc4w0ZU5N5gQyQATJABsgAGSADZIAMkIEPMcCCakaXFj3wVAqKI3tiFNeiiLKiphdHXXYuzrYN+qlcKHbqcZfRGoILCztvy+V+DeTrMDs+vP2r0nhMBsgAGSADZIAMkAEyQAbIwBcYYEE1I08LqnjNzl65a4VPL1xscBRUtXBy4XYeX9uTthY4WKhJ93q85VcIVUYrotLTKhnbce1xyBMrFlTuD36SATJABsgAGSADZIAMkIHrMMCCasZjL04GHcq1KKjqkyEfKwUOPIXy0/JZC6h6LLpe4mlYFE1YQDWBHVfFwSdUSDnbZIAMkAEyQAbIABkgA2TgWgywoJoymZ/yaNHkhU0vXGxwFFTtaZL/rZQWR1ZIpb+h0vPt76RqAVWPU0FlhVF/5U+/YML/3ipf499QTR3LC2SADJABMkAGyAAZIANk4GoMsKBaUdkKn/S6n/RfFFT+91H+el/8PVUreNq39fXX72oBVY/b31t1DH9P28ELO8fSZB7fTtuh/01V1hc4Fk/LVlzwGhkgA2SADJABMkAGyAAZIAM7BlhQ7Sh5/BP/3o7pa94f32JaSAbIABkgA2SADJABMkAGvocBFlTfw+sNS/23nV5PG7/v74ZdRGhkgAyQATJABsgAGSADd8MAC6q7cRWBkgEyQAbIABkgA2SADJABMnBrDLCgujWPEA8ZIANkgAyQATJABsgAGSADd8MAC6q7cRWBkgEyQAbIABkgA2SADJABMnBrDLCgujWPEA8ZIANkgAyQATJABsgAGSADd8MAC6q7cRWBkgEyQAbIABkgA2SADJABMnBrDLCgujWPEA8ZIANkgAyQATJABsgAGSADd8MAC6q7cRWBkoFnYkB+gPple5EfqpYfu+4/WH0dDt5fX7aXl8N2+k9+AFs+ryOXUsjAozKgc+b1/VHNo11kgAyQgS8x8BwF1d+jJWYfpqoldZLY6b/j9mPbiWB+OadPksGX7fhXDBOsP5AYSnJ7FtecaNmUD28f+xWsf2+Hj/tPcZrfjJ85plu4ojZ6svLpeL0FS66E4e/R4kTnwcdjZo3ifTtqgQZF23rAzV39zDzKRuDaka/c1tHXcKZ5tTLsztaLlSnfcu3sTY2v+elbMFMoGSADZOAHGWBBNSXbki1M/nVz/kIxMVU1usCCqrPyqYJK+PMCpUu63cbFid/tmkBkZODmGLh4Xt3ZenFzRBMQGSADZODJGXiqguqkr/nIU4tzT362bdM7lvWJjxVZ/YkH3NVcyvxEv+NreULV7tTrk7L++pPfFbRPe4rmmNvdd3+6tigurFC0pzlRQKLM9uqVTBa1BfhD2xyXYPX2tm2Y1KQ762jTiz9pazOyXztsx9f8hEpfPWl2BV6YyX0s4B6dk+d68sTsz0GfQO5kyZg/x+34x59QOrema4oDdKkfKxfuE3+iCP21CFS97VW3FKvi08Aw1Q9U1Cb62l55qz3aMWIC/NsGcfHntMmcMt48Fl1exrrSi9fCB6BH+JphqHHj6h
tOm0fNfzAHUGfl4RJeNW7eTvZaouAD2YmjhC/bNLO1n/c4aDYp5qbH9O+f9Or5FM9ZZ/AY/spclHUA5zfGYjofMdnpl4b2OWwHnT9tzUjjYB3BuEr+DpwiEn2TOU+a+wFy5mvXse8Dbc0ZxfoEZ+XX/JDjIPjMvCD27uOO1Bszf8n1fK3vQz5UPzNf9c0FxND5U1vBFynuss4aP752Hv8WvRP+ElQ5+Go/xXrZGh18ZZv6ecUC8VqPr4EXCVD5x+0kb2DongDxonYdtgPOhVGcqjzY61+PsUeM8KMMXJtUn63n/W0cHV/WA9/PFa9cC8w2F/ZrEprMNhl4ZAaep6B68cSvbcopARq5OBbdvuCmbraI+TXdqFLS552tn2+gH+rnCUxbGO3vPBouxY+bGCawdt51emIz/DuRtpDaMhgyFGfnCGxtm4C9+hj9xdq+oCaZk4Iq2dT6OH9tITdum73tmiUrOTlzHzjj+okYRvIgMY0kIUnYNpHRNx3kfYUXuPIEyO1SeZG4IMcp8evY0b+b4fkoD2jSQj92y/Fy3m6Ls4IVX0Fd6e22CoKIJ+TGzkfBrdcSp7Gphx0N96jfAs+l8aUYejKBPm/ry0Bv8rHaarEwO6/x53I8kYG47fM7jLaCA8bMeaz+ciHhgznvZazw2dcKlxPJcszPOU8X4UyxIhhGfgf9hTNP3p23XRx13gznqF8a0wu8vB45F2iT+tjlp/Uo48Uxc/7bWtDjD2UU38CcyvEE/CmeWJew3xyP6XFbvdgzX8/5Q6RL+1LHedycXaM9LsHGZJOuBS2Oql/qsXI5WYcGeEfxk7o1+c5hihHEJYMSlsw92qMyPC7SGJcRc2avz21r8j3/UCwtPrDt8e8cJ+N4QAaej4EnKqhgw9CFBo5XftcFpN3h7sm1b2ggYyazntfjWNS66mE/k58WPhkgmHRztoUvNrGBXFUgG9L4mizGvvB3LLsG6EGcHUcZUM4r/jOJYNiE9jW5IK/ixc0koYAxSbZ0kmtts5iOL/3kMOwwPpC3jgv1ugxPpOQY/gt5KDvbj/i6DixemzzsByqWTdSPHfU8Yu42Lez2J0L693wibR5zqBdtQgy5DfG307PHZGPr+c/hmfFaz4dNiFWQBI7oU6yTO9SjpKTz3iyCfjPeKq6iCf7msuKUnjlxxXmicvrcH43NmqI/rDsw7+I6rKFdBMqHduGjd180Eud17UV52O52NsEwrvKbj3OMhW6zYbheLLB77Nj6DjzomL1ME1X7ASa0EfVWe2f90tyr+kFvlQf8odp5fKVeab3WKyhfsHriP1ijo5h3mSXG0aaKsx4XXVbkDOIX8YnaKseh7M4XX4Fd8z0MxqhcON7Jd8XtE/1cbEvrS7UHxESMw0k2ycCTMvA8BRUmiIsFYhkHuEDpAhSF1u6RvQsqC9U00az9AKMuWv0Ru+uUhRw2sZrA7vBBYuPYINnrp7yh+l2XfermVHEhrz5WdMN5XHRxodZkBO1qY7Q/jI/NpG7ipRBx/fIJGHbywAbEg8O1DTLkOOwwHOZz4Oj13foMsasETWjTuJZIh+yMPeyAjXLgtzR+Z4ifGOAeJPIqC/2i7RpvJjP4w1iUa3u8e7v3/nSkfkcWx8SNA7+TWjH00b2QiYTqc3hmvIbdpjP6iZ45Poz5wJZfZevnp/EHT4PR5EGhPeex+mvgC9G/iwNfR8xOuz5IKgUXzDOFuZI3W2/SGtfmYMOEBUqhoR+GXyZ4fK4i14oTbYrYqX7Px9FPAIRu43bH5WDuzf21jqtucOErz8MJfyM/OS8X+iUVfwv+Aqff2IL1U/3q8QU9V3GDfkuc7/kyiXa++kJjqfJQj1c4AK6u2VgMpbUQOlb52G9kl/tERPhY/UTOIAa9D6jE9Uc5cJlFX4rrJGcQy6M4Bp1skoFnYeB5Cipc4HRhxA1z4G7ps1soIAkpC9BAgp1Ki5EvhLgAtpHDfoYxNuaqBfDgYqxtTOpgkS0i0sLZr5ncSFhAD+KccVDO6yLeuOz6pE/1yWRxj6Jin0ii7A5fGogB236t6e540uB2UMaFH4CPOq6MQRw63m1Mmz8mXwW7+/Vv/urwinvKA+Bb6YdukAjiWWnXuEB/FE40TizOV3qrHaax6kHZ2B5jQhm9OHEe/2tcT/xQ8cx4refj+FJ8szkJ50sshQ7k3az1/0ufOm/rsXGSceZxTVrR7zp2n7N+uFbIoFm/XVwhNmyj5tl57FPm1QoPYqv9IJYrT/kYfJfm92VY9/MLx2FbbLTj8K3bXfoBdu9hn9Cv2BvzteqAMbvCDa4VeZb8D/Y85DyDy0erfuWaYte9BvAkadlH6dIQN+QJRVcaiwdDOQP7tR+cx+Oqa3Sse1i1B44rDpFx4Z6b4hrkRGyYwcE3EsA2GXhOBp6ooIoCQxaLfbFUA0AWphijV3VB8gUwX9eFBRerLs76+canuiGZ693KBqn9XB4utO0u9PqVvxE2xx0apZUXSB9XNlK1u3EBi2vdKDuvA56cb+mjXKTF3fTFUy3HoQjtqU7jLPGsWIqP3DyR7zynfk1XLfB8HH6iDOeqjRvxZj4+g90xtaLXeVF5TXZNPC0WMFFufkvxMeEB7Blhdv3QbfeKiupf8G92Z17NTxZzK72ja/4H7j5nlA+Yi4jHro1i2/CcL6jMX86D4W5JVIqbxFD7cgRPttDnVuz02IO50GNfRckYGz87n23LOPOYwJbPGwdjHoMfGeP2hyRpZbuCGzkPnIuNHrsoANcKPT+TdxlO1d/1yBjAgHqhncZUPDi/sd3sdt6Unxb/md9a2GZeULe2y9x3+QF3xcM8rmK8tEyG+0P1Nl8hHuvX+Etx3sYr1hUeuxbzC4/Nz24f8pexzuIh95rH4b5IRxtTXKuNZu/+/GBvEwg1XqbzYYz3rP2Nd++XYiTFo2PxNb75qM0FtAf9vcMvMn3P8DjxmCz6RKbjQjkJY9nDKgs8JgPPxsDzFFTtG8nSY+7RKzIpAmzBj9cDygbeFkS7Xq6hnNTPkzDs0NrQ76DfIAZ9dTH01yP8PG5ibZHFzbO/GvMOfzux16sLcu/bvqUH9b2+xxdOKEbX3za0NrYnkV706fn2LUaw+NtC7XjFpsN2kicwfbHPco9v+6cz7pO+6FezygbhSfmH/F9k4GYt6pA3T2AURufusJ3eoLDzDajx8o7yfYzwhOdFoF7bxxfqDx5yUpdpwXg+bkl/7th01nizTqH3uMk3pnXdu/h1zGu9IQ9kOR/CFcafQsDY8SSjGoBzQ64hL5/AU8QL5sPr0b6FC77wxrrN8KFexD07jzE2m0cZmOLC33mb8uj8ZKw+r3o8g0/xG73SfMJ5i3DqWiHXLpGX/O04ZXDG2uMu+RYBfPIJ1Q5nrHeV33yMMVZ0r9YLhDz1197+KGZQQObY9hGfhzP+/AZNW4txzZriQb8Eto4p+Tn4K0jn8VA7Jnluz7qgqvHSsc3iqMZrPRZMMxxLvBP7m/z41knoJ7x7seOy0RfpWqwfyd87/Oj/sucWfSmuk5zQpcUZjEtjHDM/ycATMfAcB
dUTOZSm3hYDssn05PSHoP17O27Db3T8Bv3X2kTfXyGZ+Aac1xZ5LbuvjYvyyAAZuBMGUqFyJcxadP3SWvrfaTvizZwrmUQxZOBeGGBBdS+eIs77YEA3SX+yU38/6SdM+LedXk/bT/0ayPUKi3x3/yeY+oqO69n9FRQcSwbIwN0ycJWCCp86tSeM//0OIz95I+93LKRWMrBmgAXVmh9eJQNk4LsZ8FdZ0mss362U8skAGSADZIAMkAEycB0GWFBdh0dKIQNkgAyQATJABsgAGSADZOAJGWBB9YROp8lkgAyQATJABsgAGSADZIAMXIcBFlTX4ZFSyAAZIANkgAyQATJABsgAGXhCBlhQPaHTaTIZIANkgAyQATJABsgAGSAD12GABdV1eKQUMkAGyAAZIANkgAyQATJABp6QARZUT+h0mkwGHpUB+Tpz+wFa+Tph+AHQRzWYdpEBMvBBBuyrxuOHoT84nN3JABkgAwMGWFANSIlT+Kvgkqj94A/mXfQDfbYx2C/A/9Dv+HzxtzM+8/s9/94O+1+NDyeNW/B7UMbPuNvyLPwK/LLfJy5+hodPqHmyIe/bUb96vc3bO/wadsbF94Tsr/B60VqJa/jC9musZwvxT3VJ1vXX97nJF/ltPpxXyAAZeE4GWFBN/W5JGd7F0sT+p4oqFlTdM58qqM5tml367zR+JcH7HVOplQw8JwPXTMxvfD17TgfTajJABshAMPBUBZUksS8v9u/sUwvdDOsrQ1Zk9bFw13D59OoT/Y6vx/xETAushr/fefe7m/Zptjnmdpe+2bu6I2eFosmOAhJlvsQTopokoG2OS7B6e9s2ld/uCKZCAm16edk6rxKf/dphO77mJ1Tox8AbQR1jAXeXB+e2bVM8fw4aFztZyQ7g489xO/5peCsfaUyz3X3w4r5pet/+AWhvgh4ZBzx6j20rvlX58fR0yI/iPGyHP+Ln1hd95+dCSbSQu+qn6CWe3k4qfz/HIsYO2+kt4iPFQxsfcVDsxLvKiB04Cj2CIfgWmENeEn73y2k7us9Q59S+bHfE0eT8KEZG8yNhy7IiLuz88e20HRrm0L9tG/IEPlbeX499TMhTpsJ+kQkcIId4PqA2PH/9jPiw+UGxHLeTPHFWrHIefAx60hyexZzKKzE9idUeZ4kPi9Nsu+HW/mVdQNsTx3U+uh3NXnkWgmugM2OfwFfrf6z7FNrksZ7syHP/ALjN7hzPMUc+Pj8aO9P4yLaBb0sczeIyqmtmAwAAFMpJREFUjQf+9HyaN3k+dH+kPpX3CR7VA3E00tvmFsZKj6kEmgdkgAw8KwNPU1ClDa0umEPvx4IdCR52tMXZr+lm65sddmubrS/4H+rnCVBb8E//ieCGSzdt2IxVj2+Qdt512ubl1xI4K1w6brHJ+ilOTwyaDWpr4i76i9S+wSw2td4n2dQ2Pseh17zAava2a5YM5ILAfZAsQwwjec22uT9aUYd6HZ/I9iQv8ZHHWFKYkx1PQjsPCXRL+ke8l354iLGtbceJdmNbB1v8eoxMedBxETtJPoLwgsWxK0dtXJKR/Zl5wHhexfAi7nzOFDyXxo3y4AUAxr3Lc27BPuTfil3z+ey8xoXLKcl25iMIVlzObcLV+HS7E9fmY58fKqPpNTtH82jBO84pXYciLgIp+lDOgq9aDKaYc64RN7adH+Cr62ry3L66zmGsrnh1PF1u9XXFkPQu+NJ+Mf9RfrSBrybX8aC/cszM524a43ak2IgCWfu2mEKuvNjpvAbYvv84xsp5dF3w0uLXZVTMXUblD+Lv43NrgSf5029CzOeGr+EdJxtkgAyQgW3bnqSggk3ro27XxKndyfQkWmToedgs6+Lveup5PR4kIsN+Jj9tdq5bEwy0CxIX190/59dkM/ONrXffNUAP4oQNLg0p53Hzm+rDMdgWwXBcx+tm3BNNQAFjcLz2kGstyZiOT3rBfhUAx8hHGqMd0/8u4iGP0Cc+48TGOubYMFzoz85XjbuKu15POOAAeYXT/tQssAZHGePKnzEmidYDiOEphjxqxffM7/V8yKjYguvoM9efrhT8OL77Kw2oB4glcHivLgPiXK+Bz7OdKM+l+OdHea+yYHyJMbQ7FV6u2j8LX356nsy3HjCuc9IH15sXcMELkb6uLDjOwwYFJOwRu75yAvgq/KR1C2wxu0EujMt+rTaCL7xIVBs/Yl81IsusV+MY+kEc6nXAH/1LYSMXgIMcOzAK+sjZab9doQ97MuIr8hSDF6iglk0yQAbIwJMUVLKY+xOPLzgdF35ZaP01gP4Ji7Kr0X6w+eFC7n3ks/aDRV03ha7DizuRCZtxlbvDN8DWxmMC3iGpftdln5osV1yjO8eiG87jpoaJjW7+aFcbo/1hfGyk+40fZXfszudMHtiAeNL4JKPGD/AOsnR8st36pThpSdpU74z3HbhBzDR/Jn3Cr+gc4UyJASQ8RdfMT7mbcVR1S2ztfAQcZR6AVxE+ieFdfHQgM77tPMb5DlOTkfFgQlZjAJ7IegLeYjmKypbQ1vNgv6hFLFV/N20aF4WzhkVt3fEn89jWgaynyNiNi7VDsTZ7kM+OM61JchbiqsQg2p36FT41pnA9cGVFnmqD1+VwXLa3PBF3efCZ+w/iyueVjJnxNcAHKloTuK/9MU5qezJ3M+4co5Xj4H9hX7Kt7WPpXMTUzrZZPz1/wZ644qPESJ9zIhtiJWz8gJ9Ar44Hebt1dGc0T5ABMvCsDDxJQQWb1qWeloW536H0QSCnLNzeY/cJi7Ne0+NIUHr/YT/bdNKm0AdIA/Bg4qJtLCAhqUnj64brF01uJEygB3HOOCjnZZN3LvuGL30wKcAx2BZIcNzHN6go29HrJ4zB8f1a013ljWWA/doBjpEPT44nhRz6cax3wXsCNrh7W3Gd619wT+/2C48zPyUd8xjbJSXgm8wDFi3YFkUgH8YjhKpnxfcsbur5OAafq9LqK0cCOP2UfsL5gj90fGI+7m6KAK6iB+GIzuH8vnjtqHy49HJe46ytdyXm0D87/14Sc0WeFTaQqIP9yV4476jrZ+qf1tldz3KzDvxc8dWhegx81f6IE9u1nx6PCuUaT4DN1yp4QtWLkiFOPykyLtlbFv0W+F2LfpZ+dX5HX7ALefKiS238KB4oHrGgEvkYmwGCLTJABp6cgScpqGxj8aR+mjymYKgLsN/h8mIoX9fFfrjQWj9PXjR5wgW664REyDcClwcbpnQPGbAZY8JZkiLD5ri7Qm3kTcptylj8Dmx9QlV5VFyyeemm4/pMpnPfE5W0MZm+uLPoOARivpZ4Vl5wcwfbcGNN/Zq8Vix3PDC0N0FG4kmxN70j2cOCasJDVyaNBe/Yr8QDXko4Wxxo7OkYSDbxWooplOYx7+OyL0pPi8vGq8XGeY4ill2X+xNjoD1RGP29jWN/fbcnPX1uZb4vjRvF4/OuzKM91lESK3qNrxxbcf6i+ZHIXcVF8UmKjRGHI2wmwxLq0RizUzl0/2qs+hxHsA1P62e8t34lBrM80dv6LdcG0FXk5YIq89J9kWSDrNLs/dt5
xVpiy9b0OV82B3zuFAX9ELgf2eM6YS2y4jMKYozLijsfA8epoKpP7Mwm37M6VG0s7E0dV/2yfMSfRCgfvh5M/NkxNZ7Vvx6Xpsf2ngWeyjsejzD0OZDQ8oAMkIEnZ+BpCipPVv2VJL8blzecGg1tQW6vuPjrMr1XW2xNpi/i/Wo0Ur/FBgv9Dm/yzUzQVzcKfwXPz8Nm7IVHS0oskbH+h7f35d/i6IZWX+NBfa/v8YUTitH1RxKMr9eI4SGzfatX24SC77ZBqt7Ddvor31I2lqvfYOaJRZIdSUWQ3VopAVnjHCcObUzXC3hfT4nP4Dp/g50nPhYfx+0dMAUPBfmMd+yGfQaxGdzHk8FhcgfxNr/rCnZLbFU/Ia4egx538C2GgFm/wbLzCnNsyqv4ucQwyKuF+IhvgYm8zHyufoFvv8v9kAtP9FRy+tYzX1uy/7E/YpnNj0Rsfq0M52Pj3L4V1HgP/f4k09eNWKNy/JldPi7iufKe7c/cAF6IK1vHmt6ydqienpxKHDg+1LOIuSIvr/F5nNuLtlmcCDew7jQzvD9YleLHbxDJdZSZ4hTwZVtRKnAP/bWHxLjPE2zLRe3rfg38FXc+Ro4b7s4/xiSsGwi1taf2lr7LfhP8RQRwu1pbL5tbUzyV93o8XGuMr+kcqIbwmAyQgYdn4IkKqof3JQ38KQZ0w/Xk76eUPpCemhx+1rS/x/xV+5+VA+NyAgoXbrYJCfnNYiQwLa6heCEjD8DAf6ftOPzpiwewjSaQATLwYQZYUH2YMg54RgYk0Y472vmO6DPy8SWbr1VQtadO17xLzILqS57l4BkD31D8z1Tx/M8w8O/tuNlPmfyMPmohA2TgthlgQXXb/iE6MkAGhgz4q4J8UjikhyfJABkgA2SADJCBH2OABdWPUU1FZIAMkAEyQAbIABkgA2SADDwaAyyoHs2jtIcMkAEyQAbIABkgA2SADJCBH2OABdWPUU1FZIAMkAEyQAbIABkgA2SADDwaAyyoHs2jtIcMkAEyQAbIABkgA2SADJCBH2OABdWPUU1FZIAMkAEyQAbIABkgA2SADDwaAyyoHs2jtOehGbCvb5dvtpPfH+I33D20s2ncgzBgvxV2za/3fxBiaAYZIANk4GEYeMqCSn8x3X+BfulK/2pm/w2i+EX65bBrXNRfZz+nD3/UU7B+IsGGX6w//r0G8C/KwF+pn7W/qOJ+h79vR43bFpcXxfBtWXvZ7zzdXgJ6Ge4rc/2l3+vCtWGB69bm/wLq3V4SP65+1BfXubs1ksDJABkgA8/NAAuqqf8tacW7ilqIvZwrcqYCP3bhpwqqc5v9x1BftzcTjevyeTfSbq+guhvqPgr0luf/R21hfzJABsgAGSADv8TA8xRUWqDIk6bDdnw9bC/n7u5rMl+f+FiR1Z/kwN3dl1Wh9Yl+x9fjlmR2/C+A3e9C2+fLi9lnv97enmLouZfxHdKRzNG5bdv0Dv2fwyY6sMjUuJUxf07b6XX8JM9eU7NrOBbP9zu4vYhC/MftvZ93m2PGpKcHE/zRe9DCMS8vW/dv7brwoxXbjf834+Of8/YmLfmvYF/pRV0Qq6EHfW3SkU/k2a5Gn8PbaTsO4wLjCHnI50P25HyLh27126HHX/IVAmtcaQz/OW7HPx5nhbNN4iLm5ZQP5A/6J5WK87AdhIvG8Uxex53ktngH/7h87V/my9w/GOswV5HHpLfYP3z6AbypHOM0rRFy3uPAbRid8zgGe4yPHEfBXeATPuZ2O1v+OeGhzRvHGvHn4/xzNn7btoldPlI/lWO4Yaa8nTaN4wn/xkOP9O30B+fNBI/KOmyHPxI/uLYZmhlfWVdCzgMyQAbIABn4ZQaeo6Bqm6Elyi0J9ARi6oBIFscJtm2Wfk03waFM6+dJwIf6eZHWNmArlBouTaIgaUqJpp13nVsaXwweJA2Jp5aszXFHsuL6tG8bZ0lWS1LQD6hXE6aWhGkf7L9vq8yeRAq/ONYTGuSp2IyHhRuVvfDj0N9JRtPbZAgXzksqqNKYbct6wSZPZqUo06QwEr6LeEZbe3LrCe8ihlWX9dvzbRhm5xUncIj9Mh8BTvukeHfeMMalP3Az5aOMkX49XkKn8elceByP+Z3hXp33Ik00qn3Oifo+x2mPEYwLwa1jLrQHTEuxpjxlfZ2PrmPbbJ0Y99NYc/w9jnBuRiGofXH++7hkdwLbbzaMeMD4Mf+Hj0KKcTQav7Irxrv9ILtzM+c/+x/7XYon6x3NA19zElYekAEyQAbIwE0x8BwFVd8YG/f1eOUS6et3cfHphZ6HzVeTBTh2mfU8JkzeRz6H/UxeSsakb8ePGzgkmihX24trXRbKbQLARkySduKhn14DW3LC0e5WS7KFelEgjE2c4HnUh3KwLTKxH+pYtasM71tlAZ65f9qTvdkTKpctn6gX29intDHRnPJcxlQ/hgyMJRkUCWH0ycJm55MtXky0BLvidIn1fBxXXPNYDjx1jGspn9Wn5XLIq360jpVLHJ6vBZfeJ+zzM/4J9vU4uNAeF6GfMKbYiXYlX3V9TRCMy/bAPNaugDn5+yN2J/CpcE54a7fpMWBa2JWGw5zW830ccJkG1LiY90s3AlQPFPKgt8ZF5b2o5yEZIANkgAzcCANPUVDphux3SYX4vlF+0Au4EWqy0V756QUXbJIuWvthoQUbvfdxTH6HXo5hk1X8XYfrFJm4gRe5O3wDbK63cbPjCTDUjR6h7/js4/YJFSZHaFe+s9z46nIyH5icIK4V/oS3HGjSgvxirHjfHZ/iB+MUbdLu0rfJQHxepPgd55nenR2Oobz6pIW+Fiprnvvw9mShc52SX4kffzphIxA7YnX80mt4HuyXPsgPygxce/zRD2NcNcYTySkf3g/nSmjrrYLT/YM3UPxJTuCx0XMf2fXc32xIciXeWpGp8wfjz5+4JnzmH5OB60m3pjSAtyQn+wPn7s4mmH/ZnlpI5LUn/L2wWzB1m5s96VzMLzFM9Ht/jL9k9GT8yq40HuzV84m3Mf+ZF+BcBEzw4NquerpeGz+enwkpD8gAGSADZODGGHiKggqTBuU/bZQTj0gfT3h6F9gwL5Eh4/pm2YTo8aC4GfazRCMSlA6kNQAPvgqlbUyOc8KTpKAd2JZOctyKvJw4JAmpn15ZjNPEaMUr8jBr98LgPX91+AJ/QRyHgLVjnxVUo/NeMOA1wJF5s6RME8KVXhgfQMtrY663cZn1tAR0x/P+fPgDY0m07pM7wzKLJThf8IeOmoiHdSP8llgWXDB/aqI8nScFT9dazq/kJXxlXJcHjdQ/3fiATtqEmOjHbX2Y6ZmdT6KBt9I/8YTXsC2y5Hgy/7N94PsUl4AhYRsdLHhI3bOuuLQYv7Arxu/X6hoPvS/I2/Pg6+4CD65rIhSOs7z9fO0Y2CADZIAMkIGbYuA5CqpUYNgm708Q5t6oG6InGF4M5eu6+eITpi7Y+vldR9kwx7oNV+rn8iCJFLEhAxMW0bPC5tc6MGtAcmAbuycEjadJwp6kaOLl43ISkHhRO6xfSuo04Wz4ILnARCO1RXn
TmbgE+V4Q7IvihDwljX0MFke9+8LfI71NRvgqMO8LqhKTKi/8pTJe3/Pf4bSYdvtmPHf4raGyPK7SvMC4cqyGQcZ4XOLfsMzOm28cv/HmOPMYQKf+xCeTrjPHodlpsrXdfYV6cC40WwbFpeLs42vBivKMG+UAcQL82qx2jrAap6bHn7qgfYHvQnsSCFgbBHO10/nAa6M4nsz/bF/Gpza0cXO7E1hZ1dITUuRhr2v0hG4+frWuJRQj+5W3bJ/6BXjp3Gps+Dp4Dg/YoHrh5pnPz4QnIeUBGSADZIAM3BgDT1JQeYJor40c307boSUYebOu3rFN0V818Ve8eq+24dl1TyD71WikfrCRRg9rQT/7Jjbo2zZr0+XnIWnqrz9BstleqTm8yZMc3+iLUkyo5BLqgSRsyZPKgG8Rg3EiUsY6h5GYt0S5Y2zflKUcuH3O//6bsDwBC3nNrgl+TPIyA4jjsJ3+nraDJzS5Y7uT7LYUf4Ne/YbGzoHbIK94ncAPZ/SCvJ6weRGlnB23d+W9fQvZlOdshPrx9WjfbLf7xkbEhPECNqTXAmfn0efH7fTRb/l7sTHdt7t54dyj/sIH8jfzZ+HPY6rPMbju8W+JvseAf3q8BtfeP84gJ/C6nz/R6fMA5iroT/MS7DkX17147/E4f+VPsSJvMKbak4/FD+6TIr/EpRfWyIu3kdu8ZqGfMS59pH3Ox8/XtSyhYVdfHLYTfFvnjP8UM2l+oyy5OQB+TWtcfkIleITb/Xpp5/ucqMB5TAbIABkgA7/KwPMUVL9K84Mrx8TvZk19345+V/67MV6Lj7/H+Ve4f9KGnAh/UgiH3RADPxjXN2T1U0L577Qd+xfcPCUDNJoMkAEycLMMsKC6WdfcEbBrFRDfafI3FCdTuFfk49oF0LXlTTnghZ9h4Cfj+mcsopYJA//ejpv9dMakA0+TATJABsjArzHAgurXqKdiMrBiwF9zilepVr15jQyQATJABsgAGSADZOB3GGBB9Tu8UysZIANkgAyQATJABsgAGSADD8AAC6oHcCJNIANkgAyQATJABsgAGSADZOB3GGBB9Tu8UysZIANkgAyQATJABsgAGSADD8AAC6oHcCJNIANkgAyQATJABsgAGSADZOB3GGBB9Tu8UysZIANkgAyQATJABsgAGSADD8AAC6oHcCJNuA8G9Ac7f+q3sO6DEqIkA2SADJCBW2ZAf4j6N79ttn3jrfzQuGCBHxy/ZdqI7fkY+D+DG7ueK5aOIwAAAABJRU5ErkJggg==) ###Code a = float(input("Digite o valor do lado do triangulo ")) if a != 0: b = float(input("Digite o valor do lado do triangulo ")) c = float(input("Digite o valor do lado do triangulo ")) delta = pow(b,2) - 4 * a * c if delta == 0: calc = -b / 2 * a print("O valor de delta é zero possui só uma raiz que é {0}".format(calc)) elif delta > 0: calc1 = -b + math.sqrt(delta) / 2 * a calc2 = -b - math.sqrt(delta) / 2 * a print("O valor de delta é positivo possui duas raizes a positiva é {0} e sua negativa é {1}".format(calc1,calc2)) else: print("O valor de delta é negativo, impossibilitando o calculo da raiz o valor de delta ficou {0}".format(delta)) else: print("O valor de A e igual a zero portando não é uma equação do segundo grau") ###Output _____no_output_____ ###Markdown Faça um Programa que peça um número correspondente a um determinado ano e em seguida informe se este ano é ou não bissexto. ###Code var = int(input("Digite o ano por favor ")) if var % 400 or var % 4 and var != 0: print("O ano {0} é bisexto!".format(var)) else: print("Não é bisexto") ###Output _____no_output_____ ###Markdown Faça um Programa que peça uma data no formato dd-mm-aaaa e determine se a mesma é uma data válida. 
###Code def calendario(data): # faz o split e transforma em números dia, mes, ano = map(int, data.split('/')) # mês ou ano inválido (só considera do ano 1 em diante), retorna False if mes < 1 or mes > 12 or ano <= 0: return False # verifica qual o último dia do mês if mes in (1, 3, 5, 7, 8, 10, 12): ultimo_dia = 31 elif mes == 2: # verifica se é ano bissexto if (ano % 4 == 0) and (ano % 100 != 0 or ano % 400 == 0): ultimo_dia = 29 else: ultimo_dia = 28 else: ultimo_dia = 30 # verifica se o dia é válido if dia < 1 or dia > ultimo_dia: return False return print("Data Valida") data = input("Digite sua data") calendario(data) ###Output _____no_output_____ ###Markdown ![image.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAvsAAABNCAYAAAA4jasPAAAgAElEQVR4Ae1d4Y3rOK99PaWNbeWmjv0XTA37FZCp5GLKWOBiW/ADKVE8pCjFcZxMkuEFdiNbEnl4SFG0nYn/b8l/yUAykAwkA8lAMpAMJAPJQDLwlgz831talUYlA8lAMpAMJAPJQDKQDCQDycCSxX4GQTKwBwP/npfj4bic/91DWMpIBpKBZCAZSAaSgWRgHwauLvb/fB6Xw8eX0c7nDoflQP+5PjOwHXwtJxmPn7/Oy5825hUaAztWcfAK9t0X49fHYTl+XvD479NyeIG4IFtOv+/L11rpq3jli5PTYldyoOFF+A+QP8epiGc+F+dLzKU+nsivnGNvvqj8s5x/3S9eV8Xfc3hnBxS0B8hF/oRXWkeHFevNICr7i48DM+TSwSa9l4Re04/8XDPvQWO/nZ8H2blWTZSv6lzOTdfuxXfkdxOetTy84biriv22EWExy860ye5iAbfskMSewhmRHZOE/xSYnwfEzyoKnod3RjJJ6k+G9HXhtKIeizzMGSVXtHyJGyPPlby6LGZjw3Gvy86bIF9ZzG7yGcbKRro26d2oK5y2kp9w7gNOfjs/D7BxJxUmB62VeUd+N+FZi/sNx60u9vmu0q/z8uXu7DPhvvi/ePV3KYmV/nIXyz8twD7YRDmo5M7X4K6VL3BojmCt7XO7e3ZavtpmPboDHdvRiti6YR9/Ea6KFWS2czWwmEt+0nFcTh/yBKUWBL+OfFev3OVBDoCfat+ZfFTlnP+Fsd5P8FRlePcIeD190ldVqh3I3VKLEZDP8VLlt2LGLaDGE50f8WL0FC5aXIjvZnIXuPi6hh+QyTg/TstR+BK9VV67O45Ya3tVPNFYL3tZFtZb/S4cXsVrxXdqMS3rAmJC/LmC/9vwnPVp3sdXKV7ZZi1oifLQvqEd1UlX8AdutRg+NRewne2JE8QPT7YxOF83x+X8G9YMzXcxgxuWzaVFD/pd2sv0ZskafGDT2jiNxlUyfVwof1UP542Sm8kG9THk72pTWweQS9BnYvtV4z7O+iSj2iHPEyPOm2xZ55JDPmCdMj7kmuIYeG2+LnafeK7Yi/MOugexodD3ceI102JstEYtQSaXWr2D9eXnSz4P8rf6DvfEavdFfkgR2HeQfKTros9VBZzujcQn5Axc+5LLAnvMKeDR87OgPPE/jB/Fhpy3vjotd7MnwmmMXBa2RWwQn0LcFttLfLZvZLj8pHzUmqTJs35sdRRhAL5W8evmGP9GNtV9g2ukhmddbJfcdMNe5PYoz9uq2gv48TWgN3fP49XFvii1ybEv8kpwSFKTWf4zLpLLqBJEbWNjYmRx16RSvy7RsJgx7i4YquZxgA0Tf108ZbFKINex3AfzmszIDjhXndoSQN3QxDZOnBKsxoZajMHCbEFVk6XIKAur8lP1SV9JzNgXtCUJCI5mGzXAlhbkwAnMab7w8joOVEFZeLTtBnpENviI7WlFgJ2jUmuRHBVra/lBYd5utIfbEBeAVZLkxXhiGTa+xdcmPoRX2dAQR4CXY6COMfEgvBrslkujF2wy56/E05JaxWRsrD7lGDL4ajFw0Q4pGuq6rfI8XkMTy3S8V900TziT4kTWMMuUGOS8IDKMdD0wPPcbMOZLq7duXqzL5j3BpBhV3Tp8II9tcPxJjHEfrPeD8oJ6uC1+q+ulYKv+kL6qC/v6thQLEa9FXrPb+FA5oBbiM2uRMAgeiWGMF/GtyUmRHYKP1o60gdco11deDTajp+IWfMY3kzVqTC/jhCPWVfUO15eZ72NU7RvPX8tP5JfK3WyNYxwa3yo2MoHwid3eJD0e82P2UrkoafEAEiA/GF8yzgfYY+K+ch/g5LiXWDKxPvEX5qvqE7OHVXnGbhPDW/jFdVPjL7JngodjU/KWGad+oxauh3ZRUnWhTcNYN/mDcE/8LXmB8UiOCPgBH1m0+x7dXOyXRCqGODKHWIvBcjVcPqFoMvNoLBI6GgeTjEPgPJMO83EcL1TtI8e3xOHnNZGRHX6eclOCS3VgcuHgggDX47IQGpamWxqeH9WnMmgsjJOp8ok8yDn6dJwY/G6O6urxGi5Bfjt/hR6d7hKEdvCCVr5gnFl0/kJ1zA8mASm0OAH6uEBOnE3NVsIJ85g3XOwgw+ota0vtchtnZL+z1yR/wLDWz7fgkeLex+GquJnZAXwxBcC7xwsU2a/FUAfIMb6Su7p8g6Gsdyn8TSygcGwjz7Lhen/zJhWvm8JbrBdjoaiMxyleAQZrAviiXmM7YnfjMIY9zyrD2YTyvC6Bxp/jtWiGDXPahAfwM8nS+LOSrW+dHUYvYgVena3Il9UEc0ysMTr7REKKGery8kWoP9+OvQ3O1zJ/6JfZfN+HnPi2XFg6+xin7l24HgEaN9VnKNuPGhw3Pmo/HM9ysUizsU76H2/PGpyMdxjrE38BH50PnDzhxKwVnE8D4HiMu+Dp85RqiOxBfJpzyhzrJ5Vjz9v40bjy/MBaGXHAdmr8qizSDXqAD0bl5inS/Vu3F/s1OUjhzo9tcDMLMftF4gYRofK1BnzUz+ehWIZp7EScE2HwRKPjsO0Tnp/X9F6ww8/r8GsQ2CDETShYCCynPn5Dfpy+YcA5nzHXEV+OE1y4uNCIDtVV8Fr/wVeNGnduAaHv0CbEwPah3ZhoVTBxqYUQ8HcFPyoNcPLJsTzDCeKexBPz1tleYjyyQ+1CzhEt4HX2GnzY18UTPCoHO27GwzA15ulwVdwgVpoEmHg+xi6MtXgtR6q3ngeZdh74m5O2jT+Kc/SJ1WI3O+4DPe24FnFWb/FjKfYRA80qx73eko/8
2uvHgTyHx2AALpFzxg19Zo6JddBDk2AOHZp5hMOsA904WZ/8b9U4G2PCFxcTzl4TB4zP+rcUIM4O3LxNG8YxTtir0PahnuI/LXq8PIst/LrDUG+RZTmO8nIZN4qZeD7gZD8h/77tbajrB/khGcZPAXa5MYZcYh6QePGfQ35qLjIxSFjVh12umeWDO9rDOCY4m8mGwz7XapyBjwB3Zy/KQ94rlra+gDNc83PcJfZLfCnnzRbZK9DHDU8fs6xLYgSEmJxj1m7PTxzrNk7aOgHeSJ3VD/z6+HMYAOruzV2KfURljcQebBfHarDN+oAoR2ib5QlsQdBGlIabz1gleNwcExRunkqd2dFvbhj4LIPllk3N86bHUSLFInfMj8ogbTBuLV9+HPIQ8BUXJsqWbzWOnSwzrvX5Be150VlNLp8CHyH+2YJUUUXCNRcPV8aT9ZFVbO1wxVEtlvSOuc5t85y9ZgPFvsaxymgt6Gtya2d0PMXD8yAOjQ/G/uzWDWAyNpF86hsUz80m0Su+knn12NoF8YNrCIXN2sgzjXPH7P+q18aCjfcxJlRuucUe2waukUtfgCNW4JVlwbHFhnEKemgSyjO6kGMaOLJj6zjAEdiLeatt3uYuO8xn4xEftmGcs1VtL2PW6QF5DjfDiP53QW+851pB3p+lF7DY4bSa9AkE9yEno7YT4nGDvbhGaJZdJypndF5H9DGofhnL5fmAR+WhbXqWW3e0Z5WdBMJhJr9irGssgB2I281XeZMYxvmEAY634m7MDvFgzimj1dY2mxs2tsFuE1c+nq0MPYJxYCf1W1tBjxtX+Bnc2FBFu7RuL/aNA8goLERHGGfjbB+TJl/j4U1A5XMfbZSEoV1NFgfg9zIbCiZa5rtxxg4XPN5BTaDF2k5Lo5tXxkui54CUgoPHitPLuPHCFBtqUAk/Tt8w4NbyVfk2eIVnlhHhrZjELidDqKFPXXiWx+LzenXf/FL8JVhKUac8eLnN/4yzjlvLDwozOKnDL3DBsDGejN8LJ4Jd+SmADC88T3RbwG2es1eTtU3CpbhSWUZP4x/9tREPTyNfS9zYpMh6o7i5aIdgrz6A72C2eLEU1U3Izau6zbrE+Kmx0C5oJj5o6jx2k8OimK5xz/McT8INY4rvfjF2uaM1xAcxDP4lzC126ACxT3gwc4wM0OPlmXHR+lfbG5eGO5f7dBC3hj5kO0R2n2dbvBh7nR2MA2VIG8eVtshjPJw77Xmfx8waMBgijqIYsPJV7/q8bDAA59H5Yh/aTfTjGsd2zW9RfGKskQiyvcZ7pJfXoJtD44RvFw5wOObHF14thtgPEdffZA/bLTFXMUhuAEtLbMm4PtYvFvvge9n3ik8shzaGbR/G35BfEy/V9xIjaM8Qj+QCzJ2S240Am9+cXo6zqjeKOYotHFM4qfwGsdj2CaOn+EHitMWYhXmXo9uL/Zq05ZGHGEFoyRA8VguKwRps2kMtJrQ+Gjp+ftm7BkyqPAqUBVgCrGAIfgEDxKvs43KGX+DA5ELDDXbnSBU3t8NsljIpxF86Fdtp4b/q5sArtiFXOo74BX4cTh7XFg1hlYW/nq9iQ+H7yL9II5zXJMN+QrzFlrLIq58aBiFBx7T4MLwITpv0Jamwnz++rI+M6OKXMg5+iWM1P0aY02P9ob64IZ7qpl7i1/Lb+KmQkFffJ6hb7Dp7bYwLRzZBtjUkLweDTbfJFUWTtQ9DHH8Yh3WtQ3ygfS1ZTu2oMSKPtWHTi/AiLown+/VD4YbuhEH88GRcO6P8Blo8duric/Ha0HjqNyvlBtYHqCrNNfgghsG/NN9whth53Gk58S+L2V+QMXOMDNDT7I7jG+02Oc3Zt3ZcK04oLn4V3JJDlUeXt3Admvzi7DCbt/BNPnHjwM/HT/oFkGr7UA8ZK/IKx/RrXoLbxE3L5Y6gxnPN2ajXrde2vgIRypGNcTyv853dQ36cffgVOIw1GmbiEtYjcQh9GA9ycUDTfUwaE0d+Eb2SS6q/jA7XZ/x1R3sM/gHObozxN8b6xF+RH6rN/Gt8kl9nMXwlv4wb5ck6iQyCcQaPsdXGLIqxcbFhL8L1OfE3x0zb16weu441H5bzcIzAd2hfXezvoPN2Ef+el1P7tZXbxT2fhLIYR8Xct+H1ieDbgKTiZGBnBqCA2Fnye4h7aX58cfMeLkkrJgy8fY0wsT27koGAgdcs9uXuf7tyCix7tVNwxSp3rp/OhCz2n84lCWgnBl66mN2Jg5mYl+Yni/2Za9+x78/naTnL08l3NDBtSgauZOAFi/2SuKkgbo83rzQ6hycDyUAykAwkA8lAMpAMJAM/gYEXLPZ/glvSxmQgGUgGkoFkIBlIBpKBZOB2BrLYv53DlJAMJAPJQDKQDCQDyUAykAw8JQNZ7D+lWxJUMpAMJAPJQDKQDCQDyUAycDsDWezfzmFKSAaSgWQgGUgGkoFkIBlIBp6SgSz2n9ItCSoZSAaSgWQgGUgGkoFkIBm4nYEs9m/nMCUkA8nAHRgoL/CRlxXNXiR1B+WvLJJ/xvd+L2d5ZWoSezKQDCQDP5GBLPZnXoc3wfW/ff/Cv938HcXAT/6N/r1t31vebA1I38N/Z/1rOfHbGuvbM+XNjYLn6s+yXq96UR3x3DDMLzbojYksewtPu65HsnOO9TrqHpfn7Fsnr0N522j3hsvbhM1n89qtb09ub2Ol42+8OPuOfDJnKXuTgWRgZway2B8Ryhuw3TT5TmMrOh63CY4gbj6/a3GxEsVP3lD2tn1veWtcuKWIXSP3YWNuLfbnxZgp9q992d93rMfVvD8uz/2cYt/uK6tdca+B35FP7mVLyk0GkoGQgSz2Q1pGGxyer+3P83Ksd2jwrmH5CkK9g4ObPydWubMDBQQXU8ci69f/lv/9si8NI3kinzfFdldINo6K5+O08FMI6h/oPfEYp1vktYuZgBguSip2HGdsEjw4v96dZR1Vr5njsPwSHs7Ln1pknj+AM5grnLC2zfiWZWE9p+X0S/RYO9CfRieYWXx0Xk7CpfDPeKuN1Z4/dR4WODz/15H9V3QUnzZ/CucoD/R3HExfPBf4hAQAt+Zuo8M95mMgF33jcaFOsVHeki1cHq73B5nTcP4qvlXfWW7jF/R9LSf24Z/l/HFexGdN5kHXJMUPy/j3vJw+ZaR3DhyDzX49xusb5i51rf/Wcxw7rJf4xzV2XI4c0xB/lVPWK3w7/2pcOl0zPyqcC3EUrDOUS5wzHsgDJBvHCG7UKe3BOOWIBopdGAclxnicWYcQR+hzjC/iVNa74Ig+2e82lnWYYJIz5EsYO7XL5h2NIZ1f7LfjWBNjcjlY1h3wrDEh+PIzGUgGXoWBLPYjT/nkB2MoYZakLpuE3Vj5Fd28UUlxQOMk4ZZCSAoLliXJlBO5jFsWm1gh6fM4TcwdnlBe0SuFDs8xBYFcWFSbok2rFicFO44rbbGJN+ThfMG9ngfZ4I1ewY5c3IqPZcU8lI0T/SzjIDDaxi8+BBsxnkiP+EgK2sqXiQeR17gcyEMIrEf01xgCXTrU+kxjLYgTmQ+4x3wM5E5
xkU7FTBxwnKJvHRdj/WohtaJxZg2gbYDBSrFHLFPmmZiz4+ZHAc9RTDu7USbjMLEhHJLsUawWvWYtiS3gX9Kj8sGnUz8iOtRTC2XU0y72imwpklWnFPZik1yEyrGdh5rlYtXYCOtL/K/FPs0mvCLb4RUuIk4NZ4RJZRhMeOA4xK7SVv8RHy2vmnnW/iifC6fcB/Yfmp3gI5YdxYzV02PNM8lAMvAqDGSxH3kKk5/r1w2pJELdPMom0QoV2dxwvitgysZUk6zvw2OzqaDAflNWPLCBeXvw2MtGvahqOK7w0DYlnINtr1M2Txoz63N4WjEYzUPO27yV+Nr4Alr9DH6t9uAGWk/xhz/fZHj7AGcbMynsKqLlLE97UB4C8G3vM+kfzffn+bgWMCDL+ABx+/miz3+CrHIHVy6M/UA99jxpnNfirBW+OifCWeb5mOjXskrBVj/O68DRw7bnyR/DRLQbTte73JA7WkxpsVjWFRSgyDsJw2NsS4HLnHquAIWb03ro/Mr1jfZhu1zkV/sET7PRYW+Kg/OAxfoK7YJcifFc5dp5EG8j+xGPb7Ov5ekhfGL8slx6IqPrgrmBY/Qd4ZPi3l+4IKd2HOwdGH/eJuDPm5LHyUAy8DoMZLEf+QqTn+vX5ImbRRmEmwKPq49CW2HCiRMSPPf3xVSV1u42oVy5I9W+2tEeH3s8sIH5hA32dZsI9KHp83GkS+zSDRrn9wW9jJfPAQ9u8zFcANab8Tk9LA+KneZDKTxwc66GGmw4DnDiJk3TVE9/UVE4E37KJ19UoTxDci1Emi8O5ilCG+rjQTq68y6GuNgocRby0c0XwWNcnd/alKJnFOeh/jaXGj1O9U+JV7xA1T4jxB0EmNr6c0Nnh54n489ARxBrWNRZ7GRbXYNGbo21QcE4jsuCR7giXcYnKE9sZvvcOLmjTH0wB+Mf2yEejGtu97mmiyfgwPKEdkGc12Jf46uM02O7ZllfxYVjhIruk/HABVg3gE4gtjIA9Sj/xX5rl7UFObXjwA7gaMZfCDVPJgPJwEswkMV+6KY+2ZZheN5vAv5YBMMct9HJCP4M+kpy/jKPh30y1mQOelggJH1I5tyFx14vHUuxgAC3jhMZM50yhj4jPVAcmA1rJnOtHaLb6VVe+yKcMOidNBFQC1oozNq4Cc425mKRAf5Feare3u2NuJSxo/n+PB/3F2HGB3gn1M8Xfd4XyDW2ZbxcBIHft/oDCzDFDVyyznKMYwEKNP086Lqm6XmC4/H67hUUe2x+KBcBcbE/XVvODxqXYPPMjwjPycIujwH9iu3ZOCPPH3jdgFn9T5MoP8rX8SBXdutw7foHnjwmPMY1heehTTwcP+m79XoxY7iBsdTs7dKLCZynPi0C2jHEn+edjwGHU52HyUAy8CIMZLE/chRvEpo0aRgnx1aAlOTe7lJBEscEW+7SiBzcYOqdFUmkfpMihYzB3p1l2Q1DkVcKT7/Z4AZW+qSYYTtEL+OWTa/aBAVro2c4DvVUzMP5snldwYPjhbCLHeZpwa34nB70IbdDvho73DC8YjHB2Krt7FMbD3LhYGzzd6ZrLEzv7PMY4djFp4Ea8M8xVc4Lv2yPxBrwM+ZjIHeGi7kRPvSCaRznbt0Yvxsj7cVPHTe0Te48OxH+MMIlMv3Y8fF4PUbyJT46eTUmWg7iAeSDGgMYd63PrXX0b+Og+LHLKzM/GnBBHAgmiCOawvbWfIHtruiM4kSwo24TDzafdfG8sthnXIIf5Bu8vF41jhGSaTs7TB8dUL/YRVxJLnXz0BabN4h7xYEYeY7YMcpPYJ88YWgYOrB5IhlIBl6FgSz2Z56qia89NpXEy3PKRnKCX+ORR90tSUaPd41MTcrd5sY6yqZpi4m6Ectj7LZ5VjztFzps0sevhPi7Ru2igmTKRhPxwpt9fTyP4/B820y8AMGNhYg86p/w0Owr8szGxlxKcQsXR96ONficHtwkSXPZKAte6w+1k7F9nPpfZ3I4VdZpOX8e24ZubCOxiPvjS+/gOXmKoBY3HBvH5fybfikK+NGB419LMfEJcx0/agNcfJH8cP4FXGhniyuJF+L8tHyt1Y82Gr8VrtV3iEkKYDd5cIi2zwqhzp8oD3iy63FuN4qQu9NqE/XSfFxj4EPqblwfl/On/UqN2oVxiXkFObsmvsbr26wzwUZ51vmb7ZZ+yX2WDD3CcS2eqBu4/Tjr38DUC2v549XIb8oNxjvygedJD9isyNz6kPxHnzS+n2ewoF3i4xrjGgNWBvLLslbkJ40Rux+gLDQp28lAMvD8DGSx//w+SoQvwoDZmF8E89PAlJ+ufBpAOwBZ+zOcO6jaIoILWHMDY4uUnPMqDGR+ehVPJc5kYH8Gstjfn9OU+EMZyM30Nse/G39/Pk8L/xTvbbTsNxueJvDTSnPXez81Kek5GXi39fWcLCeqZOA5Gchi/zn9kqiSgR/EgHy9YvDVhx/ERJqaDCQDyUAykAzszUAW+3szmvKSgWQgGUgGkoFkIBlIBpKBJ2Egi/0ncUTCSAaSgWQgGUgGkoFkIBlIBvZmIIv9vRlNeclAMpAMJAPJQDKQDCQDycCTMJDF/pM4ImEkA8lAMpAMJAPJQDKQDCQDezOQxf7ejKa8ZCAZSAaSgWQgGUgGkoFk4EkYyGL/SRyRMJKBZCAZSAaSgWQgGUgGkoG9Gchif8Aovy2Q39R4aG84LUPtWxP9G2fpt4zljbv6VsOBkhWn95Z3WaV9A+Pl8fcfgRzw21TXqOS3Tbq3h66Z9ypjzG+m72knvjHVksFr4urfZi8/q6lvl7Yy8ygZSAaSgWQgGUgG7stAFvsRv1RItaKmFD9SuFPBI+2lvmZdjm0xtEPRTAVre8Plo4qmHXBHnG49ZzhYFi78GycToW9d7NtY4Lhbw8mErjVdNr7XzKAxFuvaWTkuGUgGkoFkIBlIBvZhIIv9FTzOiiktgOii4L4vBaJCVy4sOth4p7ddqCzLwkVvfdoA51nWx2k5ytML7sOnFmoL3llv+lnfaTnBkwy8e8u8iOyDyloQJ57vDBqcmBXxIPv0cVrMU4CQh1KIypOY8ql3yb/F7hCn44LGrCnuaRz4HOM49j/pcXf2G57jcvo4gjyMlQOcVxnM58dpOR0OS4uNJs/NAd8dtsSFoygPk4FkIBlIBpKBZKAw8NbF/n///bf8888//B+1t/0rRU0rcp0QLgi58KLC8bScP48XvsYTFZj+q0JOCR/O7pBSnxbUXMh9/llKYS3na3FWi8RSyNbCthZapSCzsvRiZqnyauFW5wgvLE8KSy7oXNHMeoNCck3RCnQYPHBe7iAbPAe0L+YBRWAxbPQgP/e0m2Wvw3n8PHMR7S9Q0B6+0BOfUBlPsXnR/+AjtLteBMjFg8Y9abSx2ccCxkxkH+gkcRQ/V8aFsTsPkoFkIBlIBpKBZKAx8LbFPhX3f/311/L333/zf9S+uuBvdyG1cG3MUcMUtaXgaUWKKdzMrKsPuEiju+RQuBkhhCPoMwWr4K3jbLGGxRYW++W8FNAkgubxsbdvgIHmaJGJeowF6w
68TpzFfeAnOJ7xICLsmO+x22Ko8TXyq7u4a3EnBtGn84n6wX8dCv0CbTffy1NVMMc/GYDjsX04X6VmKxlIBpKBZCAZSAZuZ+Bti326o0+FvvyjNp3b9I+KHrlLLAL4nNylpJNYJNNxKWCwUJapWz+7YqkKmp5vX6WRPxwuBXEr2lkGFltoRzlvv+ZSn0JAMc0iTGEYzGt3autFEeOC4vwSKawPvg7ix3sfAT7mZ8BDw278G+Cn+WQDyG1zW0EezLvC7os4q808rsn1F51AjPEJXnTBRRsPL7jLkx1td3GF8qo/MDb0yRD6yckb+mFjXIC52UwGkoFkIBlIBpKBnoEs9ntO+jNc2GhhX4oyX6hqUVMElOO+2MeiRgrwNV/jWV/UiQFdUSgdeIeezyH2vthv37eG+bOi1xeJQxxYPKJs3+ZCXvn33Xzsi3A4HuqniTBO5SIfejbUAzbcavcUJ8IAnXya+fHx2N/Zx6c544s9sD3Swxc2PrZhDtzJL5C1b7N9aHu2k4FkIBlIBpKBZOAqBt622L/pazyuyDFF3KiwMl9XkcL8QoF6wVW+OMJizUx1FyNtXHQevsajFyJakPknFMb2+t1s/RoPFJjAWTSnfM0ELyQqR3iH2hhVD5wN0ZByzhagzIHcrXcyuI94cOdRdmTD3e12eBpOBMZt5LHYPfwaT/u6T73IrHyT7Nj/PhbkLn3VExX7vCZkXH2CUOOsfNWt9g3tQ3tWxkXHSZ5IBpKBZCAZSAaSgYiBty32yVgq+Lf+gS4Xe+0rB1rUliIS7sjTGCls6h1z+WpDeEc88sLknNEHeropteBi3TgOz0vxO72zX4u6ViTW73cLF1Kcc+GmvNjvc9fCkuecli+4EJDir3Ck85lvkQ3GWT8I7zoPhrY/ICbZ5Q9YYVzEgzknsvUCzXAv2Ha22+CnA4MJ8PuBjKNiRn+7cWpD/ePxq4p9i+f0CT9Jizg/vvRvOVi/xFBZG+cPvRAY2ofyIE6dOXmYDCQDyUAykAwkA39wVBcAAA7pSURBVFcy8NbF/pVc5PBvZeBrOUlB/a04UnkykAwkA8lAMpAMJAPvw0AW++/jy9e25PdJf4v9tS1J9MlAMpAMJAPJQDKQDDwNA1nsP40rEkgykAwkA8lAMpAMJAPJQDKwLwNZ7O/LZ0pLBpKBZCAZSAaSgWQgGUgGnoaBLPafxhUJJBlIBpKBZCAZSAaSgWQgGdiXgSz29+UzpSUDyUAykAwkA8lAMpAMJANPw0AW+0/jigSSDCQDyUAykAwkA8lAMpAM7MtAFvv78pnSkoFkIBlIBpKBZCAZSAaSgadhIIv9p3FFAkkGkoFkIBlIBpKBZCAZSAb2ZSCL/Qt89m92hbeDurfnXhB1sbvXNZnCbxydvGF1MvV5uu7H5eLfdAtGM8+Tt87CUG3uxTe++VbeSlw/j59/VN/KFr0hd8u8leLvOsy8HRlfqOY48vbpW4HX2o5vdNY3JItxKs/3wbxr4yWMv4k8tBm5EJDB55C/Zb6uxvYGSuopnTPg/Fp7QdXYjsW8FftgeAEuef1434ECaM7sQBzR28+532AAwcMm4fTYAPsgrginj/uRCsRtOdIZsbwIm84ZtUY83K5jFrfA2Wp/z+QtC/IW+XtkP52PbL1e3hxf0f8IH80srTytiXvMYbC3rY3jKYpL+aVbY1NpP7Izi/2J29vihUCncxq8ZbHq8UTYha5I13TKXsXnVMl9O+/F5SXUzPVgkx3OvQPfm3A4gNGm44Y85yEl7+YDXEfUhuKIk7weW87WbYTEUdvMnR+NPNOHmMrmPiqkOoLbpocX4zN5pZApGO24TracGPJXNmfNSVbe2F4RHHwSLy0HItY69mp7QcfEjmVBXdaO2cU8SLfNmR3oe7ZHY46EMG9UwDQerOj4qOA/mELE2kGx6WXyucPKYn/KX0EVy4uwxVbg2REPe+gg2aO43eLvqbwL/kabfTu0dYO8KT5W+ggfeevs8cjfdtTgCDkZDFl1ekV+WUhX209WSf1xg7LYH7icF/Sv8/L1eeySMU7hxXBjkK3VVRLeYTkcDsvp47QcDlBM8MIqfRr0kizqeb7a1jmStEheS7K8sE7LiTahenXeCiXc9LjPbojICy++Ol/xmBHdwZBLt5B5XN10yYbjx2k5el3Vji/R0vg5LqePIySGsvmKrQZrSzJr+XZ3Is0mL0D0M7I39AlNafjJL8WHPL/aXfw3sqUWGL+OJXZ+KwZp9bJKT4ynyDt9nhvvpF/H2hhrsSXKgk/0qe2u2PmpB7UnMWcnDo5oTagMjp/2RKWslxLvdlzhX+0aCK9+Oi7n38QNjp/Ic7EaxcVQX+0Y81cL1ZqjxvZe0qD9RgbH5ZX2qqiuZeyY8eJyQidoxQm0w+itT0Ykbmkc5YVLe4FRWXPH6bfzO1/AaPzZuCqxTsU/YjNyLxxEdnTyhtjmwmMeBpg36kAEbIvsrTv4G+VFPIm/EYNtD2yVfbFdCJZxl+U56VRriL0b+bvKR1Z9dxTL6oYNThQOsHawAyuXdf8ajhvlF+JHuLKC82jAwFsX+//999/yzz//8H/U3vLPJoVeAi+Itsi1n+dJAdo+Icnr0Naa6yqFiCQQ1ivFBCcGka0JqQmuDZTPbVksLbFosWr0yDheeFrAjGwvFyWX8Xh8Q3ku0aMdPQ/1Li7bVLGiffIVh2qT1Vk41mJPL4J6PZF9LsER7iA2xG7jA9kwhGuD2RYMNA/9g23Vh7aMY4KxGH5VF+MzMSZ3yKs8wcpxIVyVPsEkts4/Z3PQDmqfljNtiuYiZy7d9DJWieGiVzcawIHxQwL4WHxuJMYH4XzR6+QZ/uXCDsbGGuAs4Iaz0tQYn9grgy9+oj9g8DX2wjTbdHZMeCmxqTckros30mrtII5QhnKmCFnnZD3rSGyRHoibkCfor1M9HpQ4bjv+YGAsz2GD8bPmiIc9dZB+9MHt/rbyPFbUNbNd+qL5l+JH5o4+YwyP8NEIUTk/8vd0ll+7brCxlXNyvwbMFL9uaM7HeTn/khxwYb4R9jMP3rbYp+L+r7/+Wv7++2/+j9pbCv5poHOQXrMpz4NsqssHOxzzPCm8SEWw0OyYflNoyYvlwsIJZIkVI7xWV4xHZLTPGZcOA+o1SaMW8ly8AT8dH05ewzCaTwNA3tg+X0yp5Khl5Ux84u8GgrDmNzhXmoill43DRzL8eeXayQNuSK6fh7q6Nvtdn1b4ftXJkpcTFflScPlY9ZPNccFsnmK5gm/BO7o+Hq/SZeOFYUzk2Ti4sti/wJ+9c2wLXGOv4So+YJzEP+YaGepiwOoVPiCvyDz5DOyY8WLignXLhagIHH/2drh4doWmSOJ5Enty8uKnK9YmcYCirlpDNDHg77I8hw0nTNojHmLM23T4+LnF32yK4X2dvycUuBx3uzxvr+rext91PlJtUWskKxpbzhU+9CaKH0k24Xq9NF7yB9RaNd5FB2OM8pJX/YOP37bYpzv6VOjLP2rTuWv/DQOdg22yeV2rSO7sjjYTk6xs8DPG9vRArnT9woBjubvt55Dua
NNui6gsSrmryp8B3ot4PDeXuKT+hsH+wZDdYCBpgB1dIkB5tVBAmziBbOa7JLIiDzn3RtuvV0jhhTgMv4gTuDD245jq25IMgZcORunDu1JlSH9e14KTB1zTXIOp0zc44fmWggtsLXdjcc31GAfS4TTOcXZgse9seu47+1LsuXhjTnu+ZIOUmOt9D3QFzW490ZiQL8DD/YglEEynMA6ojf7HPjfdFIOub3SIdviYjeRp/I8kRuddsRby1PPi8USSw3MDjmJ5DlsosD854mE3HWxDzwkiifyD/aYdyPNYr5IX5Lib5AX4FP8jfKTaotbI39FYPudjvBuIe6TUK/bJWjfFyyTOMDeszS+d4J9zIov9C76OAp3PydcbBvPLGA3kUsTNE1ikq4n3wQ7HV81jgb7IaVr6TRsWFeuBBTbSOzoPWlqTx17gkgsA0IuJ2SZZsAv48fP1GAs/gjSYT10gb7V9wF0zGBosp9kFumFM1ET9av/EFrQrEKgybKc/T8flrrrDCtyQBD/PSh0csQxZH0V+u4Pfpji9WJy3MZcbaofHWjahUgy7TZY3ZCheL6lxnHQXKijPjbVxcUlR7Tf8yR+U9nitb9DelXpoGGKXac6Gqb0yJ/pEO5zMGS/WrkhwcA7swDUlecBfBNkxgbzwlIsj/5QOMOD0TfaQAOQPBMbyPDaYMGmOeNhDB8u+tB9ckWNG8qwNPndOjK9d3tat8kb4FMEjfKTaopa1LRphz10ev8EmlwtwP2btg7i3yH720dsW+3f7Gs8gOe8RRvNFYhMSJRv5Q02f4LmPisjJAmBdrdCkxVevrP2iInvruGhOX5D1G07D40layyWPk0KwYBW9NulCMWjsKHPwLnexyXJaihh5vGj7VvEdbeTBkw+hwfLp7/SPfULzpBBR+y3eyBa9oysIyqfFoVzxedl4mU/LTZNnuPYFtNXVjiCu6BxiYK4HvPE46TNx0SS7BsQE96h9Xq8tYi2fM0xOYTl0nPgC0spDTFZvKJtOTvizdlgJyPNsHM4ynMsTF/GBDLzKXpl0wQ7zNSvkxfmUdUt+ANmuObWDY6leHA3k+flO/OCQfIvY0I6yViSXoQBd13g2aM/iAIbH8jw2mDBpjni4WQf6wOjf5u9pfKOugb8NBHfQ2bpFHs5x8vXwET5SbVFr5O9oLJ3ruAkG0pgW98y/7C3BYDrFY/DmhY0JltfqmYGMH376bYt98isV/Hv/gS4Hlf/6y05BdnFR1UVBTwmOn2f+Y8X+12bgIoCTyfjpgrFFNm+/qEhGs68UJeUpxWn5Mn1uJRnduEh1nNEvnDZdOo5aOrb+gWbFS+el8JWCiovQyA75agv9iozoQZz+VzCu5ZuAojwplK0p7Yj9LTjqWbUTkqEUwwFHLEMu1FC3scUmxgYAGqhX+UTeBzyTDMc1+gTboI6bgr3FE5/FGNPYHWFqFxz+QssoszJRFg1T27Eo457yNwLEO/rJ2WtUyUE4BnCgPJoDsdY2wQsbZ8wf2qP8If7YXsLm7RdjnEyPveH363xgr+NmZAdrH/Aif2BbYgcLhe12IA6NK+WA+yVPDm3W8aUV4RnwAlP9uul0w1jE3W4AQT81vbzS3WOLx1lhIyzx3PU6aL74s322WAPOzPe9R7a5mJ3kTtKF/o7tsBxEY9APa+TN7RV96/mTGfS51keRHSgnlOXWrx1/ea8p48s48bPPyVam5MdJfrmw13byfuCJty72f6A/0+Rk4Mcy8OfztJz/fYz5Xx9+47mT3n/Py6n9NOiddFSx78Lfu9hhvf21nPBCw3bud/SIeNtbR8rb7v+N3D0s/223LGc6BrLYd4TkYTKQDLwiA3+W88d5uf4dxFtsfVDhxXfoHnUB8y78vYsdLi5/n8zdZ9e72+EjLpT21pHytrt/G3ePy3/bLcuZnoEs9j0jeZwMJAPJQDKQDCQDyUAykAy8CQNZ7L+JI9OMZCAZSAaSgWQgGUgGkoFkwDOQxb5nJI+TgWQgGUgGkoFkIBlIBpKBN2Egi/03cWSakQwkA8lAMpAMJAPJQDKQDHgGstj3jORxMpAMJAPJQDKQDCQDyUAy8CYMZLH/Jo5MM5KBZCAZSAaSgWQgGUgGkgHPQBb7npE8TgYezEB5ucqDfrf9wbalumQgGUgGNjPAL3Aav+xts9ycmAz8MAZWF/v2bW+uMBm+5XDEpn0bnrxFDd/yOJr5XOcHdjzi5SfPRcQuaNa8zY/fUFvfqjh6S+AuYDoh5Y1/F9/0Z+b1bz803XRAb739dV6+Po/6+vBu0I4npm8/3FHPDqKujYctKjmG2ls6t0i455y1b6O8Dwab87PgIpafNl5uWtffG2ez6KUYxLfR9mMxL+9rx6r80wN6wJlSd8x5eQCMoYp9/TBU89IdK2qDne1bV+xTQQIFLG8C7RgDDxfeDCnOmY179r7Ijgz0rV67Nrm+RbG/layt824qCrYqfd55T1u8MWXflUuKXsz5S72h87wFxmNi7Gnj5aZ1/V1xtofPCvbrbsKs03vtfrRO6h6jorpjD7l7yXjleNqLg0tyyIePvYHy/5iY56jOMmdhAAAAAElFTkSuQmCC) ###Code var = input("Digite um numero, obs menor que 1000 ") if len(var) != 4: if len(var) == 3: cent = var[0] dez = var[1] unid = var [2] print("O valor do seu numero em centena é {0} em dezenas {1} e em unidades é {2}".format(cent,dez,unid)) elif len(var) == 2: dez = var[0] unid = var [1] print("O valor do seu numero em dezenas é {0} e em unidades é {1}".format(dez,unid)) else: unid = var [0] print("O valor do seu numero em unidades é {0}".format(unid)) else: print("Você digitou um numero maior que 1000") ###Output _____no_output_____ ###Markdown 
![image.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAA1YAAABVCAYAAABKIHVYAAAgAElEQVR4Ae1dC27jug59e+q6mq0EXcPbQLqSQZcxwOBtwQ/8H9KS4+bXtOUA98aWKfLwkJIoO43/s/S/ixn43//+t/z3v//l/+j4kn//3g/Ly9uHd/14e1leXsp/r6fln0tcflBtrTT9PS0HtX14Py3Hl+PiyP4cAZe2pzbDfFhOf0Vz8sV8ZBtFr/v3sRzd9+PyQfr9WkGbbIM+EEv2Te9EX8gelxPEhNoP78b+v+X0+rIc/yzLMvJDbRzfT8vB7CDOt48l6fss3+Qb6sP4gN92yPE2HNoYfr6kvGPZAUfWzhyg7eQL8GLGyyfaDT4X5sPyPdqLvsI1cojHxeRi2EW/5QjmmOUsxjhj4lizYuoXuZ1tZZ3hh0iF77U/9MM4FX+zLT0bykz0URfItTrfVLxmb8xf5sdih+N07O8Wf0UncmFgPuNvkZ35waonvCwLcPmiY547XO4H4oi8MgcXyVebJ6m5+BGSeDTCA9hHXC7CN8adsaFtMIG4XyZzzngcrrGN5cDYMuBBL4/77rcReRnjPvIWOEvxXnNlaLf1qR+2JtCaof/GfthV+RzJYBwwf0aypOUcPrW0mtdm+hDhLF9q33qOOux4pWsz78vaYEpWnyJn8xPm+kqUGoY2MSds/Rj27sZmIDHwn3TWJ81AM9AMNANDBv69H/2mwVDgho0fbw9ayP+elqPfOLihAwNVP4W/n+JHDtHHcpxsrLLclWePyLdb22h9lwf9Qu4eNv9d7ln3bAamDPTGakpNX2gGmoFmwBj4t5zebvPk2DTOPx9U5PLTgUdtFn8Kfz/Fj5J9f47yJL403/r0EZvSW9tofZdnwWXcPW7+u9yz7tkMzBnojdWcm77SDDQDzUAz0Aw0A81AM9AMNAPNwC4GemO1i6YWagaagWagGWgGmoFmoBloBpqBZmDOQG+s5tz0lWagGWgGmoFmoBloBpqBZqAZaAZ2MdAbq100tVAz0Aw0A81AM9AMNAPNQDPQDDQDcwZ6YzXnpq80A81AM9AMNAPNQDPQDDQDzUAzsIuB3ljtoqmFmoFmoBloBpqBZqAZaAaagWagGZgz0BurOTd9pRn4EQzIiyIf9F6kH8FYO9EMNAO/ggF+MWx9cfev8LydbAaagTsx8DQbKyn+7I3opQjkyU+v7XqJIb4x23S+LPGW9TuxeXO1Ez92cXBzMN9e4Z63wC9/jp4nqzfC35UBeVP82TfEJwyUH2eKAvXn4/2wvDwib4ZvsE+gn+bks/lwCXDOoddHvf/qswgl545/PtvvNvJ5zj+Tx7cx+fRanjZfrhrXX5tnW0GnHNzOf5yXb+vHrvlnC/zdrkndsc3L3YzvUHzbOOww+A1FdtQG39Cr7wL5OTZWVPxB0ccLrp/jIMdJboti7LMl9+zXRn70pHJp1D67kP2IjdWlZF3a76oC7FKjz9vvaQtlpuyr5hKxi3P+ojfPnreYe0yOPW2+XDWuvyrPbhEzwf65G1777H52Pdqn9RZSo7rjFnpvpeM759OtODinh2LYN6vOsXSv68+xsare0UbrRZ9alQl938JzbmKQ6y8vo6dgeA2enDGmePo1LAAKVnz6YcenN9NxXD60mCAc44l77IdPyNz/sBxeSSfyBTaAW+aOfT4sxzd7gqELx+thIRziF3LwEpte9e9ETz9Uz+kvyPpmeFnYX+PX9QIYOhzqC5nAS/7EJMH+K17hTXzweE6eEDhvbnvAkz7h+YfxJr+sXeExNvX3Ujykivoy7tfjcnyFPIDc8NgGNcuyoM/CzRoH6C855nYpRhg3sMH63o7LweKIvA7xQS5YPiKPKQ8QP/gN9m+XH8uCuWRjjf17/6cWETtwgnFPPkc+ImQ+dp91nAFvyLvhqP0zLuFJxqUev588JqQjdMJ8lZRmruPJPehGPym74Akn45nlQcpDmz+S8fVJseUCs3a1cSx+cz/ucxA+jGdqG+UsYtXxhrwe9syBinHPPI45h/OX+2sHjndnvigGy1wbJx+sL8d6uE6p3cgbjNtkHOhcLTb2jCfILYufPRl1f2XuM4ycZ2leN4L0c9IvSSnOXWtUGs957Dg3aV5Gn3aucQRugpv99fknz9XGybJsx+No60eaW21ttbUlnpjPcxLy5u24HFHfBP953kECddg41bzI4w762DxkY9lrAIhDycu0TpNNjV/UK6Yf/EVf7bJ/7pFTPKP5ifHtrdGyrbQ+DPmTnDms5mbUY+sUtsG3t1b4KN+sj5PQB59k4Ck3Vjz4bfDx4IiJQSapPAmufZbJKCYnlJAE86TVxDr9JRm5Zv28uEgyuqgYPlTNcoANsevAEN2W5CrL16Cf6xz5AW1sb70omm+8OBjO5IPokIJasXhxvcGP2kv6bRCifjy2ydFwuG8w+avtVdytOLcNCGxkojjUBcnxAz9oS3UI9iyTeIKYefxJD7TTKV5L/QtWWxgtpxAS6zAfE7cb+FABL7oxCVYciU/VzziSLxTv0IHqWV/BJ34IvpQHFl+2o7m8kQfIn3A0yH/FbBu/5E8ZM4x1kh85doRd/KU+4sNGzjtXIuNxpHbPOWANebZCXrlJ+JMc9E95Su1oV44995mDiQ+gErnJ+Qi63U/piPGZ54GOPYs94xnnEsDhjaDlDrbLBmHUX/1Ouahy1WbiVftpnBL/yp3EM8sZ546RdaI9m3MLLtY5ntNzDMDrEd6z+RI5TJpWsbK8rNyAWewTGzPxZ+g34wTfLOYwB8V4YlTL6TXzxFwjl4ZddTFHrhfA0uFGvySpfJoPkrsau6RjPscyNynX8hg758caT+Q05iDydVk8DFcZhxATwuJ2MD/LOpW4x7GRODtX+8QNKfRTYpdzYVx7JOZ0QxrrAmPk3JY8jTiETJrr0Q+bi3VsrP2NGCGKsGkb5JFcmQeQM81HmWc4GmnTijhSDnAM1S/UN/Kj5KrYynNE8oN1azxW+ND7Pr6UgefbWKUkGgzkMjmMHZdJ0+9a8h0PGHypEyQg257JQSfCMFoAan+UK7gp0W3yj4UNbPDhyI/aDwZ6tQ9c5kGLi3FZTCsEnKRBH4llncBj1YE84LWiT4o+8Adk0VaeJECID2HSLZec8xKLxD9gRZtpwi6+X41Hce7Cl3zKnGcc67gm/aP8TbrrhhV43cizxGXRhxwmbqucnd8oP9xv06ufs/aUh54P4H/Rk05dXlv9fCMeSQEUQtyOdouOEoe5P2hgrY8XYscpshgf0hubyEl/wOrzGpqF4znOnM/Rpfi9USxijnF/8osLD8RNV/B8rT9s0xHgcn0ikXwp8UAdyCe2D/Hy2FxjClt4Df0gnFbAkhW8hlZn7ShDx+A3+BY4snxuRxt4nPug/9Q/8qzI1dOSr365zBmZd/CnxBHnrewHjsd9fjiW0QHgDjsbepMOwF/8RB7TceqfT4Kbar+eQz/AD62rjW/KndrHuRc75+YLszPEC3nJcmjL7YiGYX++NMPxufGE
fnhsR3GyjRDZBvyBT/Da/7kd12vwMY8ZjBvkiinyT5Cr+FymD65h4Lk2VhxkXBjWTwp40sDEHHpfB0QR4gFnXwOjTy3my0DEXpzA/kgaHqWiEAwSboYBUCc7H3gkWPu5zjN+1H4r/DG48gDETREMMrM746fYyxNB2CI1F/GFC7ndmUHO4W4TTmLCH8az5JD65Zyv/Cs5oJNY8g9jOdhYfR7PejLfhc9ixJ9rzgOH6M83F6JwYd+U2+iTlMedTm6GPNnIs5rLW3mA1+KOHmAo+ZYW6935sebZLDjf1LDKCZgTfFGT8Sicjm/AzBfB7XgYJvpMuFJxDDEgwcJP7gcaWW40PkDf7vyGPjxe81ibYgA4c5mcz9EFbUqr6xjh9nghRxK7yDPUicdqdVc+lFileIjONP7sSVI4JjenEK/7M+hP41V1+NyUbGJ+RrzX47tyAYBmfrsdwbXWWbjYyFuKXeJF/feYAhw8nPVDmTomnCcWgvxa+WlrwNq/wCXXLId24dlYC0PvNfFQ7z1vBjejnaBBTnE+VfsX+On5YcaC69Wc6LLZjvWMzxle6Oe6tBfwUGuuyIXqb81dQyByKVfLV+pFEvBoV4/tCB/WNHwcN5MxpyzPGPeqj6w/boftIo7gny8xjjzuWH/Fp/j74zoGnmdjxRNdJJi7VQK/GqQuiAfrgRNX6zVIwGLL+zA2KKRw8LoQLuLSmLCWPmlAzOwOChc0VxeR8blwGpMKYONJFQcjXdvPT9YJPH6KL4g58wB4oeBAW4k7XcBjoa/+BGPer8QiJPJGHm3WSZp0WZHjelmR2P8UHgXgerbwJbDA+WZBnjqVkx18cQ+Qq/kKcUs5uDcP0oYa4KFeaoZzjs2u/Jgtmti+kfOzWOxtdzngD1wcHXoe8EXEVnSUOOR+pln6jPMR9DlO6Ue6tvJbFn3oz92qLcNQPostvzprn4xx9qn2GZ3vfGJlhczmHFj0J84hHlv56f7SQdEX55Xb1Cs21dTfN2x5Pig94HSmG3ONxEEf+JZ8Bq25HXWBPcKLN0bB/9wfFNPhRr8kCTipPc3h6A/YTf1X8yjOE/v8SPo2cIe/oDd1Rg7pwjge3AX9wWPQN8/Jah/ON/CD6jQ3czvHQdf2isd1gp2kTE524R3F29aFYjdyodqV85gjDQzwbU3Dz9ofzgu+GN9DRdAYtgM3XNbDyCFqQL+iv7WHfyBX8a1NdMsFDDzHxgoH4coJShC7KwoJu5LDBuyD7XScr3HS2hOr0TUapD4RUH/BMP8qYMbqcmWQpwExTe6MtXqSili+KPI2gMiG208ci5wsyDDIQIcVGYmfgjMPeNIJE6kvnuf5MrysTydFPLaYWQGRuKtFF8fKYpAZi36ZV/FRN84Qp+Qf67VNIPKHCy/ZKzm6gScVC8ytfc1zA19yCTgfFAQjDonr5BfjNb+S8o0nJ4LP4rbOM+BykgcRC7JJ+uDGhcFInOQ7sSPfxvmR+1kuUX4HhhHfkMuck5lrjp0XtAbYfLH8y7k/wmwcJg04blP+lLFaxmP4g9q28hH0fTK/bX5IsU86EEM9Vl6QP4216c09VN4KJpat8dEeSU+2k/jf4nW0DuDcZjjqmIN4JFuqz/Iz+4a5l/0c6Yh8UVlfG0Urx8N4TVwUq7BxjhsWiEXHjfl9xjcbT77ezPjldhvr2d9x/irujX7JM8BJ7cyh8cFxsLlu5Otg3lIOhXfByzm6F8+GHPq7jhvhHGFU/MXPVLDzNfNT5jnKvVE+WU6ma4xZ57AN/Gvebf0qc65yKGNbY84xAT6TMjlJmNIYgn4j3TY+Cbsdl1xgvu0a+xh8IZR1XGxuRyn1yfQh/3xs+U59RjGV65gPaU1EfTrnmF+5D/CScl3afe7A+K7woV99fCkDT7Gx4gE0edTJjnHw9TGmT5JYGFX3c/LWq2jv8P4Bf2Rrd8XtkakNCB049tj2D/0ql13L2kP3YTm9w8AugzwNiGlyb/ux3ljN8AvGwHZc+FeTJpNbyNFECfwUnCzn8SCsNjnt5Ev1xS8bIafiu3/tCvhL3JFrOlGw7NsHFMw5Nqkf5pTjLneQTa/6SP0NT/A3yEPrR/mygYfQVZ0++c3wJZeMZ+E9+adyoT+eQNjmz77i4DaT7uqX2PLCN+EbxY3aDB/xdlhOadxgfEeLleXycbk6PxLPsfgjX9Och7xLeTYZ/0whxJ9/yc4W3ILDippCO0nxzSTJ5xPMT6MYBPfoT9IJeHI+Zn2RKzg/nMmDFGOIo45t+xW5hEdPwp7mB/+A0EhScb7Fr/15HmJ8rCv6C9ynvH/b4NWKMF2T0hxY7CXOk88QQ8qV0s+g8ifg/Uy+SM5G/EUnjrnI9WSPT7Kc8TkdB8k3nLfQBvg85Rft5jkhcbkCPO+XRAvO+Rpl84vN6bZ2ibbITRkLq41VyvvsR8KzIZf9Rf9iHO2NR9pYEQDIKSvC07yyykmw/3pa6Fcv00boXO2jvI/n6hkesWm5l3mjM8inhDf3C462a66cC+BvuTmRcWS58VqpeEbzU8lH1s1to7xDf41/RYPxhLVnlEM5bprX2B9rkhU+wpDHQuajz/Yw8BQbqz1AHyrz97Qc4WdQH2r7IcZkIhhPEg8BIEZWg/qBttvU8zPQ+fH8MXoIQi1c7Oe6b2GTc6sLiFtQ2TqelIE/xzMvP74h7l89V99hfrphaFrV4xnojdWE83x3YyL0nZrxjoU+Sfly+L96Mv5y9p8fQOfH88foIQhvU7jEUwi5Wzy/U/4Qp9pIM3B3BvITjTua+9Vz9W3mpztGp1U/mIHeWK0Il0FCX8PphXdFTjc0A81AM9AMNAPNwFMzYF8r66eyTx2mBvcjGeiN1Y8MazvVDDQDzUAz0Aw0A81AM9AMNAOPZKA3Vo9ku201A81AM9AMNAPNQDPQDDQDzcCPZKA3Vj8yrO1UM9AMNAPNQDPQDDQDzUAz0Aw8koHeWD2S7bbVDDQDzUAz0Aw0A81AM9AMNAM/koHeWP3IsLZTzUAz0Aw0A81AM9AMNAPNQDPwSAZ6Y/VItttWM/CTGOCf2O1fnfpJIW1fmoHKAP9Mvb8Ivl7t82agGWgGmgFkoDdWyMYXHPOi9Xpa/n2B7ac2SUU78zJ6E7j+JP6TLvb8DrQzMSUZfkEzvV/sjOyzxolyd/uVBBIneRH1Ze/64PFB7137So6e4h0tl/G3K3eu9I/znWKU/usN9y7uN4TSuxS/ap7weXgDKL8j8bh8kMgtcV6ZlxuIr760Z46/2kgraAaagW/JQG+svjJsvGgdl+NrFyGrMPiCThsrXbRNiBbvt+O63a5/8eeeRZdkfGP1pBvE62nEjdUl2qh/j41LmPtUnxsXsP2E41PsT4XTxmoq9QQXcGN1Szg3zstbQtszx9/SXutqBpqB78NAb6xuECueZP1u7f5C0IrrzQVU7wCe3uyO8HH54AVHzuVpgDrBC5zK+R1+vdP9dow7ylDI+xMBwg/tfOfRfcpPJsLfw3J8O0A/sWV3rv1phi6Qp/eDYiCO7AWGxa7H42M5Mp5/y+ktP9GzJyX0ufL
/9bQkrlifFvivYl9wjbFWnSk2yG99gbRfU04K/ytO/hzlac/f03J8P/+8chonyIWXugF1LvVO8iSeLvaZOG3YdayvdNPAYqR5+EetOV+Sr54rDmYUn1HbsiyM5bAcXklX2YQvavf9tBzUf8oZx5jkISdxPCgvfEee8CH2EudDyjF3Rg42+oX/oye01H3O35HGtuEgG3ZMvWjM+bje6R/CRsw151HOjitX1j75jDjgHLMV5+PyuXmEDE/8TpjEZo7fBAdp9PkY5i/1PeMDI8glxIgkYk7V8YKyFD+L64pfzJeMF/MAUDD2w/tpOdp88PYB9vP6hX6muZZx6NjltWX0xGofHsttmyMdd/UVbLpM4e7lJfAT9sPb0cc99sk5AXMG8l7z3a9tz/FhB2ODEejjZqAZ+MkM9Mbq2ujyZBsTMy9EXshsKadFxxYBmoBDR+qlk3neDMAiZv140TF9uqDZxoQKTlvIWZ/K0bG1c+Gm7UmXLvoml65pwaL+Jt/Rji6ItjDLYj22lXwfngBXpNdwkaxylewgBxAXxmB9EeuKEy34kt8jTqwwVO5V99TO0LdJ4wqTxVn4t6I82UJVW9hXcrYRsuJxFKe5XSkQNT9T3IUXxroXT9lMJP8wZmrHeECXvGhLsTYfBZPkCx7HZu30146zT2k8TnIs4UgYNUegX2Anbi2+qAH4041CwoD+2bEVnWDHxoZtRlf+ocndcYpOFCO3Ec3Do7TpY1vC8bk4m36WM64K1jC4EdcQIqaWE82TdY6wc8y32XjUGJsO9s9ikfAVW0lfxD/x4zKYB/mrd8yH4U05khzVTWHOZ8OMOtb4bY6T8Z/joPocp84fe/GM5CAnZCMU44Jwsn2yZ2ugbXhVl+RH9hPHjI055znF6JNz/Ah/pr3PmoFm4Jcw0BurGwfaJ+lzemkSt0VXFwSb6FPXwcJhC5oUR7JwpEWQFPgCVwoLXnB1gXKZZHF9AnLVvzjPxbYVKuzTaMFKC1EsmGvjpYWweF/yDfrSNVhkg5/KQSlOtKhiXhErH+vCXGAEv8i1CjlfG3aqvq1z11eEpv4WuXo604e+p6KcFETBJxtY4AV48oJHbcZ55QJAzfCkjVXtL+ermIFaOQQ5agCsdBr4akfwF/tUrB6DYqeqO9Mvxj7YTTrA/5Eum0vKtRifSVmOJ/pXxfC86MZLfLxXDwuDP0lRbQdeWX+M9+zbjLekPPudLoEdbid9tpGgBsA146HgwzHDWC1GpA50zHIw+VfkbQ6c9U14k5+jDc+I08oHjJUaZzwHnNks8JcvlDOQ26U3d0fOiBvjKfGBenP3fIa+4DFJ1XPvCfi9rQ+agWbgNzHQG6uroy0TqX+NAb9CtKGbFwD7KoZ9+oYBOpYJPC2ksEAM9fEmo070uQDBfr5h04Iz+aRFQV6s9K4e45ZCJPV50buKgJM8w8UPiw/wenrI9o0v/XTchasookccYNEERQMUUBmnFiRoWzlhuWHRVIsztDN1cXhhGCfyF/HwcRRJqGjFG+I1wb1xmtoVnj0eaeOSY7ALD8RC8mQSs4Lb3JHPbDdyQq4SDse78ku5BP2rWPu1YieDkJxHzqf98vgMNaF/hQHzHo/rWNvhX9gLflKOoQ9FmHGN5rAip5rLxsWENsaMcyay2d6MNy2C0zgZjZHgF/El320+M15Vp+dPwYdzG2NNGOxrq2LXdRgNZsP4xLj6cfGZ7cvX8wx3bNhDccp5vGGSbAou0+OfhIfzaHxjJW04duKRMTnADXyuct7dGeBUzrKfEN+K33VdMsfbzZoBftDbh81AM/B7GOiN1ZWxrhN+XuxnytfFAy7CqZcvotKaFou68NginBTAgsLtZTF2WZCrCw9gqP7F+UyvLTyxEEcfMr7Rz7HpAftbiiLgYLXgux/gG6san1txY/iIay9MXJdiAU5SMUGX/dq2HdX0yQ/Q6XbOqNjCjl2Ry1RkkRDEacNuys/ZxmovnrSxAr8Zs5xzzApudCndqaYLRTbwkn8Qb/QX+1Tf3ZeKL6OInND2WT+2VXKcu4D+EQbb8JRr5J/ctd/pH8J2jIDZ7KBcxbe6NmoAf9Ll2i7nozjbOJXukJ9J34bfSa7anelLnWiU8FcIR0/mJdcklhlr1hE5mNtTnxRXxfYHv/UAPLEawJXVlqe02c+wOe9fx1A6d5x78WzIbY079YnxQk4G/noTC/xBvcjNVr67X9rBzzfwo+4+bgaagV/DQG+srgx1nthlEY+vH0yU1wmcxeoErX19ApfztAjjAlEKMi6oeMGBBYVVxEKKi5AUCFrQJXzS3/8WK9nJ/kYRZwWsFqqIc6tgV5dnH5lrkxIMXNgw7iiOA0/lQO9M2oLM/aCYZbx2R1ntbHHCRbjZzXxFHOzOOdgxF858TuOU7OqTQPwqpOndxG5CFrM9G2Dg3OJpdtGW8igbVogBymhh6vkFcFLRqps0l2MdymXJr6QCC1+6UGQpPoJv5NNAv/okG26NNd/QAP8yADk708/mDI61/d1Q0oP6EatiGOayyI02VslO4cTN7o4T9SBbn8vtGJ8WF+k/HTMFZx4XM/vIlY2REU7kVxhY45Mxnu1SP8wTyye1ZXFh7GEXfWR9JgdjOtmhWLhMPFmR3CW8gt/POXY2J3lE+YBsu1yJG9oc4ZJ+2Rb7guOfcWYZuek1wrMhh/Ee8Wc/uuG8YL5vbKyAY2aOfuCFdGzmO+aRYJZ4bODPtPdZM9AM/BIGemN1daB1MueveRyXD1gA8wIWhnghGjxd4oXMFigTB33UlHTiwkMXdTGVr21YgSwTvz95SQupLhD1Ky1W7HL7YTnRnVHAJThl48G/gOW+TPQVnLh47y/IygJm/GDBzVzJL9ExB77gVg6oc8Ya/MA19wvaJpwg90f6Bbpi275Kk+2AEzW26VLGGkWRFaT2NZQo3FL35Os6ni77mTix7NiuFFqRH1iMpQ3JjEsHVOOWeXAuC27vzgdFR5HF8RR5TYXnR3kSYeOpjLMSZ8eUQcgZjk/vl2Mov9Y2imPxw3UdltP7uOimvzfE8bnbP8eOfG/kDckXXl3FrJ0FUD8W3ZP2omvvPDL120HSQeF3gC/GXcbn7Yrv6L8YCDlD+jxmMjb8VyZtDqvzsMnTPETHmDN8reSJyZOet4+8VoCvmPN1/s2cxgaO5y+cD9lXGf+Ss+or4tyJJ/GCuEu8k5xzsXf9LfEF/PF3uRjXQb6DP2mOh/bMO2ErMYI49GEz0Az8TAZ6Y/Uz4/ogr2Qh8sLiQVanZnBRnwo98YWdP73+xB40tC9ggDeyWPR+AYYtkx9vZYOxJfydr9WNwHf25Tdgf8B8++/9uPAvb/4GPtvHZqAZYAZ6Y9WJ8DkG8O6c3hn9nII7Sn/zjVUvwnfMjZ+kOt1th1cpPKWP9j66pwR3W1C9sbotnw/QVp/Q3dbk+h2Mt9Xf2pqBZuAZGeiN1TNGpTE1A81AM9AMNAPNwJ0YiK/9bX59907WW20z0Az8XAZ6Y/VzY9ueNQPNQDPQDDQDzUAz0Aw0A83AgxjojdWDiG4zzUAz0Aw0A8
1AM9AMNAPNQDPwcxnojdXPjW171gw0A81AM9AMNAPNQDPQDDQDD2KgN1YPIrrNNAPNQDPQDDQDzUAz0Aw0A83Az2WgN1Y/N7btWTPQDDQDzUAz0Aw0A81AM9AMPIiB3lg9iOg20wz8Lgae7B1nv4v89rYZuJiBZ38v2sWOdcdmoBloBh7AQG+sriX5yncn8SLmb5G/FswP6k/vhGFe4O319f099B6tZ3uX1rkQ8HvAzrwwdeT7Ob3Pdp38PPfSWuCCxsGnXzTN/SkHDj/7JZzA0zOEmd/9Y2PPP394DB5AfHqnEsX8K9YFn3s2HMZ8vCVOnt/PzI0bsO55iWPzFfG4p1Otu8ZmJVYAABh4SURBVBloBu7CQG+srqX1moWFF7HjcnztomQVBl/gaWOliy0vvJUruv6yfJt3kWBRsnJaG0a+z2S/c/seLjb8o2Ln05uxDX1Pe+lKnu7tVz/huA3DaWN1G5X30XKvfOT5vTdW9wlaa20GmoFHMdAbq2uZpkXm9bSc3vTpiW0Cdui1wnBzQR3p5wVI7KXCkhc8xeF31+QrWce3ozzdKU94uCiyu874hAF1lY0L4+U+h+X4doAnE/HSRXqS5JsdXTBP7wfFQJsj2RDNnzh9LEfGA2+vZz11Y7Us+WnHZzBokJjjw3Ign4w39B/b7Ji6pkIA/Ckci5zEReIAxcPIzjLwfSufUEfKvwkXK10b2F1W8+j9JDy9yBOmyB/wadmwC7mLXOQYnscTOfgS+Yc8eJwEy+FVcs9z0vyiPnX8AsZrxxfG/gVjw3ZLzhkm+gQMyBOLDP3EznIcHB2W07s9AdE4/jF54hrGFNhNeE28frI8xr4K5PPIF5gfZvmiuj83b5C98/mzqM2cF/O8Rdz+JHaID/zdiFPERp/UoizNe5wfp+Xfil+MV8brcxdAYDb4afCJb0DZfBv2IfbEnK9j5Qky4xjMYYaTbe7DY9wzFpxzq69gE30L7IQn8PMc8nb0+Qn75JyAfEXecc0if/yarnNlTlnhV/7SnFFi0afNQDPw8xnojdW1MdbJ1yZTXphwgzLVT4uQLQq0WMJkj31UvxSEtnCpLF/TY16ETJ/K2cbktW4YVI76p8VC25OuZeGFzOTSNS1g1N/kO2NDfbFQy+KN1/QY/R4dJ9smIBisYD6HwYqi5BNiJbVsxwo/5BKLGuUFOLYckP7mk+Cza+I7xmxkx3zb87nGlGxZLlYfXbX4Z30ydhdavBiyPGB9FtOsYxoDLXjNFnJBx9Kedc3xIP9bMcP4oT96rH7cZXyN/E38WY5UXDfImTRWlAO2Lcc2XqTgNBxi165xfAxvhajnEbeJADTzmLN8ZHwyDpIdzFMdhzlfFGvyD4zYhun9nzSekbP5gIQZh+FDHHTsPBB/iCE29mlOSXZLDiZ9MX4TPy5T4uXtBa/mmsUOGWG/bH1RTs1v9HmN3+amjXy8FI/xjLgZm86N3G55CTfPyJ75UmI28hPHtXHjPKcYjdY581/jpzmAnNmGzXQj733cDDQDv5OB3lhdG/cy0UshaIvDhnKa1H2xloVjODkX/TSpW6GBttKiSGZ9wZNFwfvgguUyGziTrlLMUrlNT6F4kcxFmRXi7NNoAUsLayygm0hYjz6R4ydmcpx9s8WQNEFRUjCkgrJwHNwpGr+OXILuFegoljBGLMY4ND8q/25npXCjAWwlqY14JLl6MtOHvttGJvI88nLDLvpOZuE8+u/Fg7mH+a79ncuCu6p3ObmQcAC+i8YX9GftfK65XuwmWMN+n8uZc3hjroF4V0wVRwKZ41cvrc9n46W2Q7yQrzTXkHbAvTYGLTM5sMPSG3lLvMBc7coLPsQ05x82Ca5IDmIuzflMOYkboZjvUEHlMa5hf8RIEmGz8gE4ax7g+YwbnHsDyuAIcO/Sm1UEfsFrPK3n/pirsgY4Q1/wmETquXcD/N7WB81AM/CbGeiN1bXRrxMuLg4bunlBgM2BfUVj1aXo3yz8qj6+s1cn/lxoIA5csHkxRn3Du3W4MEth4l+P0L6ss3CCi2Fd6Ff+YwPrgU0YcQN3L0XXZONVMCS7hWPGh4UU9HXs0MYQGQvanhTQ0G/LDrp99ph1qm3HvRGPqnCGPcmVPAI/SCzycsNujRfoiP5axGDuwdd9EJLHwgpE9x2L/oIbFdBxiX3CAfjYVsJEfFOxVvXD+Kr+4mag2E2war+KY+hn0gAFs7a7vXN4MYfpGMZbNrG2Ua7nU8mL2NDZ1XW7xwD8JmmMdxq/pso+mT/0Y+TDiAfsI8c2J2L8rQ1vDIjpiD3Kx5wY+eI6DHP1z+OFORr6uRvzkzGv+cWxSb2yjuBU+AisqpdugG3kYxo/O/EIbwPcEG/GhXnuPA1w6k06zxuWhfhW/K5LN2M4rtXmyj7GY6+fYKcPm4Fm4Pcw0Bura2NdJ+16PtS/LibqgufdcEJPBSwWj7Xo8N60XC+nV3yKkxfWkAS56gNgiIVYesb5TG/GSb2iD51t9AtwcsQLWi6SaDGNu8kbumpfPAf/2NDo3DZw3O+4fJBMeuo24djkzRc837Jj8p/8DG43uEg6SW6CPclBflA7+pHycsNu6YM6oijai6fk0ZTLgjv5hEWrXAgc2cfgtSqo+sH/ob+w4R4WjtkuW0M9Uz8zLsaL+r1fwcu6d2DK6gfzykqgNBS7frW2y/nlN2T25k+1C3FzbKMD6IfckSicz/OlbnTCRurj8aLriu0PfssBeGIVgCtU8lHKadP1V4TC5rw/jlPuNczHvXg25IZ6szM1rwN/5RX8Qb2ojji2eZ3akXM8Ttc28KPuPm4GmoFfy0BvrK4NPU/OUZhyoe8F90R5ndBZrE7Y2rdM8GmRxAWDj2PTwTi4sIIFhlVGAYGLkmzAoMDyBUf6++Yl2ZEixjYYyXeWU14Q5403VlJ0TPhfYYivUaYFunAshYTpVP89pnq+2oyYvBb8fqdf5O0ONXNk3CI+3QAbl5PMWTcPuE22DHeyhWokhnanm3lx7CgnfphcLbYwL6d5oD4mfMpF9N+Lp2yskn8Ys4IbXaLjEvvAYYWyfoWI9X9ufFluJn9ts1PsZlg3yJkRHzAfWJ7leI+4n32FimSDj4x/fLbOC+nP7ciL6WUfwn6er2b2Rz6McK7zYo1PxnS2S/1Un3Js8WU582OaL5q3JscbnYGdkh+MTX8wRtjNOSJPlWIOwghQX8Pom7TVxmqMS/plW4JF4+I4s8wcz4YcxnvE39uH3JQr3FkuZz/FjsxXg5wgHYTd5mKbf4tu6S+6ZA3cwI+k93Ez0Az8WgZ6Y3Vt6HVh8V8F9Im53kELQ7wwWcEbzbJo+ESvF3zhkvO0eOBCRJd5obCvWFhBggsMCWFBogsGfm2PzWD7YTnRnVLAJcUY2Tku/Itd7gv2g8W84MyFCuIRH6f/L4utyQme7K99pcULCsVw9F+9Mvl1cc16kUuIKV3L9gRFcEJ+f+SnhGxb4nJ4p1/nKrbtqyjFjvnnNp1nvGJ4NO5JxyQeubv6Y/gKdpcVXXs2VvaUdBUD0jXhAvN6k0vHo34jJ8OYF
dzQnw/vOr6yv1HETXIOsU14csw7cgbnA/5lQcuNlW7YeMC1ra8B1o21Q+f+kN9+gQ5yPnouzdqLLs4Lj/d83tiXP6O8yPh87ij4vF3xDecUchfzEce8PuFdjQ+TJx9LXoouiFPV//YBX8dNpJf2zFvmtHw1zrnOeZzmMMRp+Ck3N/AkXlCuxDvJWe7qRlS4028O6DWcQyzXPMdYt86RHguM93qdQ/tH+jVUw7DhZ8aQ49BnzUAz8DsY6I3V74jznbyUhckLjTtZuYnaumjfROkjldhPsD/SZtv6MQxgAXxnpz7eZhurOxt+tPpvP6c8mrAvtvf3tBzt1yLvBeURNu6FvfU2A83ATRjojdVNaPxFSvBund6Z/Bbef/ci6M8x3gv2LQhvkE/FwMM2Vr/oBsB3n1OeKkEfA6Y+obu11X/vx+WkX7O8te7W1ww0A9+Dgd5YfY84NcpmoBloBpqBZqAZuIiB+Nqffz3wIj3dqRloBpqBbQZ6Y7XNT19tBpqBZqAZaAaagWagGWgGmoFm4CwDvbE6S1ELNAPNQDPQDDQDzUAz0Aw0A81AM7DNQG+stvnpq81AM9AMNAPNQDPQDDQDzUAz0AycZaA3VmcpaoFmoBloBpqBZqAZaAaagWagGWgGthnojdU2P321GWgGmoFmoBloBpqBZqAZaAaagbMM9MbqLEUt0Aw0A3djgH+yWl4oyi/kxJeS3s3oEyjm1xb8kvc9PQHdDaEZaAaagWagGXgEA72xegTLGzboTe0v9kb3Dblfd4kKbublYzm+HMq7QfSnc5+0COd3pTxpTDnf6P1j+B/yuOfdPCiDxxckKXFFP3/MnL287HpXF/ngfRD7Bfa/pgvlb83pioTyXvm4iOP4eemXSzm6yG71Y3J+w/dqUT7Ul5RHnp/jeYKvm5uBZqAZaAaagQsY6I3VBaTdrAsVLq/H5Xi2yLqZxe+jiLk5Lf8WKjDLnX0qyt6O6/Yn8e7pN1ap0L5gk3rPgntHDHFjVQvqHd2/iQhsrC5CPBg3F+m5U6cbbaxsA4V5kMZfPxm8UwBbbTPQDDQDzcCIgd5YjVi5oM0WeHoSsPcFhFQAUEHAhUAqdgGAFiAnerLFTxmOywcXtnKOBQV/lcqeRPgTEymcj2/HeEoBthB3urPNBYnZzD4xXrZzWI5vhyX6wV1y5EEL8dP7QTHQXWQpHNknwBOefyxHbv+3nN5ogxX/rLCmz5X/r6clccXdBNfhVexLfMZYq84Umw1OgnvlpPBvT4i2cgNjEXJjnMtFnC4L26h8sy69s696P4A3wy5cQ9xow4vynKt0o8DyJj8tGPuH+vQrgRZq1m26YnP97/0oTzD/HM+PtaRjjiflkdnXz8h3wpJ1uOjV45Q0Qaz5xsH4idV5PMgpxnXNJeft62E50Hj2nFWvMLaEDsZvjHvBHPlKtoGj2ZhRvnBcG5dhZ4Nv4+rtg3Ma45fHsHAR+MxKfzYDzUAz0Aw0A7dnoDdWN+CUCwErVksxMldPBYkVILT4R+GY+mhhIoWBFV4qy9f0mO2aPpWzjQkVulY0cR+VS8UN4Em6tKCy/umaFnDqeyra0Q73iU2QFNiKIelLnk9OgCvqa7hImm0WO8iBxWjRDYb1RawrTrC4NX5HnNjmU7lX3ezryE7xbpZD0/4Xcppi5BgEM+cY65WcSpjwySHI2AaPN2LKf8pV5Tzp8v5i14tibjeOJbfsWuLBcZ87AL9IlPAhHosL27X4FZ3sU4zNMX+Re8l3G9OoI/mouaKYko/IpfNldnbg4Xjt4JLtmFzxHe0m3DjuC8doN/UpY4bsGv9oFrmycQrjFkXtmHizPLHNqcSBJEqOWaf+bAaagWagGWgG7sBAb6yuJrUWFjsVUtEBhQUVB1EMgI5BoeFFBBQ+XLiCPrkTTU96amFBRZEWUrPiBszzIcilAtnuYnPhI8VW+AC8jAosL5YAT7U7Oics3pdsQFFYuIqiv3IA2NgGXEeswO8KCnASXKuUX9uwkxRWObtY2yc4Uxyo75zT8cYA7IDPNdaGKnhdlnRc+I/+oN+VjA4AN+BgSYzLqOuwbWYXeNR+uTgfKuPG8KnIFN+TPvCF+2+M0/n4ic0UWp7iwRwA+9wXuSy4UTfGttqJ88oxxDAp002h+e5jpArl87CT2/Escc1+41y6jjX27eNmoBloBpqBZuCWDPTG6mo2qZDAhXyfQi4Y7Gt79ukbBtBRCpBUREDBNNTHd8y3Cx/s5xs2u1NsuOBrQmQ/NjZ6Fxo2Vva1MftknYCTPMvF0kYhBjTYIdtHXC9wt7pwFYXhiIMcs+A1ZDNOfcqFtrVIZDkrGAmo41jnRtgxj+hzLSdX1+3e/0JOmb9VnoGdopfl1Wcv+lEGj91vQR/8gX50m46pD3KKm3574sN9SAdsoque6bnYFhu2OZEYZ7s5r0PdQHbFH8Zcenqc6BQ4Yk6Sv/R1N8JVOYo8xP72BCZhH+HBjRVzbL4TIOCyxCz8zrhr3kRsASd3Bt0b80iMkWTRnzCd9y/6Ja71RpLn6urGUvTro2agGWgGmoFm4NYM9MbqakZrYbFHYS2iqE8uSFxLKXxSEVELtmGBVfFN7GBBUgsxwBAFlSCM85neXKBRr+iz4bcTAAfsbymugQMp0qGAdD8qB+Nz21gaPuLaCzTXpXiAk1WR6Ne27YRnVc6u1HY5v2azWgtktoS8Ip8Ggz8hviiDx+63dDQebTPgXLpe0gkc4xhAvSTP5yX2rmfngeOrvM77sw+waQ6fSh/XLe3Xj1PACFzsxrOXy4I7eVXtwvwSPABO6oxxIt24OUZbeAxG9/sXnRLXupmzsTy/aRH9+6gZaAaagWagGbgVA72xugGTqVjFwmKmuxYcLAdFM/YrBUgqIqDwSQWN3SnmgrAUPlBwRXFEBklOC9eET/r730Mk/6QwtidYax60aEacV2ysatElNEFxzrijUA88lYONv7EipYzXniRoMLY4YU7NbuaLMVhhzjrGm4PAavZFbtr/Qk6THXZN8VrRDHpJNheoumkFGeFK28k/87XEOdnl/uQfxM7k/amUXDP7iQcNyfkP0gF8Ez71M+dStoV6R3KmA+Xq5jpxt+IrMKFfyRbnynr8JBnNuyEeGOe2uRhyWWKWfJriFr7Ebs4fxmecsw92oyOPi8qX2d3vn/WQsWy+UWvSkTBEnz5qBpqBZqAZaAbuwUBvrG7CqhYN5StTqbgCO1xMWSEL7VKUWCGiF0rhk3Ri4UPiXETUX/4SbPG0AIvNjDuKE2w/LKc/p+UAd54Fp2w8+Jf+3BfsB0V5wcn9vQ/iATJWh6I7MIaAF6fMFfwqnRf50jc4oL4Z6/CaY6zya06Q++M7/v3clp3wYY5n0v9CTpmr+lU09DPp1QK65LUV6vw0AuVLruY4j/2IXKJ8+VhOr7ZBtQ1mzWfkzI43cmg4JqRf4gI5MLX8iRwcl4/io4uW9s+PU9IEHOmvW3JeIse2meKYbOBJG6sNLgtu94cOkl3dsKjdNO5ZTuJ0eD/BZhb8oc0WziNTuzv5BqCJa22P2MYmdvqt
ANDVh81AM9AMNAPNwDUM9MbqGvZ+fV8pnEabnS+hZlqsfQmaNrpiYP3T+SuRCxv8J9gv7N/dPsPAk437z0Bv2WagGWgGmoFm4I4M9MbqjuT+SNW0ecEnHtM7/V/gfW+svoD0T5osT0E+2Xsifr8N28Tg72t+5nH/+6LRHjcDzUAz0Aw8KQO9sXrSwDSsZuCnMeBf+3umzfhPI7n9aQaagWagGWgGmoEvY6A3Vl9GfRtuBpqBZqAZaAaagWagGWgGmoGfwkBvrH5KJNuPZqAZaAaagWagGWgGmoFmoBn4MgZ6Y/Vl1LfhZqAZaAaagWagGWgGmoFmoBn4KQz0xuqnRLL9aAaagWagGWgGmoFmoBloBpqBL2OgN1ZfRn0bbgaagWagGWgGmoFmoBloBpqBn8JAb6x+SiTbj2bgTgzIy1bLi6vvZKvVfiMG+CfYOy+eK2L9jrHnikejaQaagd/GwNNsrKR4e9F3JI0Xa5LZ9zLaj+WI71qy49fT8u+bRdh/otp84M/Dcvr7zRxpuMqAFD7HP3cmZNc7vWicnMkl1fPxflhe9vxM+i67V/jO78GyeeJlH6YrzH2XrjxPPHR+ozw+kztPSN7+NeQJwe+BRONva5zee3zuwQgyt4/H795Y3nMe+HSs9uYayf3qumbHOgxj5j6H98TwRWOS82q8l7gph4M8f46NFQGDxYAGMJ4TCdz28rmN1d2L15tGZ6xsOFEOAjnu3a2/loFdOXLPyfQOzJNPZSPI88JDNxR38OsGKofzxA30/jQVny4OfxoBT+bP7ePxRUXck/D6HecBwrzvhvmTkHxzGM+wDt8TwxeNSa4XfvPGqiZqIkSCQhut/ZMwJcnLMt9YyXW5S1I3cXgNgsKY4k75WLdgjWuQrHyn/bic6M6/350BW7CxRDqGE6Xq+tAN5+FVdNrkxMWmPuGyNtYJPhzfjsuLFaTU/npYDtRH29iu6sBiVmJwiieCbx9LyOIdbI2b6TBb6Bwds+3TcqLNNMsC5wvwQ9eMI/b/sBxeqY/Kg2+kJ2KABm8fH9Ie/uPGf+a/YbBPwwe5UnQi/ybtn+kJDnCnvNoT2nVOID6JG8d2Ty5p/h09ZsB3sTvmJm6UUKxSjrpj9aDyZddn7du5tdtXNgN5+HaMp3zs6xeNG8LlOX9Yjm8HH7t0aR1vaUOuOTZpTA3G4Cd8tIjwp+ZInu9AwrHHnLMsEkubz2QMT7hX/TQH8j/Gqd9I4OPjcuT5gXyS/B7lIraZ3BiHGQJ7NreV+WbEfent856Nz3ksYk7lnH33HsvpFcbdbK6s8UOeJnmywrpzPsr4dF60/CK7A75yH5yTwDeOdZnvC0jn/FXiHnk+0Vn6L2zD8j84t1xw7LiOgU/H99NywLUI5FJsP8F5jG8cIxX45+cB80kwi8/B17JkLmJNSbEC330NrtAg17jv21FqDMqDUZ2hueKxTGtDnRvkvPoQfQP3sndsVPzY7+0U421r7pmOFcVLdZeNA/YX89PyDtuCK4IX/kE9VHDPuBaxrFvmWGwzDDDvEl4bxzsxJKyXjsnCZeQo4s38YO5yjWtjsuiKuT6Tx9z5HLtvDrMaVmZmwfZ/EKJlfEzBFcoAAAAASUVORK5CYII=) ###Code p1 = float(input("Digite o valor da sua P1 ")) p2 = float(input("Digite o valor da sua P2 ")) media = (p1 + p2) / 2 if media < 10: if media == 10: print("Sua media foi {0} você está Aprovado com distinção".format(media)) elif media > 7: print("Sua media foi {0} você está Aprovado".format(media)) elif media < 7: print("Sua media foi {0} você está Reprovado".format(media)) else: print("Você digitou algum numero errado") ###Output _____no_output_____ ###Markdown 
![image.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAA3kAAAB2CAYAAAB1TlqdAAAgAElEQVR4Ae2di3HrOg6GtyfXFbfiSSdOJWdSxp1JD9oBQBA/IFKWHTtxkn9n9loPEo8PIEhKPvH/Fv4PCHws55fDcjjY/0//7Nb762E5vn1Au7h+eH1fXf94Oy6Hw2lJd/6dlsPLeXEpSeZ/5+WI7aVtsyHkmG1u07K8L6fDcTn/J+qz3WErXj8u539Zj9kpvp6Ws9jcfcF+4HuxU/v3PmjPCglcMNlhY9wSJspIWZ2Wk8eic7O+wWDt+/Bet7G2XzNZgP3p7bwci26PS9YTPtRYRLvMtF+/kamy6jnSchb9THIlNuu8thyy+L9j+5KrOc5jPyKXJF/edRxlH11/GReILuV0urFgXGJMWJvEAhkkEcjgtLwXH3vTcv36cSqSgNHLeTm/HhZlgYzVV2BS9HZ7KhOVAf284bT/sixJ77JErMq4B9nHt/OkvpQxM9W7k7fbvyxLYt2uR2y91smNvbUGhPOQBEiABEiABP4Ygf/9MX/p7pCALUpHG69h80dfnC4cH62Y8vcR+FjOr/HAYl+ffa0+3k7twcW+9mz1GQJPNu4/4wr7kgAJkAAJkAAJJALc5CUcf+hENlLwdife4j0BA27yniAIF0wob4cutN55+3Gbx50G/P5mzzzufz99ekgCJEACJEACX0aAm7wvQ01FJPA7CPSv+02/Gvk7/KQXJEACJEACJEACJPBTCXCT91MjR7tJgARIgARIgARIgARIgARIYECAm7wBFF4iARIgARIgARIgARIgARIggZ9KgJu8nxo52k0CJEACJEACJEACJEACJEACAwLc5A2g8BIJkAAJkAAJkAAJkAAJkAAJ/FQC3OT91MjRbhIgARIgARIgARIgARIgARIYEOAmbwCFl0jgNxKwH5be+jHy3+g1fbpIQH9WgXlxkdOXNuBvGH4pbiojARIggV9I4O6bPFtIHtpvsI0XDtJm3w9vvy8n/C03P355zA8xPzK+/c/Ouw/6eeQPPz8S+kNl2yLs9O+hSpZl128Gyji5kEtNzvvbcdn1m4i79H7Cd/2dPa8Th302fULdT+mqdeJL65vk8YXceUJ4++eQJzR+j0ky/rZ+ouTR43OPjdDm/vH425vcR9aBq2O1N9ek3Z9e1+yYh2HMPObwkTZ805jUvBrvJe7KcG+e31Xp44Xdd5MnkGBikmKC5+KOXjtct8l7+EL68ZyXYdH+pUn1BTj/jopdOfLIwv4A1OJT2ZRqXfjSzc0D/LqDyGGduIPc3ybi6oXqbwPwZP7cPx7ftKB8Eq4/sQ6Izfse3j8J5Lub8Qzz8CNt+KYxqesFbvJuTdf7bvKqFSk4liCy6ds/IUjCHpb5Js/u29OjuqHEe5AgalO8QRjLNlvjHgwcfQNxWs7yRqQ/tQJdsMlFHMOi3WS9t83v8cVkeqHUhW978+fXVCb4cHo9LQdfHMv1l+NylD7tmuptMnBhbTE4x5vS13fbiHaf3PoWN5fhuvy2f6ru83KWjb22BeYL8JF7zkj9Py7HF+nT2oNvIidi4Irk8/7xUak9pvgQYua/2+Cfbh/kilgKMpG/t+6f6c0WsGtcP1rDdU6gffZGRmO7J5da/p16zIB30Yt+YC6u7ekeTQ4qL282u770t5mj3Nrtq6qBPHw9xdtP9fWbxo3Y1XP+uJxej33syq0RX/X5zTOi5VgaU4MxeIWPHhH9bDmS6x206LZHzfHx6fXMxvCEfZMvNVD/p3a2b2ro8Wk5aX0Qnyy/R7mI17zd2A5XBPq8tpV6M2Jfevfc9GioHcNYxNvSHL+a98AJa2WNH3Ka5MnK1p31KNtX8kv0DnjlPliToKZorEu9L0Z25i8W96g1E5ml/6I6PP+DuedCtx3nMfDp9HZejjgXQbsU2yuYx/jGMVINv74OuE9ms/kcvJYls4g5JcUKfO9zcDUNck37vp5sjSF5MFpntPzvsUwP9S2OURta/iv38CH6ht3LbB2h9uX6nV2AMfV6Xs4vLSe3as90rDR7Zd3l40D9xfz0vMNrwUpsC/9gPZSNtnXygLU1y7KtxuI1twF8x3qy04Zk661jsrCMHEV7Mx/MXV3j+pgssqLWZ3iap7M5MuU81CfIcx9bHuO+Dt1Rw7Il33/20E2eFkUoku5uDYBfX39agnbAqYElSE+YBv/8nzSye96vF+fUpk1eA/tqfysubdCojNgE2GDFez64krHDN3nIR+WALXrPE7vpNH+QSRsk3k+TF/TreRRJ1ZEKcLvX5Pvma9XOFy2tyDrX5GEbOB6PkLERp+SXT0phP/JJukp87xGf/LVIYWx2hB9iwZp9xMRYis2dzwb/7A/KbRMAxrQdJx6JXdirVsqmzfv3ophjrTY2GSlm3k9sHx0rA2Mzz9HsXTpTnZGTeC+zhjvT3Cqstnz1Sa3lstneck3lR97ZoixsRLv0uIzLT42bFMc8nqfxxtiksXAhj/Dtqfo89hHIx0IRuXleqO3Ordmu7fDYpCHDxL7mA/qmNvoknGWKPM/bebxyn+SXnCT783ywO7fRXs8/YJBqQWOYbE/xM3u7X2ifsnDW8eBDNpdX2eq5W8ZDYpN8MpuiXoQNmJ/ok8bacwTtVn88nkmjnoz8cBZTmUkM2CrXRTcw9+NcxwdjxhklDnmzi777grTHGm3CGLZYhx3QMLFpedsYznW1dm5v0mV+jfhFrPK8IXq8PViWHmRoHJK+iKfa2Xjr8bBds7m187VWmmv6ptDamk14XMYu5lgy3E7UZtenbZvNyitqYFoDaLu4FzKa/aP8hrlRNEcfPYsXFimvRF6MKTR/i7Xeu2jDBWbeX/NybMMojp4jyb+tGCR/I+dS/7S2GuSu59I0Lkgu10fPsUs1DOM/5ZvGadH5pKeP2+SlgpO9F4CeKPlOPbNg+27aPmPg5daRPFZ0Z+2gV0o+uJ4mXrleZceAwMKW2qG4PhH7E0b/DBtzwluBQEb9frUZz8sAKCbYYmA66c18QilWNIaTWdVdC2gXM2fZm/gB+ubX9LPaMZe5Nz77chL14rEXFXlKPf/3otkWcGiLXWdg+mpO2Dn4v5pcNnKpjtGuy/0xX2Zs6vWeo+Da6nCaF3kRlfpt8Kk65zZlRmmsVvlJebYr68syp/Fd1RNQgMzlcj/fiDfGDXlWP7bugQlyOLUddWmf8Fn7YL5PbY8+phbO0Ua52WW0Y5/ci401zibX/hu+rBliu9Ux6K7yc9yhJ/SRq1X3qFZm2aWOgOjNHAW9WV5dXCaB6SRsTZfz5rfGB5sObaj+QAxWuYTCxnXK6tuGzCSitks34QTaiQ+QY2ntAP5J5+AFPjWpNQauTPsMx4i3aJ9FV4yDLV0b92rcgH3YCuOwmJNOwTbpG5tU4Jj4jGNp/arN5bzYHbYmi/KarMYwNRUfYyO6XvDHGiyYJwF6Mo89MsTjKgNYAc/aCs/nrEGWWadvJ0drAZS3qieYl7lhP6v843yDa+9tB9Gn3Ein4FPJ
gTQmUx8ck+UG5PtW/xTzHhewRcXaufJFuUXls54+ZpOnIHBgZff3BV361ETKcjRA/spcP9tGZWPQi+60aRwmeg0yDN6SgDH43d7YLKG1q2KPN9vC3Aap3ICkau1cz0pOT8yyQNJ+Jif5C5u80Af+lWJtAyQzGy1c0mAR3chJ44EyGiNs0/z8nviseTdz4k0G5Jn5X3OknouEOf8uXw5WfIQV5LLm6EBW//pFjl8eX9YvYg2FsfIf5tK6v9m+vu45mnyrJ1Un3J/2R7ukPci4zlccm8Csyt+IW9YHMm4cN/PxvBVvuyd5mJjtyiMHPpDvT7u9SWFtl8Nn1Q3jIh7ChX3aR+M1YQ+x1LYYCzwufHMcZr4UO8yB9F+Rk+ojjLXhmEm91zU3xaPNX8HFOo9s7zV1FcNaB5oBnY35uMvWjbzObgW37E/bQGLM2/wZPkl+5Lm/36uxTkrXfvR+g3VA3EtC+pqhMp/OY51jk4M2lnvBwmxNedNrcbZnPkYG7XAt0nVv6Yo4ubTORfrj5hUeVPc20kn9bWMA9btA+ey21M1b1l/5jHMy9/E5suc/8q/rIvUJx+tkbKDt4LddBv1FF/rpdqUYa32E/iow6mHaRMk9ZNvGjPuJeYGc0PQUpzZ2rf/GGKv+zph5PW12jW0wX/Fe2GQ2JD79DSx6sZbR78741NxNcTJ5Se9o3gJekZemWXxI/T3ve55v8E22dE+e+uD+mzwNEE7oa/8jUdb38pU17Lhf78l50zsLRE2eHtSQakdlIKu8seycQGBDEantPJnKPTmtTEbn+iSs2ozneOyDGHSirVl+tjva1QFauKAfohsnlX5+RZx6nya4+BPqih13iE/m4Zq2/M82CLPjm/wbx3gyWGMeXF3+JT9xgs36soQcv+rL6FxzqY4T5A3Htb/rrtflPJ7yeqv6OfNjdt03wcHVNsV2PrKhTkpmU2aUJmTwVazdilvWl2VGfM2XsOOCbzBGY6Gx0cdtbP++2RcO0bcyxzyye1s+pt44tuQGnIe/qYdYF//uRW9lTol9ycFk1ygug4dUqQ+wWdtR7BT5MF6RX45z29yMFhPFxukYgHZZtrDxTREei63ADfqrF3Ce5c1tnXMqXIChyE75dZFXjb2d61gosa5aR37YGNqQWYXgeWcENuh9kCdt0Ce0sfc3oWKf1RLoj/oGx/MxUhoXXZGLW7rGfg1Zw7itnN2Sqa1gW+6bbcP+uR3mZO6zGqPIP62Lrhgb7pB+1n6gv+jC8YHHIiZ8g/5dvq93Yby2Gnh5DqjywvjMENvhsVo3eZNXfUf7Qs8qBnAr24Dr1Jks6NwOqwy7bD4M+ZS42Jxj8/08Lmu9HjPRv6eGzccc2FptW6t9uiv33eQpAE/4ua/joI/a1yTFNvmeBtQ3eTo5RmB7YqSCboHr3wVH0W2A+oI1yS5B9kSy7vPE7zYkPXFSmZjOtrBVne4P+l18gGIskrNO6+c+ZX3Z7vAJklsEKj+3I2zXo3JP5MfiOvqYX+MNs8n3xXzxLalr99qia0tm+CICsp8oUtv1xbYz3vLf7mnxkPh4X+GAdvn1lpN+D3WbXZVR4wAxHdloRTL7lWPb8sAXMphLJZejyOVNwUiv+K3XR3Kzc+szzZVcJzRfOqvSZZpbOOlYny2bIifd9mYDMBYpI389bplt5q79NPZbeVN8S7Wq5XXjMLKjT4otjmmBmmS5j+s8uuRjsrDpcb3JJr0XcYwYwthowqbsm3ybhLP/KR89Lm1sYRySTWmcre1IvmlejeuNytyT2ymXrW5E3Qs26gvY3uedlNted8xKs2Gco8hmr61zTomKnYzya4MXxiPywOeM5oPKdN4DnSi/6fe8m8pMYvJ4DOaWBy5Lr5eNtd9TPR73aWzHNcJlJJPUj8iD5EdqiLHP42AUN9OV2+EDGJ9T3CbUK8d6vcRD9Hj7ZJpwaDWp99UGeXypnTj3OccWSxzjdixCsgxc0Mvd0Id8vLZNxkYyvm0wfW7RmLa5dmCX+zlibuO62Kv1xuOL+WftOk/Qi5zMf++fDQ/f15zknttq+ewy0IY5s702mOw2Zhsv90ltaPG2uMUaBj0ZsTz9m/PxnEh6Wi6NZPncjDr1uNmb5kiNg9cgsyFxhDxP131voTK9/0rjU1646yZPA4Bf5dDjNZCcvDiQK6OcpPUu6ju+veenxz3A8mrWbWhBVbuOy/kf/CWtKhz62xuaNohKkNUGT/Q04LPAnJz5npxVJn7NXyt7wmvPVjD0K31vUYB1QHox04bGz2SclnfpB0kcMrEwtALqPnVdtmkb2dltan99SfWBHdM4FZY+uM3er43PlPfUf8ul07/MzuUY2zl/ZYb/gXzrX9WU+xAzl+05EcXN89pydBQjueb9etwrf9SFxy0/V/2n19dM0FU9Tv76A4FVK7ugtsBfWYTc2u2rSop4pDFdfPUFkvn74HEjdon+Vjf1L+UV//xexFs6tZj7ODVS5StCPvGv82jLRxelny1H4i+wei1trcD2WmdjISdtJ+x98+Y1eaOeYa31mrI9znyMJo/gxMeNjI11vRmOGejth9Gu/dVlj8mQjfQKFgf8a3+JhSzEYU6rOVrOw4atf+8OemVOLDLcH/sc5decVx6H2A4WfrXeZIV6Fn4Yy16rPN99nMx+n3TGHK/Xv/ANteiof80wcrzag2Mw7u2oX83uGCMD58HGfXXAONtfH7T6nsYc+IV6MVY+jrTGQN1J1kGeYF+vQa4Tx6f0Rz41jt6nyphv8nxjZ35ujo1kvJxAPrY1iusP/4/LGWsPjtE0Vhrznn8yprzOup52DvGUvAl23s598b/Nmw2P9uGD25186g8sop2vIcK/Uk+QyfBrlmFLxHF7TEaMo68fhQyoT1M+/o0R54PfkHpcDctrrRyjzn1HDXOfn+Xzrpu8Z3Gq2/HfeTnBn1Ht13/RgQ4eX1R8p18wEXynGdT9Cwk8Ird0gonF3C+kdh+XHjGpkf19YkMp9yfwiHy/v5VNYt1wPEzRLxJMZr8omHRlB4HfvcnzJ6PPsAnaEYxdTdLTOXhlv6vzAxs9YiH+QHMp+gcRuEtu5Sdz/qTzB1H4HlPvsugl++8JHrVeTeAu+X611hs7cMNyPTgyu54Ze/xkAr94kxcLi/6q9SdHiraTAAmQAAmQAAmQAAmQAAmQwA4Cv3iTt8N7NiEBEiABEiABEiABEiABEiCBX0aAm7xfFlC6QwIkQAIkQAIkQAIkQAIk8LcJcJP3t+NP70mABEiABEiABEiABEiABH4ZAW7yfllA6Q4JkAAJkAAJkAAJkAAJkMDfJsBN3t+OP70nARIgARIgARIgARIgARL4ZQT+xCbPfohRfqBS/uKm/3DlL4sk3fl5BPTPdf+0fGw/Rio/nCv2z35A9+dFgxaTwJ8g8FS/rSo/NP0nqP8CJ3/kfPULuNOFLyPwG/cKf2CT976cdCEKi9MbUsaC/72/Syc2HEc/7v4VPy6sOr54Q/Lg3yz6eDsuh7ZJmbK9IVf2dhGdt/+8xzf93s+/k+Wg5sMkH/cCuGM7jOVNYq/
NNW1/WA4H//+Vi9WhvlajROY1m+ctW/DevX8v9FofHmlLDfpdfltxWZaW5x7nYf2tuq85HzK8RsCVbUXfjtx6fD285YGr1bzPxEDrRB0HPzEvrwz7PZrvnq/uNfZ2GW0185Z5VHPB63fJCfHVxnxd89xYo3f5UhrdqzZgfou/xdei9Q+f3mev8GwA/8Am7x7Ib5mQ7qF3pwxdiFy5yNwp2pqZ/6fX43iTeZWsKxrfq8hNVH56YzCRy8tfT+BTseyT4BVj6DMLmaG+vIDVRcbeyXhqCy6AsvxPR+hqHx5oy6edGQuwRSDmhDG86yLpwTVu7NlPvfq5HO6L+jSufl5e/tToPcZujN8VGtKayfLKN4ppLhm084cMV9XoK0y7a1O1P29U1e4dD3ruageFfRuBp9/kxROV8gSiLzLkiQtMxLrgOS5HeBrei7s+tcGEb5N2e5rjgzxHY9wG7fJBv6hNx+X40mxqE/ipPxUqb250ALYnRmXQoc0uX3T6semyvqfXU2KAfQ8H8Dcxg+vZ4fWZ9BP7mj/p6zXoA8YhSWlFVO1s/uJEm+zyWFrxtqdp7VrShSxzjDqjZEM76TKOi2xa/Y0JssXY9gVd8/0sb/9WeVSe+EMsVe7ryfIRcnJZxOYWgxVX8T3ig/aEb43pP/Frv/8oq/smIoYxWBa1/8V8Nt1Zl/MzuvneeDzVOLQcVj+u8yXesuRYihT0M5g13fih+XBczv/Oy3Gav9jBjnWMYQ6vm4yvTPXlmJtvPhbGovzq1JaSV9oOctP7++fnmW34cIUt65zbiGcfzyWP5Lr7mnI7xpX7Pf4svvRGs+utBrhOaZ98ln6t9smn505qM/PTxtWxjcPRuFJmb+fQ8fq+xDwAPgsX162MTstJ5yuxLdqZvI/uQ6574IvL6v66jxdyF+OGzDrnOOh5+WK2xnjeX29Uxst5eZf6vbI5bN0aI8ok1cJZvMp8cIC56sa87AwOsAYIRHC0k0nLu11xxVh1X0xP5CKMC/BRme3Jy2tqNs53ryfN+W4H2rqRV2JX7wP05NBs/mhXLdetLfgod1VX5E4SowzyOjTmq/pNDRhP09qQ4xpjIGktJzVGfnt23e6PYmZ3rF+uQ9kuZIr1R9dabdyp/DSOsoxet1ucT28yN1tdEb+lf1oXqnEThu6yf7a8n63Jw+ZcD+saC/100c/6+dybPCgWaVG84MBrhdYHtA68mKzqQNQEgWTrCVX7pYjlQWGJ0Aa3Jk0rGHgs/du5D0jV7XbqPbezJblPPsnvKCw6OPTrmuZ/kusLVPUjCk/4m31QLq4v+bo+EX9RVyR42Ca9sF2W0vxz3xPrgS/eThkhZ+dlujx2Go/ui8gL/5MdKT7Zps42sZc2TWfr6wsE1Zns9EmjycUcc3tW+t0f69O5gg2X9Eif3f6D3L3jKeWsT8SJtft9YRxiIJSD+37/WM6ZoRHlWG2a5E1pKqfKBRbtPXaDtsNLVd/wPBgNZbSLU1tSvLcXJXdhtuXDFbbUnJvapvqCUWrX9Q3GVs/fDaq9f21j8rwe5rvremj1ovRBu4GZ2j+tFbAxzEr1zHIAa2W013vus/iFx2XR7vekj/qo9sXGwvQ05uhHm5Odi7bz+ljtTf2MjeutTUdMko7kS+RClePnKs/7yMUaZzn3GHin9ll9SvnWOGkdSP7l+hb69uflVE+xT06TjerLhEmzN7H0TT7aj8dtjvd5d9GYW86Jjb0GAlPLl8t5qT4692Zbl1f8XPuI6y/3dyuv5N5xOb9JrNuGoedEiUvbaMRYgDmisElmVvapLdpmxx4HWzPi+Aq+MUY21jhohOoEe+GeMuw+ww3PIc+FtNZGu63POhZou8fC1nhuf+rj+rot1tZi3/R5HVGmXouQGx77utt1Z9821+QqP3gho2RzjW1R8WynP2iTB+hKMNIT03oPusmhFhNNKEuMKCQlUVK/3FYC3gclJmkayINkE9tawqbCLbrgXpXvpvTrdfDWc++w6S802jwU32HAiJ1pQMK9qZzKFhZD1XZkWO+hfOAVMcUGg2Poo3fhvLOFa0kC2qU3wIfaR87bhCVygxfmUeaK7botOME0Y+o9yd+b/XcHwV69BNzRLm8en9Wf2PD5ky8cJ9GvHAG/m33pMmqu2cLnoh3gc7FucFp0KL894wBEVX3DGOyRObdlWGN8IQWmjGIVeZYa5pMrfNhvS1uo9hpT/Gv1dhjPngNYT61/1PnswvQMZZVGczZo65ZeqB3AsMqNsYdyizHtNNrKBZCf5oDGxdmWnMOx121R+yIPsU3SA36oSaUfWj3MBV/IYcNBrLtdafEpnbZ4h9Bsf9mASbPCJHp+V16uYx8M0Do5rgzWfXuPEp/MJedP7yMHdVzouXx76bz4uy9sszcvq0+5H1ow9rHPgxM7UIJz8rWY5bHPXeK7H0svYFhzozBMOkrb/TkP7GFM5fgkTfMT6F8bbcmr7KMtsFCB41hIbY4+phnPq/xsG8os+oo/NWdCDjCMi3ZUY1bzGdqHzWiTNCh2QZ9nPHzuTZ7g7F+Pg42VDiB/ZeufbSJaBc0C0p/Y9NfhdTBvLQYxyOsA92QoSZg2nxJ9sA39CtvkKcJavidOT2r1P544ZD0zf0WK+bx+1e0aBp/qkzP2z6q7XccCm0QhP7cD4pUWnjBAC08tDv7kTT5BH96bLeiUOfTBeHS2s5wrtuACZyUX2qJcLw5mnzCJxVPYAv63XIj8aJwHDyn2+C/kMe/6QnljPGX7/cGF54F9mj/XjKe2YHpILAf538d8Ssp8AjHLN/acmc7Oc0+Xqm94DvmxR6a2AVug3uitWje6zDsx2/Jhty21Dm/bhnmv48THd9J3Q91L/TsoPViNCbit40vGZ2Wh7HHctNj2dhC3Jq/LanVgVtekebYJa0h5CCR24CbPeXltaPe6vG6fGRU2qdb4Wrn6B/NC2Wg2l/QDa1DUNuzrrddMul1pLguul8Zgtj/Pyap15YfbUhl/VV5u6wnr5OiKGrw3ri23Ik553s1zWrNGGLa8ini5fVHTIhbrOMe97OHaR+srY+PavMLxJHbauAh5phlsK8xsjIc/yVJgINc3bdOcixzuX5su+tTGNmei7UkvnpT+eGvOt+Y51o/KZp5vwdO0or6cE1triqKv+JPkzBii03JcZMS6S26avpTrWg/nflbxz3j+9Ju8gAYBLwMo2qyLtibXcCIDeSrAzseTRG6bksuL4Ghi30goTPpk/2rCjrtdb5UL53N/Q44ebTGEpqKzLwra9W4HtJPDuU+ZH26QVoNOfakLoBZX3AxO7c8LnGRi7QPnY5/AbrRLhOI5yFF9cg5v8iKnQJ4WFJwgmt3yb8N6vmL75EkvSOtiv+F/EgGyq/3QLnOxPnN/Jk9AQZ4eAp9+3n3Gxhu+VJv7OfiFoi4dwxi61HR9/wadK33F18porXRyBWwpOmptCAHQJy5ePiry07iW3uhDaTu3pS4yNmxD+a7P86jnQ3Fjdr00W/nS75c49evtwP0UPb6ZWi2+QYa3H9T9qL0bDJraPE5Bfq3LaFdhgf
W7ywP7RBW2SYxKu1QfC6Mso9wsp92Odj3Os4+l2/R0pbvYrfc9h4qU0C03NmIiXGdzVWHeVcyub+npnf2g2mTnUa+9nc9fsbHOXIDtli8tH476b+5CFi6cMzOQW3Ipt7MaEOMH7F7xCJ+zD9hnfbylL98Tm31ey/an+lZVlHjObUP5IgR0lNwMFdAmLg6Ogk2+ObturcR/ZB/ntd/4XPKt+ovnma/JiBxFmXi8ztmQs8EwO765yVMbYeyHzcWOloNhc1XyXOdPvckLyAJNQPuiOAdV23lRHQ2uHjjr5wmsyev3tJi5/BqkHOSkTwdiKwJ1UNZztE3vhT60ReW7XbBAiKQ2e2yk77EAACAASURBVDzJtG/zf9TX/C2FQWzpi5Dqr59nzn616yj+yXW3ydvaZ+aXClnzz/shh7QB1Pj4RGLy5k8LvV22wvR6wR7LUN86F2nTYqS+xtvkzkBUtHu24Wpym4yImTS0e9Eu4i93LY6hQ3vAzzy4/cYqZGUdErOx/1PfIMe6ziYjyzadHiub5Jxns9/zdms8PTiWKTYlv8S/4f9KLg/b9IuDsTRh3rvUg5W+zFZzoedh7YznW7bg+M3yUYIc34dZ1pF92G9LzrkN2zbyKBaaAz67uDa9Ka7m33bdbG36wlDoou8uF+uKjVeNgesb1JT1A52IYmaWfU7jXpi5/3Ls49VzoNatkqdJlvrlNcx89NqgsQfZYanXS+9X6kZqOHpQEPUx5VfiVYXEebZfrmNscv5GLzvKjL8uL9XmzjJzHtnYY6rjIzintnvjujXGRIbbNcmrzGyel+rjMPeT1XqSeKh9uP4KfzdzUPq57SV31vJ9Ps35kfKvmony5Z7qGNmG+TevDWuOblNVXM6VT+iVu5tc/L7HYjA+sA4lWagr+Ws+et3JvmSmeU1h97o+lRl+h5wNhgVHWlPKPYhTinvzG23u+YJ+VvlPeP7UmzxfFPvrU59AlGMbmHYPkhiCZrxbgulr7tPynu5bErn8nkyrQJVk6wPBXrF3u0oSbiWUqtBk8df0kbxyTwdPezXv8iOpvWi4fvmrat5/w9+JPk1un/jRd23vcuFGYy+8tG+zsw8CaGqHlZ/YCDFLsUR97otcw1jVv4bo7YzHPI6+aGjt5K82tUIfbFFPLCg8lvFXmdDOLBc5hFwhgRzkGBjIbeVdrpVc8KKTZe31f+Kb6E4xCBuy/W5jy9nX9/I1sSx/Hgdsd/9Yijtit4/rYCZ3Jv+7NHZrtwkvWzQGv9qtn1d9egPi6AsQuT5s2yVNY6ct0M7RGAcxn2amsiY+yL2dtqxybhrPjTySseQMr617wMQXHZ5LXouxST22mpjrA9bJ49v7cn7BhWm0xRiELvNzPp4s36O9xCByMNV3YeF5gIw+vckr8e3zUaXTzicxGbUOJqdF/hpk+Inxh1o9EtKuJRbe7sny0s3Cz2CQ37JgGzvOTKY5U2pK5oL5g/KwVmMb09zHLeRVv6ZNcp+sM9fsiPHaQ5/7dEy+nJcz/qXMK/JK9Zc1lmsL3jGOmpfxF2y9vngn/AQG/fLEtmzHrDYIu5jTelxLHLsuPMD8Fhk+/rENHGvM4C+CRyxGdQjzIx74irjwy8at6805sbWmKPqKrygndEkdAIbglx4WGbjJ8wc+Vutv3StUhd9//uSbvO8HRAtIQAnU4kAsv57A+2ssvq9x9uPttJz/u6bH5ba32nJZMluQAAmQAAmQgBHAzdN9mNhmLTaL95FKKfsIcJO3jxNb/XUC3OT9sQx4X04XnniOgXws51f4S3PjRldevdWWK9WwOQmQAAmQwJ8mcJdNXnprefnt4Z8G/mDnucl7MGCKJwESIAESIAESIAESIAESIIGvJMBN3lfSpi4SIAESIAESIAESIAESIAESeDABbvIeDJjiSYAESIAESIAESIAESIAESOArCXCT95W0qYsESIAESIAESIAESIAESIAEHkyAm7wHA6Z4EiABEiABEiABEiABEiABEvhKAtzkfSVt6iIBEiABEiABEiABEiABEiCBBxPgJu/BgCmeBEiABL6VgP78R/1R32+1iMpJgARIgARIgAQeTOBnbfL0tzdu+4HiB3Ok+D9E4OPtuBz8N9QkJ19Gv4t22w+AquyhvL2Ab9O7Vzq2Sxzwxhcd7/k9nz1tNs39Bb+PKAxO/za9vHzzF3C47CRbkAAJkAAJkMDvIcBN3u+JJT35IgL7Nje3bba4ybtvELnJuy9PSiMBEiABEiABEvgZBJ5/k6dPkA/L4XBYTq+n5XCAN3n6Zs/u9bcp9c1KegJtC2+RpfJmT7dRLupb9vS3Nqe383Jseo5vH5YNastxOb6I/uYH+Nevee6gHeXtjm4Givy8oG12iI+NwVneQGmf/NUtlHU4xD2V92J9ug9um3yi7WjfxG6z77ycmt3yNix0h15UIcfRRrjtaXdczm/xhm3KRYSjrZJjnhNy/eW0nDRWoBfby9s8bRdv8kSXMm59Ozfsh3qSDcfl9HpMbwa7vMNh6bIqoGVZeruq95qc1fHV7Pc3lcm+Q9iG/rS20zhhnmzEL8XC86nl7rv7DLxTXNGeNrbQHmfXOSWebays/H+PXPXxOuWZa4Prc7Ptc6antUo+QC6ikOFYBjtncUs5B7a+nJfzq+dWs8/HwCJy23iDOCj311Ovb732qp1gi4xztAf94DEJkAAJkAAJkMBDCTz5Js8WDL5gsgUabo58wd8WLbqggIWJbxDaQkP7+6JDF1TeHxmv+yf9F/v7AmpgZ1vs9o2ELqJiMaf2pcWt30P/2sbE28FCTBdfvqFsi1HV1fT6gksXv95fOcTGGRklexCRHmdOXXfyMdut8nyxXGxCvUnVhn2pncrzeDa9zcdum3awe8HF+7TNZOIy5q/8MA9aH9tUYNzb4jnZVvSMeKE8tye1S563TfBAr2/+0FZftCcRmZdtthqXpDfHc8XBY5v0Am/RKfF0e9CGxCjriT5yPeeqjc2ci2IXjlk/VnuHPJs+v4e1Qe3KOrv90C6x0DEZfcLNS3omuRgC+oMV98nGFMZqcOx1cCOvTJ7Zl+sTyjOfRuPY+lh/t80eAoVP6AaPSYAESIAESIAEHkvguTd5ZYHlb6TkqX5asAkjWXDpIgYXGrhokYWgL9qlA95DyHnBGHf29kf91rtvMtJC1t8iwWIQ/e3+NAt0QRmLrL6QCgP1bU5cB/+qXtgYQnc9xMWqLuZGC3JpWe1zQfV6sbsvkIsNqNdFjT5n7eb5YG+5hlyqArQd7JZmqBePkUOPc5Nbz7u6qsc3F9Kg39vIoy7IDqqeOL81Z2EMdHuaUuCSOBSb4h7kYWmDp9p+yMFaqU8vsXmTq9nP8WYi2mzxrPfAfxyTmrPjGhL+olf1eENPbVq5+/0ylrNesNvb+2eXV21AjnZv1yav14XaxxXK54Y92IzHJEACJEACJEACdyfw3Js8WFCq57Dg0sWNf+2vf9omqC98oL0tONpX0Xp7/5pS4ar9/Gt3/lU8WbDs6b9e9PSFZrLHN3lVpi1W1Qdc9Pa+60WaW9/16AWwo
/ftLeNrWOXrZ/pVw7aAy/K8r32u7Gu3V9dBd5aXF4A9ZllN34zb10wbq77AjMar/n1Ri4tYaQ9c/I0TxtWZQ3/tBX9sJenq7dZxQX/lOPnQ9Kx4FXmpj9i58n1L7605G7FZ2QfxTBw28iiPPXioEeErX8l1VtAW9Ho35Dv76nC0MU5jnjkn0uYk6d3miTGOjZJbq1m0nF9wkxic5S72Vzs9F1FEsic/fEh2T+VVX3F81HtgH+gNpgOfJH9xPA3fHKNDPCYBEiABEiABEngEgefe5MHCQp2H87zALGi8nSw4+qIYFiyl+dZp6Nnb3xZK9c2RnrtdrrAv6P0CfNZ7uni65k2e2NsWlKoX3nTAeV3Eh7+4+AO7/LDaN7s+tTvzRL0uSj637NtqF2/Eqh/ABWxTWegTHrsdLZeSrdAuL35B7xV6wu664EZv8/FU7+43KVUXxAb864zw37whE9iUJEZobpXX7k3b632xr/07yz6egS/KL7EKNtVH7FTvgf9pzMJ17L46nrXb0LOVIyg/2bOxyZvKMxuiPiHHYh/UCfwWRTAVw7CP+D3fxKIbPCYBEiABEiABEngsgefe5LUFhC9IZHHR/zgJLkD8qXVfZNrCQ54o4xN17e+LRO2f7yvqwSIq6b/U399ouC1oZ5HtbzjcRl3o+r9rSvY1f4YL6lhYqX+uVxd5uMmLt5aqp7XDY7fHN8Z5MVcSEf1y/mLfht1ZXl4Izxb5W/Yli0Z6m49TLmkh3BgjPz8uG4dkq8jwdiiv2aO5g9drfqSvAGYbRr57LibfUT7qxbhIh8QIJZhez8P0Rij1GeThRk5aHuU46ybWxxCaoHriQQTGTDi433Ld7ez5pH3jrV9t733nPC/5H7LVLrcf2HRb1CfxOfqEmxt6MIarHAkJuNmSq+qT24Ob+g152sftaz4Yo0F8/U0cMM6+ok9Ri7pt3h9c4CEJkAAJkAAJkMDjCTz5Ji8WprJhO77JX2aExZMuZAZf7fLFD7ZVlm0R075O5Iu/itkWQU2uL+B392+LHvhLfb4orQs0FdkWWfYVp1jk6j30L9lhT9/9a1Hhhy2y9PrrOb4a1hZop/6VQWDYNhkm67S8i86mKy/mKqXydVO0b2J3lpcX/3mxirrAJ4kn2IetKi/9S6zdJpCBXHwxrflwXM7/5C+iNjZFT7LP/ZPFdWknPjpL+WumafE80iOGuzx5KCF/lbXbnePsm++V376ZU/mnJfRKyz05jwt16ZNjg/Z5bqgNbrduMoBxjZO3a/b1v5RZHRm1k2vAA23DfJqNWb/uYyTig1993fLf/fIxM+Pp7Sz+fcwnH7f0oNySiygDNltyOeVlitu2vOBwWqQuOB9/ECA5bPW21STQi9w9v9xf523936MGJdvQIR6TAAmQAAmQAAk8gsDzb/Ie4fVDZdaF3EOV7RMOC7R9HX5Bq9Xm4Bf4RBdI4AEE8qbtAQookgRIgARIgARI4MsJcJN3d+Tc5N0d6S0Cucm7hRr7/EEC3OT9waDTZRIgARIggV9PgJu8Xx9iOkgCJEACJEACJEACJEACJPCXCHCT95eiTV9JgARIgARIgARIgARIgAR+PQFu8n59iOkgCZAACZAACZAACZAACZDAXyLATd5fijZ9JQESIAESIAESIAESIAES+PUEuMn79SGmgyRAAiRAAiRAAiRAAiRAAn+JADd5fyna9JUESIAESIAESIAESIAESODXE+Am79eHmA6SAAmQAAmQAAmQAAmQAAn8JQLc5H0q2u/L6XBYDqv/H5fzf58S/InOn/udvo+343J4fd+lX35fa+37adnXe5cKNtpBIMXs078PaPlzfPvYoTma5FzIOZDvHRaUrbb7+NmZd6H1iqNPc7lC169sanmh4/2RcXoIO6vTp38PEf5JoWLbo+aLz/md6krzEsdr5Rnj/FH+fBL1td3/Oy/HQ65l14pYpO7cICNY2hyLNTPukXOPxwM4d9m/5eAb89kRSu5iLvt1fj6OADd5n2L7uUn0U6qnnW/f5PUJfOciTieb0na0MJiayht3IXBf5jds8mSChTzIebGRj2li3mh3F0oU8jkCUus+ueD9nAGf6P2MddrdEdsetVi/3e/hXIDjVReMYbe2fzkv+mgI27mbP/Hz2xbF81pIzpNEuinn5pwnWn725W/LZ8PmDye4yfvaNOImD3j3iU3fLMQEBk3K4fYkmp9alIKiA87fhMHiSYrVy3k597dkp+Ud2vYBMmqn1u3UUz0RfS/n5b2+ydsoDHkx3wQ2u+w9kPHpb/t8I6Ayj8vxRfxvvmuRdh6HpT4ldnO9UKzeKGz1B37io7+jmsVb4/Zy1LeUnbcbsCzLrB800cNod1zObxZX0b2ZFzM/lOtpOSkz4dTyE9sLX23nPloudP7ge7W1c30xHd1vZOexqp3rudrkOT1fxIrOWZxR5Cge3d5DfTI4yTnkknzaM87FmsxybHcbe2/yBsByuXNU/Ue77nFQTi3n/Zo6DrpaPEyfXT+23LRr2/5erCO78hl1OC+w8QBxrH7q+SBvW4DncbRx4rkbvCd6MWEqw9eTfuNCZWjsPTeX/eMFYzUZB5qnr6cee6w14/xBX5wrsj6sH55cfOsNMtFvYZLyHhgUdhqTwVyg9cxreBsPnt/qe3/7bz5EzIqCdpr7mN3Wpx2XcaR2qf9o+5xXaG3yXuWtWhtv3Y8ZF5Tb9KUcgJwPRXYEnE+qE+xFGWnMoxDR7fmA1+vcsY9znhPa/KX+k/OMc6be6oTnzixuWut87kXOHrdzfPPr9R3WERhri0nP04kuHTuzWgP519dXy3fmc/Pp9b2sfTKjFXNeuAsBbvIcoxbfKMY6oeBE4O3S56UiK8ndBvC/E7ymzv1Ulw/mNgngZNcHKtrY2uEka29TWuHWryaZntTG9SQ/4iRP5HF9dDRiFNfMDtdtC4zGohWhvgjQ8yh0asPITvG5Xwe2m/2FQcgW+9QmZNk2Xv42KsWjOr7RLzVNNhkLt73boB0gXqlPK4Dub4t3youWnylmwChiIYpyzqGt2t8XrmqDb5xyn00uIDDFr8lbT1otfrL59clzMt6q3pF85DLMuc4FeIvNcn2iF1zSycnjZ1/BipyKdi3OiWVrp/GDPo0L2u12JP+0ny8om/xur51P/e2br2KXysSF6566tx5HQx7Vzw37k5+JR5n89Z7ZmHK66opADOLVGIIsbS4y2hhLstN4yb6L3Z151Zli73FrG9Y0lj0XUPaFeHp/3WB5fzDA65i3S+yvH8san55rvlD1R2TNJ71vdvd6XjaA2cI4E97BEWW0fC1+WFtkhMe+WRtxGcnzdsbF7Ui1BnNFj71Pqc3hUq+zSV7KCZfRbAK+XYzqaptRqY3OoXG9lvOlTV6X3/KFnHsk7CDFfiNuUEukI44fzauUB/EAB+sOHm/N2SN5lhdPmM+AU+z2sQGXefhAAtzkTeDiAJ006QW9L1J9sdqLcpt4XuQJfiykbJEI5ziZaKGNe2lQbLSzTZT0syKkAx7bixOpWI292ue39bVCA5NRmpCqfFjMXLKj
FMsuaXa9N2gH2A6Pazs4R7/Vr9HkC+39EPv5NfnU65gHYEeKKcYLBcgx9Kk5g3rxOPVJ8iAv0vXx4k0LsejHvK35VOToaY1tkoETZDvujPIiFEXneFg/nCgyz9QzNvid5ZwD9szHtc/aBmu/vt5tSxxKbKVzvz/WZRP4Wn62E8ZYl2ctuh1yuhHHlEtJOMhe5SzYVfSGXyYs5EOfpidsrAzckJojs3b1OpxX38XenoOuRz6hj274fHGObfKx2O8b9dwfZYVsy2HkmuXZIq/pndqJfcZ6NHdqXCoHFNOOI1ZywWTXcWf+juOCbQfiy1N9tL3oKrZGnlSpM5ZFHsazyE5zZL2H6mbxqH3gXHlirs1kpFiZ7Z/hXOeEiGvhAraKq3+XMwZ6ey5PLUs8gzM+EFGyMS/5WmG43rD4WO1PmuwhVu8D7UoMnyKfwfR5TkEjHt6VADd5HWcrpr5Rk88+iHqjclAnt3K7na4SW4t42Rz526ZSKFJfHMClXSzcYMCrntgwpkXD2NT09GnSpF8W2zYZrfxsCxb0AxilzTJOhl1jK7gtRriIUFswdq3/amLtsubxTsx7ez+Y9/MW8okFXq9DvLJ8iJc/iR/4MZ+oiy7QYzmR82w9YZj+ylLPV/ETWRuLXY1rvL1AHv1YZZaHEe3mLJ9GvFKu4Fhd2TxaJNu4NRk4PrqV5WA9zrNN3jzHUq72dhgXzw/M8T4mqi6UicdN5y5/wQ7p1nXJiclMPId1T+zy2FcbQX7xc563A709jmv55q1dT7auvq4rLWt/4JZ8L5vtlr8ov48XvIdxM8P0vz3WegY6V/YAL9xwSL9ZPD1nWm3A8RombPi9knthLLs+yIXsn/lg9R99FWvsfGwjWJue6qMMPK75iuy2eYWmIg+ZKxesAcJwPE+J/5gbw4cDVR7km84JWNv1GHWHxemoy6x+2PklzpfGYM7xsCfFe5U/XgvQ0mofsOw+eHu4B4zk7vdzdhvtc3fcxEeoDbgGSCwx/+o4wzrTcqXHB8zK8oD7k3POdoNDPHwYAW7yGlodkJMBOqdfJ9VBSxl0r+f42qY0KcUg9Sr30qDAYlgHcz+HAY/tRYmej4pzWICFKa6Oj8S2+SavspkXdVvYxOSyyaebAn5239tNZIjHve/66Rz6nZhDHzncmye1HfqU5QOnK/xAe/E49BifWAAAr+JTtgcWURN2pbudqu3buaUNQeZI7yifcru5H+vFPeQc6E32z66nRlVnZeuN63U4r3pG5/rWdKzLJvp6D3JHTZj7mxhCXah5mnLJ3aqy01suubnfz5BffUFls3vgHzZfHdf+cA6+q+Xyb5C17lubPeMlfMiKE+PECPRrF9SFPm3EM6mq8vxmvQ7nNd+8y8Zn9TOfow9QM1Re9WOsJPPCPmC3dC0xi37YRxrK+agGFXnYrshOcyTeE374rYYZT+xTbM/8xkyGV0FX+O7+XnioJs2gv/bq83bhUmwPXX+PM8Zhd9ymnEfjI/I05Ocx5XX1qk1eieGz5XPkFBLm8SMJcJPX6OpA65s8K2qjBWcORi1++a5NOm3zIoOvyPfBq7p9AhkUir7wwAEs7fq/uWlPv/SpKxZus8/7ywDDJ03VWjmPgjO6m6+pPHjSW+7GHztwuT4Box/SSX3xTZ7ZP7Iz2ybtWqHc6q+6oqC6zVvx3ipEW/2S/6rXJ+DsU4oDxnHLD7nX8yfHKXHp7Uynx94Yuz3J0sy/2W39cn6rHs9TFFEY461kG24GpFG31RdyY/tqPEYxmNsLOaL8xKfIB7VhmsPhyTpmIKM3y3FOEyz6Ku1H+dHsSP5Jvz7OTb7XjbqptfhUf824xFB123hLunTxO3twk7lNeVQ/y7nqG/nZdHu+qnyPC+TX+vo4Z5JfyHDEXfNiY7wAL6Epst3OHnp8a6sXc6ymvHDD0Rh4fNWHlqvIzRZ/o/wrD6HQ76Fsr7noRRxnnaVOQ0ykh7b1+qR6t2VLnzUTj2VmZ2Ml5Em/i+M93BDrlvOLy1bNUAM25kiMe/LJ5GE9DnV2z/NDffSaWZgl/0NAmYOzvFs4W+33fDF/h29g0d+Uz9ZnlJdg9q/inPzaGbc55xs3eWn8JoviGyJ6GfP7+fIZLY+xi1d5/EgC3OR1uq34+VcoZIC1SWuemNgnf5Xj9A8HnilJk6YWDu/jBbgsfFOh9YVhm+zUPvirdT7B1gkt6YmJsrtdDpKNcq8Ufmyuk5QvxPBGO1ZZ7SsHx7f3mGhXMo2VfRXmuJz/zX6fCNv5RC/K8PqgfyuWKr9zwtidlvdd8RZd834rBKBX/8raSLe+5fUFyIYfYJ96jH8F1fVILLCdX5cYDP6yFdqrsWy5f8ZFbMofyFPojHG2GEpeR66F7PUGAvv6wghE6+Fo/M1kZnmQczMuYKf2neYzxsbjVS21NvYX9Wxs+8IoxcW7YXx6bshN0DXIjy5TWkoejMYY+rtVR3bns+Q9xh9s7JvQdf2qflfGszgmBigf2Qy/qulwwb7214qdWzDLf/XWFmmtJpfxEn3mD8pynrZc6L/NB/YM/TG2qCfVzDv47Q8Wep298FuuNVaamZBvztOJRywxT2reeGv5hFq6ledlvkDOc16op8ai2JRqXNStsE+uYfwGcwyqA3nHN/mLiiATxzxex/5tvPq4rg+br+fcNtRQ36/b5G3UmWT37+KcXLs6bqdF5lGPHeas5VWMkTTOUE+pQWhPlle4Q/7hPPyd+ey2Z7tbXk3nXO/Fz88Q4CbvM/S+s29ZxH2nKdS9gwDjtQPSdzd5X06fmnDKZHsPd3TCjgXBPURSBgl8JYGPt9NyvrCh/Ep7fqsucv6tkf3Nfn12zv3NbO7jGzd59+H49VK4afh65p/RyHh9ht7X9P132vW7fXNj7rPJiyf15W3gXDHvkMCTEvhYzq/x22FPauQvMIucf0EQ/54Ln55z/x6yaz3mJu9aYmxPAiRAAiRAAiRAAiRAAiRAAk9MgJu8Jw4OTSMBEiABEiABEiABEiABEiCBawlwk3ctMbYnARIgARIgARIgARIgARIggScmwE3eEweHppEACZAACZAACZAACZAACZDAtQS4ybuWGNuTAAmQAAmQAAmQAAmQAAmQwBMT4CbviYND00iABEiABEiABEiABEiABEjgWgLc5F1LjO1JgARIgAT+HAH7aQv4Yes/R4AOkwAJkAAJ/CQC3OR5tG76HbP35XSw37I6pM/v/PHi23+rK/8+1/7FTO7nPPb39xDw834EPt6Oy8F/2Pum3K62WF4d3z7qjYvnaouPD7ep9ZrfM30+rk7/Lqr5XIO7MPqcCb+nN8SuxPtZfZQatpnbLT/ecVw92pn/zsvx8Mg6avPXZ8bWiBuO6So75orvnCPvH7hUbz8hXuW83PK7gnUtkvk+A/d7MboV7yhXb5W17nf7/NhlDcc7xPWmvOjSn+7gXvG4ecwob1+vyucja+334ecmz9nftMj7/CTp6u/3acWmTq4X5Yv/sCDTSQHOt/qP2n53Qd+y9y/cuz//GycxKaR9cioyJOcOvhjJ9zSnvJ8WY2/
3F6L3032UuvizJsx7LTjuGrnhou+eGj43f+kYPZTNsY7pFvsybtNiDNvd06VvknWvepsYXePLRq4kmd/I/V6MrsGCbR87xvP8hXp3HWv86kYjy9TxtnNNtkvnNze6VzxSfl/jk4wFX2Nc0++HteUmzwPWAn5+9Z39nkXK9iSZk9gGbN989UFdBvbIDmjbnzaP2qkvO/W437NPke8LtY0JRLoPi0+zz977GCd/K9M3kyr3uBxfgIHq9Rgcls5rYKfqHb0h2pIBLHGAa6FwWX3jYb4dX46L2N7ZF1tmfUszPY22x+X8FkVmM1dm/ijj03JSfsKsbYSwvUwK2s6fDlt+9FhcKHKd8Yvp6QyQo+fJyOFyTf1vExUea7NuZ8nhlmNdd5Gp7EqMut01bsjmAPnVdS/Lkny7YnOZ+m3UD213Ws7yVkhzDnSoHcflKNc9NmizX1MGEMsWHx0vKn/nuGp+p7oHfiDzyF3ItRKLZcGx3vwCeb2mSL/qq54P8rnpmMbUa1Abv1EzgA/GutiMfom/t+fTPOYq8/VkccXYqi0jO5Gjy8VrB3gol/tjzIqry7JA29eTfhOl85rmWZXSZLy+Gyt4u5/HtLVze5RBb2u+dN1VEhklmQAAEoBJREFUBZznfibT+rXjN3njaXOGx8/GlXMTYTN2oEgPm8xXmf/aPIQL6xEjvIa1zft7Xa6q/Lz3Py6n12OM+5LXztG7pU+RkWpD3M389nNPufJyXqRGmA0YA9EjMksN6763Gtt9bHk7qlGzOhGu2JG2Oy2nvlaDOj5hVsd48k1sRXZo64W5TdhqntT5EcfZRu1Rh1TfcTn/q2/uR1wxpysYP9+IT2OX5x4YG7Nc3/LhQjy0zns+NM7reCwLXutrGXepfvYY1TFjvvvY3aovuVZVBfV8v9za87vPucnzCLSk8UKqgxcT3tulz0sFUxKjFb9/J9gk5H6qy4tMswMnsb4w0nttkE/txQFuepJPrif5sT7RAbCz7YhVXDN73AZbQOfFXx+IWixisti0Qfzv9gHnTRnCI+SLjWoXcvVJosU+xWaNyRaqMBGE34PGyTbj4j50W7QbxDD1aYXQ/W45kHIFFxmev8Aq25fzsFpsRbflm9rhk3zud5FRF5xzYVVkexzA/9Y3290F6kHVn/Km2a2MLrFUrkW32OQcs9pydsVYaza53GSvMogc9Q3nNMajXECfxcpLfvcJ3Hyf15tYYGzFIy/6NrhUX/XcF2zNFsxn97X4l/JI75mdycaqq0bPa4GPf9cl2yLZjPt50i2+RaykXa9zA/mda5LRHpK5fLQTfPFFaZev90x38l8X3BGnYoY9kEu6Gm+Q57o8P6sMPBfG3abGrp6bnDKu2iIY26JcPM46UE7LkeKPybR76+P1eEBd3fcks8U4xa3pxvz0OqExjBikPMzK+gOlNL6b7nneVSF1gYwxQV7SD7ms5eCVkX7kaTZLDxgHKY/y2El5mli6jLBbmXkM0Cg5bn09d7Ct6vD5uOjAPMoxsRpl/oAvbey7nmrGSJe3TfJxTFcheK72Rt6Yn/U86g12zcc15uDTiJ3XML2HuR66Ui5kZZvx8Fil/G7jBONhG8HwNfGb6EsyW66kfhe4a1vffPY5sCqz82vkjiV831Vu8px9KcyrAebt0qcVB39q0D+xOMnAeZEn85HANaGTrmKHJJcXjq12cQ8G+LBoxMBNruAJDna8Pjmug8WebPmbo9qpFpwNe4QFskRRW/dm7Xb2wckoDW6UOznGvrXJqlCCPSnObSKOSRQkQZ+aR6gbj7XdkCPkCqjww2xTe6spT+LFBsznmmcuAD+1jzz1rOMg4m95ZPf1uNvcxpkvolCuL8r7vfUipvrRu1eWqm+bSe9bDyqDrTG0ugdjorJFG0Vnv1/thPOV/GIsyuzyrE1iVX0CMSm/4HqTEpufKgNtK7rDN5MSOrZiCn4nOyxnYgzN2jWLoc5q7u3KJ4hb0r0+GcnERUq1U2t+ZZfEhu7glBoMTiqDOFcZfbz5W9ZZDQ/RKV8GG4jwexyPPreFyNXRSAey6zIKr9wPxQY7vGrHxqTLxA0MjhtpDPm7FYOteypjyL3aAfV3bXT+No1y8Iclt3Lf0h95Y6Zs8ARmiQPWARFSYmfnMTckl2tf0FFjHvm3xQ/92fAlGbGWF7rHzGOMF0F+WhlAfmmT6rf3W32iP3ITfCoyUkywXZUJjOutVaywLR5LR/ApeK0k2oO1XoPL/ZFMHUPXcC/5rXZN8k25+HgSWyrfYt+TnXKT5wGpiVMHnLdLnzWp0s1+skpmTaj2mr8/SWgJVuxIfdGm0i6KJCSg6oFF9dYgdmtVBya035h/io2bT31X/jZf0Z8mXmV1JuVrFMUELVCtbUzKVnz7hlvut0lU2+OE2uUZs9SnFZjEv7fHg3lfbCXHuaDmhVTWAzH0TcyISckBlI/HaSHR4ou+jicfs6Fy1fNVPCWXZwWyUCg5ifE+yVekenxsbJmd9tUctAWljtihf3oM8Uz3XF9imXW/o7LZcfErTay1zyrvRd94/K9ytvettQdyprcJxchZ/R/6XRYuSc7+PE++b3FJzPN4EMsjhwe6ZTxoTCsH9xljGLV2Tw5dk09Wd5t8Z+omwOdIpo27tf29beLvC6TwBcccxnc8nsWYqityRlljjdFjnDvAGTjstrZro/NHv8nr/hZeyRbNwzE7cGewgBNmNjbnYxFzVaQN8nWyYF3J7GNiIKPnfLZ4dKb5oDojxtbOzmfjIGTVflgb6r1gJP1VN+ZSGxcxngebuq06EUbZUYlzzG9r31BnygeVgfkAax68Nx3Ta10h38ZZmmfqPxuoPsl59Wt4vmee3YhPkYl8Ut3eiOPK9CIz4pHf5Go/aBu85I7ZnJhdPWZu5K6GreMZfn5Gbkj5riNu8px8LTL13NulTwt+n2TSvXYicl7P8bVNuSzXZsWj3EsDAQaIyli9FZFJGQY4the9er5RJNTnjfsj/7wYTAbkemEhzJqOap/qh4VFYTFR3wuExmFLxkRenWix8CX+AwO2+tbmtS3mQdYDeXWFP2g3HoeeWsggV6qxLa64GOg2TjgORKwvbeRgsjn1vMbOjbZXsOzq9/pac3nDz9U4xLZV3+hcx331E86rLVf43WMsAEBOzd15rKTjxhi/wtfQAb71wPjB7B7Y4E03PtFvPPbFx2aNb3LD3rWiucxqv53ruAP+m3U0qdvye6xLfNuyPYkvJ9mvKgd8WdUTqHFFZj3NOrBf8Sfxwk0J9hHp1zCCttOxmP1WljC/b7IdydS+xbcK5cI5MsPjdR7NBOXYKbX+trvYVsd0XZc0FolDiRXWGrUIZVYTa19gmH1tG862Nol71bfiD+hLNsN1OQx5diPOIWdKn83T6lfN01rHp8KKP8iy6Mj+gd1VFzBeqS0yY70xWOuC3OC13gxmu4rGaks/B/tLl8unhVnq8Bm5SdC3nHCT59g1+eJpjiTg5tsp7SfBjz4uKj7lftu0yEDohT/304T2dj1hTQoOhFQIp/ZispoeX6irT92GsFKPsBCUW5
dOt1mNfN2zyTM/ZpvhXASkbZOpXHyjWGQUH91uldW5mL0e+8R/AGKr76q56vd8ybal2GBst/yRe93uwULDN969nen0fLAHBW7Pytr01QrLPf/q8CimzrzI6brteuKV7mWZibu08/FRxMtpaitbfvw3VO3thfqc5GT+MTGJHfCgQ/o4x4HuuGT2O9sUz2hkRy0PvG2yNzHxjZbHqNnc7Fn181qk8iEeu/w20xJLkJN0NaZzLshwg0v1tZyrzpGvGNMW/26L2mzx0xh47Brz2WYN/cZjoTLyXWMHfLydx9Roxn+zTIuj26J2+jjWWI3qo3H0PmpTy9MsW9pB7MMEPUq+qK6WW8BNGiabigw8zbr9bWPTX2Sudc/trDp6nUOb8YGmdCjxCNvm7FCPHefYpA2hyt8Yixu52vNzpRBtyzUp8So5n8UUm+/E3XIMY+n1v9mJ/nrN1Ph4XAf+pPHo7cQb4+DjZzP/Spyjdrex6vmf4oXzhNnlutI8WGQLg94uQ9+YH9v4Sb563lQhcF50+wMm169MXCZ0Wx9uxKfo0Bh3mRKDHeuoqrDIxHj4usHqVrYrxue4xt4yZhIjtWvGHXwVf1LeVgdvjOdazLdc4SbPsUuQ21+QSl9nGiwivYsXpvSKuX1N4fTPEtonZemTBlRLQOtbFpU+4VfdOJjUXvhLdL1P0Zv0YFENL7pt+BULPcYCP++bBlYWq2c2WdhXI45v78v5BRcWKLcVAdU9+mtTKBzb+uSjnqj8zrX+xSodzO1rGp2ZTTDW57S8t1yQvwyKhQi1x/G8b7SBI9A//Xqivvn14oR+FiZgp2hI+eV6pIBjO78ujAd/HQ8s1UONbcsF+YtcPuF48e6c/6s94xzjXzdrIR9jKH2RK4yPENuPRjFCuTFZ7GSJjHzBUPl27XCwc6z5YjT+OhyMAYyVi0Z7es7KTfAHcwbrhMqAdjKJ45go+hLLJAfjkceImxmf0hZiNuNSdKc8HfAex7Rw8I3uyu+aX2GtSmh/6VTyOzFozWa6U26n2GT5WabFI+YGjI+Pe+nvzC0/UFeqo72d1bWQm22wM9DV5rveHvMM8n4kxa9lv5qG/ldj0Re7FxxrfsAYcOH90zlIzZJvxbjcwjHla67dc3ZdSTsoMpUt2IqMMN5+XRfMYK9wrHleVXpfyV35S6EgN3hdeuiMOp1PKAo54Ev1LZr3o+hXvjIPY/r4dobxDvk1qjVt3vEamL4KDzLrPNENkoMS51o3wuY85j0HdA4D5nUe9HY6t0Eskg3tJHTZX0vu8yPW5j1f1Rz5pTogrmCL2tg3ZwPLgGWKT2GX5Yguz4+NOFZ1RWaNh57rGiL+6YyIcM7GDPz81JhBu3P8q9nXrGHSXLs3niuF33OBm7zv4f55rZcmjs9roIRHE2AMH034zvLfl9PWxLpXW50U9/bbaqcyfYLeash7JPB8BN5ftzZ5z2fvb7Ho4+20nDce0KGfsqGJTQze4fHXE7jTXPT1hlPjFxPgJu+Lgd9NHTcId0P5bYIYw29Df5Pif6fN323cLfNOm7x4irznDc5u69iQBL6YABesXwy8qftYzq+X/4Kq28ZNnpN4gs97zUVP4ApNeCwBbvIey5fSSYAESIAESIAESIAESIAESOBLCXCT96W4qYwESIAESIAESIAESIAESIAEHkuAm7zH8qV0EiABEiABEiABEiABEiABEvhSAtzkfSluKiMBEiABEiABEiABEiABEiCBxxLgJu+xfCmdBEiABEiABEiABEiABEiABL6UADd5X4qbykiABEiABEiABEiABEiABEjgsQS4yXssX0onARIgARIgARIgARIgARIggS8lwE3el+K+pOx9OR2+6QeN7/TbXUMP5ffgDrf/2C1/n2dIlRdJgARIgARIgARIgARIYEiAm7whlu+6yE1eJe8/+Hx8+6i3eE4CJEACJEACJEACJEACJDAgwE3eAMpVl/Qt1WE5HA7L6e28HP2NlVx/OS++Nfl4Oy6H1/cmWjZz1kf62fWP5fzi1+Rtnp0fX44m+590xTaH5aTXqrXW5vQqb8+avK53WRZ9Y+d6/O0a2tOugV/qW9eVbZhuvkCP2eK6lmVB2cAoe9L0vL4vfJOXyfCMBEiABEiABEiABEiABLYIcJO3RefiPdsc+WbL3jrBJgk2MLHJs81L3xzpZsi/oolv8mKT42aofJepGyXv5y3ks/UbtjN7XXeSh1/XTDYti9re5IUfokvkweatmzHQ4+2S7LWPXQQccJMHMHhIAiRAAiRAAiRAAiRAAhcIcJN3AdDmbd1owSYHN0pyzzdasvVKb/JQKm7s8LhsBtvmzTeUvpnzDVtIrP1AJtonHXDDVe+FQHvzNtzkYSM4rrLgHDeM2qNwAin9kJu8joIHJEACJEACJEACJEACJHCRADd5FxFtNKgbFNjM6FcSZ5s83Rz6Vybl09/IwYZstamTe/krmuPNj23yYjMIMlUvbEr1TVzTjbbLO7pXtO+QNqx4L/QAp6oHZOsmz79G2j/RJpDTDsd+rtvxCgmQAAmQAAmQAAmQAAmQwLJwk/eZLNjYzNRNnm6M9N/G1c0abMJw07Xa5NXNW31j547UdiAfNlvaWs8Hm7zql5zDhtU12dc1fYMaV+0NIWzcQO/8jSb0L4fc5BUgPCUBEiABEiABEiABEiCBDQLc5G3AuXzLNmz+lUl7w9U2N7pR8g2QtbM/sGLH/gbM3mxhOz+um7X2ds03W0k+Wlr7wSZPN5GHJdnr8mAjZn8YxTdpJs83eXnDJbK93dqGpMfb4cbS3xi6DSgCjrNOuMFDEiABEiABEiABEiABEiCBFQFu8lZIrrygmxb7auPx9RR/XdM3MPqVxNNyhn+Th19ZPL6961/VtE1f21Dp1zfrZk3s8vumzzeK2eLaDzd5/u/w/KuYuEGzzaf9nh3qOS7nf/BXQ9tG0f9y59iGrOf4ds6bQd2gjmzInvhZ3eTd8jbQZfGTBEiABEiABEiABEiABH47AW7y7hlhfBt2T7mURQIkQAIkQAIkQAIkQAIkQAI7CXCTtxPUrmbc5O3CxEYkQAIkQAIkQAIkQAIkQAKPI8BN3uPYUjIJkAAJkAAJkAAJkAAJkAAJfDkBbvK+HDkVkgAJkAAJkAAJkAAJkAAJkMDjCHCT9zi2lEwCJEACJEACJEACJEACJEACX06Am7wvR06FJEACJEACJEACJEACJEACJPA4AtzkPY4tJZMACZAACZAACZAACZAACZDAlxPgJu/LkVMhCZAACZAACZAACZAACZAACTyOADd5j2NLySRwNQH54ffD6/vV/diBBL6VwL/TcjicFmbut0aByv8EgY/l/HJYjm8ff8Lbn+ykzueH43L+T2Imnz/ZG9r+Ewn8H7VfrZzrqyScAAAAAElFTkSuQmCC) ###Code valor = int(input("Digite o quanto deseja sacar ")) if 600 >= valor >= 10: nota100 = 0 nota50 = 0 nota10 = 0 nota5 = 0 nota1 = 
0 while(valor > 0): if valor >= 100: nota100 +=1 valor = valor - 100 elif 50 < valor < 100: nota50 +=1 valor = valor - 50 elif 10 < valor < 50: nota10 +=1 valor = valor - 10 print("Teste") elif 5 < valor < 10: nota5 +=1 valor = valor - 5 elif valor < 5: nota1 +=1 valor = valor - 1 print("Notas a serem retirada {0} de 100 reais {1} de 50 reais {2} de 10 reais {3} de 5 reaisde de {4} 1 reais".format(nota100,nota50,nota10,nota5,nota1)) else: print("O valor que deseja sacar e muito alto ou muito baixo") ###Output _____no_output_____ ###Markdown Faça um Programa que peça um número inteiro e determine se ele é par ou impar. Dica: utilize o operador módulo (resto da divisão). ###Code def imparPar(a): calc = a % 2 if calc == 0: return print("Par") else: return print("Impar") a = int(input("Digite qualquer valor ")) imparPar(a) ###Output _____no_output_____ ###Markdown Faça um Programa que peça um número e informe se o número é inteiro ou decimal. Dica: utilize uma função de arredondamento. ###Code a = float(input("Digite qualquer valor ")) if round(a) == a: print ("Esse numero é inteiro") else: print ("Esse numero é um decimal") ###Output Digite qualquer valor 10.5 Esse numero é um decimal ###Markdown ![image.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAA3QAAABdCAYAAADt2yTYAAAgAElEQVR4Ae2dW3IjO85u/zlpXNZUFJ6JPIt+q/DDGcSOqDnkCYIE+QHJTMmyy5al1RG7nRcSlwWQSYhZ0v8tP/y///3vfwv/wYAcIAfIAXKAHCAHyAFy4FFy4IeX16h/MgL/92T+4i4EIAABCEAAAhCAAAQgAIGHIUBB9zChxBEIQAACEIAABCAAAQhA4NkIUNA9W8TxFwIQgAAEIAABCEAAAhB4GAIUdA8TShy5RwLvr4fl8Pp+j6ZhEwQehgDj7N+G0vgejsv5v7/L+aX8/bf6kA4BCPwsgb9vx+VwOCynP8tSxn/5y//um8BNBV2d3A8W7MPhtEyXq/+dl6M9AC4BeF9OB5clf1/Oy99LXe/8fuTkvm3wunNffty8P6flcCknLOdu5VsWKh6jw2Vd1wAp9lyy+Ro5tPkHBGq8Zw8pe5A9VNzKHPvAi/CL42w71v8gsfZFfmqO2hf97+6+LycbD+1Z/U/GxhfGqDwrttYln4H007FT/dc8Dz/j67Rvekb+C8ZTvVsXNWdunOO+MFce67lR2Ja1lMf81nXVJHZfyHwi/akvfbygK8GQHQcrWuS80vQkuGYRUR8Ss4XVb4/MjI0N+hWv3+7pN9j/jx9gJS7HN/8IoebvOP8G/1BxNwQe68FcsN642LmbiDyQIboofyC3Pu9KnXO/ZB3wqAvGH82duk4Lz0TjfM0a7/PZMZegOXPjHPeFufJ4z4059U9f/ULmn7blwQR8vKDLACbBscXx6+lDO3TbE3mdSMrWr/0XiiG9J58gmE1jt2Vb9rIseZIsff0TyHZ8Lq/Nmf7T8m7t63mY3DKXsoyavW7X5bei4WVsay/905AqX+22ycJsOC6n12MvqouOY5NR7alyOy/3pck+vZWd02G/2ei+dR+U6zWvDEr717PtdJnte2yLLo1Tt7MbEQ86t3p52H0YhVjSN5gVfz/24Lk4OTddp54b45UEi0kqDmss2wPoqhjEOPZcML3H5Wi7iS3n7VqNafhkOly/wv/QXsZTjMQ422r/yXET8iJ8CrzBZFmWEevJ+NiLhb9G0nOx9e/5GHX2uWFQCEc19ufx1kGYr6Ks3Zh2ew7L6XU9J/WPHsprMaajyra2bXzr9TofeA5s2BE80ZPYPjDYygHt3o+bjc48FJpRR59b07i3OHemsc+wq173efH0J+kVtqNPN3J9IO3r/FnHxvY4LyJkTizxcJuN14WxtcW0sQjPI7F2e05M84X4U/Ki52GRpbr7GNDxtZ5Lp3rFrnw4b//JGInddQwMxnN96flzzTwTYreVe9nbdm6xO9Znr3PVOPg1a74hW/W3XPib8yw8y/fkpJzYMNsvx3HnV1teeG6Py+PIbE66JFbXPK9K/Pp84OsYm0M8Z9RPn+M2xl+xTPRfnSvDo3jUY5ifGzv6o4R+Ns3TFvNze/1xtY7p+uNbRcbsqnVhUa+2jnETn8NpntjQ253JBx9l3vO7CWoc6tuAGu9kV9ZbztXWq8d5ytmZ3Du89umCbrXwdfD21wfXnuc1mcJDpTevgeuDOcj0wVwb9wkntGkTTpgsu/B64Pb6ZU2klgjVNk+ilvB2T5Lf+8tfG6BpshvXmjy5b/fcVpPf+AWf2uBr/UKfVRGpbJu+IN8nynpPC8I5c3FODodPY/AYsz22wac1CxFfDyUuIedMThvUqi/FJ9i4Er6+cLF90+ucNA7l2K97kR5y6GIM0ocBq1zQSazG2PUNOyrTPq6KDMm1icdWhHj7IWfdsl7R3Gr2Jr+Czz6RprispRe5Y94osQ6+uQ8rJt6n2uW+XoqF2ag55B+qNF+Mg+tsDz5ntLa9cej2Txi5rJX965gGfsrWj72QNZmz8a1M/Hgnt2YO7c4p1b8QH7FtLS7lpPGsdvX52zoVuTLPikxttx2bPJ+I3lmsPSZrg/tix2NuOpttl3LLudQFZOOvc9SOPu9r+tx/y5kxt6j/+3Oi5JbpH7kQ+kk8imndvzRmV3o9VoHt1Ln6wYv7E9rnGLmNOZYzuZM8bDYF/4K+0sd11HVCYO45sRqnNS+VgS+IPUdmFtZF5dAXcsLnnKZzU7bZPx8XrvOq8RE4eM+9vxKb3CzlRr7txdNgU2Pl5+ar5UPSUeQKD4/N7HlaZWk8q6zex/x19hdy5QO5HH1reSq+zPWvCNkFi9tMd4uVs1jns/vV9Asz/bBqM6dCgSwFemCW1tEhf6LeuXcXmE/nA42n2JWfRzo+p8rXcjwugYnKCf5Nhd7txc8VdCn
oxcsCyQbY5N6cQg1231EKnzDlHhIck79fUFnvEihPmCyunGc52t6CPHQU3zwZVv0msi1h/NNy/9ttSZNOGlg+cRV9OkkXNXoeknJlQ9VRJ7ykL/kdfAtyhHm47ic1fj5Bu90jBwY/e6g1/8PEVEQpdxetf/v95IcuPJJP2l2Z6fXpcYr7tI3p8sk02h9Z3hKDS0xFb/a526V6px7Ei9nnLDe2bp96SWy1fZIVeGi7LNPOt/Jtm0mOrZ4H3WGMCZ+eW82gfN7tlD79Wjwo+vzhW+4MW7btr3OJxDTr13M9DvKrbX1+CotzZbpjR3Rl40wY5FjaufixkiB97d6wa3BKnTb9Te0msR0sRG+SVxfaksdZbGnvC61yT3zezq0sZPip/XMrOxf547wx3bQlx16KsUsxUR56PDWuXtRYRQbpw4KVjB07JX4mvz8n49y6ElkuTJmVmO7pk5gEoTvjI+vp/SS/+rV0kGJ3vY8iW/VPYrWSGUzIcvbGaehYZpnx1k2+pTble+Xc7ouuxGHEbltHzDFtp8db8SxGyL1sr5xHPZdyeZKXk5hUJKJ/xkjXMO1+scWeI5mf+LKKt+jv/af6hJv4P23qF0X2aq2WY+p9/G/W0c+rHWOelnkrjF2x1/yXD6j2ctP0b7Ev1zfkrJi7I/f/9/aCzpxWIC3B/ZOtq6FksAmaJYu/Tiave+wkkSWzF1Dlrz4ckvgxobQbO4kbBnxPyixwnF89qKzLmoPry3JsIMsnMTogqj/Ky2Okg8In2rGQcV1myhbz4Zoc5QEjejIjYWs+aIzseNgjCuph71vlxw8AdPJzGZN2npsr4XLBfJcHkNwKhzu+BZZhwhE2RViSMfrVXMg+WpxTn/WCVOOhcpxL8GKcrGIuY220Gkd77XusavPh19rnIVCOzMeWw33sqi8jvwuTIn9eROkDoshX/uN478G4PZ7EXjkMvoaCa9v+nAe79iS2Yy4Y/lRzch54Tu/YIX6EQ41HG7P2gY3lgOaV6gwS2smejW0BpfJLr01/PZdGLozXB7Oecb5im8dTNjvp11jFWA8dJsLYqG2N/zX6tICUBVxmMWypuvN8MRaEGqPI2fpsfcjWWUzk21xar+vzZ+Rj7ywHEzkl3iKr5JXJaHkwfIo+iNCdD5f29KX8uWKeGbxT35yzwbh2kvJo18et8aa5k+StnwM7Nqqcma2ra5Xj+NBWGlySle+vxkUZI7O5acR7e5ypXWnuWelpOuz6kD1iWmVdn8stT3vepLlqS7+gG4c7ujM/mQ/2cigy28mFzGMYVZ+rOg635omVjSKkHGYdvX31e4zxNl+2dVqfS3r7IuyTz68erx05QV/y5c5PbyvoLEA+CIeHJYlWwemDdbSLRxXsdLJYVeMyaLeg5+Qp5z2IUbOdJTmWRN4+9Q2DJPWbSF4tNGMbnYzKnfl5mWB6YjcBeh5sajLGpKQy9dgH+JjYhpwcD2EeHWhnub3oSYyUrfowFZsv9liI/NxG9Kmu0uwafdYmLKayAjkXXXa125eLCOWTbE8yYgzW48v0pD7jgdRss/uTvmKfeDEOL90fLevRXvt0b/i1zrssNp+PuG3n4WhTe+t50B3mE4lFsncsnGub+XjKltbzok+Ly3G+bf8qhpv2pEVD+2Q3L4ibJfJKmerW47kP8eoOg2kuTnKvCxTm5dpWrsqiZcSiChk8d+zamEt7EerzexFZWO+N+XxffN7OrcLYP0grSoS59K8epf/P95VRtqWfJ64qMsvrfVqjcu489Fhk2HjyNmkujQxasbj5wdmOnRIzHb9ixvZh9rGf7+mL4oZOiVVsIh/A7eVe7tTOE9uhL7ffkd39SvOAXu/irpTT2+8fbNm7db1Ly7YlDr1dPpB2Mcd0bGl8NW7apgiWe9keOY96LuVyikFR023e0Z/9bOebus0+mVPlfI99lHdDLhRfdF7svqmfzfjcNvsojO1WP9f45U7+fDgt70V+n1MklpMuly4NZjtyun2XpN3f/Y8XdJJQu+5c2y4ssrLEODAsGL1AnNwrD52QXDVh+gMriy/nZqc/fFN7TeK2eOqLuyuCXgbVSMSsfJ3M1t4fnOZHG8iBZfXb5e4OXJMRfeuFc7J/yJlw7cyzD/V8bXfTucc2+NQmT/d9pkZiYXnQ21Z7LS7i06yNM5uJv7iwy51El90S+zZ5yKLF+iQZIwbpYWLtlOkoxP0TK8/LobtwkQdBsa9PitmZcj6Lu+rJfXbaCwuT/JFXlROTEsfgm/uwYuK+Vrvc18GjPYj6QlvHn/qic0A9dv01R3w8ZR713PT1B6HK/XhM61hVe9yHma/qj1GX+Bc7vM+OHVOX9hhU/5xPYL0jy2Oj83np63JqLrbcK7nUba/6av89uzILOde8aePR7Zma3MaF26bxDf6anZ4bMe7q56p4XyndYRp0xDjO5juzOY2nOM9VLv35aG3XeTKT7cyqby1Wge3KMbswk1XZ5hglO/p8P5Nb+85itKkvcSntQv/Neab4GvVdMy9YG/Vhxtru78hWm32OTXIGnSvljA4XjmJeWuMwNje6q83WZDY2CtNyfcTceLUYbI+z6mOdJ7X/TIfLjlx0PH80l+Mzs8qtY2lP/5zTpm7jN+bGkM8p9sqpHHs++2ZBPw/zyMTWkofWpo1rnyc9f5tN4fnk42Xq3gXmLjfNtW532STq69a2Dvf5J67fJ8pT/n1snE/k3fmlDxd0NfHyTpwHXrydJFtPKGkWB0W4YSeq7/j2Ht/lbolVdwXdhjawbKv4uJz/lG919Htr+eXK0HFczm/bn1iGQZISZSbZBthmolc7NVE1gXMSDxtPi33jkUx2gWsbrMbk9d12Cev9pC/Zr74NXWVSSMxnjvqAL8xfTsvpZQzAISuxLXLU1gsxyg9EY+uvAzjj4FOdqHpuFF194lg7EeS53J32q4VZkC+69Vs/G6ce82BvXaCNWGoey+Sc+pgnds3HpOT6R/gWQUGOPwDXrPqVrfaBRfJrZn8XWA9GzuTXpTeYhDEcx4fPL3U8yDew5lgIK/smQ4+9XC8PER0nyWw7tfv2Db81HiOe5faG/TMmXe963IxcVV/T+A4LJNfrMfXzmY0Tr7ot9QOqwCDkgOTeRIxdkvbHt/JtoG6TjJnZA9zGpPqb5o8Qm8winas/Hucte8t1tdli636KzWGc6zMlzaGzWGfdom/9KXmdXy2fk+0jL+SDxJU+jf3k+ThlI36Webq0Ed2qN+Z7dqyea/u+MNsZj4HBXGSMkeWVx6gVvj6n+7MizBlXzjPKUjmF3NswMDGzVipDn396XWVn/SUG2tZ99DGl97bkOAfhsuHBav66Pi4jFiY75LeP/zSelUdb6O/P4Z7XVZ4+Q1ZrGNFf56Bhn+bmNbmsaxh9buzq3wA81d1iPr5Ne9i6l0NFVrB/KxeKEOExYuo8yzPi2nliw7GkY4/5mA+qrMox+bz1HN1Qr7HQeetDz+MN2fd2+cMF3b05UJLx1L+W/O6s+wcG1YEWBus/0HK7yGpfL1puF0RPCNxA4GfHx+pBeoMHuYs96K9acO
WenH85gbbAql+f/eXS9wWWRZkUUvuNP3H3zyl8Iv4JSY/V9Sdj/1gk8eZaAt+Vc0+3jr42AL+r3e8v6D70CdPvCk63Vj9dKZ/C3fXijoKux42D7yFwR+PjSwq68Ilp3j34HqRo2SDwXQusmfrvKujyPy+Y2fJk1/xT/vv9IPXJAvIs7n7jfGM5ftdry2cJ+u1+/vKCrhYPZSueHaHbk4CeEIAABCAAgfGKsrwKBxYIQOCBCbCOfpTg/vKC7lHCgB8QgAAEIAABCEAAAhCAAAQ+ToCC7uPM6AEBCEAAAhCAAAQgAAEIQOAuCFDQ3UUYMAICEIAABCAAAQhAAAIQgMDHCVDQfZwZPSAAAQhAAAIQgAAEIAABCNwFAQq6uwgDRkAAAhCAAAQgAAEIQAACEPg4AQq6jzOjBwQgYD8VkH/w8yex/Ozvz/2k5+iGAAQgAAEIQOC5CVDQfUn8v+a31+x3QD71w7H3vqgdX49737+l94mk0ELnG3836hMW39C1xPHOvta8sOY3dG6IJV0gAAEIQAACEPjtBCjoviSCFHTXYXxfTod72tW5zuoPtdKC7kMdaQwBCEAAAhCAAAQgAIGPE7iPgs4WwYel/ED46kfC/zsvx3b9oLtX2qdfb4XV2+hzfPu7vL+6bC8m5u06PpWtP1perr+cltOLy/NdihvlFYVd13E5vR4X9XHYfViKH1v/6+2abb2tstsqpKyNc2n2OE/z97yclZ/I7HqWZbHdRY/TwbmoxaWYE27/r8TouByNZdPfWeQ8qHw9P1Tv4HcI7FRzOR72HZfzW4njeSlEC7shr8XxT+stvh62+JWm0u70elp628avRk58aHE6uZ5F2Ry2d5oCH4nZIrI1XzOEq9o1BuZHi5fvfAV/GtN2r3I8jxi/vkfm/w1jer4e1uyPL0eZAz7pV87tYQJHEIAABCAAAQhA4GEI/HxBZ4uuUQDE1w7LQnfc64vvtoCuC+K26LOFZTvWgqQvGuu9unj3hWJbFKsNeuyFQJJ3nd5m9568mR9NVy1A1L7DMgqAkX+zdtXHWiR4H1tEux+jeytGpDjQRXsrIIK/XtjYvdZPj1uRNH/9TeIZfPeiaMRa88COvaiw4ke5eB/NA3Uwy4450nPKutR71d8r+bVizIvCWqwIF42n859w9f61OHSf1A9h1/LS+5hO52OyZ/1r8eqFbC2EZ+0in9CuyHYffGxIQdcL2RZbzwG1T+PqhXDIL/fD88j1XfLL++20U5ocQwACEIAABCAAgUch8PMFXSapi0Y91nb5ui3iyiK6LkZ9oVsXjG1xHXZjUrtwTxVNdqy8oAkL2iwvLr6DRLVdj0sjOY+FRluM+6JVBM7amf+dSWtsi+zBoovI18WGupgffYKu3K8LjDs3crlQHgW69Z8VFK2H2BELuiExFAflsvQZrZo9XhikdsGnlj9WYNzKT7l0e2p+1MKlGJDP1VphpJeV3eq6FvtbsvP1et7HSpeZr4s93Z/aWONSOHoBF+K8O050hzTrvdbeYt81/ncHOYAABCAAAQhAAAIPReAuCjpbDPbX8carc6sFe0O/ut4X0WkR2K/XjmPxntqlgm7LnlwwjAVtlieL4CbbXxe0v624WPnRF8xVni62hy7Nv3W77qMVJP6Ko/+dFFCJUfCx25P5+a6XF3vVjuDjpPgMC/2sd4dT0a4x8cLImGje2LHbNDit2IlfnZc1lzhey8/aiU71q+u5UHSsdE3iVOwz2S2WvUCtsgP7vis9GFT2WvhoMaXthIFdllzu/tT2yjVylD6Tgi7bWgvBmd5r7L3Wf/WRYwhAAAIQgAAEIPA4BH6+oMsLYl006rEyz9e7jLQo1MV1KNpqu1EwyXmX1RSqLj2eLFS90AiFywfkaTEVF8g37tD1Rb/CS8eJkS3SvV/yN9gk/UKfwCXp0l0m6W+t9jgFMaNY0IIiNEkn2b5tzrU46Dt0ziHJC6fZDz3v/FJehh060WmCh39BTzoZvl/Xfr0rKDkfZGdbRX73p3Yo+eC7ciE3NM4hH7JsVZzvzc/HmPW+Yp9f4i8EIAABCEAAAhB4IgJ3VtDVRVz/dzq2OB67FX0Badf90/vWx3aE0iJQF9eTgm6qJxQWyZ60oB2L6qRXF7R78qxd8qMVESbbX+8M/qbsVPmtXV30loWuy/YvBZGdJBcTZO/7GxbtwjYWTFWvL/RdTf0ri2/pb/fUj1bweHyCXmPW/DAZKT9mRdiOj5ZT3sdscGZX8mu2eqFh8jxuRZ7G82o9w6fOL/EqzINO3xENvvbedrD2daInFJulm8TM+HifGOd1jLxdfAV3livVjzyG2ocYgdmQqZ6ZX1f4r304hgAEIAABCEAAAo9C4OcLOl+82+tyx+X8p3z7oRQebZGtryoa/On1tChMi+Cx6Gzt5Jv8xu5aK2pm9sgCvdhgi9NZIamL4A/4dyrfzukL2PSaoS/eZ4lnC9r2uuFZFvrhFT35cpksw/xwf+UbIHUnq/QZ/Pz1P49TW9w3G94Tp6FPioMUG99Bqq/j5TxQ+V5wNamaB5o3Q2k9knb2TZSds8h+PS/nF5HfiqNuk3xTYxAv7Y5v5ZseG5fAQfIq6Rn8y7c+vkcbRJG280Kx3hbZ09ctXUhsN3Le75e/tc24JzELOXlaSq554R5yI+S/jpOqZ+Tr2OFb6x22+Cuawya1d92uj5VVjuV+nEMAAhCAAAQgAIHfT+AOCrqfgJgXrT9hAzp/jEAotH7ACis05rtNP2ANKiEAAQhAAAIQgAAEfjEBCrpfHDxMv5HADxR0YVdKXoW90QO6QQACEIAABCAAAQhAwAg8aUFH9CEAAQhAAAIQgAAEIAABCPx+AhR0vz+GeAABCEAAAhCAAAQgAAEIPCkBCronDTxuQwACEIAABCAAAQhAAAK/nwAF3e+PIR5AAAIQgAAEIAABCEAAAk9KgILuSQOP2xCAAAQgAAEIQAACEIDA7ydAQff7Y4gHEIAABCAAAQhAAAIQgMCTEqCg+7WBr7+l139E+df6geEQgAAEIAABCEAAAhCAwK0EKOhuJffT/cpvqb2+/7QV6IcABCAAAQhAAAIQgAAEfpAABd0Pwkc1BCAAAQhAAAIQgAAEIACBzxB4uoLu79txORwO7b/jcv7vAr7/zsvxcFrOvV/sM5fXXod8qbpOfyY6yg6b2/FyXv5ak9pvtH9fToeozyUVvf11yyLr5bycX92v0/Judtfz3XYucCm6vP9h7P6ZnONyfCn3Tsu7tZvb1EVxAAEIQAACEIAABCAAAQh8C4HnKuisiCpFSf3feymALr226IVRa2cFnBdgm/JqYbYpu8mshZu2/URBdzgsQZ4VX8uyqI12fOiF4PC/6u2FXyvirNgNtn5LTqIEAhCAAAQgAAEIQAACELiSwHMVdAmKFWdXFXS6I7W/a1aLuFQgJb1WZHlRWO71ouszBV0sVGNx1u51Pc0gK9ZGv2Gm+KjF3WjAEQQgAAEIQAACEIAABCBwBwSerKCrBVN/1bG8YnhVQadFjxQ7y5a8XJjFSIddvnKrF1a5n+pay+hFW
ynUpEAsO2/9XpfdCkdpN/R6USmvXPqrnto/msAZBCAAAQhAAAIQgAAEIPDDBJ6qoMuF1E07dLJjtS0vF2YpyqkA29yhE11JwlJ096Itydst6PxVzCKw79iVwtFf2Sw3pJCkoMvoOYcABCAAAQhAAAIQgMDdEHjigq4WMdft0I0dLy3i9LgWQb7jd6GgsyLJC6jaVl/VdJtMvu+UpZS5vaBzvcsy/g1dLOiCXgq6RJ5TCEAAAhCAAAQgAAEI3A+BpyroetFl3+Z4Wt5lZyvsaml8WkFz0m+Q7PdbUbiSd6GgK/1td6y94rh6DbJeP76dP/Qtl/WbMmuh1nfvtCAzf0/Lyb6x8hBe06xFnOt9X84vrfDT/ua37N51DhxAAAIQgAAEIAABCEAAAj9B4MkKuhsQrwqaG2TcSxcpYD9j0t+30+Wfe/iMAvpCAAIQgAAEIAABCEAAAlcRoKC7hImCLhH6u5xf/Xfz0i1OIQABCEAAAhCAAAQgAIFvJUBB9624UQYBCEAAAhCAAAQgAAEIQODrCFDQfR1LJEEAAhCAAAQgAAEIQAACEPhWAhR034obZRCAAAQgAAEIQAACEIAABL6OAAXd17FEEgQgAAEIQAACEIAABCAAgW8lQEH3rbhRBgEIQAACEIAABCAAAQhA4OsIUNB9HUskQQACEIAABCAAAQhAAAIQ+FYCFHTfihtlEIAABCAAAQhAAAIQgAAEvo7AcxV0X/TD2rfj/7ucXw7L6U+R8L6cDsf2A916/Xbpv7Hn37fjcnh9r6b/eHx+I0FshgAEIAABCEAAAhB4ZgIUdD8WfS3ofsyIH1ccCroftwYDIAABCEAAAhCAAAQg8LsIPGVBd349LIdD+e+0tL2h7aj9d16Oh9NyLjtJ1sd31VqXsqtk1w/L4eW8/HVJer3r8Z24+nfI8+vL8v56WI5vXcoSCh6z5Qrbt9qlHbAg2+0uf63daTm9uK7oc7HRfVZbrV9jcXo9BR6myzn5zqQyKrt0bl9jPmITi9+5/thG3eEYAhCAAAQgAAEIQAACj0rg+Qq6wyiYrDDw1/22IuzFUWtnhYkXbu1efYWyFWnWLhYXpU8tfEbhtvnKpRc1Zk9un2x3O4LtRfdGuyB7icWiymiF1tqv1sf1Bv+r3tDH25m8UTwr91BUdvvUby8wa7FcC8MmK+hXBziGAAQgAAEIQAACEIDAcxB4woJuFBaLFQRyPou5tdEdKinWegHSOvbCRdoEmVqoaBu5rvrUPj0uMrWd6thrl+wNxZTK6H7Ui6NdtVN35UpxZudJdt9tU7nteMhLRaXI0KKv6yj/8tD1NVnabqKKSxCAAAQgAAEIQAACEHhoAs9X0PmuUQlrLn5moV61GYWYFSZb8qxfezWxt5HCbfNLUUYbLXzq64xafA47gtmpGAs7gVIwlT5BvgrZbFdt89ct+9/X9yqr+xl31ZZl0k93PH2XVPX2Y/WzytGCctMH9YdjCEAAAhCAAAQgAAEIPCiB5yvo+r9na0WHns+CbIWZ7NDpeS86WsdyPpE3io5RrIVCqxU89XXFUWiV3eRrAiAAAAZWSURBVCe/tio+1Q61265L4aftkr2bu1up3dx+VZoLuHhu/aXYG/KGryYt6G2F3J/zcpS+7NAl7pxCAAIQgAAEIAABCDw1gScs6EaRtFnQaEpYQTT+TVooTtq98O/Gym5TKqpKn6v/DV3R3eTG4rAUOMMOs10KnWHyTjsrOL04re36TwYMAePLSdq1VQHW9aquehxYtHaBme1MHvpPFajs/Jqm+Sg+F3OsvRfNgb86wDEEIAABCEAAAhCAAASeg8DzFXQv56V/y2UvTNb/NquHvxVnp/7NjrL7VRpZkZRfrfTCI1/XHTp/DbEUWHq9CG33/FVEN6YXekVussPblL877bxIKv3tmzuzjtI/7JSlXbT279j0dcuuurM4Luc3/ZbLVjzat1yelneV732KHXrd7fBvxOxKaqxc/3j9sujwYlUacwgBCEAAAhCAAAQgAIEHJvBcBd0tgWwF3fgK/VuEPF+fq3Y/vxjL37dT+6H2LxaMOAhAAAIQgAAEIAABCNwpAQq6S4GhoLtEqN4Pu4LpN/muk/DJVn+X86v8DuAnpdEdAhCAAAQgAAEIQAACv4EABd1viBI2QgACEIAABCAAAQhAAAIQmBCgoJtA4RIEIAABCEAAAhCAAAQgAIHfQICC7jdECRshAAEIQAACEIAABCAAAQhMCFDQTaBwCQIQgAAEIAABCEAAAhCAwG8gQEH3G6KEjRCAAAQgAAEIQAACEIAABCYEKOgmULgEAQhAAAIQgAAEIAABCEDgNxCgoPsNUcJGCEAAAhCAAAQgAAEIQAACEwIUdBMo4dKVv0NXfkj7+PY3dF2f/F3OL4fl9Gd95yev/H07LoeXG3/D7Uo+1/p3HcdrpdEOAhCAAAQgAAEIQAACj02Agu6L4vubC5FPFXRfxM/F/GaO7gN/IQABCEAAAhCAAAQg8F0EnrKgK0XD4VD/u7hbpjtQf07L4eW0nF68/3E5/7csVhA1eX2XrrRt18bul+zQmdzjcjRZp+W9RNyuuex2bZYJW+3MvrHTZna9muS1lG7fcTm96g5dtdFtj3zel5P7dFCb23HTf+58T8u72NrZLJHZ4VA5FiMp6Nah4goEIAABCEAAAhCAAAS2CDxdQReKHCs2dgqnQk3btCKoFjmt8GkFUyhErI8XKdouF3T6+mUtlrzosaJz+hrkTrtrC7pWZAU/mi7T60Wg+Rv98AKvc7zExws/kyWFn19vRdxhxnEra7kOAQhAAAIQgAAEIAABCBiBJyvopKC6NgFWBcsoAHtRk3aW7LoWY73QEv2h6EuFY7Et33d71Z7cruupjdU+725/U7uln9di0Yu2Zcn2Dt+7PLWnyEmFmheooTDuneuB2hkK49SOUwhAAAIQgAAEIAABCEAgEniygi4XLBHG9CwXLFKobRUidr2/mqivUO4USKkYWpZiq++OiWV77XphVturfSKhviIqfuSCzl+39L9WlK30Nok7fEJxpu1aoejy7S87dBoijiEAAQhAAAIQgAAEIHAVgScr6KSgugqP75TJq4JSCGnBpMWLXo9qRH8ocJKe0snuTwq6ab/WLhV04fVJNSS1iwXdRGe354odOuGjTHSHzvhIO+UV+qjNHEMAAhCAAAQgAAEIQAACKwJPVtDVL93wf6+1WTQpJi2gUiG0WYikYswKKytgdgo625EbP30w+qgx5fjCv6Hru3q1Xfc1iNGdymqTf3GL6fV/Q2d++L/z0z7tS02KTzt8QnEm7WJBF+0MfYLNnEAAAhCAAAQgAAEIQAACmcDTFXT+78L8dT//92KbhYQUImMnq2LUgs6OD6Mgs7b9tUvf2dor6HxXTl/RzOFq563Qqj647HrPCjLTe1rO5fflvDjLouwVyqrr9HZejn3HrBV4zfb+b+BK/5neHT6BqbZrRWm3Xwrl0acUehu7hdkXziEAAQhAAAIQgAAEIPCkBJ6woHvSSOM2BCAAAQhAAAIQgAAEIPBwBCjoHi6k
OAQBCEAAAhCAAAQgAAEIPAsBCrpniTR+QgACEIAABCAAAQhAAAIPR4CC7uFCikMQgAAEIAABCEAAAhCAwLMQoKB7lkjjJwQgAAEIQAACEIAABCDwcAQo6B4upDgEAQhAAAIQgAAEIAABCDwLAQq6Z4k0fkIAAhCAAAQgAAEIQAACD0fg/wP+V0snhIph2AAAAABJRU5ErkJggg==) ###Code valor1 = float(input("Digite um valor ")) valor2 = float(input("Digite um valor ")) operacao = str(input("Digite qual operação você dejesa fazer A-Multiplicação B-Soma C-Subtração D-Divisão ")).upper() if operacao == "A" or operacao == "*": calc = valor1 * valor2 elif operacao == "B" or operacao == "+": calc = valor1 + valor2 elif operacao == "C" or operacao == "-": calc = valor1 - valor2 elif operacao == "D" or operacao == "/": if valor2 != 0: calc = valor1 / valor2 else: print("não é possivel dividir por zero") else: print("você digitou alguma operação invalida") print(" ") print("Escolha uma das opções a baixo") print("A) Par ou Ímpar") print("B) Postivo ou Negativo") letra = str(input("C)Inteiro ou decimal ")).upper() print("O valor da operação é {0}".format(calc)) if letra == "A": if (calc % 2) == 0: print("O valor é par") else: print("O valor é impar") elif letra == "B": if calc > 0: print("O resultado da operação é positivo") elif calc = 0: print("O resultado da operação é neutro") else: print("O resultado da operação é negativa") elif letra == "C": if round(calc) == calc: print ("Esse numero é inteiro") else: print ("Esse numero é um decimal") ###Output Digite um valor 10 Digite um valor 10 Digite qual operação você dejesa fazer A-Multiplicação B-Soma C-Subtração D-Divisão - Escolha uma das opções a baixo A) Par ou Ímpar B) Postivo ou Negativo C)Inteiro ou decimal b O valor da operação é 0.0 O resultado da operação é negativa ###Markdown ![image.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAA3IAAACWCAYAAACb6rroAAAgAElEQVR4Ae2dX27zvI7GZ09ZV7OMuTUKnFXMVbqGWUDR61nBC3QPGpAipYe05DhtkqbNc4DvxJYl/vmRksXYb/pf5an+91lOL4dyONT/lvfq/MfroRzfPtck/p3K8bCUD7nyvpTDy6l4r8+3Yzm86pWixweQIX1Nx8HHl6pbdaJc16ptbpvp9Gv4udFP/Kh6l3IC+3C4HoN9y9upHJtfkU9gMtKLfiQ+gSn2Kx9lQTYwro+RPsdy+reynA0kQAIkQAIkQAIkQAIkQAKllP8iBRIgARIgARIgARIgARIgARIggd9FgIXc74oXrSUBEiABEiABEiABEiABEiABPpFjDpAACZAACZAACZAACZAACZDAbyPAJ3K/LWK0lwRIgARIgARIgARIgARI4OkJsJB7+hQgABIgARIgARIgARIgARIggd9GgIXcb4sY7SUBEiABEiABEiABEiABEnh6Aizknj4FCIAESIAESIAESIAESIAESOC3EWAh99siRntJgARIgARIgARIgARIgASengALuadPAQIgARIgARIgASfwUZbDoRxeTuXz36kc5dMv8ZMESIAESOChCFxcyH28HspBFnn9bykf4E68dijHt3PLv90wmjyTyxsHUP3bhy1nbhzzz7cj5O2hHF4xcx+bsdr+i+x9bJq/zzqZI+fX0t/g12c5vRzK8n4nW6UIOdxR3x3cuksuvC81394XXTP/Ru7dIThUQQIkQAI/QOCyQk4WdthQ6ia8nX/lJl0Lubvd2H8AMFVuEZCcOZbTv60+17l2lw3QdUxdSWEht0LyVA2/OXd/MlDCbXmN96yftOcaupkL16BIGSRAAiTwdwhcVshlv/UbO38qJ0XZpZvyc4VcemLXikYxBK+5DaUU+xbRnxoOi0T9pjaN8SdCMv7lVE7tyeNSPuybXZE5/HbS5J3aUx/goPKO+s2wvqoipqONrlfZgk+vS+ep8o/l+CJPLM1ulIHfOl9gf3xKBTanOPd+x3J6q3zkWWvcVKRCHpg1m4Pc2j/GKbY1XpmNM5D2GYeBrmEuYL+tOGZdEDfl8FKf+NX8AD9elrL4UwiT354FWqz0ubUe176VicUD/bP87/GQfIC4BebQjj6azpDf7TrY7a9W6bXafjQfK0fIVekb5qYLrOM6dxkDfu2cZ1N/XY18bvpVc9VzLcxh5DvNK1grSmTUfStll51p/NSWnF9vp/q6W2A9isvcvo5rIy7DOQCxHsa5SpZ54Iwrl2wf6B3FC/J3D5fuz+hIbJa4Qc55N9AT5o9fl88hh94Bfe22Zn+lP7LLazrklfFo6+rrUu8ZMA8xv6rOqs+Zh/VyltfdBfPRY4brxYbclL89/wecURePSYAESIAEbkLgW4Wc3lh8wxFujvZ+/VmT602u3wxwgN0U/fVMle83m3rNx6kdssEIfWxj5fahaLtJTzfUrTDyGxoWT3DzdZnuO260Xa/eUN1u3yD46z4m38bp5gBl+KbX5Lu/m37aDbz23bBf+3VfULe7pZ+Bqckz32RM3sRUvTGuKtt5BOFVnvsVbUAZsd/F8cbNlGyMcMOO9mzFccQB4wb+hXmB8VAZnbkXH62Qy3mHuWDHtXDtMjqzyEj7+Rj00ezxuPXxVuy0MWv+vVirulxGzseuLtmEm2rk0jaH5hfmJh7blwfdjq6pcun5iH6FeFiMe572uSn9qk/i+6jdGHms1Tbrt9POlrtquugxn4NdlZv7qb40e7bikmKI9gGqYrx93tViw/wwOzy2Qbde61xQZPBL+4lf0Y+g
V21L66CzQJZBZ5aHFqRjkQHzx/0JNsgQ6BckGAeU4YXSPJ/W9q3y0OPYGJlWsQPW1bZGmR0eK5HnvqBsLxi38xo9rLa6XOQwl7uR/yiaxyRAAiRAAncj8PVCLtxg/cmIbzDXN7SxR3VT0r5R3Npk4yYw3wTHwtu39LpRxj55PNxE64bQ/ag3Lr9x+re0rQB0mZkF2iqyfYMi/VGXn+v1uHlcb67GGyg1AWUmfXjjn9pfrOhtm3h3bFAMg64gGzeHyYa53rSZ6GrFov7vaXK8Qj84AdugdVXg60bFN+PYcSOOYfMmY0BX3PiA3SobzrMfICPnHW6M8RjNleN+DfTkTnj+pdhU2W0eoDw9zrnrHbJN0C/ZEXIpc3JxwV9olMMkr+fc2vauC+wJ4mbt2Z+1bBfT4+It9XPWjvmkPcGfmF/rmPe4iN1eHImUbK/bktvB3zQHor3Qz0XpZ5bnFzMf6Af+Se8eE/+yq67Bqh/nqozDc1eVPkVeK1LEpzYGbEhjwmni0Nfj7BPanq9lXnCecxz8ivGO9gZOwWDsB3pCHzzB/tiej7EfHku/et7zL4/lOQmQAAmQwK0JfK2Q05sQ3ChHVqYb9ahL/BZx0ENl+Ksf8jn+9htH6k1QC0Ib127g0GvjJpo3VOHGmce5yFU73EjhBi3dVxsTH6ufWKyBDO/j+vzJxMjPpG9uf70JhyJ6VshhO8gPsnHTuIobxA588I1A2HChT74pVXm9uEYRu+KNA+R4xdo6rDj3GGjckm1eoEcOMgbnBmx+snxgmfMON9B47MzGcau667Uxr6ynFzzOBeeb+wE+OMtVjDF3vVMe13lmOwLDwGlfnmZ53a/BeImj57TqGqwVw/Yc2/UmfhwX51E/MWc996frQi5yfA1R+0d8Y/zEnvVGezTO4hfYY9EotkP8gktrLvVy1gPnmPvZR7Bha94FE8JJtSfEwu8d2g+vT+YJ2FBFu+/Vhyjb8wn8k0EqA+eFy/BroBt4hLmA6+qQU4y351PVPcjrKSewRe0eya3cmo5sT5DNExIgARIggXsQuLyQ0w0c3pwmZsKNadLDNga+Wcy98k1j4yboQ9U2uCHNbEg36bCJSmPCTTWNc7WrGzbewJO81Yaz2Qz+qWA4z3rbGLMAdeBxvtGCnOBz2Bw2r/Qg90P7Axvd5Fkskw1RIp7hxqce900nXAO7cbTasvW0M3SGk5k8bYfchnPl4Jt/ECWHkQPYrf3gPOkNbBMz1Lc6hi8n8FowK8lr16Q9M9PzDf5pM7n+AgZytSmSA/BdToEn5pFcCgyBU2C0kafDXAC/cPMZTISTGcvenvwx/yRnd9sJ+kJhlOMFcRI2rfA0VvU82zOLQ1C6HRdgL6O673I2k5/tcH25Hc6Tv5vxn8w715I/cyzkevQDRiQ72hXMVWls5+BD6+wH+VrmBecjzjavA4s0h/q1qmu4Xro59jn1Hfs1Dltys3+5LwrkMQmQAAmQwD0IXFbItZvZ2rR4s9i7wMuNbV8hp/Lbt6pxnF6TmyBsfnwTOXwNR/1wvdXW1q/d0KqP/cbpN3MoFB2DyfObarNHrid5/k1p3ViabtuoiC7fsAV/000/blq/Zn+w0Yow1+1u6ecGK7XXiwpl70wH8cHioSmotiMLZ1h93JB3SbxTDJBzM0UOtuKo13qRh77LcbPbN40jLhssc55ofCwvVscuO8QNNonii/g82gCHONUCCouC5kfoh3ES4aP4djadaczvkNODmDTdyqnOs915GuxFv/JT8Gq76gI9YrPo2mz3Isr5q87q9147Y66ILbaejHID1wXvF9jnuES/PZ/XRexGXAZMeh6lHOuB1kK89VMZwiXbB+c7498LqKoM5x2oh8Oqo+WSX9FYCevkg7SP5onFw+VgfPHY50LtB/6ZXrUX57Hfw0bxtryKORJl9mvJT/XP1stBDN0Px7EqyhuHDbkb+d/l8ogESIAESOCeBC4q5PQGNnm9TIzWm5Zfh5tjv/lk1+qmar3RqP1Q3/Hto/+bKblsN8L6iosXV/UmVNuO5fQuf0fIr0XdXXb8Jca8oQ62pxtkk2jtS/vVNtApN0jf+PkAu+mqneFa5SHtR/2VOtscr/Ru+Jn0ze3vuvQJTRrnpuon2Ks/591sBhmvp434jDb5Irn60eIPemRztbbdX/dxvhscggO2mffcbPanTltxlK5oH+RVsFNFgl2Jy968034+h1yvngNzsUGuuT/eT/10RslH7Q+/juljs3+Bf4qTF6vGczU3UaUyrXELOY1229rRNpsh3zf8RT1bfm2sTT0e8QeaZu2es3WN8S8axJCddoZ+OD7lF8Sl5lf/1crGKc8f5QG5N3yt0qDN4hLY5ydZ4uP2XI5cct7A+e74Jy4w78Ic8VxQ+0c21vgoO9HtawHIcxH6aRyGa/o0n8C/JqznRcj/MIfiPSiuJ1Gm5+XKjzBfd653Mw7YnuRu5/+IewPBAxIgARIggRsQuKiQu4H+74n8dyqL/6rl9yR9b3Ta/HxPmI3Wm+lkM34VBV8UInbBJvOLUr427NbxvkUcpxvLryH49qifjN+3jd8Q8Ff92nCZl25I4BZrwaOu6TfESNEkQAIkQAK3JfC7Czn/VtOfXNyW1Vz6VW769ZvX/k3xg367+cMb5uG38PPIXHblKnFMT6anrw5fZtrVev9w/K7mRxb0V/3KfvL8PgSushb8kjX9PkSphQRIgARI4AYEfnEh12+S7dW8GwCiyEchwHg/SiRoBwmQAAmQAAmQAAmQwM8T+MWF3M/DowUkQAIkQAIkQAIkQAIkQAIk8BMEWMj9BHXqJAESIAESIAESIAESIAESIIFvEGAh9w14HEoCJEACJEACJEACJEACJEACP0GAhdxPUKdOEiABEiABEiABEiABEiABEvgGARZy34DHoSRAAiRAAiRAAiRAAiRAAiTwEwRYyP0Edeq8DgH9ifAH/TMN1/GQUkiABEiABEiABEiABEhgSICF3BBLbZS/WXaUPzjuf6NKPg+H4X/TP4Gw9+8RNdksTDZCEi59vB6Kc5djjZX08HiF3t84abGR2Pc/0u76b/q37b5hNoeSAAmQAAmQAAmQAAn8XQIs5DZiGwq59EfHZRN/SG1DUTsLuaZrKISN5wiEQu5c54uuf5Tl0ItrLdpeTuWz1D/8LYUkY3cRUHYmARIgARIgARIgARK4AoGnKuR0E96eqPXN+ZTj+1Kf+Pw7lUWezMH/1oVc/4PV8tTOnxSVVMjpOLPBnyAFu6w4HPUrpepYXuHJIBaTqsufGPYnR/kJlerDce6XPck6SZGqNoKMIgWNt0MRqzqP5fgCT6vCEyxg4Xr8c0e/XKB124VFjSHyC09QndfbqRyBeWe7wz+xMbDqhd3n21JO/+oTwBZv942fJEACJEACJEACJEACJHBDAs9TyGnR0DfuupkPG/TLKOfx4Vx1WaEIhZwWHPY0pxZ4vcjpBUp9wtNe4bPirBYKViy6DNRjhZYXh2oP9vPjYvJHvqu8/opi96nqddnVdvSv+xGuuS7Q3ShbAaiF0FY/samNt0L
2XaT0Qk7OxNZmXxsz4uX90Cc8LiX70GyWA4hnaOcJCZAACZAACZAACZAACdyRwPMUcgkqFk7p0q7TXuRI9/q0qj+VqYWBnreNfyoWUvGB9oSixPrVp0JZRn86tCowsFBqhU11DXUFZ7WQ68XuSmbrnPVuPN1MupuIfDDrh340ljJYWHS9gVmTlXiF8an4C/aAf9iOtmA7j0mABEiABEiABEiABEjgzgSeqJCrm/rwYyWjp1I7AzAq5ILsgz35acXDQL+87mc29OIqFR/+tEr71Wu9YISCIxdhWlxaodMKm+pc15WcTf1CIafy4dVK/3djzb8uS9nga5jtiVrvI0f7+nWfo93Svq+Qa7ySraviD212/5rJwLq18YAESIAESIAESIAESIAEfobA0xRyWgRAQRGLgsvhrwu5XlQEaa146AVJuG4naE8oMLzgOVfINT0mUM/HhVy0HazJxWA7lyIGXp/EIjHrbWNMrpwD96Ztbz8oZMXuVpRd8ESujUm2ds4b/rnBMz/8Oj9JgARIgARIgARIgARI4I4EnrSQqxt3fxr2Fd65GArnWjBY0QHFQywmqw3+77qwkNNj/5l7lOU/3qH/RkysFhleQEZ5ao8XUFo0xX5D37VfL5a6T1W2F0TVPpMH/inHUKDV4vV8IbfRT4Qag/bvBlXRNZ/Ibfinuvh/JEACJEACJEACJEACJPBYBJ6mkPN/x9Z+jRGesEjB4gXV3vD0IsdHWDECv46oV1Kho+P8FT54tRMLORmH/bptVYcXVLGQw4IHfkHSzOvylnJ6O7ZXOt16/VQmS1n0FygP4UlaLd7qq5XHt49yelkXqlUWcjiW07v8YiT8u7umcG8/GWB9gVf+N3Jun7JqsU28BrFwtj5e8iP45/Y2md7ATxIgARIgARIgARIgARL4OQJPVMj9HORfo5nFyq8JFQ0lARIgARIgARIgARJ4bgIs5J47/tF7FnKRB89IgARIgARIgARIgARI4EEJsJB70MDQLBIgARIgARIgARIgARIgARKYEWAhNyPDdhIgARIgARIgARIgARIgARJ4UAIs5B40MDSLBEiABEiABEiABEiABEiABGYEWMjNyLCdBEiABEiABEiABEiABEiABB6UAAu5Bw0MzSIBEiABEiABEiABEiABEiCBGQEWcjMybCcBEiABEiABEiABEiABEiCBByXAQu5BA0OzSIAESIAESIAESIAESIAESGBG4HkKuS/8jbTPt2M5vn2WAmM/Xg/lcFj/p/1mlEspMu5cn43h97v071SOh6V8XFvjXrnC+hb6xR+I41XcU1s9FzozifXyXorkz+H16iSvYjqFkAAJkAAJkAAJkAAJ/G4CLOQ24hcKOduQayH3hc05C7mdBeItC7mNWF9+6aMsh2M5/asjtWh7OZVPK9q9kPsVxfvlznMECZAACZAACZAACZDADxN4ukLu1J6o9Sco0xi8L/pkpfw7lUWezNkmffMpy+ApjW7y7Smeb+y1IExtqydG4SnWZzm9+NOf+sRnbXfts7zKUy3ri0WnynMZE/+DTnuK5bKsUHG96IMULvV/UuC4jkN/IpXlenf5BLuq7WAb8kz6UQTKOHg/GftyLEexR9r0XIot4/QmxWW1VeLS/QH91td5Nj9FFrItvbD7fFtqgef5EwzlCQmQAAmQAAmQAAmQAAl8n8BzFXKH/nqjbtrDRnwfzO1xfTMv0toTvfRqJT698QKkFgjr8V4sBL1a3PSnQd1yK/awkGlPjWqBFQpJ79cFWFFlhYwVWNU2k23M1Afn14q02sd1VN/MztYHlcnxwC5/tXJDf5QSuQkrtSFzSoVcKPhabkQf9nH3YhQLwGghz0iABEiABEiABEiABEjgmgSerJCDjfa0sNjGqxt7fOKkx15UxYICJbXiwp7wtGInFHlYRNTjXuDhUzi8hlpwvLSDPdlfPXe7QQb2a4WPXdfCSBjO9IMcPdzQ711Rn7Th+VS/D7bP3M8vN3tzv8QJdYZ4iP07uOv4AUu3g58kQAIkQAIkQAIkQAIkcGUCz1XI4ROotHnfyzU8oRkNUrn2aiHoy4Wcv6rXPvOTrmBfLShaX3gdMJqQCywopHJRg0UeCgG94cmh9GnXcoEDAlQPvFrpTwTbWOgrh9ku6DfXH2Ws+vllkQ0x6K+uJk6gU4b2WO3hDoxdLz9JgARIgARIgARIgARI4MYEnquQ81f2BGouIHaCPlvIgRx8/bAXB6mIgP566EWF2OevLs6Krjx29aQMigyX62P0fPAUCfuNCiFlOPMhF3gb+oMdkyelU/0+2D5zP7+c29t5sh99XhVyA0YuXz6bTGzkMQmQAAmQAAmQAAmQAAnclsCTFXL9NblLCjIMwea4VBBIIeevUPZCzn6Wvj0pqsWP9yvw4xrthzWsuGiFnerpvnT7UoESCsCoR/1oNnQJ/ambP4FzPVW22xA4qD1S8MRCTgvZc0/k0qumKtcL7uBn1A8W25PCXnA123KR1c4TpxQ3Ge/xaLJEYbAnWMATEiABEiABEiABEiABErgrgecq5F5Opf1qJRQxuHE/R1839qt/I9d/nbEWL+tXK709FAgupz15q9prX3hKpc1WyExfq5ROqUAJhVwvROormlm+eZ6Kmvrkcu2P6/LXPb3odD+l/fj2ob+0qdeyXFOnH1Yg1TGnsnghJxel+HJOEDMcrsejfq1ws97tPHFKtsV8OMO9yVxZxAYSIAESIAESIAESIAESuBmB5ynkboaQgkmABEiABEiABEiABEiABEjgvgRYyN2XN7WRAAmQAAmQAAmQAAmQAAmQwLcJsJD7NkIKIAESIAESIAESIAESIAESIIH7EmAhd1/e1EYCJEACJEACJEACJEACJEAC3ybAQu7bCCmABEiABEiABEiABEiABEiABO5LgIXcfXlTGwmQAAmQAAmQAAmQAAmQAAl8mwALuW8jpAASIAESIAESIAESIAESIAESuC8BFnL35U1tJEACJEACJEACJEACJEACJPBtAizkvo2QAr5HoP7Bbf9D6d+TxdEkQAIkQAIkQAIkQAIk8BwEWMhtxPnz7Vi0wHhfyuHlVD5LLToOh2M5/YsDpe/hcKj946WHOvt4/a6NVy68hO3rR2X071SOh6XoGR5fheBHWQ4HjZHEaXmvQtcxvooyCiEBEiABEiABEiABEiCBmxJgIbeBN2zytdiwIubFCrw2VtqP5fjy3SKpCbzZwcMVcujp1Yu3Ljz4rXpqMb6OcR/DIxIgARIgARIgARIgARJ4VAJPV8jJhl6eyOBTmWlw3pf65ObfqSxvn6XYE7nl7VSO+oTORkph8HIqp/C0y5/eJV1WREjRd/CnT/JUavC0KNhlRc7Jnvzlp4LoV39NsReeIv+//1OfGsqx9xmPC5r1pPV7WcoSCtb4pKs9XUMR7YmmNZov8uStFVIF5SzlA/oUGy98Kye/Xs/dF5Eu8jpLf3Iqsu1Jn5kg/vjTVn0612KMhvOYBEiABEiABEiABEiABB6TwFMVcrrJH73Gtzs2tTBa3qUw8CKhFyOtOCilaOHjxZ
4WatZfC5T+al/R8yirvsaZjLJxXiipLyYfj6s8l2/FpPtsdnnhU4sefJXRx0Xdo35VhhWKWuSWkn3pUta80A+3p45He+zYCt36OqT55IWZXsN+duwxAN+7PR7H3sIjEiABEiABEiABEiABEvhNBJ6okLvG5r3L6EWbtNVCLLZhUVTHacGSCrdVstjTJ3n+F/63GufFEci2AdmOViilQq73qwPl3Ass1D3qhzJ7X7ept9QjtLEer/6NmnRUH2eFXCzQmn4ck9RqAboq5NCWNICnJEACJEACJEACJEACJPBLCDxRISdFBhZXX4kQFCFScEmRAIVEL3jWuto16O8WaAEFr1bOn8j1Yqboq4hSQFab+uuE9vqhFjBgrylrdthroq0g8tcSdxQ+XUYp+toj2g5PKt0/+WxFVfJf2psNeA2PU3Eb9GO/9mM0/grmujANY9FAHpMACZAACZAACZAACZDALyLwRIXcuqi5PE4oQ4q1pei/WbPipxcJ2E+01HMtWELh4YUQFGipaGk26rj+CmZ/jTHraiOaXn/6JVe6jfHYr132RC4XrHIONqIp7rcXwHbtmoWcFov+OisWj82ODftaHx6QAAmQAAmQAAmQAAmQwOMTeKJCzv7dmj9xyoXRrlhh0VSP5UmYF0q5SGpP1qR48QLHCxrXp9e8kDOZUIx4t1q49R8pwaIFj+uTOu+H9lZJaKOO839rpnZ1X5peOUAbrV99ihYLuSpvUsjB0zLnJaJlzLWeyI04jArT4BtPSIAESIAESIAESIAESOAXEniqQs6fjPlriF5QYHGzHcNYGIVCKD3tmunCVzGrrl4QarH3Dn9LDY2xAnDBX26E6+KD+9WLl2ivdK82e6Fnxa29GtkKKpDrh11+fQrpfV2e6D6+fZTTy6QYbLq9aK2SZbzL8iJUf80TC14pJKG4DfHCfvmXL9O4/jqqe8VPEiABEiABEiABEiABEvidBJ6skPudQVKrQ8Hyi/2g6SRAAiRAAiRAAiRAAiRAAt8mwELu2wjvJICF3J1AUw0JkAAJkAAJkAAJkAAJPD4BFnKPHyNaSAIkQAIkQAIkQAIkQAIkQAKBAAu5gIMnJEACJEACJEACJEACJEACJPD4BFjIPX6MaCEJkAAJkAAJkAAJkAAJkAAJBAIs5AIOnpAACZAACZAACZAACZAACZDA4xNgIff4MaKFJEACJEACJEACJEACJEACJBAIsJALOHhCAiRAAiRAAiRAAiRAAiRAAo9PgIXc48fob1gof5z7EP8Y+N9wjF6QAAmQAAmQAAmQAAmQwP0JPG0h9/l2LIeXU/ncYC59jm+fpUgRYn0/Xg/lcEj/vX5sSDl/SWSqnvNdf2mPz3J6OZbTPzH/oywHP5b2Q1ner+eWxtXj4/GVv8Gnx6j7ejopiQRIgARIgARIgARIgATuTYCF3AbxUMhZsaaFXCjcajFyCG0bQp/+0g2LqfRH01usQiHHp4JPn4IEQAIkQAIkQAIkQAJ/gMBzFXL6ep88TTuW5fX8Ezl5EqdPi/6dyiJP5uR5kjyRy0WbFhD+lKmUouf+1M4Kh1Rk4JOp+EROCh0fO9ClVtiTrLdTOVrf8ESv+Vnl+BMv1fNy1CeKtb8Voa7Pn2ANEjs86WpP1LDj+ula90t8Eg6oT3jBGONzkielao9cBxbIfOJfK7zdrMb8oyw6/rOcXrefwvpQfpIACZAACZAACZAACZDAIxN4nkLOiqta1FhBsVG4zII2LOSsQKmya/GBxVN9LROKFhGeXtfEwqoVZWozFIjNKC+IsEi0fmmMFmDmp9oOPkdfot1NldsK/74tjus9VVcruESe2y7H/iQM24GJxcd9Vx0+Hn3C41IK+tctsSNgvLrGBhIgARIgARIgARIgARL4xQSep5DLm/p8vjOI4yIGChKR24oWfzpXixgcK8dYtPhxNAOLHrxS9eEYlIc9c8G4eprYOoMPrW18EAs26IO+B757Czkv/KxAGxaFoE8Ogx64hrZAMw9JgARIgARIgARIgARI4C8QeJpCbgsKDroAACAASURBVPXkZlYAnIkqFmO9qxQq9qMdWkDAq5HtNUEsOmKBFoqw1fhe3HR966ILZaiN/rqkfMITOSz+4iug8TXMrkuOqr7wIy+tyMKe3S+0p75GuueJnPfZLuRm/jVL9Kldl9XaeUACJEACJEACJEACJEACf4TA0xRyqyc31yzk8HW/TblW6Lz7ryjWLOpFDxSEeqkXRjHf8hM5ONdCEIoYsKfrEWkwRoXXc38lFPXlInj6RM7+DeHx7QN+pVIkiR9uE/oEOlPxFXXAmA3/3OY41lv5SQIkQAIkQAIkQAIkQAJ/h8DzFHJaTPhP3dcCwp9UXRJOfRoUnkaZrNYmRYfrsSdLrYixH0s59NcqRXcvsEZj50/kmv25kGz6op9dj2it19oTOi2Qut3IJBZy1cbpK5omp9mmgmTMLQq56B/azGMSIAESIAESIAESIAES+MsEnqiQs1cb7ZXDRX7xcfbK4UbEtZDD1xbluBVxNlALK3+9MhViWujENiywtGgy+fXJ1qi4qgXM8ir/Hi+/EmnFjbYfy0me/lkRhXrUUi+6zIfV9cbBijeVuZQPGQc/mtK66UHt2wrE1uaFnNsnDMwP+Ttye5/Ihdc8o39uB5/IOQl+kgAJkAAJkAAJkAAJ/FUCz1XI/ZkoQgH0Z3yiIyRAAiRAAiRAAiRAAiRAAnsJsJDbS+qh+rGQe6hw0BgSIAESIAESIAESIAESuDMBFnJ3Bk51JEACJEACJEACJEACJEACJPBdAizkvkuQ40mABEiABEiABEiABEiABEjgzgRYyN0ZONWRAAmQAAmQAAmQAAmQAAmQwHcJsJD7LkGOJwESIAESIAESIAESIAESIIE7E2Ahd2fgVEcCJEACJEACJEACJEACJEAC3yXAQu67BDmeBEiABEiABEiABEiABEiABO5MgIXc1YHXPw0Q/yD21ZVQ4I0J6B9+z3/o/cY6KZ4ESIAE7kLg36kcD4dykDXufamfd1FMJbcm8Pl2LIfDoSzvpch9TD75PxIggb9LgIXcRmxlQdSCTG50L6fyGfp+lMUWy9C896aoN9KlfITBO06GtpwZh7q+Mv6M+D93WXh5vJFdufbf76vy5KYr/7Xiv+mXHDuW079zhKOcw+ELeXVOxZ+4/su/ZNFcrLmy/M9oTbogSPdYB2Du6ObSvxiB9gssfpiusjluc/VaVmE8rhnniX0SD9ng46Z/0vX6zb88/mMguFbvuE/cjIHolvXf7wlXvBdIjvLeMg7/tVpvlhfbBuoX17YP8S8Btkfw6iMRYCG3EY1QyPkmpPWfFHLt+pmDr05YvOGfUdEuo66vjG+CnvAA2V3Z/bC5LZBPolMLSWk7dyOu48LGUm+4ewrAKzv08OJ+eSEncV2tQ1+Efud1IOb6F21+kGE3KeTQt2vGGeU+yvEN19Sfc1HW4T++5up95dz96OciQM1fI6BFnH9xLSJ0fvJJ7tdo/syo5yrkdCGq32ivn7ANAvC+1NcS/p3K8hafxxXceOvQu
…)
###Code
# classify the interviewee from five S/N answers: 2 yes = suspect, 3-4 = accomplice, 5 = murderer
a = []
print("We are going to ask you a few questions; type S for yes and N for no")
a.append(input("Did you phone the victim? ").upper())
a.append(input("Were you at the crime scene? ").upper())
a.append(input("Do you live near the victim? ").upper())
a.append(input("Did you owe the victim money? ").upper())
a.append(input("Have you ever worked with the victim? ").upper())
# count how many of the five answers were "S" (yes)
soma = 0
for i in range(5):
    if a[i] == "S":
        soma += 1
if soma == 2:
    print("Suspect")
elif soma == 3 or soma == 4:
    print("Accomplice")
elif soma == 5:
    print("Murderer")
else:
    print("Innocent")
###Output
_____no_output_____
###Markdown
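A compact variant (not in the original notebook): the same S/N tally can be driven by a list of questions and `sum`, assuming the same thresholds as the cell above (2 yes answers = suspect, 3-4 = accomplice, 5 = murderer).
###Code
# hypothetical rewrite of the questionnaire above; same S = yes / N = no convention
questions = [
    "Did you phone the victim? ",
    "Were you at the crime scene? ",
    "Do you live near the victim? ",
    "Did you owe the victim money? ",
    "Have you ever worked with the victim? ",
]
answers = [input(q).upper() for q in questions]
yes_count = sum(answer == "S" for answer in answers)

# same classification rule as the original cell
if yes_count == 2:
    print("Suspect")
elif yes_count in (3, 4):
    print("Accomplice")
elif yes_count == 5:
    print("Murderer")
else:
    print("Innocent")
###Output
_____no_output_____
###Markdown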
![image.png](data:image/png;base64,…)
###Code
# price per liter: alcohol 1.90, gasoline 2.50; discount 3% (alcohol) / 4% (gasoline) up to 20 L, 5% / 6% above 20 L
combust = input("Which fuel? G - Gasoline or A - Alcohol ").upper()
litros = int(input("How many liters? "))
print(combust)  # echo the chosen fuel
if combust == "A":
    if litros <= 20:
        saldo = litros * 1.90
        desc = saldo - (saldo * 0.03)
    else:
        saldo = litros * 1.90
        desc = saldo - (saldo * 0.05)
    print("You bought {0} liters of alcohol at 1.90 per liter; the discounted total is {1}".format(litros, desc))
elif combust == "G":
    if litros <= 20:
        saldo = litros * 2.50
        desc = saldo - (saldo * 0.04)
    else:
        saldo = litros * 2.50
        desc = saldo - (saldo * 0.06)
    print("You bought {0} liters of gasoline at 2.50 per liter; the discounted total is {1}".format(litros, desc))
else:
    print("You typed something wrong")
###Output
_____no_output_____
###Markdown
![image.png](data:image/png;base64,…)
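For reference (not part of the original exercise), the same pricing rule can be written with a small price and discount table; for example, 25 liters of gasoline cost 25 × 2.50 = 62.50, and the 6% discount for more than 20 liters brings the total to 58.75. A minimal sketch assuming the same prices and rates as the fuel cell above:
###Code
# hypothetical table-driven version of the fuel cell above; prices and rates taken from it
PRICES = {"A": 1.90, "G": 2.50}                      # price per liter: Alcohol, Gasoline
DISCOUNTS = {"A": (0.03, 0.05), "G": (0.04, 0.06)}   # (up to 20 L, above 20 L)

def fuel_total(fuel, liters):
    """Return the discounted total for `liters` of fuel "A" or "G"."""
    gross = liters * PRICES[fuel]
    rate = DISCOUNTS[fuel][0] if liters <= 20 else DISCOUNTS[fuel][1]
    return gross - gross * rate

print(round(fuel_total("G", 25), 2))  # 58.75
print(round(fuel_total("A", 10), 2))  # 18.43
###Output
_____no_output_____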
NpMuKYsS5ZXvqh5hnea1CUe2r+Bdsnd0Zr+bOlteDtqG3fTuxVyxeRHbe2WzZmeelk6245TnS8sHmMuDt5mwo/LDN6cl8qgVZ9QMxaI2xOCrW5p/jZyiK0I9id4pJ0Y9+V1PSkbYhjuGP5BgvUI+T4edLmmeZM8WePGBrnMBfxchwQBv0uO5sS7/ir7OPJ0SACBABIkAEiMA1EJiyIEyJhX+FynY1FKSYvGTkMOlrgZkStyrXCq2WTCfLJTQ5qdfXqZ6Xj44t7rt7bnyyrPr4sLz9xO9Dmnyf+KVX1WrCX3eEUpKHvlTZkjB+wA5fRCUniPnVsJqk7bue7NiuJ782asliMaOjJ+IVMZbzbLP+xUQrDmx3JrdZwj4uMK1oS7wYyav4FAfSAdqHx7lb5d3z8iZ/8dISd/NDzy3uYkfglfUzzgX15RT7ASapKM28h+uO45rYV35pbM3OokAOBnZ2+9nDj9wB7XScAD68vDneVq6F+SKysk/enyTL5oWNtxjWmPQe6AAv9sQn6a1/rdV0JG/BJ3glu85f0CPxffmor3E6PNNJtTtxqerq6HEyMiYvMH/+zR0AxzKkG6dc6OZ5Vm3oXzf8dd52OVg084AIEAEiQASIABG4IAJTFoQX9J+qiAARmBEBfShQC9QZTbw9m3yRfKr9Hy911+9UWRxPBIgAESACRIAIXA8BFoTXw56aiQARAATq7lferbXdK+jDw1MQOG9B6N6COMUsjiUCRIAIEAEiQASuigALwqvCT+VEgAgQgRtEwF4nhdc/b9ALmkwEiAARIAJEgAgsy8KCkDQgAkSACBABIkAEiAARIAJEgAjcKQIsCO808HSbCBABIkAEiAARIAJEgAgQASLAgpAcIAJEgAgQASJABIgAESACRIAI3CkCLAjvNPB0mwgQASJABIgAESACRIAIEAEiwIKQHCACRIAIEAEiQASIABEgAkSACNwpAiwI7zTwdJsIEAEiQASIABEgAkSACBABIsCCkBwgAkSACBABIkAEiAARIAJEgAjcKQIsCO808HSbCBABIkAEiAARIAJEgAgQASLAgpAcIAJEgAgQASJABIgAESACRIAI3CkCLAjvNPB0mwgQASJABIgAESACRIAIEAEiwIKQHCACRIAIEAEiQASIABEgAkSACNwpAiwI7zTwdJsIEAEiQASIABEgAkSACBABIsCCkBwgAkSACBABIkAEiAARIAJEgAjcKQIsCO808HSbCBABIkAEiAARIAJEgAgQASLAgnACDvz9+3fhP2JADpAD5AA5QA6QA+QAOUAO3A8HJihD1AQWhBNEghP/fiY+Y81YkwPkADlADpAD5AA5QA4IB2b5jwXhBJHgosBFgRwgB8gBcoAcIAfIAXKAHLgvDkxQhqgJLAgniAQn/31N/q8X7/fl8Z/X5Tdffear3+QAOUAOkAPkADlADmzmwARlCAvCWYLw9QoEFni3EtPfP74tu90u/TuqqHtfHnffltf/Don57+X1n6xTdG/Ui7Y+/hJ9Vc63H783L763EptZ7Hx/glg9vR+B87UeGAg3d8vuKJsDn/97Xb7ZPNntlm18q/zc7R6Xd0mQipxD50ywh8nWETwkhrOsKbSDXCQHKgdmqUW4QzhBJDgx6sQgFtfA4vfy+vS6vP94bBR278vjORJql8AmfcfsKL4/5cQ6y5MicVtyfg1cv5LOxIOI//zz9Uz8/e91eTzqoUPUL0Uii8H5efOV5i59Id/IgZk5MEEZoiawIJwgEjMTlbbdwUJqya59WvH267HuHtruCBSHuGNXdkBs7PDzPAWh7lyBPX/B3m8/Xs+3OzT05Q74If7/elx0V9Y+ERPAXXZ7X6FoL7uLbhc47dx9e3rUXbdvP97zjjEUSijTdtdQ5+C46NztlsdfviA7mrNxbgz0+zUT9Se/0+525g34Sc7eyVzazB3i4ecS8SAeX5MDE5QhLAhnCQIn+dec5LcS199lZ7BVqGFCC3GSV9+wGJNzl/RD31UChK/SbX9lVPBMO1RpvNsZDPq1KNhsz8hWtlXcBYvAh4B7Ksr9Lq6+2vsUvmMq43KhJ7HSIgmLrv/el3d7DVn6ItdWfKox8jvGmWc2NsqJtg/k1lc90+uzjnujcYaX+gsFr4wJ+snZGsdbWTtpJ2NGDpADp3JgllqEO4QTROJUMnE8F6TjOeCLwFocGqahALDkF3Y2yncQD/4uYdbx63Hza5+2+/P4IxSgq52ra31vzXD7Sp+eA/ja6JovLb89x5SrUPwVeXAtFmDbvgfY1lOKyTNy9v0pFHc2L1af+XuMu8flNb7eTM7ye4ArvrTmD68df38jdsRufg5MUIaoCSwIJ4gEJ+z8E/bLxkh3LuCPhqz+YIYvBgoOq2T2hBhiIbAnQcJEXHaDyut3K3tYEJZY7cF0b79GIWW4f05BKDt7uMvY4eDKr/0Fodm91+eVbM/vbX7LGORh8IucZUG4h2en8pTj/bwlHsRjRg5MUIawIJwlCDMSlDbdx8K5TmxjUo3JuCS0tjOCie5hWOn3uOw1Pn0NNL8yuCE5KrtJ2hdskMIWZKoOvjJ6loTbYy6xBk4E3LVttVMcOZVel7Q/0lLklwcDENe/f5fIl9Ha5B4SZG7V3UUvdyTHt6VXT2sxKbt+WLCO+A9YCWdxNzxgR86OcGSb5yTxIB7kwFfhwCy1CHcIJ4jEVyE1/bitBVoT0PzHYlKya6+3yY5hTXjtNU15NbQmxSm5ra+L+jH7uIC6a8I+xg/HmB3JtmQrtu+eHvnbiBsK7H1xqrHPDwJwRzkX3A53Vwwin2wXWuTU6xJH1SGysmy55mRKLFc71z2upOLNeGl/qKV852+121l5PsbCyzX+bR9jerLv+eHFyk8+xDjLQ4xxXHrc4XXiRg6QA5fnwARliJrAgnCCSHACXn4CEvOvjnnYmTlDcUTOfHXOXNs/cpZz7NocpH5ykBy4NAcmKENYEM4ShEuTj/q44H1JDuDuldupYry/ZLy/QpFPznJH8CvwmD6Qx+TA0RyYpRbhDuEEkWCyxoSdHCAHyAFygBwgB8gBcoAcuC8OTFCGqAksCCeIBCf/fU1+xpvxJgfIAXKAHCAHyAFygByYoAxhQThLELggcEEgB8gBcoAcIAfIAXKAHCAH7osDs9Qi3CGcIBKc/Pc1+RlvxpscIAfIAXKAHCAHyAFyYIIyRE1gQThBJLggcEEgB8gBcoAcIAfIAXKAHCAH7osDE5QhLAhnCQIn/31Nfsab8SYHyAFygBwgB8gBcoAcmKUW4Q7hBJHggsAFgRwgB8gBcoAcIAfIAXKAHLgvDkxQhqgJLAgniAQn/31Nfsab8SYHyAFygBwgB8gBcoAcmKAMYUE4RxD+LK//7Jbd7nF5hx/2/P3j27Ljj2sf/UOf11tk35fHncSz/vv24/faj/9el8fWdeDAygf8Eet/Xpffo76u7XfmWLZpNbba3LTVybre4p3mRPLh8ZfYUf2a2e5VHCfBs9j169Hxtbvu/HpcEu4HcOBozlZOxrVR7Qa5B9t0MfwrP4sPxe5vy+t/B+
B4MZtpU5kXxHx93yImxIQcODsH5qhFloU7hBNE4q8UB/98g2TrPZ0fWjBwop59oh6VHIRi7/0pJH8uAQ9t3RhKcgkPDUTG0/tGf38vr0/9AhLtw+OjfO/af55E8/0JMPj7d5EikcXg6dj+/vEIBYqsP54vWIyX4mZvrI/nrOOhFFHOHpSLx6fjcH7Ovy+Pbp6KvVvn/Iz+0Kbzc4SYElNy4J45MEEZoiawIJwgEloQ/ngvSbskX48//A7S+1PdcXKFgBUXknTgsSZrknzUcZg4pwTvcXksckOSUp5k75bd02uSA0lZ1569SeIdLHyhIJS41F2MdbJ93EIYE80RrqOC8H15xQcP0fZmPDOv/nldXgt/duCj2II7PL7NigvhIx5vwQELQuUgJtvG/91u+SbzR3Zpsb3pywi3+2nzBeHfBXGW9enbWXA8hLOIfeDvf6/Lq+4Q5z5ufuE4PN7AWVzz3NsZto7KGonHKL93jD6nOVHXgr91zSZnNz7c6uHM61vWT/YhT8iB+TgwQRnCgnCWIKSCUJJjeUqfkx9IzCVpjsUcnpfkOxdsmNy9/6q7SO6p+19J+iRxzq8zuqRPEpe6E2PFo73Sut+e+SbcRRdBiF16rRGL7ZDcHlmkSAxcYjmUY0ls45VRZ6vEDRPYURxTclv4o69vmp+iz45FRjz/u2iRAQWbK0AGvqR+yZ+qO8uLDyzg/KLxH9g/qx24ZmhsHHZbOTHiS9rN3c5ZkPXrsa5Tgm0sAFcchrEuFiPO+jUvral1DZS4pXXQHm5sxST302IT5wQ5O+tcoF29+cPr5AY58BkcmKUW4Q7hBJGwgtCSZE10S5LTKiDitX5y4nby3FPvsAsghYAlgUW3TX5JwC05irqlT+uajb3DT7fTYAkk4AA7Wdtfv6vjNTE9ZccGE+xVrPtc8gthTKBTgqvfi1zJhDZL0Ft9rG3waXyWHXT3GmEsEpDPA3nep4rxvV23Yid999XmesXDtds6cQCuR3NW5krUF2O9mUsDzkaZ4lu45ormzb6nIlTm+Wt4sBflaxEafd2sp8bq3rhLfxl7coAcuGUOTFCGqAksCCeIRCkI5Sn0r/f0x0JKktMqtuK1ThKPib8+4cbvCbEg/LQFpMQu7wJg8RZisk4Kxwv76jXJYxJGtO/vMa+Mio3wAMFsMLn2adflM16L59h3cIy73FJklB2nkLw37RvI/TQu3IhOLHYcrnlnrODcWEf2YXcsZ7WIbBVIR70yuoezK/6csSAsPuCDtbV8cna89u3jGduJHzlADtwiByYoQ1gQzhKEVbIsSSQkzJisKdmhLZG/XRD6xE6eVPtXlvxrepjg43FKXHAna789d74ohfhIHMrrjW53cLfe/egWEPE1yfhQwF5p8zEWfmhiDUWpJOiY4L//qH9AZBXbrj1p5wPl1LEt2/zDCOT3IQt4l7Pulefsc0nE75yP3RhWXGrs5JpwrfJI+QN/NXf79zL3czbt+K53JGMR6eP+e3n9ga/Cr8e3OTXi7HoN9TqFU4HDG3DVIg/mnjwAKmsBOcvvDW7iUJ2nbV6znbiQA7fMgVlqEe4QXj0SH/VnCnICiwlYSrjttSP7AzE1AcK+6XWvmshpMlISuW/L45P8lEUaW8apzpS46XhLXrBw+edxeSyvjMrC07fnlifleWwHbEpBEhPjVGSXxHBLUhBeQ9VYFfnpZhCTaPSnxBu+t1faMdYW/702yUMD4YVxMvwBF5QZdKYiAMbB91WLTQ396IMVolhQYPvuSWyrhe5I7t23QawKJ5VvuJakIttw34TZXs425oXGHeZQWb/qmie6MdbF5gZnvJ1jzqJMmV9VbsOeTfME1tXC8Swrj3c6yVkWiHs5zMTfz2niQTxunwNXL0OyASwIJ4jE/BN6veMzv82TLxK4U3CWJEASTZ/Af26Mwi7yWXw4Z8zWOz6fi8c5bZ9TlhQvBxWE+zihBaMv9D43RuTs5+I7J2/pM+NCDpADM3NggjJETWBBOEEk5iSqfyp+1kRwX6LI9sOflEtyfbEdMdz58K+fXpXLbkfqksUxb7ZHxV12JjfttJ0DX3L2qBhxLT58LSZmxIwcIAcO4MAEZQgLwlmCwBv1ORI+yiCPyAFygBwgB8gBcoAcIAduhwOz1CLcIZwgEpy4tzNxGSvGihwgB8gBcoAcIAfIAXLgHByYoAxRE1gQThCJcxCKMrgwkQPkADlADpAD5AA5QA6QA7fDgQnKEBaEswSBE/d2Ji5jxViRA+QAOUAOkAPkADlADpyDA7PUItwhnCAS5yAUZXBhIgfIAXKAHCAHyAFygBwgB26HAxOUIWoCC8IJIsGJezsTl7FirMgBcoAcIAfIAXKAHCAHzsGBCcoQFoSzBOEchKIMLkzkADlADpAD5AA5QA6QA+TA7XBgllqEO4QTROJzJ27+PcGn9+XsPyx9wO+sfK6PtzPxiQNjRQ6QA+QAOUAOkAPkADkgHJjlPxaEV4/En+X1n92y2z0u71BgSfG2253+49pWBL4/rXVwMfqMxSgX4DvBO/379uP3+kda/3tdHlvXgQPr+Jhsz5V1v7VfiU/Jnsdfsd3k7pamrUOboqzPO1/7UH9sfGa7t8Tnqn3kB+KBr91159fjsubOvngbtw7k7H+vyzewaRVfaD/cpn02n6u98rOs78Xu09f2q3JmkjWBGJyLq5RDLpED1+LA1cuQbAALwgki8VeKg3++QbL1ns4PLhg4oa81oZ3eUOy9P4XkzyXgoW2QaJmc96cDk+u/78vj03suSiVJ9eNNrviAx86ngV2X7Bd9lyJxVSxMYuslcTlV1+8fj8vrf7Z+yPrzuvwGHLEYL8UNtPf0G59i3Hr9y/Uwh8p11YkcxmOzf6ZPnHtil9i7fc57v2fyi7YwNuQAOUAOnIMDE5QhagILwgkioQXhj/fl9SklYZJ8Pf7AHSR7yt7Z4cEC45/X5bUUDJJ87Nmp2pDUnYPwdyUjJrNuZ2WdbB+KzanJdUz+X/HBQ7S9yY/MK+Va5Zffqelz1ooLKeTweAsO6LvuepdC9+/yF+bBN5k/ssOE7U1feEMT3D0n5MEAPDSQXa0TcXTytsRhxMP/XpdX3OV286sXzw2cLbt3wmks2mwdlWt43NOF17EgTHPCzRNydv32xBZ+sA9xIwfIgS/CgQnKEBaEswQhFYSSHMtT+t+pMHQJ0e/l/Ze9digJSUjW8Gm+Jhi1/f2X7QzNv/uzpSC4iT4hdn5HIMf3hIXs4ORad/5q4eaKJGerJLKYwGJiG49Tclt354SXlkTjsYyL538X4by+EpgLja0+pX4iL7zeKvJgHmixCOc3wZsTOHGqf64gDFhu50TkSD3fGt/ih/Gj9dp1LABXHK56izzFdsRZaavrpvrszqVoltf4d/lNjgPmiXBc/bH5ke0LOJOzvbjxuucx8SAe5MBX4sAstQh3CCeIhBWEliRrku2SnJQA1+/51MTFJXKNhDJ9d9CKgZCQNPp/pUl2NV9CMut2BARz2BU45PU78+fg5PovPlD4u/z+9V5fB3Q8k5vMAYluSJiRx
6vvR0Y98XwjF43PsoOOBaBg6nE+fSfW8L6HTyt20hpT1xfz3bUfUWgfzlmf8Nirp2pPjPVmLsWiLz2YUK5GmXmeIqf2rbWGlf9MRajM89f4evNKJznrsfMcYBvxIAfIga/IgQnKEDWBBeEEkSiJtLy6Zck6JDmSjGFigsnVMEn59ei+XzXsuzEh/4qT8ew+Qey0yMfX7UJM1oXM/gUf47/J9ph4/np13xc7/JVRsbGRvJrf9omcitfiOfYdHGNh4OZF9LFl30DuJhy/8HhcGxyu+jqpX3+w71bcDuZswNrpPOqV0T2cXfEnPbjBddfZEOzr44DzRB7sQbG90ol9968DfZ0cS2zIAXKAHLgVDkxQhrAgnCUIWBAWAkPCjEmwFhi4MyO7UVhwSCKcv//iE7t6vejYnNRwYTkIM4idjJM4lFcr3e7gbtmdcbdF9PjvPuW4hQQ6JrbvP+ofEIltfb/Tzkc7YV6/FruSGzDq6/Hc84UFJNBhHigWR2C71Y6v1s/Hx7/im3hlbxkc971MH7ca07TjC0WSrknpjYjKLeEa9vm9vP7AV+Gxrcpex2jE2fXOeLTZYzTSg21BLj4QImf5HSjeg8kBcuDuOTBLLcIdwqtH4iP98Qv5rkxOYDEB06TIvYL4uDzKT0hAsov9fUGQEqD0Gti35fFJCoatyRMmNTxeJ5c9TADzEqOU4JaiUG4AmBhuvCHY65KtV4fFPm13DwfMxqS/jIt9sEiNbV3bpBh7XB7hjxa57yaizPDHXfb50cMaeW7FAhYU2L57EttqoduTyev+FebCUV1z/Cvmgq/hvhW3cawb86LwzXM26sVYF5vLWON9/BxzFmXKXKlyYU7n7zQ6rnf1og+27mZZeZ45neTs3SeGW+cV+8W5zXNy4nY5cPUyJBvAgnCCSJxjIh/39Pp2J9A5MLuqjCMKwrG9kmj6BH7c/9TYw+5cNyE+Vccp48POzJQ2nuLf5cceUxAOOahFpxVKl/CHnB3Gg3OEBSk5QA6QAxfnwARliJrAgnCCSJznJj17snOJhO+OdUhyfaDeK/AAACAASURBVLEdMdz5sL+6OAH2bif9ksXxBL7f4k1cdpE370ifijE5e577zKlx4HjGgRwgB8gB5MAEZQgLwlmCgMQ46thez7tYQcDJfFScbjFpp80Xf1pIbnF9IQfIAXKAHCAH7oMDs9Qi3CGcIBKc9Pcx6RlnxpkcIAfIAXKAHCAHyAFywDgwQRmiJrAgnCASRgp+coEgB8gBcoAcIAfIAXKAHCAH7oMDE5QhLAhnCQIn/X1MesaZcSYHyAFygBwgB8gBcoAcMA7MUotwh3CWSNAOIkAEiAARIAJEgAgQASJABIjAhRFgQXhhwKmOCBABIkAEiAARIAJEgAgQASIwCwIsCGeJBO0gAkSACBABIkAEiAARIAJEgAhcGAEWhBcGnOqIABEgAkSACBABIkAEiAARIAKzIMCCcJZI0A4iQASIABEgAkSACBABIkAEiMCFEWBBeGHAqY4IEAEiQASIABEgAkSACBABIjALAiwIZ4kE7SACRIAIEAEiQASIABEgAkSACFwYARaEFwac6ogAESACRIAIEAEiQASIABEgArMgwIJwlkjQDiJABIgAESACRIAIEAEiQASIwIURYEF4YcCpjggQASJABIgAESACRIAIEAEiMAsCLAhniQTtIAJEgAgQASJABIgAESACRIAIXBgBFoQXBpzqiAARIAJEgAgQASJABIgAESACsyDAgnCWSNAOIkAEiAARIAJEgAgQASJABIjAhRFgQXhhwKmOCBABIkAEiAARIAJEgAgQASIwCwIsCGeJBO0gAkSACBABIkAEiAARIAJEgAhcGAEWhBcGnOqIABEgAkSACBABInBLCHy87Jbdy8ctmUxbiQAROAABFoQHgPW5XT+W591u2ZV/z8tVlt7/vS0Pu6z73+dl9/1t+XOs4yjrWBn3OO5k3P4sb993y8PPIyInMTcODm7+reRAr+0elrf/XTloxYfDbBH7j8Lsyu4eq/4s/m7l6tZ+m9ectF4+/3us9zc+DvHE4xt3C83/8/OhrkW2Ju12i4u5+l7vm/35m9bEsra5+1q89/bXDbTJ2bGADCcbPbrkcfLX23iCfsH5WL82z2m0b875rfE/Fgd07wzHasvgHr1NxQm5giqQOK3nS8oFZF76tnLdYSgyzp3vnpn/28Bs9Grj0+g4xSUWhFOEIS1+eDNLN55zT5INzp4zuTinrA2ms4shcOQir/GyBXwsQxd2uBnpuVvkzZbLf8rcwbm01QLx4ZhxW+XP1u8s/m6d41v7bU4e05p5toR3tuDsswfxxON9426ovZl868Meuy/WpO/jRa71ky+/JoS1bSt+qFvHtNfKuDZeB/KKzXX0n6p1zvnd5OSprh45Xm2Be/BxYsJcOEhIilEs+hxGcc7kHEHmiK3dfm4eZMCg8yz8769JA+Ov1sSC8GrQg2J3c7HrYUHUPvYk1G6I1td/6g0pP1EdJ7g2oUVulok3R5ecpQlmT1htMi+5/7O8TpJ1praebOs39kEXlYYPbd/y5P8pu5tJvvhd+5qudj9FL8fg4TtgoYuZ2VsXMJH78D09vW7hq+0/3+qO78vHUv2xJEK0ekzLbizGIPRp6bPoF3+/Py/PbofQ6ymxs4H2iYv3siT8Ojcc1ZXb3A3AZOET85fn5lPE0hVxhqJyO45FEuCcX29SDj8kXohsPa+73mq7i08tCmvMhAMQN7TX5k01AY46uLv4LssSbAIBeljiuqu2tfogN8wv7efsDTwuu8gjWx8WNy+K8q1zvNGvY9MYC7BROVV90XF57ss61OW4zDrYeULM2jgnnc+nrC0aX+BgsMFxq2ArB1l32QUVHDMPC4cCtgesY4b1W1m7bZ0U3SjXvyqIOOErhL3r3dg0cEH3NU6wHqQ2wAAKwFQQ4ujxsZOtdtQ1oTfSzakcm8QftCnP5+G6kDW0YqXX7J6D8TgPb4tvHT0SQ39vgzkn82sVjyKxzxnAV+W/PJf7tJcHusL8xjl7ynx5K3Nf5hFwvHOfU+/Efl1bHpbnlweHAXIe1xLPeR9HHFPXKfAd1y+N0/Oyyq+KTTA3OzHFCNlxsSHmCltl5H7P/wbu57yhYpEwVj/FZsgZ6tzx+JiN7tPZBfdiy1Mg70vj8toZ1m1tUz769djFa8jxTpyCHclP7Gs247Xxfcr5f6ETFoQXAnqsppKkLhA4AiaVEa9D2rRw5glWJi3KsuOk0/SVm52OyeN14qQbpS4gtmjqYoSJSU1UtZ/ZhrJygmH6XD8zyT5Bb0pMkq6+bxk/06v2mU2pLS0+hjPi4/0w+1Kha5M434yz/KHtFh9LCGwhy9ghjnhsCZjqB9xKXBQb4UF78WxhY4uy04OxM7zLJ/IMcSsdyoHJdHpLqy8mU5+KJXRbPM45PoiV+TvA0cmzZLvFVenouJXjCvoMs3SDqFibv8hHESe+lTHBEB3jOIlcq7KjTSjG4ZsxKBzFjs6vhGPlUsVe5QGPzfaxrYMbF3DVONyUif30uG3TCIu1
jdmukTzESI4dTsL3ZEcf58xJF8cD15Y45/S8xr9yKxoLcdSmaq89iNOvFaywhXiNsFE7zBecs0mvxdHNUYef9Mtx7F3fq7/yYOW9JO+GuzV2sDu0IETMU+ytCKt4mEr7lDEFE1vnZe1A/KVz8NnGrz61H8RqdI90+FYe4HxOek1e5q1bPw1rXOdz3GFNQMwRJ5vfzfXHFcgBA7Bd5TmbzF5vR1p/c1sn5is89z5AqfFLdmQ8RvFyMcqYZqz62Nf4iI3Sz3ijY+zepLLTOqD2GNfVX7Qt2I36TVbmjulx8gJQakOIQRrX50UQAafe1/VDLFhLxF/gmfBIbGnzCVTEuAo+2e+WL0negP+Ir6hx8c/jCq5oB66R9uAnx0lkWvzUXpxrdjwY79Vc7YwF4dWgbyhWotYbU5koer0mEKsbEIiShcAWBbmsC0OL3LAYwXB/cysk9wuFm/RuMoWEC3Wc4IPZ1/cNFh3pjHozBgmT0A/boh+m1D4LFgNMc1+PuV8w3Q3BZOtnsk1jDvb3+7vBGucY99Yi72LnReSzFOt9uyzqY9mRAW6qFO9zLKJQrfpXFlLPn+Nw9EVeSizAPoij2IH4Rn6t7NR5FH3DXngM8dTLwD2IrzYFm1BKtMljAj2Rv1E+dMOCqMreZ2u9oaEoPUZdeCyNR9iE9nldbRvLGomdD8DThlUs0pWKM8RNmoKPdVzoh2uL2GMJmCmET+QgXBZ26neBq4/APbRjdTyIF2IT7UI5zhDQi+OxT+869pFj7Bf1h76KS1lj7L7Y8E3l5PbWvS7IjWtCjbXFtxYpdWg7vpqYRj8Ux4adVVg6iv2acmrBgOt7EtC2ae+9bo8eS7ajuWs+rnvUK23OOKwdvyPX43mVfPx8qTHxMsDWqiYdCVbN+9MI+568nk/xOshucSTb43yIczeOA7/qmpUulvMBL2B4OIy+yjnOH/DF8lGZ0zJPxUbxRW1N87eudagm4lPbiu35UuWX1yvNpW/wU3FsxrjqSUdt39TmyJMyFPEZjC/9r3vAgvC6+Pe144RWAtsN0T7r4laFrCeBWzRqx3RjbiUpqjcn0IXkicj2Sqh96o0H+4v8MsZuriBry8093yS23Pyqb2HBCDaVhcDdgBIYpS2MkVZdXNDmvGiUMYgnHPt2XBB8AZKSS4snLIjBFrRjtGAiZtWGQezAZj1UntXiSfV2EqxkU01W3E1T7Ud+egxQrcYQMdbjKrf65GXU2KO0dOzakI/SHM6xb8VMOiZOGdf107BQ/3Lc8EbiTEm4Y7yK/BDfaFMVc8B8Bm6jTyIrxQp4tuLxAbZW49IR+hL44x4EYL+BTX0soo1+zvd89Oau8Uzt6+sVQ69nX0HYjHfg3JBbzuCgG16RdHYgtnicZXWxiXbhWI0lcMZeVbWHKHnO1vmZ17bG9c36ne9ZHswvlWNzMPRNO4QRr9BJTtUvXJvWfXp65Dr6W/ohbiJOz8c6VGsct8Jc8Bc5a34mq9N1t0YJ/orRGotif1cPJM0Gi9qIPMBk3zrlz5XcjIFcX605MgZtHM3vhp9NHqA8kS8ysw0B6zq/Q7/sin1oP+BgXZ8aNhXsjQMZtzI++mha1tdLrILdVX/IJRT7eu92vpsa/Ux2Rx7r+Sp+xj8nIJwAxiC/roNrfSZAfJR+9qnzpmBlvewzYZS4bn6uZde4prZqB3Ab+CjSdUxet+pcMh2mXz7RhjonDEuUY9d8HMbjUdO1jlkQXgt51CsEXS1wQOhAYBwaj8tCkhvkfC3bFqwG6XEBKnrjpAet2F8ulzFBB16H4a3D6IP1iderb4CVdA421XFxAYHzMCYlDoAP2F/lmWX+07d77OKCVRcO8CHaUsR7WeVyTrCrLFj88KaIAxrHFc/cKD63HhpYQl84Czjq0GhnPK/KKx71mh1tw9F6108nE+KmPcI5+oz6VAbcnJzMqsrtMMLlkOxIC2AU4ht1oRy0Sa6jvdhPNcgrdi8f9QYrF2MMwf8qG7inQvu2Rp1urgW/XGKMbQOb3PrhlLVt1Bv+SJ6TgfPCN1Qs0vWKc9CLfrh5B5ipCDgHzKUpxrvHLeNMSWpUdyPBRZvwWJSNsGm16XyX+YqJf2/+BmwSdIXre2MTcCnD80HEyRKyss6Br/bKqIwp7UGgyuusZ9g1csHafJwgvnGNjbiagPgJ9mvTAI+2TT38RRraF84362nLKHx0/gw4A/q8H2g/Hld7RVfkgY8DGhFkKL6N+WJzsNy/evwO+YyoKr4EXWhGOK729sbE6+lceTzgSJW7znnc2hvs8TGANbH4FgYMT9fYefmRF1mY+KX3V/E1x0jnEeRcPb1gp9eF90fAUOXAOYyXJodjT6deX/va7p50pXmCY/C4PfLaV1kQXjsCqr8xaYS09nQrP5mwhVgJ3LmxuTZdTPDGjs56nTpOJiguQDBxZOKVwhLlYn8RD2NcsnioDyUZr3b2fcMJuF4c66KR+tnTSrdotvwoGPtxVR7iWY99u18E1Ae9ESWZJXnReOdYgS1rWZ0FU8fnthwfky0ymrGrJqcjlGHFR4mD7+xkSlPWaRzF9hQ3W/S9HBeDoHPte5VRcQzy4gKPfJSu6qPJSdwybFCfyi++Qz+IjYiTfoZztEQxMBmo12HluRVlJOx8XA3j2Ndi4Ip4F1OvC/0d29rhnBjg8Eg4GR5OJvYb2OTWj+Cgi4nKyPNlJG8kA9akPs4Js4I5+tEoCJtri9hnPDB+lnPgVrDVknrjZ7Ixcxft6B2LvBE2iKHNPV2bkk3mM+rV45JICzbJnt71vfoLDivnV4WA9mj4I3xLBaG320l047ClFV9bH7BfwFIxt35JhuN9wSjIwFOMm1739ifc09zT44JV7de6nuxINhXeOXvreFGLenBNMP6ZXymWB+QTlr8I9tn2lnzHM/MRuNny0eYEwmn2WlvyqzFfzOcSI8HDYukl2kOIZKPHtGWXYhXiKv0MQ10TTS/ExK2V6nvbblwfVb/JymuZ02NYRpdwLqgNtvPd50UUUc/X2DlcUFcd5B5aiu+Kr9jStDnoEJnZb9VleVr2pRUrl2fIeNQDcRATXSzA5tJmmIM+HwvhifHJ276Of28+BcUXOmVBeCGg96tJk7FuWRuh8shMvtQe2oJwJV3j1Z3QrSTxbhte9eQE0E2cvBhGudhfFLgx5pNPaE/xoe1bss1uLD5JhSdg9orKi/3VMJiM0Y/ct9j6b/19RrHBFt4Vpi5JlFa/ILiFQ7CyVxXyzs76hmIYplcUio8NxRWb50X+mlq1sRO7hoy0wNrrEP1CQHXZwpjl2NhkY7X7Qf/i6oCziIMt7ofgGPxYYYyLvy34invCyd9cLLbVfi2wgNfmp8YuyPameNwxdlXGw/L2M9ygvJB0g4rzLvRJp1mfiwva8LC8dXmM/UbzIio2nFpzHPmD/VCXt8mvH1EXjPv+tshfx0yYwnVJ7MDHKEHO6zyxWKde7etJdoldWCdkTJpnuV9rbQHuZE31LxAL31ftYLXqS/PRzSNnB2DrroucATaqN/1F4sjlyk/x7wO+y4jyzPeop3c
9xGbktyXtq/mV9ReOm++GUfu3VzG2Zc0tsr2MEuuwdquX5a9UGvcsViCjyI0PTKxv/lzFyvrb+uvXTPShru2ez7aWWdzxr+NWv/p6Kp+zjRKj1j0quCKnXc5AnL38PGfKX9EFbrn5DdieZb7EXSGR77F27gEGiifEF2NSsfdYuOLDzUfkEPiOu/ORI4ClFehFr/Y17uDa67zRk2p3yBWcjAEmRWQbuyq/IQN9EDlFZ6Ov6YEYuAee3fU8cwv+ymjhf9QvOgbyzYT06eNU5+G+6+Zbr5/Xcq0zFoTXQp56r4BAXiTKDegKJtyjSl1sxzeoe4SFPn8lBG5wbWklRjcaEntl9Nzm//n5vLz97zSpn2XbaVZxNBEgAkTAI8CC0OPBsy+NwA0mbTcZD/8UrL76fJPO0GgisAGBG1xbvlBBuCFAR3T5s7y97P99wrHgj+W57GaOe7KVCBABInBNBFgQXhN96iYCRIAIEAEiQASIABEgAkSACFwRARaEVwSfqokAESACRIAIEAEiQASIABEgAtdEgAXhNdGnbiJABIgAESACRIAIEAEiQASIwBURYEF4RfCpmggQASJABIgAESACRIAIEAEicE0EWBBeE33qJgJEgAgQASJABIgAESACRIAIXBEBFoRXBJ+qiQARIAJEgAgQASJABIgAESAC10SABeE10aduIkAEiAARIAJEgAgQASJABIjAFRFgQXhF8Kvqj+V597DhB3A39vvf2/Kw2/ZD4B8vu2W3W/97+PmnmsejZTkA001woTw83jR48k5fzR8HN87BwW/PyW+8bZyDVXySd+zck7n8/G+VdtYjjWlYo/TabtkNf2dNfArjhoadhsFQNBuvjwCuDXgcLPvz82HZfT/wNwCPmnOmWOb1tnumjTjkU++zYZ6ke++euSEYHYSD+PGJ68AhTl+h71G8QTsHv80p8WqvzfeNOcI3xfFgXZnCvomNYEE4RXAwyRwZtLHfAROidaMaWXC3bQdgejBGnyn7YGPOMOCr+eMg2TgHj0pOTyiGRF9IOJ3Zp55oTCF51fONiaf03ZzUnoDBqT5y/OcjsHFtOCqxP2rOJZflPvhpD1OWZYn3WT3fOCcEi3Yh0grXfRcnR/GmBeNB1+4b84OgukTnjWvMJUy5NR0sCK8esZQApV06S7jwmt2o8Jr1SwtR2eGzhPCACRFvVCs49CZrO4j+CaqOzbuL6Waak7nvD7rriNfMRryGN2CRZTc9XdTLrqX5urJsWcQ2uKnqOMUg2fH8U3ZKk+0iu9rr/fCSEeddlR8wrTY+LM8vDy4Zr3qqT2mH8WF5+C72PC8fRR7GsF5/kyfkarv4D30sxmK0ymjHxvmUdXmZ1qPj79KKJdghtqEtJq7YBRgjhyBeGvPMFYs9ilmyDcYdjLXvJ2dg28vb8vY9z5uCcx7R4syKa5k/L7LLl/EFXtW45H62KwfxeNaxFYPKF5HnOV348v15ef4eObMhvoqTl6lzw2yPmP98010E9ePlY6m2BRkIsvpm7QlrnL/SFeW05kTsj+K7GAQOdGU0OY6c+KjqIE64i7vm43hupBgjP7IK4Lv2Afx7cXHX9+xUFax2wJXqHRyB/6P5KiPQZl03M3db86XM+w4+ORbP8PZJihvaU9e6EpliQ15TC24dPWI3xDLOOfRpuHaIjKIrwdfGOM/3I+4rKi/jpvMk6PPr13N4Y0hwq2sJBDgfAj4vMtZyBh/XIQY5zm8lZhafxG9cn+s8H6xlaAOuzYGHiHP3fiJeFm4ke9w6UNoGvPn+tohvyY8cR1u31b68tmUc5P0ose0B7k96Xt6c6mPexQe4Gu8BMaaIC2If+8l5ry9e99iGeShC1DbIT8q1vL4V/kXsMk4Zl67vDcPRvq6PahdyX2y3+xDEQHhlcyqOKfyAPo34rk3syF93DPxEe72MwtsW3i25F77GgvDCgLfVIcnzBDdyK5ltAmC/RLQykTLB3v5nkxtJ2dYqV3VSlht87If6UsJn+nTi27gyATP57XqUD7648XFBLovPHvtg8RbLq8xsh8PQ3wzMj5XHckMs9ov/jcICsbabXR6TFsSMvfbD8XCjLpiFeOUxZl9aNHP8G3pdP/M3OpVlml9qY+7r4w/+WiJesBjwrakPMTD+en6o7p7NkTsZ57KgBp3Oj3wD0L6Is4xBzmi/Ok+qjBZ/zAecE6lfsilh5+JhPO7qsSIKsTKeYizCmhB815s54uh40sDc7Aq8qP5HBcZRwcD7WXo6namP8U37CAaFS2WUHrTmjMPRximOFgcvwwoDN84Sh4Ztrh/OBcDR44Hx2McPm+ehX8Y78QXjIrKrX4KH2Re8TGuc2ejkrXrqQ5Eix2EQ+6JvmWvGEcHc9Lk1Nq7NICPbZboVR5Ohbch3PG7j1o9D0un0mN3OX8Q6+u7va9KK66PxysXMfFE+2nxNOsyWqMV8cFyHTtZe9AMf5Jq0JxtgUD7UscEm7XsABlZwOT8NS/UzxwmPwxqtvtlcLXEOuKBNjlvSr84B5yWOOSA+rTim+CSbKp4w/8Amh2v21eLr2hSTHJ8uPkGn9DOsnLMj/oWOIyzAj/Rg1bD1dpSYKcbIMZjPFufMsTJGzQnYGWdszMhH67vSjX56e/H+rTEo8sFelTdYV/IYF0NUmY+78ld9AYMcE8cTs1G5keMw9Hml4GIXWBBeDOqRIiRUmAA5MU8Ew35RHrThhIjdwrmS3nYSyqctHiDTjYs2WmO6bpPBnnrWhRfG4cLpFi+TlT794uPbcHGQlto32BHwEJ+rjUGmOwV7QUbVkzrjeZRdFhUdb7hago2LFh7Xfig74YkLSh4jZkT56MeqbUtcA4YoT497MsyWZJvab8mKjINYF2xWslsXIBarZrEFb2bQF+Kmw0B/FFOxjr6jr3g80BP1grKqJyV6yMXCH5wfMnYgTzG1m47Ng02Yoy84f8BYO1T99rR4PX/QJxkSz9V+tMnkhkRLLhcMVg8BAG8Yr4dq32nzZsxH1D3gR+QXnuOxGF1i7OMQXavnUS9iVXu1jwY6ih15JHIt2LyKa1EG+IRY4Jx3PB7ocWOKDjmIeuIauH/dceIy3+I9qjkn3b14PScrb6OGFKfytoElwqVbjE08T/MJbSpDEQ+9WPHRWOGcC7GsMpCL6arzBePkBuE8r3pDl3AKvo3sCaPcKY7DY+lUzpM9iFn1KdratimuB/3xUV61ts6Xfp/aW45Gdvuew74FhzCmF8vWnEWe4jiRbW09Pa17AJhSsUwXI9bQ1W1axHG1H+AbbY1zINs+0lnl2hHIt0vlE/hTrsmBXB/lJfV+5YZd8YQF4RXBr6qRUJFEeNPHfraI1yStvIaAE6IqaR7tnRQqK+soE2ttYxIeJ03qV2+ESU5apKsvfpInGW4MJLvOibAYdRffgIfX5yS6V5DMBk0WQEbELOrFm1Bpg/GqEc97x6tFtWJWk0mzH9rskn2ifL0GfbUNOWQLWIzlgG+mxz5Bn/pfHjSYnpS0DeMgsrq2mSL7BH/0EtgOtmiT40yPazBeB6F8PIZ+eJOUMU7vWA
/ypWCi8gwv+2zfQArH1NacpG3CHH3B5C4Lwo8cC7U1H9ckOie8ME+jTf2CMGHTxMB234Mv2LeY6PCOvoCfMU6qI+FasDeh2U9bB+Qz+Qxx175Vvvpd1klMULNN2IY2oy7sY7boZ4NHgg3g7rqvONTmT02i82i0S2SAPS6uaHOOUVwrVSLKQNlwPMKtuw7EWEZ5gTcliXUgxViOMA59QZ+IXPEH9Eib6ddjwDT5h7GpfDIRgk+T94OkUzHdhIHn6coX52cDH+Wf2Gzzw6zOnwMeoo1t/5KMhJ+tg/XVvz5vQqxcfGIb4A1cjfGs59FXlNfDR1FVjNJ6Ag8yHFyN8d05Pu7bxDbOGdPtYty61wv2xtGKV8VEBDXsaa5NqR/GW21t9kVuVr1qttoMnDD+gS8rfkCbt92AgM+efOhSDrFvmduJJ3j/kGP1G+woMiY4YEE4QRDczk/niV+aPDghEtlqUgZtB5BNJkU3oQjY1EmbJnTVbR3jdbDJusBnmpAf7lWROIGrThhoh7B4y6XqS7Aj4NFfCOJCBXJARrQJz6PsYhOMV/PxvHc8KghxjAjUc1uwDaD8GdvK+cDfFQ8HfAvqki3wpL6z0EesvJiRbb7n/idx9eaL/MJjkVjjCHFXVchjPIZ+zXgABuUmgXrWSWTBJHA7euzOpS9gXP1wvZInbnccffF2rUYWzuSWkFhEnfFcOQEYoPzic75Yz719OGZ1HPD3+kFO6IfzpuoV6SP+QdzVEJAf44bneCzjAobmk7fdrspn1Itt8VhswgQdbIxdox2IUbBZMEpcG+CD40UXysA2PMY+bswBekBeH8PofJqD9V42wji0gT6R6vnj9VTc5Hr0KcYmnqe5iQl0lR5sAo4cgoGLUfQF/FSZMI+rjmiHWbiVh73xjXmCXMFjUVnOI8YYn6BL/cv3zjIe+ydfanzD+Ih5Ex/DI3+CHt8SZftWf7a1L/SDWDpZ8XrXvjQqYXF87laxrPLwHuZss4d2/+J3fZNPdU50fIx+yDnsENbxXuN6joL82DWc1zmxnsela8S7NFz3gAXhdfHP2j1x9OZhi4oS2BJ97OcXWiWhPcE5gGyqC5JJB0eQIzpsArlx2k9sXE+adT9IUtS3+rRPdKsf5rvtEPTsW2Hjk5Vykw9+iE3mh/M33qizfSoHZRR/ZXSKgy1mKQ65ANF+2V8cL8PwvHdseBT/1/E3PxTngpv3KumqPleMw6KK/sJNLkkb8C2oW/tm/E03Wdtx6MdBBI5siwq93JRoI+7GuSTT9FccRB7GMfI44m7+YD9vr8Yj33j6ekKyk/mSYtrCuxa2DgEZsjxtvQAACXxJREFUh7FXOWajx8Zjjn4dWBDmpNGwxMLKY5ktFW4VHjvrfWHkMMi227jcVuY1itG2io9ibuMsoZDvV+c4t+aNx8bH03GqOTcMb4xbkuExClwUGxu2m33oohy3uNTui3bkcXZ/iEJbmOArYWVcktksCHHtCP7UJH2w7mUbUmwRt/1xMP9xznk++jkQ3RdMTcYY42RL4V/w0/PHa1HbCh8NB+OC57nGuGCe5Mj4oteL9pxYxcF4OcbAxSjP7YIJ+Nnin81r56OOEd19HqqsgolgW211LqpPNreRG9IL5fu2hKO/Fyefcr+s2+EtuvJaGuOJ5w4HwNxdz7YlfMRO8E/GFN+dtz6eYW76nv31QO0o8hFbxAvGQ4yTjkY/WxOkQ/bZsJJLfd+j1bYe+dj0+C2jlVu2u6biUgwLRyEGbj1Vv2zu+LhjPBsW+u9go/zYOWAnOJhdarfFAW0JY6LIa52zILwW8k5vJmpZMOw8bYfXiWLX08KiEzC/EvLwU57WZOIj2fDY6UwnNtHitrYtVqgDJ78l7DYu2Zjsq/aKDrM5+WITJWvXp9ita0nu8/IhExGT3eBDtf950b+iqZMv2BEwGC4EttDl1zRK3yCj4oJ615gW38J4t2jZjUMW3NBP9diCov3gpqJ97ZUJu2EGgOQ0y6x/9Q/69vxdJb22iFscgW9RZfCh3DyUq1V3wTaOt/OubdYBP4Fn+a91Gg9rrB6Wt5/Ip3TTW3Mt8MfhbnokDqEfxONB/5Kn+drTk+yPHPacsfhC3NFtPRY7QjtiBzdyj7nYVcd5rgUl6lvtmy1Pr0Dl+Vlxbs8Ji0eQrKddDIbrB0gKnPO+eD/TfDBcLUYp6SjYi2jE8OUDdoBC3B0/cFzkG7Y1HoTltXy03olZFSt7AAY4wGGNhzwMGsxXGYPcfXl2v2Nb9YW49vAJsfDFhs2F9VqHeOtfiLZ1v6cn2u3mXMAa5gBAlA7FXtOVG6vPiHGIe/BTxjj+gCKVV9bx1GDxSfPCcBEZ8leAca5JW+UpiM2HyS5dx/Jf0yxzDbEbyZB+gIHzxflZ7dSdFjcO7IDdafNT7PM89P172Pkc4mF5k50i9AV8dLxxc+V5kftf0aE+2b0M8AZ/HAZZVhmP65LDfIAP2Gm7VOtYpivKF1sPAm/imHbfAbbge7HDxThrcP2Qj9Ke/Kx41Gvr+2m0OJ2j3V5Oo79iF2xAPHF9jr5gvx7HGypxPZJ8OPIBhyDHcR557kb+jeY0Sr/cMQvCy2FNTV8SgbTw7l3Qrul7XCCvactFdKeYlKToIjqvrERuensSh8tZGOaE8A9uxJez4/qaNOmZJi4b8bi79SIV2dOsF5rA1mRRks2p7y+OVn+Wt5e3RX62Ybb/Rgn9bLbSHiJwDQRYEF4Ddeq8bQTwiVPeSZzaobtL8O6wIMxPsK+W1HbnhMQiPNmderKcaJzONdt99LuAJ0q+3PC7Wy8E2n27cJ8Jf1qv0s6KcAfmi8Ti1h6mTMofFoSfyWHK/goIsCD8ClGkD0SACBABIkAEiAARuCIC5dW5W9sVvyJmVE0EZkGABeEskaAdRIAIEAEiQASIABEgAkSACBCBCyPAgvDCgFMdESACRIAIEAEiQASIABEgAkRgFgRYEM4SCdpBBIgAESACRIAIEAEiQASIABG4MAIsCC8MONURASJABIgAESACRIAIEAEiQARmQYAF4SyRoB1EgAgQASJABIgAESACRIAIEIELI8CC8MKAUx0RIAJEgAgQASJABIgAESACRGAWBFgQzhKJG7LjbL/ng79XJL9jdsrvLaGsG8KSphIBIkAEiAARIAJEgAgQgWsiwILwmujfqO5PKQhPxYIF4akIcjwRIAJEgAgQASJABIjAHSLAgnCGoOdi5u3nw7Lb7Zbd7mF5+181rPzY66rtY3nWa7tl9/K8POM4lSmy5N/z8lHF1aM9ehfZtTP5sHvnCsItelQj2Gr2ZP1qW9ghFB2m++Hnn2Rz7v8Mbc//SlNPtsno+F+R4BERIAJEgAgQASJABIgAEbhLBFgQzhB2K6peUtmmBaAVYFqU1YJGC6XcD49T0WiFZCqQrJDSfiYP/R3p1TaT92d5+y5FZ7JP5CXZG/UsaXwq3pZFbRVZucCLBaHzP9uoY/Nx0y+UlQtE09f1H7HgMREgAkSACBABIkAEiAARuEMEWBDOEHQtZqz4EoOk0MLzamQpplZ9YIwrj
pZceDXkDfS6okzUww5eKQgP0lOL2uINji/yU/FoRZ+iYQVotLeMMR+zjlBEu8KzKOcBESACRIAIEAEiQASIABEgAiwIZ+AAFkZqDxR3eXfNXp/Uz7K7hkUejIkF0ap4zE4P9GpBaK+Lls9UcJWCcKueVb+Gfumju5h5N7LozK99Fp+hsCxjWgWhvS5qn4jVDEGnDUSACBABIkAEiAARIAJE4PoIsCC8fgzWO3iwExZ36j51hzDqza+IRohKQRgLShjvxsR+1ojXS3HnXy+1rvqJ/eVCGdMoCFuvyDphPCECRIAIEAEiQASIABEgAkSABeEMHNBCx76Xl79jlwsaXxDKLqD/Lp99ry/t6NkuWOpnr112v0M30JteszR5y4IySkGYv6u3V0/uZ9/pKz5hgQfFXWnX2IAv2F/aYIx/LTSNcfrsD9nMEG/aQASIABEgAkSACBABIkAEJkGABeEMgciFTv3rmfBaZC6m0iujz8sHFkHQ9vDzzX/vMBd7ZVzLz6HeXHCVVzerTbUgtJ05ey2z9lmpa9mT9cc/KiNjtQA13bZTif2lUxOLbIPTVwtbXziurOQFIkAEiAARIAJEgAgQASJwVwiwIJwh3LHQOcYmKY4O3QU7h95jbOUYIkAEiAARIAJEgAgQASJABKZAgAXhDGE4qjCLf3wFdsG2+nSU3q3C2Y8IEAEiQASIABEgAkSACBCB2RFgQTh7hGgfESACRIAIEAEiQASIABEgAkTgkxBgQfhJwFIsESACRIAIEAEiQASIABEgAkRgdgRYEM4eIdpHBIgAESACRIAIEAEiQASIABH4JARYEH4SsBRLBIgAESACRIAIEAEiQASIABGYHQEWhLNHiPYRASJABIgAESACRIAIEAEiQAQ+CQEWhJ8ELMUSASJABIgAESACRIAIEAEiQARmR4AF4ewRon03j8DHy27ZvXzcvB9fxgH9uZUcE/n9TsbmCqFNP5vz8PPPFXSfoFK5c8RP/JygkkOJABEgAkSACHw2Av8fye9eCszUjtMAAAAASUVORK5CYII=) ###Code vezes = True i = 0 valor = 0 alor1 = 0 while vezes == True: fruta = str(input("Digite a fruta que deseja M para morango A para maça").upper()) kg = int(input("Quantos kilos você quer")) if fruta == "m": if kg < 5: valor = 2.50 * kg else: if kg > 8: desc = 1 valor = 2.20 * kg else: if kg < 5: valor1 = 1.80 * kg else: if kg > 8: desc = 1 valor1 = 1.50 * kg t = str(input("deseja comprar mais frutas S-Sim e N-Não").upper()) if t == "N" or i == 2: vezes = False print(i) break i +=1 print(i) if i != 1: soma = valor + valor1 soma = soma - (soma * 0.10) print("Você comprou {0} é teve um desconto de 10%".format(soma)) else: if valor1 == 0: valor = valor1 print("Você comprou {0}".format(valor)) ###Output _____no_output_____ ###Markdown 
![image.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAA4cAAADDCAYAAAAxzGRxAAAgAElEQVR4Ae19S24kOazt3VOuqguwl/Fg1CRh4C3hAq6hawNvVPC04XEDNW+gB7WDeCAlSocMSRmZTtn5OQ1UZ3wkfg6PGGREOPN/Fv5HBIgAESACRIAIEAEiQASIABEgAnePwP/cPQIEgAgQASJABIgAESACRIAIEAEiQAQWNockAREgAkSACBABIkAEiAARIAJEgAiwOSQHiMCpCLw/75bd8/up0znv3Aj8+7o87nJM/t4zNufGd5O8/5bXh93y+PO/TaMvZpBy53F5/fdiLKIhRIAIEAEiQAS+BIFPf3KoBbUUcPpvv7jSWgo6O3dM0W1F4Za5nSJA7EoFTSpu9n9/STw+Tel/Px+X3cPrsrmEw9gYzvmzi5XMOUaHen95+FduQHiER0f7BvO5eXYEhNPCReX2bqfbH1KinA856kMC72CyYDbM3Zeyvt+XPXBE1ng3jx0Km15TtvIk6S3XuXPkkJKbH5fX/3dKzj3kIM8TASJABIjAPSHwuc1hKBy0UbRCwjVtR9x91gujv+OrcnsXXaenhrrZANTTN7d1dHMICLi4wfHVpsSmF4fV4Ms9cG/cuNxIfLJlmlu2Fv2fbBvVfRAB3xx+SNjm5jDpxKeqH8nDZrPIQJl2nJ9EgAgQASJABE5B4HObw2ghFl+4vSzLtgakdxe6d3xZloPNIczNF/1XecqmT8l8E7qozfkpaGmCcmP7kObs/87yfuZX3nbpCaX6pzKx+Exj7a5yuZOdbX58EF02Hu9A27FlcTbBnXGFvtj7uOyf8clhR2+MV95fxwZtgVctRd/D6/Iqr18e8LU2kRmv/OTWngJF/LVhyxjjE1/DrspbO1Gxt6fFeUzBp+KM+o/Vs56b9LT1H8OT6pPiUF7hQ+yOkdeJX1WzLEtbng5p8VOPreOe7H3VpzYaq+f38qRvt/Prq43TGsOyTpaBHx17nIuyA+P2z/ImQ11bI3tQTjvuCb81P9Pxx5gvVHfGz26gBfvQNrQ74oi2YX7Ya05K/vV5JLO97RVvJ3njuMwjWd+K9X7x+RViaH43x4FuXLfdPBz8eN7Dk0OxKXMv56zyRkXWnd5w6eDgxoBdYVN5Ueyzk4CHHFJZ63WjcXvYL3u9Bsj5ZC9yTZ/YFvuT3MorEQ7YSj42fPW4X3tmHT+JABEgAkTg/hD40ubQXyzThSsVHvnCVoreTmAGF2Ut5MrFD+brnPWFsBZHcLG2C3WW4+x1ctKcdLHFbdGb960oyIUMNhp219fZrOOyndmOWpSBjaJBmlex0dmUj5teJ8PbpHptHOoF2HDT2Zn9Mx+cDc5X3/B7GevYq686PxTnORbO5tXNBJSHlo8wkTmVF4Kp+SS6cNsXVZ3X0cR2wxSKL42VNRyDmFgRn/SmeJkN6BHaZlzDNVRscLFAebhtxWnFoerKnHG29/iZ8Dd7MVa67WTUIlXP4VoL44z/he9inGIoHBn50ben+idbjXHZBtVp8XRx8xJSEW+va1dOoW+mx8Up+20x9HGzeCT7DIeKa/LdjqsNRR7a15if/RNZFi+zweQ525VHZg/K9us78bc1DmzNOJpe1WNrUM95ftm6W8fC9GSOqu+4neyseNWbaDUGJqPGTGYh17o4FA56PPxessd89edsL8XHxoztresGbaz8i/4H/YivqecnESACRIAIEIFl+cIvpGlenNLFUe6uW2EyjNLgouwumChE59idWf+ZLsrpIqr6VzbWwsEVKCK/NAPhItwsWn3Dk/Qm36vfAzsGfqOr1Sa0L48I9ka9VqA4eXnHFUmrARWjVCBWX2shHyeBrxmvak8dizEd24Dy6nwretE3kVPxtwIR56SiF+fUsz09/TlVX5JS/Uiyip4Q4zjPbPDH0Z7T5KXGpYVDkJcbcrU3rpNgO94wqP6KB8CVRiFesHDNP/poKLQ+QfbAHjezOa7ReKLvTkA/7n4Y+hBxjfvghzZmrfWE8rwmt9edH+1GeaIf8zGeQ+lbx8H8wBtc344bYRyeOzYP17wCdmjOMc6n44l7OGbgX+QNwlK2UVY56DeiHPQ7xA6xwu2a89EPrybtAa9ap3mMCBABIkAE7haBr3lyqBc9LDjsTm4tfHwR2YlPvJjCMHfBhONYqOLhWmTDRXwlv15QVb6+KokNZi0k20UIPulI2qveVHyU186y7Fp8V2xWTRc4orihXflph9prTz5kfGkOY9ETC0UQnjdXsdHCBXHIhVbRkScinpkD6G/CDPDPjSKOsacHFTeUjTYEfumwJNvJw9er0CbAyunCMRnnGutsS7whYIcbxytP0e8RT4ow3XC2ZflrHA/I68XPqQr2YYOEcZU5Kg/4Ck2gt7euJ5kWscDmsJ5b87WY2fNjYE+Z27K7+HWAN0VIGod2l1Nd3kRc4z5gtPJP+G5NTcIlcRuxLxbAms/Hin9xzaMNKLeur7WPW8eBbNAvFtUYyx74HcbhOZ2D+U63W3k42VfXKtih68ZwBDuc3oF/bhzg7TaTvjVuMGjEUzkHOQmxwu2a19G/rGPFn+ozWMFNIkAEiAARuHMEPr851AvU+qIkRaMV/hqT1YWyFanGBVCH9Y5bkdzWny7cMFcv+jAW9t0F2ZkG81u2hEKiFstQDDl5ZjMUfEFGGR4xk30rKHBbJpT9tr2jIsbHKhZd4EfLHn2NLemsOtCGuq0Ym/2heKy4iTMjeQWdMq4WiHjOb2N8q66temKxXWVXWelYxbL6rWdCjOM8k+iPYyy2ysM5IhXiZ0r0s+27xjDYunpCrOfTOlrbW9dXG/NkRBenYuPAj6Z9VW8R0RzXajTKjNWG989Ot7FrNvGuwZf5EI+yZk1u57M3Lq5H8NfbjViC/o66dHjrOOAl6BcZGH/nt46DeMG+n4MGgh49PNqXc1H+fnkXvMrruQP/gh9oBW6rrZDP7Jxgr1yIcsDPmq/TLPQbt+u46C/GVGQM/DHD+EkEiAARIAJ3icDnNod4sYtwh8JFi8HGhTRO04thuXuezg7ndmyoxRFcVHWsvXaYixezKcipOmG+mhP2dV5t9Kre1FCUYiTrbhYNemGvT8ZK0eEwTHpLc+jm+HPVdnvqA4XSCvBgp5ObMbJ4qD3VTtUDfw9UmkM3ruJV/FIbUnFj+CBuq+bQyfMOtGS2GhwZZ/ZVXck2O564V/1DTS09Eks93vxbuuq3yhnwBPWsY2f2bJXni8ZkXyv+SV7hE/I/2JoKz7pu0MaKpXjhC1TVnYvxPk6Bf8WOkR/pnMUN7UEsI490XOtvDjPnTR7KULstR5S1MeJNiNOoOSzyksaKkcdReVmaGrSugQP8zWGJbVg/ioPJU7yNYyi7FZfWOPA38Abj77iRdRreDuMS/2RLjS3oyWa6ec5HGYucT3PlKWxq4EF2C4fgh0cF9zz+cqbGUPb8+eoL3sxL8hAr3N7aHCa96DPayW0iQASIABG4ZwQ+tTlMF6T6alJ8BcqfbzdQzWDl4qG8
LmgX8NbgUEzYELkQp+IDiop80d83v23TGinzx+yF+So87IdCouqVwbUoEV+sGFo9iZGhzmevO+HwuLz+Ld+Qaue8vfpNhaWI9XqxIDJ88FOLFsAY4/b4811/BFtlSAGG31Za9HlbpOGrOCBeqVgqPMny5JsE6/hsWS72dKyTh5anbbXfXkXr+FEK5VLA5XicqKfEMtuefIIYx6ZgyBP0CTB6fq3YHyGvGz9UY/LgWzQLT4KtOq3Jzxg3sb0WqGoHxAPjhPjFdWJ2DP3o2ONclB0Y9/hTvlW1rh+0x25SrOb34tvlDfJdpMV9jxHaV18pDesJbF7Zh/497yE/9HhUbVpzNkr3ecTHzMaCf2pLxdfHH/zO4z6Wh4MfOS8l7ohNlYc6Ur+hutqWrO/4h37gtrnsPgHn8gosDND58ZqS4wv5E7HC7X5zaI1oku3ytDal3n+wiJtEgAgQASJwZwh8anN4ddgevNBfnUdXYDAUj1dg7f2YeGlx+W95fbZvBb2fKJzV02vJb9di51mDQ2FEgAgQASJABL4GATaHI9xZlIzQOf85xXtX/07y/Boo8WQELq05tKd88enOyQ7e38RryW/XYuf9MYgeEwEiQASIwA0iwObwBoNKl4jArSNQXiGF11Bv3Wf6RwSIABEgAkSACBCB2QiwOZyNMOUTASJABIgAESACRIAIEAEiQASuAAE2h1cQJJpIBIgAESACRIAIEAEiQASIABGYjQCbw9kIUz4RIAJEgAgQASJABIgAESACROAKEGBzeAVBoolEgAgQASJABIgAESACRIAIEIHZCLA5nI0w5RMBIkAEiAARIAJEgAgQASJABK4AATaHVxAkmkgEiAARIAJEgAgQASJABIgAEZiNAJvD2QhTPhEgAkSACBABIkAEiAARIAJE4AoQYHN4BUGiiUSACBABIkAEiAARIAJEgAgQgdkIsDmcjTDlEwEiQASIABEgAkSACBABIkAErgABNodXECSaSASIABEgAkSACBABIkAEiAARmI0Am8PZCFM+ESACRIAIEAEiQASIABEgAkTgChBgc3gFQaKJRIAIEAEiQASIABEgAkSACBCB2QiwOZyNMOUTASJABIgAESACRIAIEAEiQASuAAE2h1cQJJpIBIgAESACRIAIEAEiQASIABGYjQCbw4kI//nzZ+E/YkAOkAPkADlADpAD5AA5QA7cDwcmthfTRbM5nAgxk8D9JAHGmrEmB8gBcoAcIAfIAXKAHBAOXPN/bA4nRo8JggmCHCAHyAFygBwgB8gBcoAcuC8OTGwvpotmczgRYiaC+0oEtxfvt+Xpr5flN1+P5uvh5AA5QA6QA+QAOUAObObAxPZiumg2hxMhvr1mgc3etcT0949vy263S/9OavDelqfdt+Xln2Ni/nt5+SvrFN0b9aKtT79EX5Xz7cfvzYn4WmJzKXa+fYdYfX87Aeevunkg3Nwtu5NsDnz+52X5Zutkt1u28a3yc7d7Wt6kWCpyjl0zwR4WXifwkBheSk6hHeQiOVA5MLG9mC6azeFEiLlI6iIhFl+Bxe/l5fvL8vbjqdHkvS1P5yiuXTGb9J3ypPHtey6yszxpGLcV6l+B6y3pTDyI+F/+ej0Tf/95WZ5OugER9UvDyMbw8nlzS2uXvpBv5MAlc2BiezFdNJvDiRBfMmlp2x0kVSt87dMauV9P9amiPTWBRhGf5JUnIzZ3+Hme5lCfaIE9f8Debz9ezvfUaOjLHfBD/P/1tOjTWvtETAB3eQr8Ag18eerong6nJ3rfvj/p07hvP97yk2RomlCmPXVDnYPtonO3W55++ebsZM7GtTHQ73Mm6k9+p6femTfgJzl7J2tpM3eIh19LxIN43CYHJrYX00WzOZwIMRf8bS74a4nr7/LEsNW0YXELcZLX47Axk33XAMDYVTGEr9ttf61U8ExPrtJ898Qw6NcGYbM9I1t5ruIuWAQ+BNxTg+6f7urrv9/D36TKvNz0Say0YcIG7J+35c1eVZaxyLUVn2qM/JPkzDObG+VE2wdy6+ug6RVbx73RPMNL/YXmV+YE/eRsjeO15E7ayZiRA+TARzkwsb2YLprN4USIP0oszmdyOp0DviGsjaJhGpoBK4ThiUf5m8Wj//Yw6/j1tPnVUHsq9PQjNKOrJ1pf9XduhtstfXoO4Kula760/PYcU65CI1jkwbHYjG37u8G2ntJYnpGzb99Do2frYvWZ/+5x97S8xFegyVn+3eCKL631w2OnX9+IHbG7fA5MbC+mi2ZzOBFiLt7LX7w3GyN9ogFfOLL6sg3fGBQcVoXtB2KITcGBYgmLcnlKVF7RW9nD5rDE6gCmB8c1mirDfU5zKE/88Oljh4Mrvw43h2b3QZ9Xsj2/t/ktc5CHwS9yls3hAZ59lKec79ct8SAel8iBie3FdNFsDidCfIlkpU33kUTXRW4ssLEwl+LWnphg0XscVvp3X/aqn74qml8r3FAoladMOhZskCYXZKoOvlZ6luLbYy6xBk4E3PXc6gly5FR6pdK+4KXILzcJIK5//iyRL6Pc5G4YZG7Vp45e7kiOP5deT62NpTwNxOZ1xH/ASjiLT8kDduTsCEee85wkHsSDHLgVDkxsL6aLZnM4EeJbITj9uK5krcVo/qKZVPjaK3DyJLEWv/Yqp7w+WgvkVOjWV0r9nENcQN21eB/jh3PMjmRbshXP774/8bcXNzTbh+JUY59vCuCT5tx8O9xdY4h8sqfTIqcelziqDpGVZcsxJ1NiuXqi3eNKauSMl/YlL+VvBFdPQSvPx1h4uca/7XNMT/Y938hY+ckbGme5oTGOS487PE7cyAFy4PM5MLG9mC6azeFEiLkYP38xEvNbxzw8sTlDo0TO3Dpnvto/cpZr7Ks5SP3kIDnw2RyY2F5MF83mcCLEn01E6mPyu0kO4FMt9wSL8b7JeN9Cw0/O8knhLfCYPpDH5MDJHJjYXkwXzeZwIsQs3Fi8kwPkADlADpAD5AA5QA6QA/fFgYntxXTRbA4nQsxEcF+JgPFmvMkBcoAcIAfIAXKAHCAHJrYX00WzOZwIMZMDkwM5QA6QA+QAOUAOkAPkADlwXxyY2F5MF83mcCLETAT3lQgYb8abHCAHyAFygBwgB8gBcmBiezFdNJvDiRAzOTA5kAPkADlADpAD5AA5QA6QA/fFgYntxXTRbA4nQsxEcF+JgPFmvMkBcoAcIAfIAXKAHCAHJrYX00WzOZwIMZMDkwM5QA6QA+QAOUAOkAPkADlwXxyY2F5MF83mcCLETAT3lQgYb8abHCAHyAFygBwgB8gBcmBiezFdNJvDiRD/+fN7eflrt+x28d/T8gY/LPr7x7fl6VdcSHXutx+/T/4RTpeg3A8zJ5vOI3uCrYCP8+HLjr8tTyGOTez+eVmeTo3Xr6dlt/PcOOi7zunFstrctPXLsPRcF/7bGknr4B745DE4GOdTYgXcSPh+W17+aej99dTIP41xaEOQvc5fvfk1tk2uQ47aLrOna9bxhg/F7g7GiB23z3M9I47EkRwgBy6YAxPbi+mi2RxOhDgVfG/L0/c
3t4Dfvh/RAHyk2WgtmlAIvn3fHV8YtuTKsXPb2tPzVceDf2/fQyHoCuZw7qDNb8vTXy/LyzHc+JPm/M6yoz24j9tTGpGD/o0L9bgmpGG85Ib2EjFs2fT7xxM0hJ4vMh4b82az1otryCMt3c1jOE8aKpcbpemy3IjbY+409fTsPtvxmNfF3mPX/CX6RZu+hk/EnbiTA7fGgYntxXTRbA4nQpyIbkVEo9gpd5sHDVpoSJJMkQVPI12BdSDBYHGmhVK2L9tiBbk0jfK0Qe/el3Mv8PTMijjQ17Q1NY3fylO3Ky6gon8Oy3XhfUyisyI+NklDGdEet/+2vOATTHcOYuaK5cwrbVIrv/wTnPo0svAjy7BGQziE20Mf8lz0W7mHnIam+9uPzEE873zo+Xafx41XFgPEWW7m+ObsCIwc97fP8/b8Xl6+vyx2c0PsecE3KDbp2MBZyLO7HeYfy6NyDLe3+GN5XcamNeHWCTnrboga//i5hVscQ56QA7fAgYntxXTRbA4nQpzIjcV0o6GSwnZUBDWKeim8sRCRfWvqDi6olS4o0IIup0eLHSispOD6Cwo78SPMr/6j34IH7l9REnT+STEJeMgrxFjoHtOwQJHuiveDMnxD6poqZ6tgjMXsCPPE18on9BO3RUbchxsBuXnb6k8aJ/J2nsuBZ+pj5N1BnEb+3vY514wFLLdzooERND/xJsE4B2E+DDfFYm5acbhhh8Z+xNmYb+J+fXqacuoR60Q4Lpi6hjOvAeAoOduLG4+P1wrxIT7kwDVzYGJ7MV00m8OJECdSW7EhhW+nKYpFERa7jQJJi43yJC4/4YFiZLiYVrqgqQm6Vs1heFrjCk+xOcxXO1b6DjTD6PulbWsh2HuilvxKf9slYzqxbviEDRRuD+NocrBI//5UX2FexcJ4eOhisy6eS1xXMhsxb40xWwefxumnH+Gmw4o/viHehNFA763PlzU84qQ7vzWHrPAc5LY49p+35a383ePv5e0X/D11jPVmLg04G2WKPeHYKo9Fm5v7ojOt85d4cy7I1yb8ZGwPrVeev/U1TP/IcXLgOjkwsb2YLprN4USI04LeUJSviglYCI0C6egGAoublS6wL+iKzWF9mpTsWxVVYb76v9K3Ls6uJvGhf9IoYrP868k/8Wr5jXEo2+lpWS3gd8vu1EIS7ftzymulEtdG82Vy7bPYfs7msD6FjbzDp+RN+9AebrvX+XCNOlzz3xsitjj22DW5NSfFcW8/4O2Dk14rPcDZ1joMx07zG9dJaI6DfHIWrmdcn259HrvOOJ5cIgeuhwMT24vpotkcToQ4LWJovnoXxlUxAeRvFeSxEenJbR0PulzBiA1PfkpWikd9QoVPw+CJo+lp2dp4nTEWiFeT7IJ/gl1pmPEJnjxROLHBa2EjevzfSgE/DHt9xRPj82fBwnt7AZyeiJS4axNhX2qyjvlKbsBoa2y931B4IyftC1ROxHarLbc0zsdHmhjfhLubEnizo/CqxTXhSJWjr1aGmKQnwZ6PguvvHy/wTc2RT7+Xlx/1y7s8J1p22LERZ9f5N8r1GJnMQ59BLuZkcpYN0HD9HOIWz99SDqYv98vnie3FdNFsDidCvPopi1B8paK/vqaohZqNic1GaDjsNbxS3Nm80UUpN3xljsh08/Ap1tMir0uVpkSKn+9PS/1iGfh7oQO2Rj9LQzWy9eLO2Wtk2PglvJw/WCQe5UNLfkqqGmsXJ0u2MMe+PAh1Ylya800Ofkpj9rQ89b7wCGUG/qw4ufHVWuSHNaXYXOD5nbw6GxoRXnwxfrANsSoc1RwAjV3jCeI2PJF7Xp7lvaITORlykMXbdGKsm/NRVtkecxZlSu6rctGHnIc3rROfJ9PPEmVZeb7TSc6yWSxchfXJY+QFOXDTHJjYXkwXzeZwIsRW8NzE58lNz51dDM+OkxSdsfieiSk8tbvIC1d4YnORNs6Mz/llSyMTm7QP5SxtANdPDT8kcxhncnYetufnG20lpuQAOXAPHJjYXkwXzeZwIsQ3Q353t/8ziz4m0NYre/N4hU9E4MnwsDD/hBg5/n1mo/wJvn01tjP0yxPLTU/gzoEvOTsvH5wjPpTB+JAD5MB9cmBiezFdNJvDiRAzIdxnQmDcGXdygBwgB8gBcoAcIAfulwMT24vpotkcToSYSeF+kwJjz9iTA+QAOUAOkAPkADlwnxyY2F5MF83mcCLETAj3mRAYd8adHCAHyAFygBwgB8iB++XAxPZiumg2hxMhZlK436TA2DP25AA5QA6QA+QAOUAO3CcHJrYX00WzOZwIMRPCfSYExp1xJwfIAXKAHCAHyAFy4H45MLG9mC6azeFEiJkU7jcpMPaMPTlADpAD5AA5QA6QA/fJgYntxXTRbA4nQsyEcJ8JgXFn3MkBcoAcIAfIAXKAHLhfDkxsL6aLZnM4EWImhftNCow9Y08OkAPkADlADpAD5MB9cmBiezFdNJvDiRD/+fO2PO12yy7/e/oVF0g9vz4Xx3L/axNsjZXF89uP38vKpn9elqfW8dGPjcuPhg95Mog9zF3bU21enxvIHNk64dzvH9+K/2kd1B82v2S7V7GfgM2HdAA3Er++LS//NOL+62k5Nf+8fbf81pG9wqTGdrd7Wt7i+X9elm/dfNmwPc7/lP2GD8XurThcii+040Nr7FP4xhgxRuTANXJgYnsxXTSbw4kQFzJr8fW2PH1/WzcTcnH5QHFWdLiL1ECXG8eE08avg0to/N6+h0LQFePh3Aj3k+P/tjz99bL8zrKjPbiP20f5PLL7jOfevvtGQRpGNoYdHh6B++8fT9AQer4ID7AxbzZrXV2pQTo6Rsh1aahcThSZxgPc/jgO5+d8zLFi7xFrvovrJfpKm87PH2JKTMmBW+fAxPZiumg2hxMhNuJb4Wufdrx8YsGERQM2G3+9LC+lgE6FmT1tcgUazrGnUaUAs3lSxOC2Jan6pElkn/o0ofiFvtzCdmgOfVO/Lrw349CL/yHMoj1u/215wSeY7pzFO35mTijX7IlQ5EGfI9ZoCB9xewsOuDb0aVThbLp5Urn+kp7G4/lDON3xed8c/lkQ5z+r5izyYbB/Ime9Pb+Xl+/15obY84JvV2zSsYGz5amecBobOMyBuD3wu3AJm8O0Jly+hDz87Qc5uyUHcMwW3nEMeUIOXAsHJrYX00WzOZwIcSIwFBG9Yqd1XAoaeCokjQje2X/7VZ9Crp8Kgc5SzNSEYoV7KmZw7O/l7Ze9KinFkt3Fr3OvZVFOsdM1WIJPKDSx0G3g3rUJCsnjmnLfkLqmytkq8cM4j+KZCt16wwH9xG2REff/LNpwyE2J3Ly5ZmSASRon8nb+iWFYB+ojrouBzC7edzTHNWMBy+2cWPNF5f6or0O7xmiIb//mgr/Zkri07RXtEWflHOaxuF+fnq7z4drvyqm8nrTxxDyQ1wBwlJwd4chzlVPEgliQA7fEgYntxXTRbA4nQqwkd41fp0B3Y1JycEVdo9jSgsOeDLq74TK/oyfL6ctOxbk9pcFm9JYW7Mm+aCHYe6Lmn3Cdjp3EAI
vZAxcLbCy/P9VXlz/UHAb9Jss+kY/xWNzHsYNt4/PTj/VNEd94+Ib45FgObLklmXYjKK3pENf4Wik0NIcwSPEyeUdw9p+35a383SPejGq8Xr+ZS+uGT25SaGPZyK2xCe3nw9HaSw2prPOX+Ar0Sic5e4hPPD/iGs+RH+TANXJgYnsxXTSbw4kQC5mt6B02XKtiQu5m498KhcTw68k9XVmPPa05lEISC/GtT32ucdGeZLMVnNJYSKOIrzaGmMQC9Bh9J+OO9v055bVS4VmjkDW59omNVTwW93HsYBuffjsertZGw76B3GNwv8WxmBscrrkxxPWOYw9hIXntlLmR228/Pvpa6UJeY1QAACAASURBVAHOrvizbkKP8bvigjwMzfFKJ44NuZzcbf8dPnEhLuQAOXDlHJjYXkwXzeZwIsTNJ3irwmFdrGgBEpsP/ebT9PqSL/LkDnZ4rck9OZTCxZ/vFUNYoKfXA+3JAAsaiwm+5iZxKK9f4hM8eaK7+SlMiJ/EPcwVPf5vpVrxCAWq3JiAwrsX81rsmsz0RKRd+Ie/EdMGI9zEOLk5RK5BMR3WgWIR8Fn7YL7w08fd54LEq/ok3F4F3oRn4Omq6dNvMcWYplj8/vEC31Aa+fR7efmBr8uv57dtG3F2faMs2uox2sqZIBdvDpGzLGqvvKhtr7Ota4PjiB85IBy45v/YHE6L3nv9GYtczGIxhl/aUZ8q1r/VEmLheN8cpGIozfu2PH2X5sEXUvjEshb6OC8Xhfj0S4qa8qqqvKJ4TJNz68kAsCvNiRTb4W/ksEjcXCCA7FWjn58+Y5yKXJznn+ToxQkb1ub8VsykMXtanv7qNA0oE/62UPQh53qvMapdxf6kH3luXE2yEqfx/E5enS34t+znsYIxxKrcxNA1Hm8W+TcGyvwQp9Xxlnyd01gXJsvlmDVnMdbFZpvb/RxzFmUKL6tcv36Us5vWSfLPczzLyvOdTnKWzWKXu8xXq7xCrLheboQD09qLTxDM5nAiyEx6d3jhO6k5HOEkRacv5ufyCp7aXWSCDk9sLtLGUTwv75w0MtaUn4Vb2gD6m1VnkduNNTk7F9/L4yz9ZUzIAXLg0jkwsb2YLprN4USIL524tO8KkqsU2p/2pAyfiKyf6nwZX9zTps9slK+AH92G6QttlyeKm57AncNGcvbL1uUlco828akTOUAOXAgHJrYX00WzOZwIMS/a5yj+KIM8IgfIAXKAHCAHyAFygBy4Hg5MbC+mi2ZzOBFiLuLrWcSMFWNFDpAD5AA5QA6QA+QAOXAODkxsL6aLZnM4EeJzkIsymKTIAXKAHCAHyAFygBwgB8iB6+HAxPZiumg2hxMh5iK+nkXMWDFW5AA5QA6QA+QAOUAOkAPn4MDE9mK6aDaHEyE+B7kog0mKHCAHyAFygBwgB8gBcoAcuB4OTGwvpotmczgRYi7i61nEjBVjRQ6QA+QAOUAOkAPkADlwDg5MbC+mi2ZzOBHic5CLMpikyAFygBwgB8gBcoAcIAfIgevhwMT2YrpoNocTIeYivp5FzFgxVuQAOUAOkAPkADlADpAD5+DAxPZiumg2hxMh/vPH/0Dzbrdbdo0fNP/949vy9Gv+YhQ9ux1/RPy0Rf+2PEn84N+3H7/XPzb7z8vy1Dq+4UdZ376b/CNiJD84nm1a21NtXp+bz7etOCdeJt/TOqjr5pLt3urfl40DbiSOdHj16+nI/FN5Zdzb/qP3Nba73dPyFtfFPy/Lt8znz8iJp8Wm4UOxu4Nx9JP769xJTIgJOUAO3BAHJrYX00WzOZwIcSo83pan7291wf96Wr6y4P3942l5+edyGoPTirMvsj80fm/fQyHoivFwbpjwUrF5PC/elie42RDtwX3cvkTM3777RkEaxuPx+CJeDGP7tTb59e75IjzAxrzZrDV9CzntT9wf+IyNqDRUmBv1ZprxQNaEbQ/kNe37jPHRZ7H3mDX/GTZSxyXmOtpEXpID98GBie3FdNFsDidCnBJALCJgv9xt3rXv3GOz8dfL8lIKaLxzvXNFdCr2npanzlMoLRZ/1KdN/u68fyLgz93HYh4m7dAc/sFCVwpkaNSGcmJB6+QcgXO0x+2/LS/4BNOd6+nIvFKu2VPMyM0+R6zRkKYOt7dggc2hPkHFpgHWwbcfL+kJLp6PeHK/3IzyzeGfBXH+s2rOerw4cPwI/np7fi8v31+W3xavf16WF3yDYpPcDZyFPOvfnLA8Kk0dbh/wV+2FPC5rfxfWCTlbOLhl/XPMFs5xDHlCDlwTBya2F9NFszmcCHEiMRYRfxYpyFavHbaKIClosNnQYqPeSX/7VZ9GxqdCUlyXpy6hANSivciNDc3v5e2XvSopxVLVd00LcpqtLnaCDz4pCIWuFbwbPscN++hi4OPnmipnq8gIPOzalQrdwh99mmN+Nnx2GCR+62uBuXlzzUhXpzUtqUCvurO8wlcZ1341e1rMBzZfg07XjMWcspkTIw5a7MZjKlb9mwv+ZksnVzbjMeKsnMM8Fvfr09N0M+yIdSIc18bT1kfGIOBMzm7lBsfVdUIsiAU5cO0cmNheTBfN5nAixInYvhhqvrrVaA5dUdcoiLTgKH//5osTX5D7BsLLjQ1gKs7L3xG5ooqJKhWCvSdqfxYpbk/BLsXSCtgYkwO4o87v8sQ43zT4UHNotmTdJss+kY/xWNzHsYNt4/PTj/VNEf8E2/P52i8es+23J7iJlyGu8bVSaMK327W1mTIuvS1v5bV2vBmV1o+L9WYurRu+chOukVtjE+pz4oH1Vjhsef1peYmvQK90krPb+bQVf44jpuQAOXDZHJjYXkwXzeZwIsRp4W4onlbFhNzNHvxtYPi7xTj21OZQCkkszrycy16En5IksVgNT2Sl4HRPvBox7dkojRHiHuPZm7c6jvb9OeW1Uolxo5A1ufZZCuTG053WGBzf2can346HKxwb9nVkrvC5w3HIJYdrbgw/zLtVfMZ5IuaUtx8ffa30AGdb9oVjiNF2ziAPww2dIL+5pu6Qi9uxHXOIcogPOUAOXAMHJrYX00WzOZwIcSLvac2hPqWyp0BaSMid6vSE0Bd59bgtFl+AYRETm05f1GCBnp6SrZ80mI67/AyNj8ShNIT4BE+e6B7zFGb1GprHXfT4v5VqXRh8LCU+WHhvL4DTE5F207B+dXYlN2C0lSddzoYmXLE4Bts7L8J9fIQj9S2DxKv6JHz7N45W/rmcAVj7p+F1/O8fL/ANpZFPv5eXH/i6vF8HfS6NOLvOv55rMSdWW/v6ZEyQizeHyFn+vSGshTGPtvKN44gjOXBtHJjYXkwXzeZwIsSrn7JwzV79W5f6KuJuwQLNF2+1qNPCBF4pffouzUMqpMocLaClGMzF3/e38iUhNtZe5SuNjBQ1RW7+UhsW4rnQSQWoxqpgkvAtDaIUBFgkHlMgQHPp5EmTJ39nF7iTkiTYFL8QI9tSuNWc37rYyM2Ep+XJeCN8wLlgp8qGc4VPwKHVTxU0MCmcBR+wucDzO3l1tuDfsp/HygUUYlU4pWscc0nKQ3gzo
MxvxMqfi82dYd9YFybL5Rj/xFxkY6yLzTa3+znmLMoUzla5fv1EPntfzTf5hLxaXr3PsvJ6cDrJWTaLXe4ir7jdX3PEhthcHwcmthfTRbM5nAgxF/P1LeYPx+zU5rBbPEjR6Yv5D9vY1SXx8k+a5+o6hR/hic3Ql1Pk398caWROaw47WGkDuPWpX0fGUXElZy9vnZ4jrpTBuJID5MD1cmBiezFdNJvDiRBzUV/vor6Y2Emh/WlPyvCJyPqpzpdh4p42fWajTP6eFHN5YglPlE+Ssbk5JGfn4ss1QHzJAXKAHDiFAxPbi+mi2RxOhPgUMnEOkxA5QA6QA+QAOUAOkAPkADlwvRyY2F5MF83mcCLEXNTXu6gZO8aOHCAHyAFygBwgB8gBcuAUDkxsL6aLZnM4EeJTyMQ5TELkADlADpAD5AA5QA6QA+TA9XJgYnsxXTSbw4kQc1Ff76Jm7Bg7coAcIAfIAXKAHCAHyIFTODCxvZgums3hdIipgAgQASJABIgAESACRIAIEAEicPkIsDm8/BjRQiJABIgAESACRIAIEAEiQASIwHQE2BxOh5gKiAARIAJEgAgQASJABIgAESACl48Am8PLjxEtJAJEgAgQASJABIgAESACRIAITEeAzeF0iKmACBABIkAEiAARIAJEgAgQASJw+QiwObz8GNFCIkAEiAARIAJEgAgQASJABIjAdATYHE6HmAqIABEgAkSACBABIkAEiAARIAKXjwCbw8uPES0kAkSACBABIkAEiAARIAJEgAhMR4DN4XSIqYAIEAEiQASIABEgAkSACBABInD5CLA5vPwY0UIiQASIABEgAkSACBABIkAEiMB0BNgcToeYCogAESACRIAIEAEiQASIABEgApePAJvDy48RLSQCRIAIEAEiQASIABEgAkSACExHgM3hdIipgAgQASJABIgAESACRIAIEAEicPkIsDm8/BjRQiJABIgAESACRIAIEAEiQASIwHQE2BxOh5gKiAARIAJEgAgQASJABIgAESACl48Am8PLjxEtJAJEgAisEfj3dXncPS6v/65P8QgRIAJ3goDmgd2ye35flr/36fNSXBd7dvvlfao978t+t1t2D6/Lf4KFfM7Q9ym+zDCcMonA8QiwOTwesyNm5KQliQv/SRL/gv/++/l43gvHZyZLvQAef5FRn2ddLM4Uw/fn3fL4Uy5n/y2vD7tl//fpgqus02XMmZl8S35+VAPgdCIvqgWyRr+mwfooNyXWXa58GJeKELdOQSDl/m58jhHpYgncP0bGh8aec+1+yJAPT/7omvuwARMEiE/CM/VtN8gJE3SPRQpvPiG3/r1P10+tR+xaOrbMn92ypg74IrpznXH8NRj0u7XureTe8Qi4WECMvKTTcvUt5hLEhc0honH27RbpIBGcXd9YoJL5nI0pm8Mx4BvPugS2cU5v2Dll9XScdjzx/uzN4WnGwKzrbQ7BifUmi4w1Jp96pJX7TzTgy2N5zrV7IgZnmnbrBd2ZYLozMWeoybqNx5FQfvlaP9LeCx++rR46LVffei5hcziV3G3SOcJqg1WfLJY7zZpsHpdHe11CnivJk7/yBLJ/R645DvVYg4jH4Oma2ve8T7pBv0KlySvZu38Or4yM5D0k21vNQdNeF5eEY/I9Pz0EO1avrRQ7Hpf982O5oycixTfD0NlS5sj5/hPKj85HX01/5UO4SKFNG+LTk/0Rf10YlkYcZADaGe5cF7we9sv+Ae/qoqz8SpRXVvaKjCIbcAoXUxxr+JYnssrXHH9dA0lOwsfWEx7r3YXP+n/Ka51JXtW1LMtJ3OzoVVmPy+MD8HKAdwEt4OJiZFyKYzS+bRzsrnjyLdhTlMqG98PP2y+vJYeZnjwZfTL78np97OUinFO4sbbBxabYmmNYntILH7NNIvfhdXktuWK/vENM2/KC3ue9vurm8rnlbvDPcyVgorbiOpG8BHa37Cz+nTHXrdauj3HxEXTbJq7HOg59grUfuZ75uS9x8OsRZfdjgvkpXg86dpjx+pnxbq31aK+MB56460grVjDW2Y+8Rq6E69dBPEf2OB/9juD6+PM1vaopnLV6YSRP/fP1SjPn2JoO8uu1q7UGcgwzD32+FqM8H0vOGdnrXB7EeGvdleMr7/8k/OBNoMCdylurM7L+v5GP/pxdw2vMvc+OP8W3sV8uPpA/1X5Xr3ldDt+iSzZOGJfzbLI/29vKySIe1wXYq8cb3KicynUHxMjZOsrVqMfZEHOJ973GyQF0VTtsDqeGKy12TxQ4li8u9jdDSma7GOhCgESp+5Yw8oUfk7b5MRin8m2O052Jnc+l5JV15QtY8iHZboloPc7sbcgzv8xO+xzYa0P0U+0w/xt2mHxnb7Yjn0vJoueX2Z6acPMRbfjo/JTE7O8hxIekU5NxfK30pPjghSnfTGjiUnWLf+JXy1/03ZKpcblwydnpdbbwwotA0RlkoN6iRw7qOIlfiqvaUo553VakJXs9D9JFxuLtsVBO2xqJa7AYluXZTQRn/2nc7OpV2VAUO13B52IfYmXb5m+2XX0EHGWu+Jv54uzJNwUq3mAP6rTi1fBrzLNCU+PquNmyzxqcxpod4OA4ozZY3kBjg+/YGGvczccQ65CvUKJiZj6hDBfDA/gX7ECyzjcfwO6sw9YRxqy19tJa8HyXcTYfNOYbkR53G4d6/FpCCZmb5k/xIdlvstI6zbF3OBlv6w0lxHfNH4uXt6GfBwZ2OBE5Xq21Hu3NfDff0N6Ek9kYZCKnnMw8LmPoeL0Fz5E9zke/o3bbjZIso3KnHY8VD9S+7Wva8oLjFpqF68makIyLn5Py70F7UbbJc2s3246xWeU3ECLjIHcmDuT4ObmGXzqH42p+tbUe6jy1JdnluKAxqnOqVYFnGBPctgYY7DdfRFYf36rpmHGttbvCQUVDrhrY21xbwA1bjzqu5SNya6RHz4U1jPIs10GcPELXtcfmcGq8UqKyuz72WcgadUOCSYRvLfg0ySeHKKju47jVtiUtGQ66fTJICaaVuGqx3ihSu/Kqba0ttNGd14WJxQpggwsa9Op82Be/EPvqJyQhp9TvnHu+Sa9yK9aKw7Hxady1jP6m/W3+mn36ifi7E2HnAN5oT53Zs6fiUcfKFhwvdqVjKD/iWs+hvrhtyT/ocQYMdBV78oRN3BQbOnpxvrMh7wDe7jTYsZVLFS8nqYG3FX1xXNyPccJ5Ffet9rm4R1WAQzeHuDlgmx6v9sTc63ABXJ045KSeAPlgm56SfW02YIwX5vecTphT5OThMM7Z7Ao88NNrcXut+TV3dLjqJICd7njcAXvUfuBI3C84Jtl1PfubYk5DmZOPxv0yGOwox2RjoCvaB/irCDwfYuXwxXnRvjLvVDw710nno98R26xZUwTsuwrQTjkx8G/7mva4d9duwSHZ2h2H63Bkr3M5xtjbhEO7eiFuNbZBbrAnjlvXWGJHe6117UBjR9x140b1Xxy4lYe9cQGTRs2SGnvR248D1qsxXyM2FWP0MdoW98FniKvTKUPKuX6cQNLVbbI5nBqySJq1
Mk3E9rqRfFpDUIhncxKBrcHUT7tTYUP0sz8OF41uo17dThcSt6Aw2YpNdgdVdEGy2y7PGVsuvgf9Al0xGWASUTsMQ1FVcEy4YEGBeNgdZrUD5xdzzz/fRFe8kw5JjtvxrHNEXpTlcJUYG2cUz/yKZdNfsy5/xtjD6TaH13hV23JcHP+gKCyye+sHfC68SMfa/sJ4lY0XnbidMQHbkDPJtCgPcF/hVOX3uZn8jLar3uJfAcW9Gt3lK8wbcamuj2qnakJ+ZCzWxUu1qWxtnlf1jexznMFcZA0PxKnkznCuFhvFypJ36rlqT8UkjXc2AK4oLeWgQSGHa8zJwNhDIY/C3XjgXslteXAZl8YgbxXjzWt/Pb9igPbWtYK6MmqhsAWHdI3UuTt7SlXsj/7k/eJvsi+ul5LbQFV/zW3NQYB3lluwiPYO1v5WTq3sLToS7pWv4GQPz5E9MD1uFv/yicKdkTw5BxzXObg2dbtVX8C6k1VpjWg0qiW/xWfMVSN7nfwYY7SpwTXTizLAvopfkFtimSY2x7kx47Um820NNHkR8qVorTrTts3H6wiOUUvVpqpLxjb1bRoXMHE2xXMYh769cW0hh5wvJUYJ1+qD14u4Ii4qFzhe9Y7jhDS5pm02h1OjFUkYlMXkVciLTU2aE4mJCwCljsbhHNzG+bLtFhQmGJe4Gs1hK2mu5HltI3vdSNSN2zJI93NzgRjKOdj3fuVk07C5h82555t/VW5NUj0bZE4dL3t1jj/nj5uu1udIVxkfMbcTAw57O9HuuC5kv9Uc9nyA48UuOGa2lc94DvX1tsvkxkaSVwti2C/25GmbuIk2BHVR3gBvNxPmjeObdf+NXwEP/qhQwA/kOn0wLuKiF2HEQcbC/sg+zyGwYysO3bvPICvYgzlDTjkbuv4HebguIQcpTNF2PehzlR3ST6cT9EQ5sO9szj50m6dGDmzNT3EdcNUZDXa64zIfi0uQ5/w0jkDDXHDsyXaK0k6Zk8+V/YEdTkzSFTmt+4fs1fPt65LDF+UU+8De4VPmgR8oV8ShPc5HvyO2IVfK/khesHv7mob4n9QctuNTcw7wp+t/5FO1Sf2AhqDrF/hfYxvkBvya49yYaoePUNzrjWtjo9wVe/FGf9N+0dOWURsrs+XUcZhfW3jB+unYG/M1xqhijPk16Im5eqOeqreHv2FznZ9sDqfGTUiDF8KgzC3QRNhy9w0Wq8zySSrJxQRukkfjcNHEC4VeAHISdAsKF05IFDrHFlJIvH15Zmn6HNnrRq6SZn1FFHX5O/geU9Xl7M2xcbIT1rUYqFacZX650FRuVLwhaW3GE+aEQraFbauokXEtf6vnslXtlb0i+xCHA95JT0NWszlMF47C84IJ+AyxKzap4UlH0gfjyzlrRmWcbbf0tdZvklfWarGr4mR4buWmjrMiXeW1uakXJMM0r8dih/qW/we4jNa6WpzvQJvNq4JAY9yxB3WG/JBsxXl1zbpYOfxyDA7logHv6npS75Z9wQuNzTHMmKs9xoOQe508xBXF4ZqQ4yvMjEeo13NP5xgHULbTCVxGHaEBTP7kwljnYxxqwSzjatxBKeKb59u4Lldhumyux8k6G6x956c1M9VWxSfzwvEn5yazz5uB+nDd4vGczyz+TgDOMZtyvoj2BjvUf8v3WzmFsbL1nTlxNJ4je5yPfkf1lDWDOKVtw3nk3yjnyDyTgW/9iBUa19YaCPjVcSk+RZ5bEwN7ncuwpvS4zEsxbvGsXI9QBthX/QtyA1+a4xpjij7gRp0rRoi9sE6KXQPuKk42x4/zstO5Nr5FkUROf4br8Dhba1l39inNy3b0cnLx0duLeUEsqtxIOcjZ1MofyJkBLr4G8jYIZq04IULXts3mcGrEUnJa32UxpZlg+irE4/Iqd+9tAUCySaOTrPQawH55X503mYNxeREUEts+vPIhUlrJofiQF7PYkb7RzBJMLYiKjdkkL8/stM+BvTZEP22cTypRlw4Fv/byTWF2gbaCJb96UpKGJZR8vFlsZ1s0CZx5fsUn8aFgDX7gXb46XozyczQxamzSzwCjvSXuA3+97Oy0fUDsqz0DDju80zdVGuZmZ+LR++D3HVG+L7AVJ7WpcrDtr8fIFyQm3xpE20+v0Zi9BkH6zPLg209LzGRAE6csAWLqudnRG/yzeCfeh5yBRsZ5oLfGDm0y//FYfpXo+b3mhCgXdco26mnMq988WWOmInCe5cBhLkK8Ig6WK5L9LjZoL8Qp5bKMgdgSckbhwdB/sCl/C1/Rjf6BbIcX+I1mWlGS4pa5J9/op3ambwFWPqBct/awGLfiLMc2zEG9dS35tes56GXj/DjOsOiu/Yht3G/EJa0DKMy8AWkPsMc117XDych4t9Z6tE/m6bGMLcazYXuXU2Av8vBoPEf2tGzPfus1AL4huNg5khf8U1HoB2DhrzGyVmvu0Zgc1RwOcs7I3uxr+oA1pQfQJswlg7oL/K/+BbkB8/Y402f5MckwntdY2LhRjhtw1248aC3j82e1K4OEccSc7jA8EIcwVnQkn/aLXBOKX7B+XE4e2DtqDm2Nq3yIkVtLLlcj3h4XdQGwwFzi5EH9Fdy+ql02h1cVLhpLBCYj8O/rstdvTp2s56rFh4v+VfvyScaHwuiTtN62Glfs3LarX+fdpa31/5bXZ/vG64+h8v5sDYiXs2oO/GnuXQ0Cl8bdNnDkWxuXrz7K5vCrI0D9ROCCEPjv536xn1a5ILMuzJTruOheFGhsDs8fDjaH58d0JfEC1/pZ1tL7sm89oVs9rV8BwgNXg8AFcreBHZvDBigXcIjN4QUEgSYQASJABIgAESACRGCEgL0mh38iMBrPc0SACBCBUxBgc3gKapxDBIgAESACRIAIEAEiQASIABG4MQTYHN5YQOkOESACRIAIEAEiQASIABEgAkTgFATYHJ6CGucQASJABIgAESACRIAIEAEiQARuDAE2hzcWULpDBIgAESACRIAIEAEiQASIABE4BQE2h6egxjlEgAgQASJABIgAESACRIAIEIEbQ4DN4Y0FlO4QASJweQjYtwzKD4HLV3fbD4JfnqW06OYQ0B9ubv+m3c35SoeIABEgAkTgwwiwOfwwhEcI4EV6I1jw+zwf/k2n92W/ezzDb/eJnN2yK//upNhS/JPfH2polPsiZxQLiPtyrrhtpNy5hzneil/Cl+Tfbnc8d6ShrNzD7YGsU34Hz9l9blDOI6/8LtaHbb1EjuEaOAdeIm+05gY6TuGPrtsLvPlxQddevVHU+Y3BQTTCqcSTx5//heOHd2MuKTI6OXrGzayyhg+bO2XEVv3bxm1fsxr7h9fl+KhNgeEThMa6ya5dJ+akT7CYKhICbA4/kwkXdIH6TLeP17U92R6WfY4CMCW4chFdliU9CRoU5ocNu44RwtkPFzIJL8TvsPPniNthLdNGfLhx6VmWuLipUZfY3WAhsq1g6+GHxy+RY+fMfejrZ20fwc/PMkn0XNC192ubwx6/5Hgu2DHnSx6bkEPOt4ZPI9FW/dvG9TBd23avzeGm69UaLh75QgT
YHM4GX4vEdLdk/7xf3FMDvWDlOykHErAkKXtyUBdauhDb8VLEa1H4uDzKUy6Rq/v7Zf9gMvxdG5TdLeBzsfv68zHb4WWki+/aF02uD2lOTzbqT75Bsg1FNo6t8vJ4xTfboA1NOp7wMXvx2MY73GqDzTfChCII4uxibMPxE8di3Jt8yL79fE3x3O0W8bviYA1qe5yqzfY/avzzeNS16+CAY8zO1rElvSr52IlzaqQxLlasRb5kH/7GGBnuHa4jrrIt9j18kOsqE/UZxsFuxE312pr7v8v/iU+YETecZzbbeMM5+lX2A+/ifJStNr0uryV3gB/lKWaIgXKljnOxg6e+63WNMct5p9gcNpD/B56ion5b76pbnpgEW4c56Hlf1k9qmNFe41jiseVT0xesz3ofF7eeRj7hOYwvcqIcz2sgrPdiA8pC7Bz/0pMJxG78xB65vqs3gzJ/5CmHYr7C0KwCLJ/3+oaFXaO22lBiqiIzBn8vJcb+ugP2jm5cAVbx2iv6mnHGmCC+GQObY/4tYR2V46p7v+xBj55D+WY72Hno2lHszjmucHSzDMGu8t0iuOgT37zuRVbmo+grPtXBYQvin69PZQD6Wzie+fTztb6NY1hksCvxGwAAIABJREFUPO1aknR7+UN7OvqKPeXGboq/4VdwBfuRu2mct6PeeAO+oiLbLjY9LvvnR7hh15NnE/ETOI+87MRd/HkM+FZ/avxb45LWZJuPA9oAeQLNXG2nOZtjhr4111zDriYG65gkX9Mz24qF8KDi4c2PMmDtqM79clJe8koudo/N4dTQpIXhExAk4ELKRMLS3AWblMiWPDMp7RU1k50KpUxyTUZA+JycXKLN8lS2Je28yJoL2RbgwXneF026Jj/4Jbsj39SO4m8ea7KcrVmnnXP+w4K2ZGNYunEN48qhLB8L73JONnwCHPu8tkdj2PJH7Wz5lhpEK0z8havBLyfbCq7KD8cB55c1W/k1GCfno3E2/SgnbSf+IU7peJPrLXtLnFB2eNrrfIlC0A7gqM4xuwMfI5d0bCMWxnnjqrPD2xutSvuea27dR9l53RtuysvMfdx2/EW7dX72IaydyPGuvJUTyX5nk2ERx4r+cq7yQXTpfLRVty02Hke1zYoOh3eVKardOnDjgmGrcz4mHhuvw9vesjfb7uy1cQM9kX+D2Hlvkj6Lh+MT4N/HMDfUFifVmxuKzTZYs2Av2yWbav63fJd12XXTxdx7ZZw2v9D+fpx9rGSczdc5dt1QvWlduFirvzlWmSM2H8c5Wfna0RoXPdJ5jheGy4AXUUi2y5rcur4E82y7+CG+yljzOcqB/bU/mPesucy8xvxjcXTXTz9O1Ch2ZgdiDDboZvYtXT/WcnC4yDTMW7gmGZ6Xzo6GzTYH9aT1FDDIa6Uvz0mQzLS8PpgMuB4NuKOyA1esvkS9aVwrv0T80r5h5vJENNfte266U7pz7JqLdnn56k/Gd81L4LdhE/nlDPS4p5wyWN/GZ+WhYeoEXtUOm8OZ4VKS1OIqLai0r8S1C6rYIEkP94tdkaDlRNiARaYJFPSG/bpowoLPC6UkANSwInzQh7aDPkxEKC5t93yD4wXDka3xHNimCdQWqk8kMemu7QtH1K96x7lcDMBfnVFsDvNlV8YiVjYkHi8yg29BtuCb4hXGYSxXsTOl+TPqxtN4DrdlTLExXMBxft6unAtFuMlRTJIPCVeMYRQ4OAc2ySzUW7FK8rrcDBhH7WUf8Qh6ca2X8bYR5yEfohybUz4jh8uJtBFlw0WwbxPgPvA9YmnFRrBgVcy481H+gJsxXianHAdZalvEMe/7OIOvLjcM1o8pts9oc4wZ2NVb7317B3Yc0oOxNlvzJ8YunAq7sLaAS30MEU8RFfer+JENJaY6HGQErL0MsLWqSVsYAzlS9gf4Oj6gQLAHD698BdnBbuSB86HYlQXHeaDPYwSNy4gXMF833dhkb1nHek6ub1I7yDm5buYx8maDNWhBpvMHzwF/om7PJ8zTgKFOivkunS/X3o36cJhsI5a4befM13iuykE7cLuO0K0WBpinyvCBjMgRmxOPA3c8vn6dYLz8uFEcTKl9epl2dP2Z4lduRqzekOnJ6eGRjpca1fEZ17mvT3D9RRsRD38u2gC2AtYyx8uAcV7gVe2xOZwZrgFxlUy2UMonNHTFrrS4usmwzIXH4wcSUiVyIv9q4bYuAjERwYVU5WHCg7H95CoO9nyDRVlkjWyF8YobLs64XZs787skmoL5hg1MDhrnKNcaUi9rhVU+vToe/C7xL8fTxIpvxAAugGGOzNSLAnIH44cmA5f6NoIunAvbOjfzSrdRt25bQWJ3SDFulugR4za+8SJQ9SZ8MNb1HBgqm3HdwukuboCTDg+Y9+apDYh9mAeq8+Z6zfRkRyxqgWwXUcQz4+70N9ZcjqHoRCyT7Ia86MAK2xDnMn4dLztVdIOtfU5FbuI6Qd0NX4WXW3Kh+uR9t1eVVvHNTvTtRfvS4OLvQM8q1rlxsRynny1fRMVKbl5bcjxzs9igJqGNkY94roFpx4aufIixqFbcigyMX8Kq/F99gutpkdOwCeOs43Isy7qMPpqW9fHiR9GXxwKWzodoJ1xXTUv6THbjmiu6VEabf15GY2+lP4+R44KzfZZmsSEjXE/sWrXiPmBSbM/iKibIHzmZMHY8hlc/0ZqRPhynUkv+WuNabQm5A7mRr1/J12hz1baySfA0XnXl1fm6NYqRuyFU14PHtx4XeV3/3LmGT2oH8qxzDXbmr9eIOy07iINhs6U2lLkrm8Q+s6v67fFIvjlOlZyC1kUMqjx3LXW4yXwYh+KubJvN4cyAQTJUNbCPC3RsQiSojY6LDgiJCUiGh/2quyfbdMCn2m6Lzhb0uojQGbpg04XZL0qQp5s9/XC8YAbHopjVHVzAwi1UPL4S0j8g/qySB9gT8O0LWseijI0yCoagRwYXPNLMim8aV4sH2A9zUkKFwinqLkYFe+O4YmO4iOL8vF055y9Ofij6irGSbWsaZQae8xL6XF/bKNit47rGuGgAf/UY4oHbchIxP2ZeHFuU20bAIo5HO1rnylMBbO4Ad7BbY1Yu1j5ulXdiF3BNzQR5ZrZ9gnw9pPuQV2xcLjgrn+uJohtkIb/qyLRVxusu2oY8wuNRQtgHvXoGMQ9DIx/tdN/eNpaKwxF6RrEzG9Jn4BOuLdDXxzDiVve32xDXJtgUsPa4Yfy8V279yakip9oXZqx2q67enHg87Wusir4sFrCsctGuPE7nbVsPJSYge+XEoQPNueJHskFstTUo+qzx64uFmETZsp+bGZGFubfuR0xBXl9pOjPQF6cW7Bp5ptqCvITYqjC0E7eDppZNmlNH8oKMyCU7HY8Dd9C/eL1E/qGvIrbuR59gTar+rXGJ88z49me1Leq38eF4xNeG5c+Ew3vhsxxWHZ3rmp8edAG+NZ+kGdVu2d+Kjdd2aXtsDqdGJJELk6slx0SuehHQRQmERbPqgrWLiczzi07JaXdM4oIJ+0hkv1CSTLMXbUj21oLSzdNFYxeO5LMl/r
Q47W9JnETdafsGixISoNOpC9DsgfFJKvzRvV+oa31m99q2esRjrccFU8M722IXTrXT3dGrkppxl8azi2HwDfAQqRXfjLtxSMdlfoU5vjkM88DU4qeTaXjleblprnZEAWlfMbEGG22zC5LqQF8xbh7/hG9dO07jIa5bXNQG88VJyMm9niu805hbUx1wC3pTPPPY0Txnh8c0WpX2PRbDWKre6kflftJT1jmOA64Uv1Vx0tte1wN5KyeSHNOtNhm/wtiWflljhWtga3NdZbllvMpPtqa1KrZUHrX0mZ3ONNSrJ3xMVI7jWdVRYqAywnFYA/7pgo0b6An8a/lisXO+NHNX1gcy+xiGYgu4tN2GXJQaD0CGW0dW2FkeGRZhnpOKe45Jyy6Nc4irjLP4l7gJeBA7Pe7sztgFWXiTQPU7H+w6FnDwgcpPSXJOUfk2b8CLIMPr9hiVoYK/2Ve2ZazxsIzUDc8NscXbmNZa0mVyMR6pmLY8lcbZtVQUrLG3sd6OFBc75/WFkTWHGKeaNkOuiTfAkKOrm9OoDWOTbYJ1bvxKedxsx/myjTJwvaXjJgO5uI5JjR1yYHscGjZA7owW130/rx7PW2GdHF5zkR9evvpmsRQVOU4ln1q8bc1mbI2X3j7PoSS7vb4RUzaHHkXu9RDISVweYadvj7LiEohbXqvrCckkda8y5CSRjz3+lLsjObnIgijkD09/bHFY8rfkm+W0F4ldEPHb18APMdsWocgB3T5Jtfxr+QYJICSPlMzyqw3FBxivKiRhWDI0+XE/ybDEenhBpyRUX0UwedkniHNtGlv+9rFqYxh8a+CRfMjj4BtbywU2zLEnPcmXx+X1b/km1BBPM73FJePKEXH2yTNgUHSjrz5uKTFbzIDrZqd9BnujXuRPjb1Nhk8XT8PGbMqvriBuQa9d0Ovf72TOCi9xnqjsrB2wBjb9xXAYy2xT+bZSiJfT+fxeiyXHFeT8fnkHH1frGn1AeWB52WxiW866jVa8im5na8CxcAoLPBHd55icRX2HcuE7Wup8CrkBsenFoNib7YNvKy3rWPT19EBskln92KHZst1dWyCzYK6TEUOVoNcezScP6dtxk83bbajrRZ4ovdZrWYixX88iP2CNzgFW8drbizNigdcx443l/xoTzAlQ3Ae7db1Z7I0Pdv0CO8vNY/QDtqvd6ZsSSw5zMgaYHOR4xLT6V3SBPWkT4wwYyEnztVkX1G8rrbIjt0RItUHwr2NXhnT1xZEWZ5NVcfXy3Tj0xeW4ls2gEebtZV1HHgg2Th7MtU0XX7sexXxQj/v16mOKa0jHwbcQGx6GeeX5IE8Mb9J4btj6kU+TbRjrOcNG/fZxT+MbWDtsIveT/uqXCEab/HXN4C6fIDvlkCxfj1e8EdPDtWSRftEbfHJ40eG5IOPCYrggy2iKItBImkSGCByLANf5sYhxPBEgAkTgKhHwTeRVukCjJyHA5nASsDcnlkXjhYeUzeGFB+jizbM7uP4u68WbTQOJABEgAkTgBATYHJ4A2p1MYXN4J4Gmm0SACBABIkAEiAARIAJEgAgQgRECbA5H6PAcESACRIAIEAEiQASIABEgAkTgThBgc3gngaabRIAIEAEiQASIABEgAkSACBCBEQJsDkfo8BwRIAJEgAgQASJABIgAESACROBOEGBzeCeBpptEgAgQASJABIgAESACRIAIEIERAmwOR+jwHBEgAkSACBABIkAEiAARIAJE4E4QYHN4J4Gmm0SACBABIvBJCOgPX9cfSf4krVRDBIjAFSNgPyckP/guPzNhPxR/xS7R9CtFgM3hlQaOZn81Au/Lfve4vP6b7ODvBUE88DcxcRuGyKZeCB9el//C8eHuOYruc8gYGsmTBYFPxPpy1qD85mjNDQUL2LgcW8Eobm5GQOK32+2W3cP/Lv/7MKGIl3VzbG7cbH1n4CBXd2YcOJx+e/fTfzd1qx+amySO47V6wMkznha85IZSwm23m3hz6RPz8hkB+pAozLm4/SGhNzyZzeENB5euzUTAN4czNV2d7I0XZzaHVxfZ4w3+xCKEF/zjw8MZpyBwuPk/Raqbw+bQwTFjR64/n964znDkFJmfmJdPMW/GHF4fjkOVzeFxeB03OhfJrz8f011GvEOlyf9xedS7j/npiS5YuyOJT1SkEcnHn/f1iZXKf1weH+RcvsuEMnZwR1P1vS6vdsdTxuv8JBeTpL3aoHdG0eboPeja/3xdHtEGuOup8p7fy2xZpEn2rp+cW/YWCemVi54MlF9fywAMBUuwB8SmTfCrfffW7uyJH+muY0k8o5iL9IOyzRrUAXG00+XTj2vbmweD7u3x6uCW/dxDLBPWON44BndAiw2Py/75Ee6O47wQH+Dp/nlfuS5uFXlyFx/XTAEobZxDxuJ5d5BbqhPWZxezZCLyFtdj8ESsqPnA1pwMAh9LPpDjgtHDhrXfGqfKE8ceH1IeW8d5e7w25ZaMUzNvmj+WDyHmZQ2OsFB/6v/cnHzHvsQVuVVwTliUMRqL+uSh55/qyfhJbFt6LZ9V2dVO25J5Nm7IEbBd1wzg1Fsz0UbRuUlf5o29AYD5Pvn5Wvn6/J7eGND4VdzMP/xE3QmTAfYHOIOyKm5Znl676nWwjoW8VQxLcywG+7+9TXVuWBO9NQtxEpkl9gHTor6xgZyrvnVi181BjZwyzCf7Za91R70GmmkFg4c0ptjUk2cTy6fH2OV1h1crPpYH0zmx5fF5n+os4VxeB4iZ1QLF7h3WJcmWmvtyvDdxBjHdyIcBRm37wjWw5KkCZt0A2fE66vAY1XwhJxS+lqedaR2V45nHB+vO1rhsucYQcucSdFlMZbiO/WmZqLqO/gkf67h2PMtMwMxdU8sA2/CcLf7b6Sv8ZHM4M2hGrNyIKEHtIq1JDi6OeWwiVSZanqdJAWXY4nVzLClWmWt9dvExIufkqrY0ti0RNBuplPhsEaiNlpjyQrclqnag/YZBtB9joTbVJL3CoCMDdaViWfxK/vqLVMUJ1VqB3YqDGxeKwpJsRjF3/voYe9n5wm64R67AYMTFmgaLCQyTSGqBZue2xWuAW/bFMFV5LiaZTzoOtwMHdc5AT7bb6TGeqWyL4wjP5PvHZOTXYC0mxa+B7Rkjw9y45ezImLm1Gue5QCZ9JrPyveGjxSOvpTQn42QY6jlc+601F7Ed+DyKF+oa5ZbsvxVta2zaMS9rsGWDYeGwjMUEYit4mp4U+xQ3HCPCYNzAP7c+su8tHqTGrepFc9c42FrCUWaTncuxM/8VW5Ofz2VORxs36xO/Tb69Lo4yjWshrqrP1lNwofLarms1jxv3HfZBNtqu28EGtxbMdo2f8T9hYzHy5iEHYNvhIMc9zmZ38c3FIrxm72R57W7Pjatc7PqccTK/XMz1XM4FeQ2ZzW5cxslhmOPY0pt0DfKTcyhc+5wd1T+ZIrrMDycC/FC7m7H3+bxld8u/0pgc5Ezgj4s1cMatlz5Gap/pzDFM9m3EpJUTDReNp8U94u+QTTd2bM1GnM0+lZe53+JKS28eZ/HE3OC4t7puJMyMp
95avydyUH7aTrEojZ+LUz8eXnLADP2PA69on83hzGA5ookiWMhKoLog7e6+NVSpSJDzMEdthf2V/OCM6HALturDhVKbqDDfJa5wLtoPiSL6oolNE0pImKFIchq68kcyfNJ18twOYOiO16cs6zjEgV5GwXMVExiH8RBx0ceiQuZYcScHt/o1GBd1bYpXMShvgC/RT/QNZeM2jhGJcb+oi3oqb5Gr7oI5koc2yDjY3yxjcwyi7VYgml7YL/6POF1ASRtguzsTj+s+XqArhoWrAYsVH4vMtX1ON+aoMiePiPswseYFOCibaLueqpiO4lX8ijpX8qq+MkcP4fqpOuto2cIxst8b54tP0WPNrs4qhUqU18N6fdzbDlYWXuVjsH8Iv2rj6fowrt5vjxWOA+sbGNvZiBXIW8W4nos4VZuCj4E3cZ5Z4TkANgHOdazxua4/dw53cD5u45iw3bMxHi8+R5xQD/ovx62IF52DcxjHll4twnF+kQe5MPhVdwHfwVqr472txW8dgLLW69MaBxla5wWONG8019hG/6tdlY8OyzrAYyzHS6yiDcm+ZC/IRVlxu4l/tRuHYzzxuOc9nvG42ji1L/DI4YM2hXGIUY0F6rTtqNuOrz9Rd90eYIv2ibgSjyhbYnBKvRblXNY+m8OZ8YjkwuSGSdmaMGvkxCabuyIkJAMbAz7oQrJXruA1iliI18UBulROWiz22ox+2p0i0BPlFXtlTMs3aA6dbLHxKPkN+4qMuEjBYE0+9ZUsex0URuimJsZWHOJAjCU2uauY1HgdJxttTdt48SrmqD4/tnkXLcRkW7ysgUX5+YIe/UT5eA62V/7jnF589DhcxKI85Lpuw1gD6RwyNN54ATDhR2AEtuvs4v+I06BHNqMvdnp1vPIursfu2i/2ZKHF3mSf45Xqa/Ai2lFkiMyGn6217+bIvOqLcqgT8+JXtAHmG1z2WebogeCn2pF9LDkhjHGy+/55PbG487yKY5OtDdkl75k3eaT8GUOx1+fjTfgBFqfkatWR4+p9qXEUFTjOeyDjPCbZs+XVffkLyOtyJuGGubPqDbEMMrztaCHOw+3sU+Zn0bniY5UlOhzGFjeZY9t1eNha+5YGrI8Xn4OPLjfgObU52GZP0oNtRXZsnPC6uMIAYhe8Stcmr7vkHrUxrskgAPzwMWzECuqSEi/HTT/Hcpi3p15znL4Vhu2bdcX6LkbJBscTXPtbMImyASPzyclv5eXuNXC9XgsOohd4XI6L02hDGIfn3Jwyr8OPAuZ6A+XU7RhfyM0RM5frUX7y3+HnXk3GsdezzeZwZqyU/HB3DPfjYmjt6527mERhHxeX+BHJjDJxG5O2zAM5muhhMdfEH4CKukCGu+BkXakBXC/EILXuRvllfySjdy4mL8CwakxbAacVpmW8l1GSDcZYxuL+ibKLytVG8rde1Hr+N7ixKV4D3HC+2IW+4TncxjFuzhF6QF6XmxEnmKOnYH+zjNUTI1Nymu06u+AxiJupsU+w3Q7pZzyu+1CMwJouXJWJOE/swacFZT/ad5rPm3ML2l5sTL6M4lX8Qp/CfMUK/lfm6LHoVx1Y9QYswNaRf14PFCArXiX5dU2bDUGvHW59Fl7lk7Bf/VhP9Daerk/k2A0/L1PwrdfDvi093eE4YO/yrLgG57wNCfvm9SjwJs6riKEduF1HWMGtTUSQW0aV9ZWPQJxcPi0T1hs9G+Nx2Vefoy2oE8/h8ag2nMM4tvQql1G2yNP9yoWqIuFZ+d/Dd3BzAXR5e7ysQ3Y3ORLXK+gSH6q+mEuA+2FO8T0e133ByNtdxjc20Cd3uik7NbU6B64PXRnR96Ig2pf2NYaBKxUf40BurGVc89qDmIpCkK36o+5i1GoDddfttrzjOAuxXWm93gNsDmfGThdkfc/ZLcKwaFKytLulibB4gbVtlWEX2Ljg3QLLMmzRB311cfhF6mzMd4pMt4cqJT9L4iKvLG61wxJ/GmcyWvJNhpOvMgwPvKDni4L5lW00GWqH3fUqyTXZYHf7HIZOqWFhen0c/FCfEAqeo5jnc8mOkWzvr+cGWpFkmO+pkTXbcZxsnxKvAW4t7llM8BxuZxuc/zpnoCdcDBzPVLbxLGNmNjj3PU6nyWjFRHQPbHe+G7fqXWYsADevC4chroVGfA2LjWs/8qeupYSfrR/jku379dTHuuWj5QUXLsWtkzcHMS9rsMV1w8IpCpzBnBNiJ7andZb8M7vR95F/1bZkAO4rzmafy53e2Jb8svbdUORkttfkb8IvCdusz9mcdBs+6Cc+ARYNKt9ytbO/t9b62FuONDzQdt22wjNzC3OQ8TjJqOvT244GJjuiDO+PjLHchPGANau4mb4QJzlnMUPVYRv9xHXZ9TnwGnOQ979hs2EYbHN+o08Z6xSTJM/i4zjvfEo42DiXk4LtoreMQxkwzscQ4+b518Vr1RB5GR6zxFv017iV5B/gwyBv6fzCB8ASfBUIupgMrqMt2bZ+EVbZ1rjZmlXdyScXT+VA9jVwxcUDbdc5tXZBPW5O8MPxIxob9lFO3Q7rDnyy9WQcU5tKDLxwPedwqb74kdezx+ZwZqwy+es3OtqFIDxpMRvyAtHH046EKRnI8cef8s1veeHh4lIZmej6Wsvj8vp3/xtE6+KIRWvVpc1eWNxmqn6q/vR4P30jWPVPF4vasV/0Wwdt4ViCya/e9JJQumjBN6I5PHKSaspADOoCTck52/rzPbye5LzKT2DzawtBbx1pempy1CQyirlM7sa4Sk5bJt9sLn8F6QeivOd3uHPph+neCfHq4ha553hiHBp9W+lu0W9Mzfh29YjhaLfyv/LM4WnFS8P1s8jIFyZ7fcRf+C1OwK2IUdx3mI04HRwCPMoNmYCTO97QYxc8V9zouNaaS1w0f0XVafEyXghW++U92FW8zDg186YMknm29iHm65yW1zCMKTrKBtj0/OryAvroCnXA3+XjXNwl27x/zjb3hEEM8WsdcS5m5o2aV+vTuThG9wtGj8vrz9BolHM5DiC78AKOFawhh0ed1S6f773fgnW+dhmHujI7mPSwP8CZal+96WC4F7yzDPtebW87eozrYb1teDkswe66NtHH/vW6b0eyqe2bzyfFluBjus7atzzbWsj51dlc4+bn+CZLLKr2JC543RvWJPIzXNO6axLDAz567DBWI7sHHMlrdQtn0NbHWHM4bOF61jvucPVrH/W4PIWYyDbITnnL9FrcD+RllYecrfWVrSXjfsFHYgk1lIsHxClxqnXtSXwqHBIbBvxw8oP/hpPIquMyJ/Rb0BM3i+0Bs7pug2Dd9bg4e1vDr+AYm8OZQULyn0uPLgxb1OcSegY55/Y1JJXjLPxveX22C95xMz88+tw4fNigjoBrsbNjPg9PQOBDa+6M9pCbZwQzF+vdJuysqr5O2C1z5t/XZd/4ev6vA5uaz4IA41ph/LJrj79hUA3iFpvDmRw4ywXL35HofZHKTDc2yT6Lr6Dpo8ni3PaAacPNr9I7NKpx8lrsbJjOQ5MQ+OiaO5dZ5ObHkFT87AlN/W23jwm9
8Nk3zJn/fu6X138vHH+adxIC+jTr1m/cbEHmy649bA574WFz2EOGx68WAXt9oPvK6tV6RsOJABEgAkSACBCB60ag3vR3rzFet1O0/oYQYHN4Q8GkK0SACBABIkAEiAARIAJEgAgQgVMRYHN4KnKcRwSIABEgAkSACBABIkAEiAARuCEE2BzeUDDpChEgAkSACBABIkAEiAARIAJE4FQE2ByeihznEQEiQASIABEgAkSACBABIkAEbggBNoc3FEy6QgSIABEgAkSACBABIkAEiAAROBUBNoenIsd5RIAIEAEiQASIABEgAkSACBCBG0KAzeGtBBN/5wm3g3/6Mw8PR/5AvPwGzW6/vAdZx+yepPcYBXc0dutvI20d9/68Wx4P/ciy49TM3wZKsg/ac6Px3hSLM/t+fp3vy363Wz70Fe1nyDnngmnrOjqXvkNyzh+vQxp5nggQASJABO4JATaHtxJtV7z3nTqpSTtDoXaS3r4bd31ma7G6ddymYnMjvz4eGDaHn90Yb4r/UYFlc3gUXEcOPn+8jjSAw4kAESACROCmEWBzODu82ljtlp3cSf/5ujzaEzg5Dk/wfCGfiiuZo/+e8zO7XKDvn/Pxcncex++X91jIFxsel/3zI+jFebvF/Wi8ysh2P4cnh0XeDmQ1gCzjot5lkQLH/BsVw4pLxqGOqz8gqzIMR7X5cXl8ENkVhzVe2dZi3wE/zLWM6+vPx2z74/L6L8h6eFwexVazZyC/7dcGXFCm8QKPtXTncahzt6u2Hy42kSfyBBmeHIruh9fltcTTP2HeGucy7mG/7B/wSaaPdftpVLZH11fiVeXKsvT8XrIfiUNJb5WPPsPaaPkLawX1Lq24LCnGj8/7xBXgC9ppcgouO8TECBk/IS6ntEZBAAAKSUlEQVT5FMYW5ffj38E7rq2oGrF83vsnh4DP8A0EGLcPOaeLA2JsuRV8txxT4zryb7+scgXKt/UGdm71pzcO4yNma4xMD+ouuT7zpzzpH/kDuXAVrw6/3bjMp866cvwG+4ofmrdz7jeflo7eg/xyhnGHCBABIkAEJiLA5nAiuEu+EFphkgqcXDzLhd8KeVcUpAuyFYdLvmhqE5KLEjun8kyGnsuyV9v2ilcuJHTOQE+22+mxwgvtsYKwXPgBzGxr8h315gLI2W32wXzZdBhJUZEaGvW76EzFhupxOpclYVeL6jVe1iBl+4rMYIftZvnWRGshZ35oIWfyTLftB/kdv1LxjjFs4+IKSOez1+PGqX21cUMMZdtiba6uPlWPzU96FHOVW+ej3KP8cfxqy0vFqGGKFma/nYw8buC32ufiZ3gneQUT9R3l+XGl8EddOCesE8XI2WryfOG/FT+HhNy4KDyuayZhZ/HLNyHyOIy/2uYwyX47nqHGtL2eZz6l9WlYunFOTGNcxsjFydkB/uUcanp0juGgc5LvTr/Gy/tn83Gck9XKjYZXwx+X+1vjxIZyHNaV44/PmWJby063PhxOzjBBannFGzBBVx09WFdhzjpGGVdrBjUWA71De6tF3CICRIAIEIH5CLA5nIkxFouiRy+AuUBzRUG4Y+xsggIoXJBd84SycTvocXOGemohiXa7IkDmR/kmMx4v+6FAsCcp5U64CfCFcj0at5K82hxaUWJ4w36xwRdbKhHORQ1lP+KvhU+WL/Ot4LdmvxR9Hics7orsBg5aoFqBCwNdsRrtBjvcOJgvm3iuZ4+bgpzKxWVtDttciXJ7/rTGpeI3NQxWYFtRW/fNwu2cqn4Db1RM3DfZ8glrEPDVM1Cob10nHgevF7HAbdNVGz+0D7bRvsgNGFZxwHXmbTG8NRYr7oMw5IMeBjmON401aWKa44RXSZY1QzK84gJxMTlR//A4yI7+AXaIFcZYRcd5pg/jIMeifzYO5/fGyFiwp/oPOKu8gT+mr/k5xrGNfRAE9jm8Qq7xs0Av4uAHcY8IEAEiQAQ+GQE2hzMBhwumqsGLfzjnLqhyLr9KmT5zA4LzRSDKwHOwrXI7TYrOb+lR/e2CX+W5OfkVzoBjX28qYLx/8NpekQOFTjmWN9S/+lqqyKrNYdtunQl4bfXDqQZc03EobkC2nOvL7/m1Pq4yDjSHK5zBRj+/gXuWXYtN563fAbnWNJTmEPlVxm31Zz2u2iP4+jhLrLFYTUYmGdg0Vhk9v5PsOifI0DWAuuEmAPhb9fgGoB9/bG6UKfoUx+yo8ta4+Hj68NS9yskqq+px624V/4gJ2FriWjXVrTgPsIy5BBvtKiDlMri5UpupRvyEE7Yu1K4cpxKXaI8pWh8vGEX/YD073I/yB/kj23CjykyCxtrpyU2wi1f2r9icn8oZd0RkORf9KfryRo/fbhzEMR8v8g/YV+JjudDi1dN7yF5nF3eIABEgAkRgJgJsDmeiGwsJvADKuVLMpIt6uqDGAkb2T28OXQMpvha9R+gBu2MB04Wv6Mkjyv664OjJwEKkjknza4MA8sBOHR/3iw3+yVmVfWBL5UGBh/sgW6SMcGr7BYVdNkPGYZFl1jnZQW9q+FODjON0G/iG53r2mD79dFgC5qIfi3rYj3J7/rTGpfgC950xcafNCZHR9xt8UHG4P1gbAW9nO2CE+EZr3RxoDmQcnsNtO9fiQ1v++/L6ULnaxwF1IgYiFXAF36I+G1ebFJAT5+l+tavIao6rTw6r7DJjtVExB/1uVDw+8A/iXOX6GwAquucPzHcmNHZMvsS7+AnrSKeAvMqLI/xxegf8duMAHz0O+wP7zB8TVfcHemP8bTI/iQARIAJE4NMRYHM4FfJ0MbRGRi7qpZDWi6sVSWlcqznUC+tHmkN3dzld3FNT6i/UTg8WheUOcX4iF4oh9Qmajgonyke9uXEqczxGdX573P5vKFBksOJ4/JPD9GTC8M/NebEJrYBt9b0+uVLMbA4UbzpjgJObB/FJMUCcoVgEM3Sc3YnPNqWiMuOcz+G4lk5rNGqxCUripurJtmFDg/gbV1C/NY7OziBcZXi/3Zpp+ooyPL8wtiO/3TnnB3I389DWYIizww4x0u02v9wcxDI0h2rfFvwQCtnOvuDNJ+dr5lwr/m49q5zsA/oW9dnNEFwL9jQ/63LxtHFOjl/Xakf2vWW7ygs2yTinx/Em+bHZP4iz6jdZm/1pccjWj3O8/G10uTaUGNp4z2/kz2Z/nMqWbZWrdajXi+sKb0LZzYHCN8f9pOvoa1s1gltEgAgQASLwyQiwOZwNuF4o0+tF6RsK7YKfGxJ9bW6/6DdguqI6z/kpTwA6zQ8UMPblN1pghKKpFItSsMk3z+XiTIue/NreI+oRTNDun6/L3opUOWfFZ7a9+/uHMA71iohU/CUfrUhthQLHWeHn9D+/16ct0e+47/Dq++GLQbAqy6vfaFhjqTbFohf8d4Vf8L/4NTgOVlT8rWBFPWiDHddxuUizmAEWsdhEe6pemy8+p6JRG1KVk77pU1+BQ/1b/XHj0lqoNuQCtfD0v2pS2cr26DdcJk6VJzDWCDX8LkWtnHt+revMmp2iE9Yg4CbqEbu0ZgInsgyMv5uDWIJe87/
J/6i34GAbKVYmIx21+Ak+++Ud/GjZY68zFhzjWjJV5RPilL+91s/Nax3zSJmbN1RHGvcYcg7igPkCc1hpTlQc2FMaVTnROR79A3xKvrH1BnZiXKM7mEPbr5TajGyTyV/Z/7i8/l2/6foc8ULcVrnfzDJuNtcV4ujtk+lV/mnXNu9jMYgbRIAIEAEi8AkIsDn8BJCLiliAlBPcuCwE3pe9K9SydWeO3/szNBKXAsC/r8u+8eVAXfOwiO4Omn0iFaqlGTlFnca29fTkFGGfMOfYOH2CSVRxawicYV3lBtPfqLg1nOgPESACROC2EGBz+JnxPHNz8Zmm35Wuv/f1b3/Q8bPHT57mXFZD8t/Pff3tRvS9t33FzaF7GuWeLvWcvZzjR8fpckynJVeDwInNoeSE8sS8/XfTVwMBDSUCRIAI3CECbA7vMOh0+QIQsAIqvIZ5AZbRBCJABIgAESACRIAIEIE7RYDN4Z0Gnm4TASJABIgAESACRIAIEAEiQAQQATaHiAa3iQARIAJEgAgQASJABIgAESACd4oAm8M7DTzdJgJEgAgQASJABIgAESACRIAIIAJsDhENbhMBIkAEiAARIAJEgAgQASJABO4UATaHdxp4uk0EiAARIAJEgAgQASJABIgAEUAE2BwiGtwmAkSACBABIkAEiAARIAJEgAjcKQJsDu808HSbCBABIkAEiAARIAJEgAgQASKACLA5RDS4TQSIABEgAkSACBABIkAEiAARuFME2BzeaeDpNhEgAkSACBABIkAEiAARIAJEABFgc4hocJsIEAEiQASIABEgAkSACBABInCnCLA5vNPA020iQASIABEgAkSACBABIkAEiAAi8P8B5Xcvb0k15LQAAAAASUVORK5CYII=) ###Code carne = str(input("Qual carne deseja compra lembrando que você só pode comprar uma tipo de carne F-File duplo A-Alcatra P-Picanha ").lower()) kg = int(input("Digite a quantidade de carne ")) cartao = str(input("Você vai pagar no T-Cartao Tabajara ou a D-dinheiro ").lower()) if(cartao == "t"): if(carne =="f"): if(kg < 5): calc = kg * 4.90 desc = calc - (calc * 0.10) print("Você comprou a carne File Duplo seu total é {0} R$ como você vai pagar no cartão tabajara você tem um desconto de 10 porcento seu valor é {1}R$".format(calc,desc)) else: calc = kg * 5.80 desc = calc - (calc * 0.10) print("Você comprou a carne File Duplo seu total é {0} R$ como você vai pagar no cartão tabajara você tem um desconto de 10 porcento seu valor é {1}R$".format(calc,desc)) if(carne =="a"): if(kg < 5): calc = kg * 5.90 desc = calc - (calc * 0.10) print("Você comprou a carne Alcatra seu total é {0} R$ como você vai pagar no cartão tabajara você tem um desconto de 10 porcento seu valor é {1}R$".format(calc,desc)) else: calc = kg * 6.80 desc = calc - (calc * 0.10) print("Você comprou a carne Alcatra seu total é {0} R$ como você vai pagar no cartão tabajara você tem um desconto de 10 porcento seu valor é {1}R$".format(calc,desc)) elif(carne =="p"): if(kg < 5): calc = kg * 6.90 desc = calc - (calc * 0.10) print("Você comprou a carne Picanha seu total é {0} R$ como você vai pagar no cartão tabajara você tem um desconto de 10 porcento seu valor é {1}R$".format(calc,desc)) else: calc = kg * 7.80 desc = calc - (calc * 0.10) print("Você comprou a carne Picanha seu total é {0} R$ como você vai pagar no cartão tabajara você tem um desconto de 10 porcento seu valor é {1}R$".format(calc,desc)) else: print("Você digitou um numero errado") else: if(carne == "f"): if(kg < 5): calc = kg * 4.90 print("Você comprou a carne File Duplo seu total é {0} R$".format(calc)) else: calc = kg * 5.80 print("Você comprou a carne File Duplo seu total é {0} R$".format(calc)) elif(carne =="a"): if(kg < 5): calc = kg * 5.90 print("Você comprou a carne Alcatra seu total é {0} R$".format(calc)) else: calc = kg * 6.80 desc = calc - (calc * 0.10) print("Você comprou a carne Alcatra seu total é {0} R$".format(calc)) elif(carne =="p"): if(kg < 5): calc = kg * 6.90 print("Você comprou a carne Picanha seu total é {0} R$".format(calc)) else: calc = kg * 7.80 print("Você comprou a carne Picanha seu total é {0} R$".format(calc)) else: print("Você digitou um numero errado") ###Output _____no_output_____
ProcTrack-BarPlots.ipynb
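###Markdown A note on the cells under this path: they colour the nodes of a process-event graph by a categorical attribute (called 'Time') and then draw the graph with networkx and seaborn. A minimal, self-contained sketch of that colour-mapping step is given here; the toy graph, most node names, and the attribute values are invented for illustration (only 'Creative Cloud.exe' comes from the notebook itself), and per-node data is read as G.nodes[n] (current networkx) where the cells below use the older GA.node[label] form.
###Code
# Hypothetical toy graph standing in for the process network 'GA' built below.
import matplotlib.pyplot as plt
import networkx as nx
import seaborn as sb

G = nx.Graph()
G.add_edges_from([("explorer.exe", "Creative Cloud.exe"),
                  ("Creative Cloud.exe", "node.exe"),
                  ("explorer.exe", "svchost.exe")])

# Attach a categorical attribute, named 'Time' to mirror the notebook below.
nx.set_node_attributes(G, {"explorer.exe": 0, "Creative Cloud.exe": 1,
                           "node.exe": 1, "svchost.exe": 0}, "Time")

# Map each distinct attribute value to one colour of a seaborn palette.
values = [G.nodes[n]["Time"] for n in G.nodes()]
palette = sb.color_palette("RdBu_r", len(set(values))).as_hex()
color_map = dict(zip(sorted(set(values)), palette))
node_colors = [color_map[v] for v in values]

layout = nx.spring_layout(G, seed=1)
nx.draw_networkx_nodes(G, pos=layout, node_size=60, node_color=node_colors)
nx.draw_networkx_edges(G, pos=layout, width=0.8, style="dotted")
nx.draw_networkx_labels(G, pos=layout, font_size=6)
plt.show()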
###Markdown Create Target of Interest ###Code net.create_target(target_name='Creative Cloud.exe', new_node=False) net.assign_attribute('targetIDX') df = net._return_df() df df2 = df[df['EventCls']== 'Registry'] df2 import seaborn as sb; import numpy as np df2['Dur'] = df2['Dur'].astype(np.float) sb.boxplot(x='Oper', y='Dur', hue='Cat', data=df2, showfliers=False) net._unique('Oper') net._return_df() net.plot_network(save=True) ## Add the labels to the ndes labels = dict(zip(GA.nodes(),df['ProcName'])) GA = nx.relabel_nodes(GA,labels) from Sandy.graphing import graph #bb = nx.betweenness_centrality(GA) attr = dict(zip(GA.nodes(),df['Target'])) nx.set_node_attributes(GA, attr,'Time') ## Add the colormapping for attributes #attr2 = dict(zip(GA.nodes(),df['Size'])) #nx.set_node_attributes(GA, attr2,'Size') n_colors, c_map, pal = graph.create_color_map(GA,'Time',sb_palette="RdBu_r") #n_colors2, c_map2, pal = graph.create_color_map(GA,'Size',sb_palette="Greys") import seaborn as sb nx.info(GA) attributes = [GA.node[label]['Time'] for label in GA.nodes()] attributes_unique = list(set(attributes)) palette = sb.color_palette("RdBu_r", 2).as_hex() color_map = dict(zip(attributes_unique, palette)) node_colors = [color_map[attribute] for attribute in attributes] fig = plt.figure() layout = nx.spring_layout(GA) nx.draw_networkx_nodes(GA,pos=layout,node_size=20,alpha=0.8,node_color=n fig = plt.figure() layout = nx.spring_layout(GA) nx.draw_networkx_nodes(GA,pos=layout,node_size=20,alpha=1,node_color=node_colors) nx.draw_networkx_edges(GA,pos=layout,width=0.8,style='dotted',edge_color='red') #edge_cmap=plt.cm.Blues) nx.draw_networkx_labels(GA,pos=layout,font_size=4) plt.show() fig.savefig('testTarget.png',format='png', dpi=1000, bbox_inches = 'tight') ###Output _____no_output_____
docs/notebooks/examples/1D_simulation(macro_amorphous)/plot_0_protein_GB1.ipynb
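###Markdown The example under this path exports its site table with sim.sites().to_pd(); once that cell has run, the table can be inspected with ordinary pandas, as in the short sketch here. The 'isotope' column name is an assumption about the exported layout (hence the guard), so check all_sites.columns first.
###Code
# Assumes the cell below has already created the all_sites DataFrame.
print(all_sites.columns.tolist())
print(all_sites.head())
if "isotope" in all_sites.columns:  # guard the assumed column name
    # Expect on the order of 86 carbon sites (42 CA + 44 CO) and 44 nitrogen sites.
    print(all_sites["isotope"].value_counts())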
###Markdown Protein GB1, ¹³C and ¹⁵N (I=1/2)¹³C/¹⁵N (I=1/2) spinning sideband simulation. The following is the spinning sideband simulation of a macromolecule, protein GB1. The$^{13}\text{C}$ and $^{15}\text{N}$ CSA tensor parameters were obtainedfrom Hung `et al.` [f1]_, which consists of 42 $^{13}\text{C}\alpha$,44 $^{13}\text{CO}$, and 44 $^{15}\text{NH}$ tensors. In the followingexample, instead of creating 130 spin systems, we download the spin systems froma remote file and load it directly to the Simulator object. ###Code import matplotlib.pyplot as plt from mrsimulator import Simulator from mrsimulator.methods import BlochDecaySpectrum from mrsimulator import signal_processing as sp ###Output _____no_output_____ ###Markdown Create the Simulator object and load the spin systems from an external file. ###Code sim = Simulator() file_ = "https://sandbox.zenodo.org/record/687656/files/protein_GB1_15N_13CA_13CO.mrsys" sim.load_spin_systems(file_) # load the spin systems. print(f"number of spin systems = {len(sim.spin_systems)}") all_sites = sim.sites().to_pd() all_sites.head() ###Output _____no_output_____ ###Markdown Create a $^{13}\text{C}$ Bloch decay spectrum method. ###Code method_13C = BlochDecaySpectrum( channels=["13C"], magnetic_flux_density=11.74, # in T rotor_frequency=3000, # in Hz spectral_dimensions=[ dict( count=8192, spectral_width=5e4, # in Hz reference_offset=2e4, # in Hz label=r"$^{13}$C resonances", ) ], ) ###Output _____no_output_____ ###Markdown Since the spin systems contain both $^{13}\text{C}$ and $^{15}\text{N}$sites, let's also create a $^{15}\text{N}$ Bloch decay spectrum method. ###Code method_15N = BlochDecaySpectrum( channels=["15N"], magnetic_flux_density=11.74, # in T rotor_frequency=3000, # in Hz spectral_dimensions=[ dict( count=8192, spectral_width=4e4, # in Hz reference_offset=7e3, # in Hz label=r"$^{15}$N resonances", ) ], ) ###Output _____no_output_____ ###Markdown Add the methods to the Simulator object and run the simulation ###Code # Add the methods. sim.methods = [method_13C, method_15N] # Run the simulation. sim.run() # Get the simulation data from the respective methods. data_13C = sim.methods[0].simulation # method at index 0 is 13C Bloch decay method. data_15N = sim.methods[1].simulation # method at index 1 is 15N Bloch decay method. ###Output _____no_output_____ ###Markdown Add post-simulation signal processing. ###Code processor = sp.SignalProcessor( operations=[sp.IFFT(), sp.apodization.Exponential(FWHM="10 Hz"), sp.FFT()] ) # apply post-simulation processing to data_13C processed_data_13C = processor.apply_operations(data=data_13C).real # apply post-simulation processing to data_15N processed_data_15N = processor.apply_operations(data=data_15N).real ###Output _____no_output_____ ###Markdown The plot of the simulation after signal processing. ###Code fig, ax = plt.subplots( 1, 2, subplot_kw={"projection": "csdm"}, sharey=True, figsize=(9, 4) ) ax[0].plot(processed_data_13C, color="black", linewidth=0.5) ax[0].invert_xaxis() ax[1].plot(processed_data_15N, color="black", linewidth=0.5) ax[1].set_ylabel(None) ax[1].invert_xaxis() plt.tight_layout() plt.show() ###Output _____no_output_____ ###Markdown Protein GB1, ¹³C and ¹⁵N (I=1/2)¹³C/¹⁵N (I=1/2) spinning sideband simulation. The following is the spinning sideband simulation of a macromolecule, protein GB1. 
The$^{13}\text{C}$ and $^{15}\text{N}$ CSA tensor parameters were obtainedfrom Hung `et al.` [f1]_, which consists of 42 $^{13}\text{C}\alpha$,44 $^{13}\text{CO}$, and 44 $^{15}\text{NH}$ tensors. In the followingexample, instead of creating 130 spin systems, we download the spin systems froma remote file and load it directly to the Simulator object. ###Code import matplotlib.pyplot as plt from mrsimulator import Simulator from mrsimulator.methods import BlochDecaySpectrum from mrsimulator import signal_processing as sp ###Output _____no_output_____ ###Markdown Create the Simulator object and load the spin systems from an external file. ###Code sim = Simulator() file_ = "https://sandbox.zenodo.org/record/687656/files/protein_GB1_15N_13CA_13CO.mrsys" sim.load_spin_systems(file_) # load the spin systems. print(f"number of spin systems = {len(sim.spin_systems)}") all_sites = sim.sites().to_pd() all_sites.head() ###Output _____no_output_____ ###Markdown Create a $^{13}\text{C}$ Bloch decay spectrum method. ###Code method_13C = BlochDecaySpectrum( channels=["13C"], magnetic_flux_density=11.74, # in T rotor_frequency=3000, # in Hz spectral_dimensions=[ { "count": 8192, "spectral_width": 5e4, # in Hz "reference_offset": 2e4, # in Hz "label": r"$^{13}$C resonances", } ], ) ###Output _____no_output_____ ###Markdown Since the spin systems contain both $^{13}\text{C}$ and $^{15}\text{N}$sites, let's also create a $^{15}\text{N}$ Bloch decay spectrum method. ###Code method_15N = BlochDecaySpectrum( channels=["15N"], magnetic_flux_density=11.74, # in T rotor_frequency=3000, # in Hz spectral_dimensions=[ { "count": 8192, "spectral_width": 4e4, # in Hz "reference_offset": 7e3, # in Hz "label": r"$^{15}$N resonances", } ], ) ###Output _____no_output_____ ###Markdown Add the methods to the Simulator object and run the simulation ###Code # Add the methods. sim.methods = [method_13C, method_15N] # Run the simulation. sim.run() # Get the simulation data from the respective methods. data_13C = sim.methods[0].simulation # method at index 0 is 13C Bloch decay method. data_15N = sim.methods[1].simulation # method at index 1 is 15N Bloch decay method. ###Output _____no_output_____ ###Markdown Add post-simulation signal processing. ###Code processor = sp.SignalProcessor( operations=[sp.IFFT(), sp.apodization.Exponential(FWHM="10 Hz"), sp.FFT()] ) # apply post-simulation processing to data_13C processed_data_13C = processor.apply_operations(data=data_13C).real # apply post-simulation processing to data_15N processed_data_15N = processor.apply_operations(data=data_15N).real ###Output _____no_output_____ ###Markdown The plot of the simulation after signal processing. ###Code fig, ax = plt.subplots( 1, 2, subplot_kw={"projection": "csdm"}, sharey=True, figsize=(9, 4) ) ax[0].plot(processed_data_13C, color="black", linewidth=0.5) ax[0].invert_xaxis() ax[1].plot(processed_data_15N, color="black", linewidth=0.5) ax[1].set_ylabel(None) ax[1].invert_xaxis() plt.tight_layout() plt.show() ###Output _____no_output_____
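###Markdown The FWHM="10 Hz" exponential apodization above is the artificial line broadening added to each simulated spectrum. To see how that width choice changes the line shapes, the same SignalProcessor pipeline can be rebuilt with another value; the sketch below reuses only objects defined in the example above, and the 50 Hz figure is an arbitrary illustrative choice, not a recommendation.
###Code
# Broader apodization applied to the same 13C simulation for comparison.
broad_processor = sp.SignalProcessor(
    operations=[sp.IFFT(), sp.apodization.Exponential(FWHM="50 Hz"), sp.FFT()]
)
broader_13C = broad_processor.apply_operations(data=data_13C).real

fig, ax = plt.subplots(subplot_kw={"projection": "csdm"}, figsize=(6, 3.5))
ax.plot(processed_data_13C, color="black", linewidth=0.5)  # 10 Hz broadening
ax.plot(broader_13C, color="gray", linewidth=0.5)          # 50 Hz broadening
ax.invert_xaxis()
plt.tight_layout()
plt.show()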
6 objectSwarmObserverAgents_AESOP_turtleLib_NetworkX/oligopoly/readingCsvOutput_par_corr_BWter.ipynb
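###Markdown The notebook under this path computes partial correlations by first prepending a column of ones to the data matrix, so that every underlying regression carries an intercept (the Wikipedia page it links explains why). A small sketch of that augmentation on synthetic data follows; partial_corr is the helper module the notebook itself takes from the gist it cites, so its availability is an assumption here.
###Code
import numpy as np
import partial_corr  # helper from the gist cited below; assumed to be importable

rng = np.random.default_rng(0)
ts = rng.normal(size=(200, 4))  # synthetic stand-in: 200 periods, 4 series
ts_int = np.hstack((np.ones((ts.shape[0], 1)), ts))  # prepend the intercept column
pcorr = partial_corr.partial_corr(ts_int)[1:, 1:]    # drop the intercept row/column
print(np.round(pcorr, 2))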
###Markdown select rows (time steps) in the databaseactivating the cell below before running the whole program [a:b] => from a+1 to b [:b] => fron init to b [a:] => fron a+1 to end ###Code #ts_df =ts_df [0:45] #str_df=str_df[0:45] ###Output _____no_output_____ ###Markdown ***Parameters*** ###Code par_df.astype(str,errors='ignore') ###Output _____no_output_____ ###Markdown ***Modified parameters*** ###Code modPars_df.astype(str,errors='ignore') ###Output _____no_output_____ ###Markdown ***Time series, data collected at the end of each period*** ###Code if len(ts_df.columns) == 6: ts_df.columns = \ ['unempl.','totalProfit','totalProd.','plannedP.','price','wage'] # to have shorter names if len(ts_df.columns) == 8: ts_df.columns = \ ['unempl.','totalProfit','totalProd.','plannedP.', 'cQ','hPSd','price','wage'] # to have shorter names ts_df ts_df.describe() ts_df.corr(method="pearson").style.format("{:.2}") ###Output _____no_output_____ ###Markdown The origin of the partial_corr source is [https://gist.github.com/fabianp/9396204419c7b638d38f](https://gist.github.com/fabianp/9396204419c7b638d38f)At [http://en.wikipedia.org/wiki/Partial_correlationUsing_linear_regression](http://en.wikipedia.org/wiki/Partial_correlationUsing_linear_regression) we have the explanation of the need of augmenting the data matrix with a 1 to allow for a constant term in the regression. ###Code wn.filterwarnings(action="ignore") # to eliminate a warning about #LAPACK lib np.set_printoptions(precision=2,suppress=True) ts=ts_df.values ts_int = np.hstack((np.ones((ts.shape[0],1)), ts)) out1=partial_corr.partial_corr(ts_int)[1:, 1:] out1 ts=ts_df.drop(columns="plannedP.").values ts_int = np.hstack((np.ones((ts.shape[0],1)), ts)) out2=partial_corr.partial_corr(ts_int)[1:, 1:] out2 ts=ts_df.drop(columns="totalProd.").values ts_int = np.hstack((np.ones((ts.shape[0],1)), ts)) out3=partial_corr.partial_corr(ts_int)[1:, 1:] out3 ts2_df=ts_df if len(ts_df.columns) == 6: ts2_df.columns = \ ['unempl','totalProfit','totalProd','plannedP','price','wage'] if len(ts_df.columns) == 8: ts2_df.columns = \ ['unempl','totalProfit','totalProd','plannedP','cQ','hPSd','price','wage'] result = sm.ols(formula="totalProfit ~ price + wage + totalProd + unempl", \ data=ts2_df).fit() print (result.summary()) ###Output _____no_output_____ ###Markdown ***Structural infos, data collected at the beginning of each period*** ###Code str_df ###Output _____no_output_____ ###Markdown levels of grayhttps://en.wikipedia.org/wiki/Shades_of_gray ###Code myPlot = ts_df.plot(figsize=(11,8),secondary_y=['hPriceSd', 'price','wage'],marker="*", color=["OrangeRed","LawnGreen","Blue","Violet","lightblue","Pink","Gray","Brown"]) myPlot.set_ylabel('unemployed, totalProfit, totalProduction, plannedProduction, consumptionQ') myPlot.right_ax.set_ylabel('hPriceSd, price, wage') myPlot.legend(loc='upper left') #, bbox_to_anchor=(-0.35, 0.5) myPlot.axes.right_ax.legend(loc='lower right') #, bbox_to_anchor=(1.1, 0.5) myPlot = ts_df.plot(figsize=(11,8),secondary_y=['hPriceSd', 'price','wage'],marker="", color=["lightgray","Black","Black","Black","Gray","lightgray","lightgray","lightgray"], style=['-', '--', '-.', ':','-', '--', '-.'], linewidth=1.) 
myPlot.set_ylabel('unemployed, totalProfit, totalProduction, plannedProduction, consumptionQ') myPlot.right_ax.set_ylabel('hPriceSd, price, wage') myPlot.legend(loc='upper left') #, bbox_to_anchor=(-0.35, 0.5) myPlot.axes.right_ax.legend(loc='lower right') #, bbox_to_anchor=(1.1, 0.5) myPlot = ts_df.plot(figsize=(11,8),secondary_y=['hPriceSd', 'price','wage'],marker="", color=["silver","Black","Black","Black","Gray","slategray","slategray","slategray"], style=['-', '--', '-.', ':','-', '--', '-.'], linewidth=2.) myPlot.set_ylabel('unemployed, totalProfit, totalProduction, plannedProduction, consumptionQ') myPlot.right_ax.set_ylabel('hPriceSd, price, wage') myPlot.legend(loc='upper left') #, bbox_to_anchor=(-0.35, 0.5) myPlot.axes.right_ax.legend(loc='lower right') #, bbox_to_anchor=(1.1, 0.5) str_df.plot(figsize=(11,8),secondary_y='workers',marker="*",color=["r","b"]) str_df.plot(figsize=(11,8),secondary_y='workers',marker="*",color=["black", "lightgrey"]) str_df.plot(figsize=(11,8),linewidth=2.0,secondary_y='workers',marker="*",color=["black", "gray"]) ###Output _____no_output_____ ###Markdown Best solutions to produce a LaTeX table from these data (the example is related to ts_df.corr table): corr=ts_df.corr(method='pearson') print corr.to_latex() "print" to have the output nicely formatted; copy and paste it to LaTeX and the result works. To output is included within: \begin{table}[htbp] ... output above ... \label{a label} \caption{a caption} \end{table}We add also size specifications (\footnotesize in this case) and the usual [htbp] specification with \begin{table}[htbp]Other solutions:1. online [http://www.tablesgenerator.com](http://www.tablesgenerator.com), reading the csv file;2. using a converter as [http://html2latex.sourceforge.net](http://html2latex.sourceforge.net). The first method is applied in the cells below. ###Code corr=ts_df.corr(method='pearson') def ff(x): return '%1.2f' % x if len(ts_df.columns) == 6: print ("\\begin{table}[!htbp]\n{\\footnotesize \center") if len(ts_df.columns) == 8: print ("\\begin{table}[!htbp]\n{\\tiny \center") print (corr.to_latex(formatters=[ff,ff,ff,ff,ff,ff,ff,ff])) print("}\n\\caption{Correlations among the time series of the model,"+\ " with xxx}") print("\\label{correlations xxx}\n\\end{table}") ou=out1 if len(ts_df.columns) == 6: names=['unempl.','totalProfit','totalProd.','plannedP.','price','wage'] if len(ts_df.columns) == 8: names=['unempl.','totalProfit','totalProd.','plannedP.','cQ','hPSd','price','wage'] if len(ts_df.columns) == 6: print ("\\begin{table}[!htbp]\n{\\footnotesize \center") if len(ts_df.columns) == 8: print ("\\begin{table}[!htbp]\n{\\tiny \center") if len(ts_df.columns) == 6: print ("\\begin{tabular}{lrrrrrr}\n\\toprule\n"+\ "{} & unempl. & totalProfit & totalProd. & plannedP. & price & wage \\\\"+\ "\n\\midrule") if len(ts_df.columns) == 8: print ("\\begin{tabular}{lrrrrrrrr}\n\\toprule\n"+\ "{} & unempl. & totalProfit & totalProd. & plannedP. 
& cQ & hPSd & price & wage \\\\"+\ "\n\\midrule") for i in range(len(ou)): print(names[i], end="") for j in range(len(ou[i])): print(" & %.2f" % ou[i,j], end="") print(" \\\\") print("\\bottomrule\n\\end{tabular}") print("}\n\\caption{Partial correlations among the time series of the model,"+\ " with xxx}") print("\\label{partial correlations xxx}\n\\end{table}") ou=out2 if len(ts_df.columns) == 6: names=['unempl.','totalProfit','totalProd.','price','wage'] if len(ts_df.columns) == 8: names=['unempl.','totalProfit','totalProd.','cQ','hPSd','price','wage'] print ("\\begin{table}[!htbp]\n{\\footnotesize \center") if len(ts_df.columns) == 6: print ("\\begin{tabular}{lrrrrr}\n\\toprule\n"+\ "{} & unempl. & totalProfit & totalProd. & price & wage \\\\"+\ "\n\\midrule") if len(ts_df.columns) == 8: print ("\\begin{tabular}{lrrrrrrr}\n\\toprule\n"+\ "{} & unempl. & totalProfit & totalProd. & cQ & hPSd & price & wage \\\\"+\ "\n\\midrule") for i in range(len(ou)): print(names[i], end="") for j in range(len(ou[i])): print(" & %.2f" % ou[i,j], end="") print(" \\\\") print("\\bottomrule\n\\end{tabular}") print("}\n\\caption{Partial correlations (no plannedProduction) among the time series of the model,"+\ " with xxx}") print("\\label{partial correlations (no plannedP.) xxx}\n\\end{table}") ou=out3 if len(ts_df.columns) == 6: names=['unempl.','totalProfit','plannedP.','price','wage'] if len(ts_df.columns) == 8: names=['unempl.','totalProfit','plannedP.','cQ','hPSd','price','wage'] print ("\\begin{table}[!htbp]\n{\\footnotesize \center") if len(ts_df.columns) == 6: print ("\\begin{tabular}{lrrrrr}\n\\toprule\n"+\ "{} & unempl. & totalProfit & plannedP. & price & wage \\\\"+\ "\n\\midrule") if len(ts_df.columns) == 8: print ("\\begin{tabular}{lrrrrrrr}\n\\toprule\n"+\ "{} & unempl. & totalProfit & plannedP. & cQ & hPSd & price & wage \\\\"+\ "\n\\midrule") for i in range(len(ou)): print(names[i], end="") for j in range(len(ou[i])): print(" & %.2f" % ou[i,j], end="") print(" \\\\") print("\\bottomrule\n\\end{tabular}") print("}\n\\caption{Partial correlations (no totalProduction) among the time series of the model,"+\ " with xxx}") print("\\label{partial correlations (no totalProd.) xxx}\n\\end{table}") ###Output _____no_output_____ ###Markdown *Data from each firm in each period* ###Code if firms: print(firms_df.describe()) else: print('no data for each firm in each period') ###Output _____no_output_____ ###Markdown *Managing parameter list* ###Code ctitle="" if len(par_df.columns)==2: ctitle=par_df.columns[0] if len(par_df.columns)==3: ctitle=par_df.columns[1] if len(ts_df.columns) == 6: parList=par_df[ctitle].tolist() valList=par_df["Values"].tolist() if len(ts_df.columns) == 8: parList=par_df["Parameter internal names"].tolist() valList=par_df["Values"].tolist() # both parList are generated by the 'print' of parameters.py ###Output _____no_output_____ ###Markdown **dictionay of values*****d_val***it comes from the file \*_par.csv coming from the 'print' of parameters.py**NB** the different versions of the model have different parameters output sequences; the main difference is about the 6 time series case and the 8 time series case in file \*_ts.csv, emerging above\[zip() function take iterables (can be zero or more), makes iterator that aggregates elements based on the iterables passed, and returns an iterator of tuples. 
\]
###Code d_val=dict(zip(parList,valList)) d_val
###Output _____no_output_____
###Markdown **dictionary of positions** ***d_pos*** The dict of positions (file parPos.csv) comes from manual work based on the table of parameter definitions in Appendix B of the book; the goal is to retrieve the parameters of a specific experiment from dict d_val and assign their values to the correct positions in the rows of the parameter value table of Appendix B. The vector (row) is pre-filled with '-' signs for values that do not exist in the specific experiment. The parameter 'checkResConsUnsoldProd' is handled in a special way: this parameter (not affecting the model, but only its output) appears on 20180829 in the 28ter experiment; in the first commit, of 20180830, the name is checkResCons, but it almost immediately became checkResConsUnsoldProd; the commit of 20181013 signals that we have the output from parameters.py (experiment 80 is of 20181009, so it lacks that output); all the experiments from 28ter to 80 implicitly have 'checkResConsUnsoldProd' set to True. The 'w' case is corrected to 'Q'. To check the consistency of the dictionaries, we list the parameters of ***d_val*** that are not found in ***d_pos*** when searching for values (the master dict is ***d_pos***).
###Code labelsPositions_df= pd.read_csv('labelsPositions.csv') #labelsPositions_df parList2=labelsPositions_df["name"].tolist() posList=labelsPositions_df["position"].tolist() d_pos=dict(zip(parList2,posList)) #d_pos row=['-']*53 # 52 parameters, pos. 0 is used for unuseful values row[44]='51' # as default value for the par 'startHayekianMarket' for old # SMAC versions where it was not defined for _ in range(len(parList)): if parList[_]=='w': row[d_pos['Q']]=d_val[parList[_]] if parList[_] in d_pos: row[d_pos[parList[_]]]=d_val[parList[_]] else: print('not found:',parList[_])
###Output _____no_output_____
###Markdown The parameter checkResConsUnsoldProd (not affecting the model, but only its output) appears on 20180829 in the 28ter experiment; in the first commit, of 20180830, the name is checkResCons, but it almost immediately became checkResConsUnsoldProd; the commit of 20181013 signals that we have the output from parameters.py (experiment 80 is of 20181009, so it lacks that output); all the experiments from 28ter to 80 internally have checkResConsUnsoldProd set to True. So from >= 20180829 to <= 20181009 the value of checkResConsUnsoldProd is True. 1535414400 is equivalent to 08/28/2018 @ 12:00am (UTC); 1539129600 is equivalent to 10/10/2018 @ 12:00am (UTC).
###Code import platform def creation_date(path_to_file): """ Try to get the date that a file was created, falling back to when it was last modified if that isn't possible. See http://stackoverflow.com/a/39501288/1709587 for explanation. """ if platform.system() == 'Windows': return os.path.getctime(path_to_file) else: #MacOs stat = os.stat(path_to_file) try: return stat.st_birthtime except AttributeError: # We're probably on Linux. No easy way to get creation dates here, # so we'll settle for when its content was last modified.
return stat.st_mtime #converter https://www.unixtimestamp.com fileTime=creation_date("./"+nameFilePar) if fileTime >= 1535414400 and fileTime <= 1539129600: row[8]='True' #row #for i in range(1,len(row)-1): # print(row[i],"& ",end='') #print(row[-1]) for i in range(1,26): print(row[i],"& ",end='') print(row[26]) for i in range(27,len(row)-1): print(row[i],"& ",end='') if '[' in row[-1]: row[-1]=row[-1][1:5] # [1:5] is to avoid the [ ] output print(row[-1]) ###Output _____no_output_____
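###Markdown A quick check of the two Unix timestamps used above (a sketch added for illustration, not part of the original analysis); the standard library converts them back to the dates quoted in the note.
###Code
from datetime import datetime, timezone
# expected: 2018-08-28 00:00:00+00:00 and 2018-10-10 00:00:00+00:00
print(datetime.fromtimestamp(1535414400, tz=timezone.utc))
print(datetime.fromtimestamp(1539129600, tz=timezone.utc))
###Output _____no_output_____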
phase3/3.1/3_model_training.ipynb
###Markdown cross validation ###Code days = range(1,32) month = 1 for day in days: filename = trips_cluster_data_path + f'/trips_2019_{month}_{day}.csv' print(filename) dfs = [] day_len = [] day_trip_count = [] days = range(1,32) # days = range(1,2) months = range(1,4) # months = range(1,2) for month in months: for day in days: try: sp_seq_df = pd.read_csv(trips_cluster_data_path + f'/lebeled/trips_2019_{month}_{day}.csv', parse_dates=['time_stamp']) except: pass sp_seq_df = sp_seq_df[sp_seq_df['cluster']>-1].sort_values(by=['trip_id', 'time_stamp']) trip_count = sp_seq_df.trip_id.value_counts() trip_count = trip_count[trip_count >= 12] sp_seq_df = sp_seq_df[sp_seq_df.trip_id.isin(trip_count.index)] sp_seq_df['month'] = month sp_seq_df['day'] = day day_len.append(len(sp_seq_df)) day_trip_count.append(len(trip_count)) dfs.append(sp_seq_df) print(day_len) print(day_trip_count) df = pd.concat(dfs, ignore_index=True) df df = df[['trip_id', 'sp', 'day', 'month','route_cluster']] df.head() ###Output _____no_output_____ ###Markdown get rid of mini cluster ###Code import collections import pickle pkl_filename = train_path+"/QBcluster_q1_treshold12.pkl" with open(pkl_filename, 'rb') as file: clusters = pickle.load(file) print("Nb. clusters:", len(clusters)) print("Cluster sizes:", map(len, clusters)) print("Small clusters:", collections.Counter(clusters < 2)[True], end=' /') print("Small clusters:", clusters < 2) print("Streamlines indices of the first cluster:\n", clusters[0].indices) # print("Centroid of the last cluster:\n", clusters[-1].centroid) is_mini_cluster = (clusters == 1) mini_cluster = [] for ci in range(0, len(is_mini_cluster)): if is_mini_cluster[ci]: mini_cluster.append(ci) print(len(mini_cluster)) df.loc[df.route_cluster.isin(mini_cluster), 'route_cluster'] = -1 df.loc[df.route_cluster == -1].sp.value_counts() ###Output _____no_output_____ ###Markdown X and Label shuffle ###Code state_names = [] uniq_r_cluster = df.route_cluster.unique() for cluster in uniq_r_cluster: state_names.append(str(cluster)) state_names[:5] X = [] labels = [] for month in months: for day in days: day_df = df[(df.day == day) & (df.month == month)] uniq_trips = day_df.trip_id.unique() for trip_id in uniq_trips: trip = day_df[day_df.trip_id==trip_id] X.append(trip.sp.to_numpy().astype(str)) # labels.append(['None-start']+[str(l) for l in trip.route_cluster.to_list()]) labels.append([str(l) for l in trip.route_cluster.to_list()]) print(X[0][:5]) print(labels[0][:5]) print(len(X)) print(len(labels)) import random random.seed(42) X2 = X.copy() labels2 = labels.copy() temp = list(zip(X2, labels2)) random.shuffle(temp) X2, labels2 = zip(*temp) X2 = list(X2) labels2 = list(labels2) print(X[0][:5]) print(X2[0][:5]) print(labels[0][:5]) print(labels2[0][:5]) ###Output _____no_output_____ ###Markdown train and test 10-fold cross validata ###Code from collections import Counter import pickle # pkl_filename = data_path+"/osm_json/dbscan.pkl" # with open(pkl_filename, 'rb') as file: # db = pickle.load(file) # sp_list = db.labels_ # sp_list = set(map(str, sp_list)) def score(model, seq, label): p = [] prob,path = model.viterbi(seq) for s in path[1:]: p.append(s[1].name) # c = Counter(p) # tp = 0 # if c[label]: # tp = c[label] # total = sum(c.values()) tp = 0 for _state_name, _label in zip(p, label): if _state_name == _label: tp+=1 total = len(label) # print("best prob : {}% ".format(np.exp(prob)*100)) # print("beat path : {}".format(p)) # print("labels : {}".format(label)) # print("sequen : {}".format(seq)) return 
tp/total from pomegranate import * total = len(X2) fold = int(total/10) start = 0 starts = [] # [0, 952, 1904, 2856, 3808, 4760, 5712, 6664, 7616, 8568] for i in range(10): starts.append(start) start+=fold accuracy_train = [] accuracy_test = [] for start in starts: # 10 X_train = X2[:start]+X2[start+fold:] X_test = X2[start:start+fold] labels_train = labels2[:start]+labels2[start+fold:] labels_test = labels2[start:start+fold] # บรรทัดนี้ปลอดภัยไว้ก่อน uniq_sp = set() for seq in X_train: for sp in seq: uniq_sp.add(sp) uniq_label = set() # for label in labels_train: for label in labels_train: for uniq in list(set(label)): uniq_label.add(uniq) new_label = [] for label2 in labels_train: new_label.append(['None-start']+label2) labels_train = new_label unseen_sp = sp_list - uniq_sp # print(unseen_sp) X_train.append(np.array(list(unseen_sp))) labels_train.append(['None-start'] + ['-1'] * len(unseen_sp)) model = HiddenMarkovModel.from_samples( DiscreteDistribution, n_components=len(uniq_label), X=X_train, labels=labels_train, algorithm='labeled', state_names=list(uniq_label), inertia=0.001, max_iterations=10, n_jobs=-1 ) accuracy_train_percent = [] for percent in [0.25, 0.5, 0.75, 0.9]: accu = 0 for seq, label in zip(X_train, labels_train): sc = score(model, np.array(seq[:int(len(seq)*percent)]), label[1:]) if sc > 0: # accu += sc accu += 1 train_score = accu/len(X_train) accuracy_train_percent.append(train_score) accuracy_train.append(accuracy_train_percent) accuracy_test_percent = [] for percent in [0.25, 0.5, 0.75, 0.9]: accu = 0 for seq, label in zip(X_test, labels_test): sc = score(model, np.array(seq[:int(len(seq)*percent)]), label[1:]) if sc > 0: # accu += sc accu += 1 test_score = accu/len(X_test) accuracy_test_percent.append(test_score) accuracy_test.append(accuracy_test_percent) print(f'Score {start} - {start+fold}') print(f'Train score') print(f'\t25% trip traverse : {accuracy_train_percent[0]}') print(f'\t50% trip traverse : {accuracy_train_percent[1]}') print(f'\t75% trip traverse : {accuracy_train_percent[2]}') print(f'\t90% trip traverse : {accuracy_train_percent[3]}') print(f'Test score') print(f'\t25% trip traverse : {accuracy_test_percent[0]}') print(f'\t50% trip traverse : {accuracy_test_percent[1]}') print(f'\t75% trip traverse : {accuracy_test_percent[2]}') print(f'\t90% trip traverse : {accuracy_test_percent[3]}') print('all score') print('train score: ') for i in range(4): sum = 0 for fold in accuracy_train: sum += fold[i] print(f'\ttrip traverse {i} : {sum/10}') print('test score: ') for i in range(4): sum = 0 for fold in accuracy_test: sum += fold[i] print(f'\ttrip traverse {i} : {sum/10}') ###Output _____no_output_____
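###Markdown As a usage sketch (not part of the original cross-validation), the model left over from the last fold can decode a held-out trip prefix into route clusters with the same `viterbi` call used inside `score`; `X_test` here is simply the test split of the last fold.
###Code
# illustration only: decode the most likely route-cluster sequence for a partial trip
seq = np.array(X_test[0][:int(len(X_test[0]) * 0.5)])  # first half of one held-out trip
log_prob, path = model.viterbi(seq)
decoded_clusters = [state.name for _, state in path[1:]]  # skip the implicit start state
print(np.exp(log_prob), decoded_clusters)
###Output _____no_output_____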
notebook/pretraining_model.ipynb
###Markdown Model config ###Code optimizer_config = { "SGD": { "learning_rate": 1e-1, "end_learning_rate": 1e-3 }, "Adam": { "learning_rate": 1e-3, "end_learning_rate": 1e-5, "weight_decay": 0.01, "epsilon": 1e-8 }, "RAdam": { "learning_rate": 1e-3, "end_learning_rate": 2e-5, "weight_decay": 0.01, "epsilon": 1e-8 } } model_config = { "epochs": 3, "num_train_epochs": 3, "per_gpu_train_batch_size": 16, "per_gpu_eval_batch_size": 32, "batch_size": 128, "max_tokens_length": 512, "threshold": 0.5, "optimizer_method": "Adam", "learning_rate": optimizer_config['Adam']['learning_rate'], "end_learning_rate": optimizer_config['Adam']['end_learning_rate'], "weight_decay": optimizer_config['Adam']['weight_decay'], "epsilon": optimizer_config['Adam']['epsilon'], "lsm": 0.0, "hidden_dropout_prob": 0.1, "max_grad_norm": 1, "use_warmup": True, "n_gpu": torch.cuda.device_count(), "gradient_accumulation_steps": 8, "output_dir": str(pretrain_model_path / run_id), "max_steps": 125000, "logging_steps": 100, "save_steps": 25000, "evaluate_during_training": False, "save_total_limit": 3, "seed": 9527, } ###Output _____no_output_____ ###Markdown Data loader pipeline ###Code #torch.multiprocessing.set_start_method('spawn') #datasets.config.IN_MEMORY_MAX_SIZE @dataclass(eq=False) class GenerateDatasets: #files_list: str = field( # default=None, metadata={"help": "The files list of data path"} #) data_path: str = field( default=None, metadata={"help": "The prefix path of files location"} ) regex_file_format: str = field( default='*.parquet', metadata={"help": "The files format."} ) batch_size: int = field( default=128, metadata={"help": "Batch size"} ) is_training: bool = field( default=True, metadata={"help": "Is use training mode to create data pipeline"} ) device: str = field( default='cpu', metadata={"help": "Which device to use [cpu, cuda]"} ) cache_data_path: str = field( default=None, metadata={"help": "The path to cache data."} ) use_streaming_mode: bool = field( default=False, metadata={"help": "Use streaming mode to download data."} ) def __post_init__(self): self.get_files_list = glob.glob(os.path.join(str(self.data_path), self.regex_file_format)) #self.get_files_list = '/home/jupyter/gogolook/data/jp_data/valid_pretraining_data/valid_all-maxseq512_BG.parquet' self.encoding_columns = ['input_ids', 'token_type_ids', 'attention_mask'] self.target_columns = ['masked_lm_labels', 'next_sentence_labels'] def __call__(self, **kwargs): # data 已經存在 device (cuda) 裡,所以再用 pin_memory 會出現 error # RuntimeError: cannot pin 'torch.cuda.LongTensor' only dense CPU tensors can be pinned dataset = load_dataset('parquet', data_files=self.get_files_list, cache_dir=self.cache_data_path, split='train') dataset.set_format(type='torch', columns=self.encoding_columns + self.target_columns) # , device=self.device #dataset = dataset.rename_column(self.target_column, 'labels') if self.is_training: drop_last = True else: drop_last = False #dataloader = torch.utils.data.DataLoader( # dataset, # batch_size=self.batch_size, # pin_memory=True, # shuffle=True, # drop_last=drop_last, # num_workers=multiprocessing.cpu_count()) return dataset # dataloader get_train_dataset = GenerateDatasets( data_path=training_data_path, batch_size=model_config['batch_size'], is_training=True, device=device, cache_data_path=cache_data_path) get_valid_dataset = GenerateDatasets( data_path=training_data_path, batch_size=model_config['batch_size'], is_training=False, device=device, cache_data_path=cache_data_path) train_dataset = get_train_dataset() valid_dataset 
= get_valid_dataset() ###Output WARNING:datasets.builder:Using custom data configuration default-597f3863485f3654 WARNING:datasets.builder:Reusing dataset parquet (/home/jupyter/gogolook/data/cache_data_dir/parquet/default-597f3863485f3654/0.0.0/03dda9603b6ba3760d9d286684a3d7d8ec00448c154f765795485acd3229ecba) WARNING:datasets.builder:Using custom data configuration default-597f3863485f3654 WARNING:datasets.builder:Reusing dataset parquet (/home/jupyter/gogolook/data/cache_data_dir/parquet/default-597f3863485f3654/0.0.0/03dda9603b6ba3760d9d286684a3d7d8ec00448c154f765795485acd3229ecba) ###Markdown Testing load from gcs ###Code #import gcsfs #from datasets import load_from_disk #gcs = gcsfs.GCSFileSystem(project='data-research-216307') #gcs_files_list = gcs.glob('gs://gogolook-ml-data-production/serve-dev/sms/data/experimental_jp_data/train_pretraining_data/*.parquet') #gcs_files_list = [ "gs://" + path for path in gcs_files_list] #dataset = load_from_disk(dataset_path="gs://gogolook-ml-data-production/serve-dev/sms/data/experimental_jp_data/train_pretraining_data/", fs=gcs) # saves encoded_dataset to your s3 bucket #train_dataset.save_to_disk('gcs://gogolook-ml-data-production/serve-dev/sms/data/experimental_jp_data/preprocessing_dataset', fs=gcs) #train_dataset.save_to_disk('/home/jupyter/gogolook/data/jp_data/preprocessing_dataset/') ###Output _____no_output_____ ###Markdown Streaming test ###Code ''' get_files_list = glob.glob(os.path.join(str(experiment_train_data_path), "*.parquet")) dataset = load_dataset('parquet', data_files=get_files_list[0], cache_dir=cache_data_path, split='train', streaming=True) map_dataset = dataset.map(lambda example: (example["input_ids"], example["token_type_ids"], example["attention_mask"]), batched=True, batch_size=64) shuffled_dataset = map_dataset.shuffle(buffer_size=100, seed=seed) torch_dataset = shuffled_dataset.with_format("torch") assert isinstance(torch_dataset, torch.utils.data.IterableDataset) #sampler = torch.utils.data.Sampler(torch_dataset) #batch_sampler = torch.utils.data.BatchSampler(sampler, 64, False) def worker_init_fn(_): worker_info = torch.utils.data.get_worker_info() dataset = worker_info.dataset worker_id = worker_info.id split_size = len(dataset.data) // worker_info.num_workers dataset.data = dataset.data[worker_id * split_size:(worker_id + 1) * split_size] def worker_init_fn(worker_id): ... worker_info = torch.utils.data.get_worker_info() ... dataset = worker_info.dataset # the dataset copy in this worker process ... overall_start = dataset.start ... overall_end = dataset.end ... # configure the dataset to only process the split workload ... per_worker = int(math.ceil((overall_end - overall_start) / float(worker_info.num_workers))) ... worker_id = worker_info.id ... dataset.start = overall_start + worker_id * per_worker ... 
dataset.end = min(dataset.start + per_worker, overall_end) dataloader = torch.utils.data.DataLoader( torch_dataset, batch_size=128, pin_memory=True, drop_last=False, num_workers=multiprocessing.cpu_count()) def worker_init_fn(_): worker_info = torch.utils.data.get_worker_info() dataset = worker_info.dataset worker_id = worker_info.id split_size = 64 // worker_info.num_workers dataset.data = dataset.data[worker_id * split_size:(worker_id + 1) * split_size] dataloader = torch.utils.data.DataLoader(torch_dataset, batch_size=64, worker_init_fn=worker_init_fn, num_workers=multiprocessing.cpu_count()) ''' #get_train_dataset.get_files_list #get_valid_dataset.get_files_list #model_config['training_steps'] = len(train_dataloader) * model_config['epochs'] #if model_config['use_warmup']: # model_config['warmup_steps'] = int(len(train_dataloader) * model_config['epochs'] * 0.1) # model_config['decay_steps'] = len(train_dataloader) * model_config['epochs'] #else: # model_config['warmup_steps'] = None # model_config['decay_steps'] = None torch.cuda.empty_cache() albert_config = AlbertConfig.from_json_file(albert_zh_path / 'albert_config' / 'albert_config_tiny.json') pretrained_model_name_or_path = 'voidful/albert_chinese_tiny' albert_pretrain_model = AlbertForPreTraining.from_pretrained( pretrained_model_name_or_path, config=albert_config, cache_dir=cache_models_path) albert_pretrain_model.resize_token_embeddings(corpus_size) albert_config if model_config["n_gpu"] > 1: print("Let's use", torch.cuda.device_count(), "GPUs!") #device_ids = [idx for idx in range(torch.cuda.device_count())] #albert_pretrain_model = nn.DataParallel(albert_pretrain_model, device_ids=device_ids) ###Output Let's use 4 GPUs! ###Markdown Define Optimizer ###Code model_params = list(albert_pretrain_model.named_parameters()) no_decay = ["bias", "gamma", "beta", "LayerNorm.weight"] optimizer_grounded_parameters_by_name = [ {'params': [n for n, p in model_params if not any(nd in n for nd in no_decay)], 'weight_decay_rate': 0.0 }, {'params': [n for n, p in model_params if any(nd in n for nd in no_decay)], 'weight_decay_rate': 0.0 } ] optimizer_grounded_parameters_by_name from torch.optim.lr_scheduler import _LRScheduler class PolynomialDecay(_LRScheduler): def __init__(self, optimizer, decay_steps, end_learning_rate=0.0001, power=0.5, cycle=False, last_epoch=-1, verbose=False): if decay_steps <= 1.: raise ValueError('max_decay_steps should be greater than 1.') self.decay_steps = decay_steps self.end_learning_rate = end_learning_rate self.power = power self.cycle = cycle super(PolynomialDecay, self).__init__(optimizer, last_epoch, verbose) def get_lr(self): if not self._get_lr_called_within_step: warnings.warn("To get the last learning rate computed by the scheduler, " "please use `get_last_lr()`.") #dtype = initial_learning_rate.dtype #end_learning_rate = math_ops.cast(self.end_learning_rate, dtype) #power = math_ops.cast(self.power, dtype) #global_step_recomp = math_ops.cast(step, dtype) #decay_steps_recomp = math_ops.cast(self.decay_steps, dtype) global_step_recomp = self.last_epoch decay_steps_recomp = self.decay_steps if self.cycle: if global_step_recomp == 0: multiplier = 1.0 else: multiplier = math.ceil(global_step_recomp / self.decay_steps) decay_steps_recomp = decay_steps_recomp * multiplier else: global_step_recomp = min(global_step_recomp, decay_steps_recomp) p = global_step_recomp / decay_steps_recomp ic(self.last_epoch, optimizer.param_groups[0]['lr'], p) return [((group['lr'] - self.end_learning_rate) * math.pow(1 - p, 
self.power) + self.end_learning_rate) for group in self.optimizer.param_groups] def _get_closed_form_lr(self): return [(base_lr - self.end_learning_rate) * math.pow(1 - p, self.power) + self.end_learning_rate for base_lr in self.base_lrs] from transformers import ( AdamW, get_linear_schedule_with_warmup, ) def get_optimizer(config: dict, model: PreTrainedModel, num_training_steps: int): model_params = list(model.named_parameters()) no_decay = ["bias", "gamma", "beta", "LayerNorm.weight"] optimizer_grouped_parameters = [ {'params': [p for n, p in model_params if not any(nd in n for nd in no_decay)], 'weight_decay_rate': 1e-2 }, {'params': [p for n, p in model_params if any(nd in n for nd in no_decay)], 'weight_decay_rate': 0.0 } ] optimizer = AdamW(optimizer_grouped_parameters, lr=config["learning_rate"], eps=config["epsilon"]) scheduler = get_linear_schedule_with_warmup( optimizer, num_warmup_steps=int(num_training_steps * 0.1), num_training_steps=num_training_steps ) return optimizer, scheduler #optimizer = torch.optim.Adam( # params=optimizer_grounded_parameters, # lr=model_config["learning_rate"], # betas=(0.9, 0.98), # weight_decay=config["weight_decay"], # eps=config["adam_epsilon"]) #scheduler = LinearWarmupCosineAnnealingLR( # optimizer, # warmup_epochs=model_config['warmup_steps'], # max_epochs=model_config['training_steps'], # eta_min=model_config["end_learning_rate"]) #optimizer = optim.SGD(sms_model.parameters(), lr=model_config['init_learning_rate'], weight_decay=1e-4) #optimizer = optim.SGD(filter(lambda p: p.requires_grad, sms_model.parameters()), lr=model_config['init_learning_rate'], weight_decay=1e-4) #scheduler = CyclicLR( # optimizer, # base_lr=1e-5, # max_lr=model_config['init_learning_rate'], # step_size_up=model_config['training_steps'] * 1, # mode='triangular2', # scale_mode='cycle', # cycle_momentum=False #) #if model_config["use_multi_gpus"]: #optimizer = nn.DataParallel(optimizer, device_ids=device_ids) # Define Mertice from torchmetrics import MetricCollection metric_collection = MetricCollection([ torchmetrics.Accuracy(num_classes=2, average='macro', multiclass=True, dist_sync_on_step=True, mdmc_average='global').to(device), torchmetrics.Precision(num_classes=2, average='macro', multiclass=True, dist_sync_on_step=True, mdmc_average='global').to(device), torchmetrics.Recall(num_classes=2, average='macro', multiclass=True, dist_sync_on_step=True, mdmc_average='global').to(device), torchmetrics.F1(num_classes=2, average='macro', multiclass=True, dist_sync_on_step=True, mdmc_average='global').to(device) ], prefix='Train_') val_metric_collection = MetricCollection([ torchmetrics.Accuracy(num_classes=2, average='macro', multiclass=True, dist_sync_on_step=True, mdmc_average='global').to(device), torchmetrics.Precision(num_classes=2, average='macro', multiclass=True, dist_sync_on_step=True, mdmc_average='global').to(device), torchmetrics.Recall(num_classes=2, average='macro', multiclass=True, dist_sync_on_step=True, mdmc_average='global').to(device), torchmetrics.F1(num_classes=2, average='macro', multiclass=True, dist_sync_on_step=True, mdmc_average='global').to(device), ], prefix='Val_') ###Output _____no_output_____ ###Markdown Training model ###Code def set_seed(seed): random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) def _sorted_checkpoints(config, checkpoint_prefix="checkpoint", use_mtime=False) -> List[str]: if not os.path.isdir(config["output_dir"]): os.makedirs(config["output_dir"], exist_ok=True) 
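# collect the existing checkpoint-<step> directories under output_dir and sort them,
# by step number by default or by modification time when use_mtime=True, so that
# _rotate_checkpoints below can delete the oldest ones first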
ordering_and_checkpoint_path = [] glob_checkpoints = glob.glob(os.path.join(config["output_dir"], "{}-*".format(checkpoint_prefix))) for path in glob_checkpoints: if use_mtime: ordering_and_checkpoint_path.append((os.path.getmtime(path), path)) else: regex_match = re.match(".*{}-([0-9]+)".format(checkpoint_prefix), path) if regex_match and regex_match.groups(): ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path)) checkpoints_sorted = sorted(ordering_and_checkpoint_path) checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted] return checkpoints_sorted def _rotate_checkpoints(config, checkpoint_prefix="checkpoint", use_mtime=False) -> None: if not config["save_total_limit"]: return if config["save_total_limit"] <= 0: return # Check if we should delete older checkpoint(s) checkpoints_sorted = _sorted_checkpoints(config, checkpoint_prefix, use_mtime) if len(checkpoints_sorted) <= config["save_total_limit"]: return number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - config["save_total_limit"]) checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete] for checkpoint in checkpoints_to_be_deleted: logger.info("Deleting older checkpoint [{}] due to args.save_total_limit".format(checkpoint)) shutil.rmtree(checkpoint) ###Output _____no_output_____ ###Markdown Init wandb ###Code #wandb.tensorboard.patch(root_logdir=str(tensorboard_path / run_id)) wandb.init( project=project, group=group_tag, job_type=job_type, name=run_id, notes=method_tag, tags=addition_tag, sync_tensorboard=True, config={**model_config}, reinit=True ) wandb_config = wandb.config ###Output wandb: Currently logged in as: yuyuliao20 (use `wandb login --relogin` to force relogin) wandb: wandb version 0.12.1 is available! To upgrade, please run: wandb: $ pip install wandb --upgrade ###Markdown Define training stepsm ###Code from tqdm import tqdm, trange, tqdm_notebook def training_step( config: dict, train_dataset: torch.utils.data.Dataset, eval_dataset: torch.utils.data.Dataset, model: PreTrainedModel, device: str, init_wandb: object ): if args.local_rank in [-1, 0]: tb_writer = SummaryWriter() train_batch_size = config["per_gpu_train_batch_size"] * max(1, config["n_gpu"]) ic(train_batch_size) train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset) train_dataloader = DataLoader( train_dataset, sampler=train_sampler, batch_size=train_batch_size, pin_memory=True, drop_last=True, num_workers=multiprocessing.cpu_count()) if config["max_steps"] > 0: t_total = config["max_steps"] config["num_train_epochs"] = config["max_steps"] // (len(train_dataloader) // config["gradient_accumulation_steps"]) + 1 else: t_total = len(train_dataloader) // config["gradient_accumulation_steps"] * config["num_train_epochs"] optimizer, scheduler = get_optimizer(config, model, t_total) if config["n_gpu"]: model = torch.nn.DataParallel(model) if args.local_rank != -1: model = torch.nn.parallel.DistributedDataParallel( model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True ) # Train ! logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_dataset)) logger.info(" Num Epochs = %d", config["num_train_epochs"]) logger.info(" Instantaneous batch size per GPU = %d", config["per_gpu_train_batch_size"]) logger.info( " Total train batch size (w. 
parallel, distributed & accumulation) = %d", train_batch_size * config["gradient_accumulation_steps"]* (torch.distributed.get_world_size() if args.local_rank != -1 else 1), ) logger.info(" Gradient Accumulation steps = %d", config["gradient_accumulation_steps"]) logger.info(" Total optimization steps = %d", t_total) set_seed(config["seed"]) global_step = 0 epochs_trained = 0 train_loss, logging_loss = 0.0, 0.0 #train_iterator = trange( # epochs_trained, int(config["num_train_epochs"]), desc="Epoch", disable=args.local_rank not in [-1, 0] #) train_iterator = tqdm_notebook(range( epochs_trained, int(config["num_train_epochs"])), desc="Epoch", disable=args.local_rank not in [-1, 0] ) scaler = GradScaler() model.train() for epoch in train_iterator: #epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]) epoch_iterator = tqdm_notebook(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]) if args.local_rank != -1: train_sampler.set_epoch(epoch) for step, batch in enumerate(epoch_iterator): input_ids = batch['input_ids'].to(device) attention_mask = batch['attention_mask'].to(device) token_type_ids = batch['token_type_ids'].to(device) mlm_labels = batch['masked_lm_labels'].to(device) sop_labels = batch['next_sentence_labels'].to(device) with autocast(): # Forward pass outputs = model( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=mlm_labels, sentence_order_label=sop_labels ) assert outputs.prediction_logits.dtype is torch.float16 if config["n_gpu"] > 1: loss = outputs.loss.mean() # mean() to average on multi-gpu parallel training if config["gradient_accumulation_steps"] > 1: loss = loss / config["gradient_accumulation_steps"] assert loss.dtype is torch.float32 scaler.scale(loss).backward() train_loss += loss.item() if (step + 1) % config["gradient_accumulation_steps"] == 0: #torch.nn.utils.clip_grad_norm_(model.parameters(), config["max_grad_norm"]) # Backward pass # Zero gradients, perform a backward pass, and update the weights. 
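# torch.cuda.amp: backward() above ran on a scaled loss, so scaler.step() first unscales the
# gradients, skips the optimizer step if any gradient overflowed to inf/NaN, and scaler.update()
# then adapts the scale factor for the next iteration.
# If the gradient clipping commented out above were re-enabled, the gradients would have to be
# unscaled first, roughly:
#   scaler.unscale_(optimizer)
#   torch.nn.utils.clip_grad_norm_(model.parameters(), config["max_grad_norm"])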
scaler.step(optimizer) scaler.update() scheduler.step() # Update learning rate schedule model.zero_grad() global_step += 1 if (args.local_rank in [-1, 0]) and (config["logging_steps"] > 0) and (global_step % config["logging_steps"] == 0): ic(global_step % config["logging_steps"]) # Log metrics if (args.local_rank == -1 and config["evaluate_during_training"]): # Only evaluate when single GPU otherwise metrics may not average well results = evaluate_step( config=config, model=model, dataset=eval_dataset, device=device, init_wandb=init_wandb ) for key, value in results.items(): tb_writer.add_scalar("eval_{}".format(key), value, global_step) #last_lr = optimizer.param_groups[0]['lr'] #last_lr = scheduler.optimizer.param_groups[0]["lr"] print("=== Sent event to wandb===") init_wandb.log({'lr': scheduler.get_lr()[0]}, step=global_step) show_logs( _wandb=init_wandb, loss=(train_loss - logging_loss) / config["logging_steps"], step=global_step ) tb_writer.add_scalar("lr", scheduler.get_lr()[0], global_step) tb_writer.add_scalar("loss", (train_loss - logging_loss) / config["logging_steps"], global_step) logging_loss = train_loss if (args.local_rank in [-1, 0]) and (config["save_steps"] > 0) and (global_step % config["save_steps"] == 0): checkpoint_prefix = "checkpoint" # Save model checkpoint output_dir = os.path.join(config["output_dir"], "{}-{}".format(checkpoint_prefix, global_step)) os.makedirs(output_dir, exist_ok=True) model_to_save = ( model.module if hasattr(model, "module") else model ) # Take care of distributed/parallel training model_to_save.save_pretrained(output_dir) logger.info("Saving model checkpoint to %s", output_dir) _rotate_checkpoints(args, checkpoint_prefix) if config["max_steps"]> 0 and global_step > config["max_steps"]: epoch_iterator.close() break if config["max_steps"] > 0 and global_step > config["max_steps"]: train_iterator.close() break if args.local_rank in [-1, 0]: tb_writer.close() return global_step, tr_loss / global_step def evaluate_step( config: dict, model: PreTrainedModel, dataset: torch.utils.data.Dataset, device: str, init_wandb: object, prefix: Optional[str]="") -> dict: eval_output_dir = config["output_dir"] if args.local_rank in [-1, 0]: os.makedirs(eval_output_dir, exist_ok=True) eval_batch_size = config["per_gpu_eval_batch_size"] * max(1, config["n_gpu"]) eval_sampler = SequentialSampler(dataset) eval_dataloader = DataLoader( dataset, sampler=eval_sampler, batch_size=eval_batch_size, pin_memory=True, drop_last=False, num_workers=multiprocessing.cpu_count()) if config["n_gpu"] > 1: model = torch.nn.DataParallel(model) # Eval! 
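# the evaluation loop below runs the held-out set without gradients, averages the loss,
# and reports exp(mean loss) as a perplexity-style score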
logger.info("***** Running evaluation {} *****".format(prefix)) logger.info(" Num examples = %d", len(dataset)) logger.info(" Batch size = %d", eval_batch_size) eval_loss = 0.0 nb_eval_steps = 0 model.eval() for batch in tqdm_notebook(eval_dataloader, desc="Evaluating"): input_ids = batch['input_ids'].to(device) attention_mask = batch['attention_mask'].to(device) token_type_ids = batch['token_type_ids'].to(device) mlm_labels = batch['masked_lm_labels'].to(device) sop_labels = batch['next_sentence_labels'].to(device) with torch.no_grad(): outputs = model( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=mlm_labels, sentence_order_label=sop_labels ) loss = outputs.loss.mean() eval_loss += loss.item() nb_eval_steps += 1 eval_loss = eval_loss / nb_eval_steps perplexity = torch.exp(torch.tensor(eval_loss)) result = { "loss": eval_loss, "perplexity": perplexity } show_logs(init_wandb, eval_loss, nb_eval_steps, prefix="Eval", perplexity=perplexity.item()) return result ''' def training_step(model, input_ids, attention_mask, token_type_ids, mlm_labels, sop_labels, scaler, use_multi_gpus=False): with autocast(): # Forward pass outputs = model( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=mlm_labels, sentence_order_label=sop_labels ) assert outputs.prediction_logits.dtype is torch.float16 loss = outputs.loss.mean() assert loss.dtype is torch.float32 # Backward pass # Zero gradients, perform a backward pass, and update the weights. optimizer.zero_grad() scaler.scale(loss).backward() #if use_multi_gpus: # scaler.step(optimizer.module) #else: scaler.step(optimizer) scaler.update() #torch.nn.utils.clip_grad_norm_(optimizer_grounded_parameters, max_norm=0.5) #if epoch > swa_start: # swa_model.update_parameters(model) # swa_scheduler.step() #else: # scheduler.step() return loss @torch.no_grad() def validataion_step(model, input_ids, attention_mask, token_type_ids, mlm_labels, sop_labels): with autocast(): outputs = model( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=mlm_labels, sentence_order_label=sop_labels ) loss = outputs.loss.mean() return loss ''' ###Output _____no_output_____ ###Markdown Training ###Code print('[RUN ID]: {}'.format(run_id)) torch.cuda.empty_cache() use_epoch_tracking = False use_step_tracking = True #wandb.watch(sms_model, log="all", log_freq=1000) def show_logs(_wandb, loss, step, is_epoch=False, prefix='Train', **kwargs): loss = float(loss) if is_epoch: _wandb.log({"epoch": step, f"{prefix}_loss": loss}, step=step) else: _wandb.log({f"{prefix}_step_loss": loss}, step=step) #print(f"{prefix} loss after " + str(example_ct).zfill(5) + f" examples: {loss:.3f}") if "perplexity" in kwargs.keys(): _wandb.log({f"{prefix}_perplexity": kwargs["perplexity"]}, step=step) def save_model(model, save_model_path): logging.info("[INFO] Start to save model ...") #if not save_model_path.parent.exists(): # save_model_path.parent.mkdir() torch.save(model.state_dict(), save_model_path) start_time = time.time() albert_pretrain_model.to(device) if args.local_rank not in [-1, 0]: torch.distributed.barrier() # End of barrier to make sure only the first process in distributed training download model & vocab logger.info("Training/evaluation parameters %s", args) global_step, tr_loss = training_step( config=model_config, train_dataset=train_dataset, eval_dataset=valid_dataset, model=albert_pretrain_model, device=device, init_wandb=wandb) ic(" global_step = %s, average loss = %s", 
global_step, tr_loss) end_time = time.time() each_steps_compute_time = (end_time - start_time) print(each_steps_compute_time) ''' print('[RUN ID]: {}'.format(run_id)) torch.cuda.empty_cache() use_epoch_tracking = False use_step_tracking = True #wandb.watch(sms_model, log="all", log_freq=1000) def show_logs(loss, step, is_epoch=False, prefix='Train', **kwargs): loss = float(loss) if is_epoch: wandb.log({"epoch": step, f"{prefix}_loss": loss}, step=step) else: wandb.log({f"{prefix}_step_loss": loss}, step=step) #print(f"{prefix} loss after " + str(example_ct).zfill(5) + f" examples: {loss:.3f}") if "perplexity" in kwargs.keys(): wandb.log({f"{prefix}_perplexity": kwargs["perplexity"]}, step=step) def save_model(model, save_model_path): logging.info("[INFO] Start to save model ...") #if not save_model_path.parent.exists(): # save_model_path.parent.mkdir() torch.save(model.state_dict(), save_model_path) # Creates a GradScaler once at the beginning of training. scaler = GradScaler() for epoch in tqdm(range(model_config['epochs'])): # model_config['epochs'] start_time = time.time() train_batch_loss = 0 valid_batch_loss = 0 train_perplexity = 0 valid_perplexity = 0 # Training Step albert_pretrain_model = albert_pretrain_model.train() for step, train_batch in tqdm(enumerate(train_dataloader), dynamic_ncols=False, bar_format="{n_fmt}/{total_fmt}{bar} ETA: {remaining}s - {desc}", total=len(train_dataloader), leave=True, unit='steps'): input_ids = train_batch['input_ids'].to(device) attention_mask = train_batch['attention_mask'].to(device) token_type_ids = train_batch['token_type_ids'].to(device) mlm_labels = train_batch['masked_lm_labels'].to(device) sop_labels = train_batch['next_sentence_labels'].to(device) train_loss = training_step( model=albert_pretrain_model, input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, mlm_labels=mlm_labels, sop_labels=sop_labels, scaler=scaler, use_multi_gpus=model_config["use_multi_gpus"] ) scheduler.step() train_batch_loss += train_loss.item() train_perplexity += torch.exp(train_loss) #if model_config["use_multi_gpus"]: # last_lr = optimizer.module.param_groups[0]['lr'] #else: #last_lr = optimizer.param_groups[0]['lr'] last_lr = scheduler.optimizer.param_groups[0]["lr"] if use_step_tracking: record_step = (step + 1) + (len(train_dataloader)) * epoch wandb.log({'learning_rate': last_lr}, step=record_step) show_logs( train_batch_loss / record_step, record_step, perplexity=train_perplexity.item() / record_step) save_model_checkpoint_path = str(save_model_path / f'{wandb.run.name}_{epoch}_model_weight.pt') save_model(albert_pretrain_model, save_model_checkpoint_path) if use_epoch_tracking: train_epoch_loss = train_batch_loss / step wandb.log({'learning_rate': last_lr}, step=epoch) show_log(train_epoch_loss, epoch, is_epoch=True) end_time = time.time() each_steps_compute_time = (end_time - start_time) print(each_steps_compute_time) ''' wandb.finish() ###Output _____no_output_____ ###Markdown Save model ###Code save_models_path = main_model_path / wandb.run.name if not save_models_path.exists(): save_models_path.mkdir() torch.save({ 'epoch': epoch, 'model_state_dict': albert_pretrain_model.state_dict(), 'optimizer_state_dict': optimizer.state_dict(), }, str(save_models_path / 'jp_pretrain_model.pt')) torch.save(albert_pretrain_model.state_dict(), str(save_models_path / 'jp_pretrain_model_weight.pt')) checkpoint = torch.load(str(save_models_path / 'jp_pretrain_model_weight.pt')) albert_pretrain_model.load_state_dict(checkpoint) 
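# quick inspection of the reloaded weights; note that .module (used in the next cell) only
# exists when the model is wrapped in nn.DataParallel, otherwise state_dict() alone is enough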
albert_pretrain_model.module.state_dict().keys() checkpoint.keys() assert 1 == 2 ###Output _____no_output_____ ###Markdown Test ###Code print('[RUN ID]: {}'.format(run_id)) torch.cuda.empty_cache() use_epoch_tracking = False use_step_tracking = True #wandb.watch(sms_model, log="all", log_freq=1000) def show_logs(loss, step, is_epoch=False, prefix='Train', **kwargs): loss = float(loss) if is_epoch: wandb.log({"epoch": step, f"{prefix}_loss": loss}, step=step) else: wandb.log({f"{prefix}_step_loss": loss}, step=step) #print(f"{prefix} loss after " + str(example_ct).zfill(5) + f" examples: {loss:.3f}") if "perplexity" in kwargs.keys(): wandb.log({f"{prefix}_perplexity": kwargs["perplexity"]}, step=step) # Creates a GradScaler once at the beginning of training. scaler = GradScaler() for epoch in tqdm(range(model_config['epochs'])): # model_config['epochs'] start_time = time.time() train_batch_loss = 0 valid_batch_loss = 0 train_perplexity = 0 valid_perplexity = 0 # Training Step albert_pretrain_model = albert_pretrain_model.train() for step, train_batch in tqdm(enumerate(train_dataloader), dynamic_ncols=False, bar_format="{n_fmt}/{total_fmt}{bar} ETA: {remaining}s - {desc}", total=len(train_dataloader), leave=True, unit='steps'): input_ids = train_batch['input_ids'].to(device) attention_mask = train_batch['attention_mask'].to(device) token_type_ids = train_batch['token_type_ids'].to(device) mlm_labels = train_batch['masked_lm_labels'].to(device) sop_labels = train_batch['next_sentence_labels'].to(device) train_loss = training_step( model=albert_pretrain_model, input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, mlm_labels=mlm_labels, sop_labels=sop_labels, scaler=scaler, use_multi_gpus=model_config["use_multi_gpus"] ) scheduler.step() train_batch_loss += train_loss.item() train_perplexity += torch.exp(train_loss) #if model_config["use_multi_gpus"]: # last_lr = optimizer.module.param_groups[0]['lr'] #else: #last_lr = optimizer.param_groups[0]['lr'] last_lr = scheduler.optimizer.param_groups[0]["lr"] if use_step_tracking: record_step = (step + 1) * (epoch + 1) wandb.log({'learning_rate': last_lr}, step=record_step) show_logs( train_batch_loss / record_step, record_step, perplexity=train_perplexity.item() / record_step) if use_epoch_tracking: train_epoch_loss = train_batch_loss / step wandb.log({'learning_rate': last_lr}, step=epoch) show_log(train_epoch_loss, epoch, is_epoch=True) #train_metric_records = metric_collection.compute() #wandb.log(train_metric_records, step=epoch) # Validation Step albert_pretrain_model = albert_pretrain_model.eval() for step, valid_batch in tqdm(enumerate(val_dataloader), dynamic_ncols=False, bar_format="{n_fmt}/{total_fmt}{bar} ETA: {remaining}s - {desc}", total=len(val_dataloader), leave=True, unit='steps'): input_ids = valid_batch['input_ids'].to(device) attention_mask = valid_batch['attention_mask'].to(device) token_type_ids = valid_batch['token_type_ids'].to(device) mlm_labels = valid_batch['masked_lm_labels'].to(device) sop_labels = valid_batch['next_sentence_labels'].to(device) valid_loss = validataion_step( model=albert_pretrain_model, input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, mlm_labels=mlm_labels, sop_labels=sop_labels ) valid_batch_loss += valid_loss.item() valid_perplexity += torch.exp(valid_loss) if use_step_tracking: record_step = (step + 1) * (epoch + 1) show_logs( valid_batch_loss / record_step, record_step, prefix='valid', perplexity=valid_perplexity.item() / record_step) 
#sk_metrics = sklearn_metrics(val_outputs, labels, 'train') #ic(sk_metrics) #ic(val_metric_collection(outputs, labels).compute()) #ic(val_metric_collection(outputs, labels)) if use_epoch_tracking: valid_epoch_loss = valid_batch_loss / step show_logs(valid_epoch_loss, epoch, is_epoch=True, prefix='Val') #val_metric_records = val_metric_collection.compute() #wandb.log(val_metric_records, step=epoch) loss_template = ("Epoch {}/{} - {:.0f}s {:.0f}ms/step - lr:{:} - loss: {:.6f} - val_loss: {:.6f}") #metrics_template = ( # """ # categorical_accuracy: {:.4f} - f1_score: {:.4f} - multi_precision: {:.4f} - multi_recall: {:.4f} # val_categorical_accuracy: {:.4f} - val_f1_score: {:.4f} - val_multi_precision: {:.4f} - val_multi_recall: {:.4f} # """ #) end_time = time.time() each_steps_compute_time = (end_time - start_time) print(loss_template.format( epoch, model_config['epochs'], each_steps_compute_time, each_steps_compute_time * 1000 / model_config['training_steps'], last_lr, train_epoch_loss, val_epoch_loss) ) #print(metrics_template.format( # train_metric_records['Train_Accuracy'], # train_metric_records['Train_F1'], # train_metric_records['Train_Precision'], # train_metric_records['Train_Recall'], # val_metric_records['Val_Accuracy'], # val_metric_records['Val_F1'], # val_metric_records['Val_Precision'], # val_metric_records['Val_Recall'] #)) if use_epoch_tracking: metric_collection.reset() val_metric_collection.reset() %matplotlib inline import math from torch.optim.lr_scheduler import _LRScheduler from torch import nn from torch import cuda from torch import optim from torch.optim.swa_utils import AveragedModel, SWALR from torch.optim.lr_scheduler import CosineAnnealingLR, CyclicLR from pl_bolts.optimizers.lr_scheduler import LinearWarmupCosineAnnealingLR class NeuralNetwork(nn.Module): def __init__(self): super(NeuralNetwork, self).__init__() self.flatten = nn.Flatten() self.linear_relu_stack = nn.Sequential( nn.Linear(28*28, 512), nn.ReLU(), nn.Linear(512, 512), nn.ReLU(), nn.Linear(512, 10), nn.ReLU() ) def forward(self, x): x = self.flatten(x) logits = self.linear_relu_stack(x) return logits net = NeuralNetwork() optimizer = optim.SGD(net.parameters(), lr = 1e-2) lambda1 = lambda epoch: 0.2 if epoch % 5 == 0 else 1 lambda2 = lambda epoch: 0.2 #scheduler = optim.lr_scheduler.MultiplicativeLR(optimizer, lr_lambda = lambda2) #scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[5,10,15], gamma=0.1) #scheduler = optim.lr_scheduler.ExponentialLR(optimizer, gamma = 0.9) #scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda step: PolynomialDecay(step)) class PolynomialDecay(_LRScheduler): def __init__(self, optimizer, decay_steps, end_learning_rate=0.0001, power=0.5, cycle=False, last_epoch=-1, verbose=False): if decay_steps <= 1.: raise ValueError('max_decay_steps should be greater than 1.') self.decay_steps = decay_steps self.end_learning_rate = end_learning_rate self.power = power self.cycle = cycle super(PolynomialDecay, self).__init__(optimizer, last_epoch, verbose) def get_lr(self): if not self._get_lr_called_within_step: warnings.warn("To get the last learning rate computed by the scheduler, " "please use `get_last_lr()`.") #dtype = initial_learning_rate.dtype #end_learning_rate = math_ops.cast(self.end_learning_rate, dtype) #power = math_ops.cast(self.power, dtype) #global_step_recomp = math_ops.cast(step, dtype) #decay_steps_recomp = math_ops.cast(self.decay_steps, dtype) global_step_recomp = self.last_epoch decay_steps_recomp = self.decay_steps if 
self.cycle: if global_step_recomp == 0: multiplier = 1.0 else: multiplier = math.ceil(global_step_recomp / self.decay_steps) decay_steps_recomp = decay_steps_recomp * multiplier else: global_step_recomp = min(global_step_recomp, decay_steps_recomp) p = global_step_recomp / decay_steps_recomp #c(self.last_epoch, optimizer.param_groups[0]['lr'], p) return [((group['lr'] - self.end_learning_rate) * math.pow(1 - p, self.power) + self.end_learning_rate) for group in self.optimizer.param_groups] def _get_closed_form_lr(self): return [(base_lr - self.end_learning_rate) * math.pow(1 - p, self.power) + self.end_learning_rate for base_lr in self.base_lrs] def polynomial_decay_scale_fun(global_steps, initial_learning_rate=1e-2, decay_steps=100, power=0.5, end_learning_rate=1e-5, cycle=False): if cycle: if global_steps == 0: multiplier = 1.0 else: multiplier = math.ceil(global_steps / decay_steps) decay_steps = decay_steps * multiplier else: global_steps = min(global_steps, decay_steps) p = global_steps / decay_steps #ic(global_steps, p) return (initial_learning_rate - end_learning_rate) * math.pow(1 - p, power) + end_learning_rate #optimizer = optim.SGD(net.parameters(), lr=1e-2) optimizer = optim.Adam(net.parameters(), lr=1e-3) #scheduler = PolynomialDecay(optimizer, decay_steps=1000, end_learning_rate=1e-5) scheduler = LinearWarmupCosineAnnealingLR( optimizer, warmup_epochs=model_config['warmup_steps'], max_epochs=model_config['training_steps'], eta_min=model_config["end_learning_rate"]) #scheduler = optim.lr_scheduler.CyclicLR( # optimizer, # base_lr=1e-5, # max_lr=1e-2, # step_size_up=20, # scale_fn=polynomial_decay_scale_fun, # mode='triangular2', # scale_mode='cycle', # cycle_momentum=False) iteration = model_config['epochs'] scheduler_lr_list = [] for epoch in range(1, iteration): scheduler.step() #print(epoch, scheduler.get_last_lr()[0]) scheduler_lr_list.append(scheduler.get_last_lr()[0]) plt.xlabel('Training Iterations') plt.ylabel('Learning Rate') plt.title("CLR - 'triangular' Policy") plt.plot(range(1, iteration), scheduler_lr_list) ###Output _____no_output_____
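###Markdown A minimal sketch (not in the original run) of how a `checkpoint-<global_step>` directory written by `save_pretrained` inside `training_step` could be reloaded later; the step number below is only an example.
###Code
# reload a pretraining checkpoint from output_dir (the step number is illustrative)
ckpt_dir = os.path.join(model_config["output_dir"], "checkpoint-125000")
reloaded_model = AlbertForPreTraining.from_pretrained(ckpt_dir)
reloaded_model.eval()
###Output _____no_output_____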
translation_final_lin.ipynb
###Markdown Project for Machine Learning- Key words: `NMT`, `Transformer`, `PyTorch`, `Multi30k` ###Code import math import time import torch import torchtext import torch.nn as nn from torch.utils.data import DataLoader from torchtext.data.utils import get_tokenizer from torch.optim.lr_scheduler import StepLR,LambdaLR import matplotlib.pyplot as plt import numpy as np from utils import * from my_transformer_lin import * % matplotlib inline SEED = 42 np.random.seed(SEED) torch.manual_seed(SEED) torch.cuda.manual_seed(SEED) torch.backends.cudnn.deterministic = True # torch.use_deterministic_algorithms(True) ###Output UsageError: Line magic function `%` not found. ###Markdown Data Prerocessing ###Code pth_base = "./.data/multi30k/task1/raw/" train_pths = ('train.de', 'train.en') val_pths = ('val.de', 'val.en') test_pths = ('test_2016_flickr.de', 'test_2016_flickr.en') train_filepaths = [(pth_base + pth) for pth in train_pths] val_filepaths = [(pth_base + pth) for pth in val_pths] test_filepaths = [(pth_base + pth) for pth in test_pths] de_tokenizer = get_tokenizer('spacy', language='de_core_news_sm') en_tokenizer = get_tokenizer('spacy', language='en_core_web_sm') de_vocab = build_vocab(train_filepaths[0], de_tokenizer, min_freq=3) en_vocab = build_vocab(train_filepaths[1], en_tokenizer, min_freq=3) train_data = data_process(train_filepaths, de_vocab, en_vocab, de_tokenizer, en_tokenizer) val_data = data_process(val_filepaths, de_vocab, en_vocab, de_tokenizer, en_tokenizer) test_data = data_process(test_filepaths, de_vocab, en_vocab, de_tokenizer, en_tokenizer) device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') print(device) print("train size:", len(train_data)) print("val size:", len(val_data)) print("test size:", len(test_data)) print("de vocab size:", len(de_vocab)) print("en vocab size:", len(en_vocab)) ###Output cpu train size: 29000 val size: 1014 test size: 1000 de vocab size: 5374 en vocab size: 4555 ###Markdown Hyper-parameters Tuning ###Code SRC_VOCAB_SIZE = len(de_vocab) TGT_VOCAB_SIZE = len(en_vocab) BATCH_SIZE = 128 NUM_ENCODER_LAYERS = 3 # no help, 3 is better NUM_DECODER_LAYERS = 3 # no help, 3 is better EMB_SIZE = 256 FFN_HID_DIM = 512 NHEAD = 8 # no help, hard converge DROPOUT = 0.1 NUM_EPOCHS = 50 LEARNING_RATE = 0.0001 # LR_STEP = 30 # warmup_steps = 4000 model_name = "./models/transformer-6-3-1" PAD_IDX = de_vocab['<pad>'] BOS_IDX = de_vocab['<bos>'] EOS_IDX = de_vocab['<eos>'] train_iter = DataLoader(train_data, batch_size=BATCH_SIZE, shuffle=True, collate_fn=get_collate_fn(PAD_IDX,BOS_IDX,EOS_IDX)) valid_iter = DataLoader(val_data, batch_size=BATCH_SIZE, shuffle=True, collate_fn=get_collate_fn(PAD_IDX,BOS_IDX,EOS_IDX)) test_iter = DataLoader(test_data, batch_size=BATCH_SIZE, shuffle=True, collate_fn=get_collate_fn(PAD_IDX,BOS_IDX,EOS_IDX)) ###Output _____no_output_____ ###Markdown Model Setup ###Code transformer = MyTf(NUM_ENCODER_LAYERS, NUM_DECODER_LAYERS, EMB_SIZE, NHEAD, SRC_VOCAB_SIZE, TGT_VOCAB_SIZE, PAD_IDX, FFN_HID_DIM, DROPOUT) transformer = transformer.to(device) # lrate = lambda step_num: EMB_SIZE**-0.5 * np.minimum(step_num**-0.5,step_num*warmup_steps**-1.5) # scheduler = StepLR(optimizer, step_size=LR_STEP, gamma=0.1) loss_fn = torch.nn.CrossEntropyLoss(ignore_index=PAD_IDX) optimizer = torch.optim.Adam(transformer.parameters(), lr=LEARNING_RATE, betas=(0.9, 0.98), eps=1e-9) print(f'The model has {count_parameters(transformer):,} trainable parameters') ###Output The model has 7,667,147 trainable parameters ###Markdown Train and Evaluate 
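The commented-out `lrate` lambda in Model Setup is the inverse-square-root warmup schedule of the original Transformer paper. A hedged sketch of enabling it with the already-imported `LambdaLR` is shown below; note that `LambdaLR` multiplies the optimizer's base learning rate by the lambda, so the Adam optimizer would have to be created with `lr=1.0` for the lambda to give the absolute rate, and `warmup_steps = 4000` is the value commented out in the hyper-parameter cell.
###Code
# sketch only: Noam-style warmup/decay via LambdaLR (assumes optimizer built with lr=1.0)
warmup_steps = 4000
lrate = lambda step: EMB_SIZE**-0.5 * min(max(step, 1)**-0.5, max(step, 1) * warmup_steps**-1.5)
noam_scheduler = LambdaLR(optimizer, lr_lambda=lrate)
# noam_scheduler.step() would then be called once per optimizer update inside the training loop
###Output _____no_output_____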
###Code train_loss_curve = [] val_loss_curve = [] min_val_loss = 999 steps = 1 a = (torch.triu(torch.ones((1, 1))) == 1).transpose(0, 1) torch.save(transformer, model_name+"-best.pth.tar") for epoch in range(1, NUM_EPOCHS+1): start_time = time.time() train_loss = train(transformer, train_iter, optimizer, loss_fn, device) end_time = time.time() val_loss = evaluate(transformer, valid_iter, loss_fn, device) # scheduler.step() if val_loss < min_val_loss: min_val_loss = val_loss transformer.eval() torch.save(transformer, model_name+"-best.pth.tar") if epoch % 30 == 0: transformer.eval() torch.save(transformer, model_name+"-ckpt-"+str(epoch)+".pth.tar") train_loss_curve.append(train_loss) val_loss_curve.append(val_loss) print((f"Epoch: {epoch}, Train loss: {train_loss:.3f}, Val loss: {val_loss:.3f}, Epoch time = {(end_time - start_time):.3f}s")) print("min val loss:",min_val_loss) plt.plot(train_loss_curve) plt.plot(val_loss_curve) plt.grid() plt.xlabel("Epoch") plt.ylabel("Loss") plt.legend(("train loss","val loss")) plt.savefig("./images/" + model_name.split(sep="/")[-1] + ".png") plt.show() translate(transformer, "eine gruppe von menschen steht vor einem iglu .", de_vocab, en_vocab,de_tokenizer, BOS_IDX, EOS_IDX, device) ###Output _____no_output_____ ###Markdown Save the Model ###Code transformer.eval() torch.save(transformer, model_name + ".pth.tar") ###Output _____no_output_____ ###Markdown Calculate the BLEU Score ###Code '''load reference''' with open(test_filepaths[0], 'r', encoding='utf8') as f: test_data_ = f.readlines() '''make predictions''' predictions = [] for data in test_data_: temp_trans = translate(transformer, data.lower(), de_vocab, en_vocab, de_tokenizer, BOS_IDX, EOS_IDX, device) predictions.append(temp_trans[1:-3]+" . \n") '''update predictions.txt''' with open("predictions.txt",'w+') as f: f.writelines(predictions) '''eliminate <unk>''' # for i,pre in enumerate(predictions): # predictions[i] = pre.replace("<unk>"," ") # '''update predictions.txt''' # with open("predictions.txt",'w+') as f: # f.writelines(predictions) ! perl ./multi-bleu.perl -lc reference.txt < predictions.txt with open(model_name + ".txt",'w+') as f: f.writelines(predictions) from torchtext.data.metrics import bleu_score references_corpus = [] candidate_corpus = [] '''update reference.txt''' '''update reference.txt''' with open(test_filepaths[1], 'r', encoding='utf8') as f: reference = f.readlines() for i in range(len(reference)): reference[i] = " ".join(en_tokenizer(reference[i])).lower() for pred,ref in zip(predictions, reference): temp = pred.rstrip(" \n").split(" ") candidate_corpus.append(temp) temp = ref.rstrip(" \n").split(" ") references_corpus.append([temp]) bleu_torchtext = bleu_score(candidate_corpus, references_corpus) print(f'BLEU score = {bleu_torchtext*100:.2f}') ###Output _____no_output_____
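###Markdown Since the lowest-validation-loss weights were saved above as a whole module with `torch.save`, they can be reloaded and used for translation; a short sketch (inference only):
###Code
# reload the best checkpoint and translate one sentence with it
best_model = torch.load(model_name + "-best.pth.tar", map_location=device)
best_model.eval()
print(translate(best_model, "eine gruppe von menschen steht vor einem iglu .",
                de_vocab, en_vocab, de_tokenizer, BOS_IDX, EOS_IDX, device))
###Output _____no_output_____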
sklearn/LSTM Time Series Forecasting.ipynb
###Markdown LSTM Time Series ForecastingExamples of LSTM time series forecasting. Here are some articles if you are interested in learning more:* How to Develop LSTM Models for Time Series Forecasting Imports ###Code from numpy.random import seed from numpy.random import randn from numpy import array from math import sin, fabs, sqrt from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense,LSTM, ConvLSTM2D, Flatten import matplotlib.pyplot as plt %matplotlib inline ###Output _____no_output_____ ###Markdown Dataset GenerationWe generate a dataset to be used in the examples ###Code seed(1) no = 400 # Generate univariate observations x = [] y = [] for i in range(0,no): x.append(sin(i/15) * 20 + sin(i/3.5) * 5 + randn() * 2 + 30) y.append(i) # Plot plt.figure(figsize=(18,5)) plt.ylim(0,60) plt.scatter(y,x,s=6) plt.show() # Settings n_steps = 4 xt = x[n_steps:] yt = y[n_steps:] ###Output _____no_output_____ ###Markdown Useful functions ###Code def calc_error(xhat): # Calculate error mse = 0 mae = 0 for v,vhat in zip(xt, xhat): mae += fabs(v-vhat) mse += (v-vhat)**2 mae /= len(xt) mse /= len(xt) print("Result: mae={0:.3f}, mse={1:.3f}, rmse={2:.3f}".format(mae, mse, sqrt(mse))) def plot_result(xhat): # Plot plt.figure(figsize=(18,5)) plt.ylim(0,60) plt.plot(yt, xhat, c="red") plt.scatter(yt, xt, c="blue", s=6) plt.show() def predict(model): X_test = Xs.reshape((Xs.shape[0], Xs.shape[1], n_features)) xhat = model.predict(X_test).flatten() return xhat ###Output _____no_output_____ ###Markdown Data Preparation ###Code # split a univariate sequence into samples def split_sequence(sequence, n_steps): X, y = list(), list() for i in range(len(sequence)): # find the end of this pattern end_ix = i + n_steps # check if we are beyond the sequence if end_ix > len(sequence)-1: break # gather input and output parts of the pattern seq_x, seq_y = sequence[i:end_ix], sequence[end_ix] X.append(seq_x) y.append(seq_y) return array(X), array(y) # split into samples Xs, y = split_sequence(x, n_steps) # reshape from [samples, timesteps] into [samples, timesteps, features] n_features = 1 X = Xs.reshape((Xs.shape[0], Xs.shape[1], n_features)) ###Output _____no_output_____ ###Markdown Vanilla LSTM ###Code # define model model = Sequential() model.add(LSTM(50, activation='relu', input_shape=(n_steps, n_features))) model.add(Dense(1)) model.compile(optimizer='adam', loss='mse') # fit model model.fit(X, y, epochs=200, verbose=0) # make predictions xhat = predict(model) # Show results calc_error(xhat) plot_result(xhat) ###Output Result: mae=2.289, mse=8.054, rmse=2.838 ###Markdown Stacked LSTM ###Code # define model model = Sequential() model.add(LSTM(50, activation='relu', return_sequences=True, input_shape=(n_steps, n_features))) model.add(LSTM(50, activation='relu')) model.add(Dense(1)) model.compile(optimizer='adam', loss='mse') # fit model model.fit(X, y, epochs=200, verbose=0) # make predictions xhat = predict(model) # Show results calc_error(xhat) plot_result(xhat) ###Output Result: mae=2.238, mse=7.804, rmse=2.794 ###Markdown ConvLSTM ###Code n_seq = 2 n_step = 2 X = Xs.reshape((Xs.shape[0], n_seq, 1, n_step, n_features)) # define model model = Sequential() model.add(ConvLSTM2D(filters=64, kernel_size=(1,2), activation='relu', input_shape=(n_seq, 1, n_step, n_features))) model.add(Flatten()) model.add(Dense(1)) model.compile(optimizer='adam', loss='mse') # fit model model.fit(X, y, epochs=200, verbose=0) # make predictions xhat = model.predict(X).flatten() # Show results 
calc_error(xhat) plot_result(xhat) ###Output Result: mae=2.293, mse=8.033, rmse=2.834 ###Markdown Forecasting Evaluates how well forecasting works. ###Code Xs, y = split_sequence(x, n_steps) # reshape from [samples, timesteps] into [samples, timesteps, features] n_features = 1 X = Xs.reshape((Xs.shape[0], Xs.shape[1], n_features)) X_train = X[:300] y_train = y[:300] # define model model = Sequential() model.add(LSTM(50, activation='relu', input_shape=(n_steps, n_features))) model.add(Dense(1)) model.compile(optimizer='adam', loss='mse') # fit model model.fit(X_train, y_train, epochs=200, verbose=0) # Number of previous values to include start = 310 # Number of forecasts to evaluate no_f = 60 # Forecasts xhat = [] # Metrics mae = 0 mse = 0 # Iterate over number of forecasts for i in range(0, no_f): # Cut out input Xn = X[start+i-1:start+i] # Make a one-step forecast fval = model.predict(Xn).flatten()[0] xhat.append(fval) # Actual value aval = x[start+i] # Metrics mae += fabs(fval-aval) mse += (fval-aval)**2 # Metrics mae /= no_f mse /= no_f print("Result: mae={0:.3f}, mse={1:.3f}, rmse={2:.3f}".format(mae, mse, sqrt(mse))) # Plot plt.figure(figsize=(18,5)) plt.ylim(0,60) plt.plot(yt[start:start+no_f], xhat, c="red") plt.scatter(yt[start:start+no_f], x[start:start+no_f], c="blue", s=6) plt.show() ###Output Result: mae=1.388, mse=3.277, rmse=1.810
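###Markdown The loop above makes one-step forecasts, always feeding the model the true history. A common variation is recursive multi-step forecasting, where each prediction is appended to the input window and reused for the next step, so errors can accumulate over the horizon. A minimal sketch, assuming the fitted `model` and the variables defined above; it was not run for the results shown.
###Code
# Sketch of recursive multi-step forecasting: predictions are fed back as inputs.
window = list(x[start - n_steps:start])  # seed with the last observed values before 'start'
recursive_preds = []
for _ in range(no_f):
    Xn = array(window[-n_steps:]).reshape((1, n_steps, n_features))
    yhat = model.predict(Xn, verbose=0).flatten()[0]
    recursive_preds.append(yhat)
    window.append(yhat)  # reuse the prediction as the newest input value
###Output _____no_output_____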
Probabilistic_Models/NLP_C2_probability_models_W1_Assignment_autocorrect.ipynb
###Markdown Assignment 1: Auto CorrectWelcome to the first assignment of Course 2. This assignment will give you a chance to brush up on your python and probability skills. In doing so, you will implement an auto-correct system that is very effective and useful. Outline- [0. Overview](0) - [0.1 Edit Distance](0-1)- [1. Data Preprocessing](1) - [1.1 Exercise 1](ex-1) - [1.2 Exercise 2](ex-2) - [1.3 Exercise 3](ex-3)- [2. String Manipulation](2) - [2.1 Exercise 4](ex-4) - [2.2 Exercise 5](ex-5) - [2.3 Exercise 6](ex-6) - [2.4 Exercise 7](ex-7)- [3. Combining the edits](3) - [3.1 Exercise 8](ex-8) - [3.2 Exercise 9](ex-9) - [3.3 Exercise 10](ex-10)- [4. Minimum Edit Distance](4) - [4.1 Exercise 11](ex-11)- [5. Backtrace (Optional)](5) 0. OverviewYou use autocorrect every day on your cell phone and computer. In this assignment, you will explore what really goes on behind the scenes. Of course, the model you are about to implement is not identical to the one used in your phone, but it is still quite good. By completing this assignment you will learn how to: - Get a word count given a corpus- Get a word probability in the corpus - Manipulate strings - Filter strings - Implement Minimum edit distance to compare strings and to help find the optimal path for the edits. - Understand how dynamic programming worksSimilar systems are used everywhere. - For example, if you type in the word **"I am lerningg"**, chances are very high that you meant to write **"learning"**, as shown in **Figure 1**. Figure 1 0.1 Edit DistanceIn this assignment, you will implement models that correct words that are 1 and 2 edit distances away. - We say two words are n edit distance away from each other when we need n edits to change one word into another. An edit could consist of one of the following options: - Delete (remove a letter): ‘hat’ => ‘at, ha, ht’- Switch (swap 2 adjacent letters): ‘eta’ => ‘eat, tea,...’- Replace (change 1 letter to another): ‘jat’ => ‘hat, rat, cat, mat, ...’- Insert (add a letter): ‘te’ => ‘the, ten, ate, ...’You will be using the four methods above to implement an Auto-correct. - To do so, you will need to compute probabilities that a certain word is correct given an input. This auto-correct you are about to implement was first created by [Peter Norvig](https://en.wikipedia.org/wiki/Peter_Norvig) in 2007. - His [original article](https://norvig.com/spell-correct.html) may be a useful reference for this assignment.The goal of our spell check model is to compute the following probability:$$P(c|w) = \frac{P(w|c)\times P(c)}{P(w)} \tag{Eqn-1}$$The equation above is [Bayes Rule](https://en.wikipedia.org/wiki/Bayes%27_theorem). - Equation 1 says that the probability of a word being correct $P(c|w) $is equal to the probability of having a certain word $w$, given that it is correct $P(w|c)$, multiplied by the probability of being correct in general $P(C)$ divided by the probability of that word $w$ appearing $P(w)$ in general.- To compute equation 1, you will first import a data set and then create all the probabilities that you need using that data set. Part 1: Data Preprocessing ###Code import re from collections import Counter import numpy as np import pandas as pd ###Output _____no_output_____ ###Markdown As in any other machine learning task, the first thing you have to do is process your data set. - Many courses load in pre-processed data for you. 
- However, in the real world, when you build these NLP systems, you load the datasets and process them.- So let's get some real world practice in pre-processing the data!Your first task is to read in a file called **'shakespeare.txt'** which is found in your file directory. To look at this file you can go to `File ==> Open `. Exercise 1Implement the function `process_data` which 1) Reads in a corpus (text file)2) Changes everything to lowercase3) Returns a list of words. Options and Hints- If you would like more of a real-life practice, don't open the 'Hints' below (yet) and try searching the web to derive your answer.- If you want a little help, click on the green "General Hints" section by clicking on it with your mouse.- If you get stuck or are not getting the expected results, click on the green 'Detailed Hints' section to get hints for each step that you'll take to complete this function. General Hints General Hints to get started Python input and output Python 're' documentation Detailed Hints Detailed hints if you're stuck Use 'with' syntax to read a file Decide whether to use 'read()' or 'readline(). What's the difference? Choose whether to use either str.lower() or str.lowercase(). What is the difference? Use re.findall(pattern, string) Look for the "Raw String Notation" section in the Python 're' documentation to understand the difference between r'\W', r'\W' and '\\W'. For the pattern, decide between using '\s', '\w', '\s+' or '\w+'. What do you think are the differences? ###Code # UNQ_C1 (UNIQUE CELL IDENTIFIER, DO NOT EDIT) # GRADED FUNCTION: process_data def process_data(file_name): """ Input: A file_name which is found in your current directory. You just have to read it in. Output: words: a list containing all the words in the corpus (text file you read) in lower case. """ words = [] # return this variable correctly ### START CODE HERE ### with open(file_name,'r') as file: for line in file: wordlist=re.findall(r"\w+", line) for word in wordlist: words.append(word.lower()) ### END CODE HERE ### return words ###Output _____no_output_____ ###Markdown Note, in the following cell, 'words' is converted to a python `set`. This eliminates any duplicate entries. ###Code #DO NOT MODIFY THIS CELL word_l = process_data('shakespeare.txt') vocab = set(word_l) # this will be your new vocabulary print(f"The first ten words in the text are: \n{word_l[0:10]}") print(f"There are {len(vocab)} unique words in the vocabulary.") ###Output The first ten words in the text are: ['o', 'for', 'a', 'muse', 'of', 'fire', 'that', 'would', 'ascend', 'the'] There are 6116 unique words in the vocabulary. ###Markdown Expected Output```PythonThe first ten words in the text are: ['o', 'for', 'a', 'muse', 'of', 'fire', 'that', 'would', 'ascend', 'the']There are 6116 unique words in the vocabulary.``` Exercise 2Implement a `get_count` function that returns a dictionary- The dictionary's keys are words- The value for each word is the number of times that word appears in the corpus. For example, given the following sentence: **"I am happy because I am learning"**, your dictionary should return the following: Key Value I 2 am 2 happy 1 because 1 learning 1 **Instructions**: Implement a `get_count` which returns a dictionary where the key is a word and the value is the number of times the word appears in the list. Hints Try implementing this using a for loop and a regular dictionary. 
This may be good practice for similar coding interview questions You can also use defaultdict instead of a regualr dictionary, along with the for loop Otherwise, to skip using a for loop, you can use Python's Counter class ###Code # UNQ_C2 (UNIQUE CELL IDENTIFIER, DO NOT EDIT) # UNIT TEST COMMENT: Candidate for Table Driven Tests # GRADED FUNCTION: get_count def get_count(word_l): ''' Input: word_l: a set of words representing the corpus. Output: word_count_dict: The wordcount dictionary where key is the word and value is its frequency. ''' word_count_dict = {} # fill this with word counts ### START CODE HERE for word in word_l: word_count_dict[word]=(word_count_dict[word]+1 if word in word_count_dict.keys() else 1) ### END CODE HERE ### return word_count_dict #DO NOT MODIFY THIS CELL word_count_dict = get_count(word_l) print(f"There are {len(word_count_dict)} key values pairs") print(f"The count for the word 'thee' is {word_count_dict.get('thee',0)}") ###Output There are 6116 key values pairs The count for the word 'thee' is 240 ###Markdown Expected Output```PythonThere are 6116 key values pairsThe count for the word 'thee' is 240``` Exercise 3Given the dictionary of word counts, compute the probability that each word will appear if randomly selected from the corpus of words.$$P(w_i) = \frac{C(w_i)}{M} \tag{Eqn-2}$$where $C(w_i)$ is the total number of times $w_i$ appears in the corpus.$M$ is the total number of words in the corpus.For example, the probability of the word 'am' in the sentence **'I am happy because I am learning'** is:$$P(am) = \frac{C(w_i)}{M} = \frac {2}{7} \tag{Eqn-3}.$$**Instructions:** Implement `get_probs` function which gives you the probability that a word occurs in a sample. This returns a dictionary where the keys are words, and the value for each word is its probability in the corpus of words. HintsGeneral advice Use dictionary.values() Use sum() The cardinality (number of words in the corpus should be equal to len(word_l). You will calculate this same number, but using the word count dictionary. If you're using a for loop: Use dictionary.keys() If you're using a dictionary comprehension: Use dictionary.items() ###Code # UNQ_C3 (UNIQUE CELL IDENTIFIER, DO NOT EDIT) # GRADED FUNCTION: get_probs def get_probs(word_count_dict): ''' Input: word_count_dict: The wordcount dictionary where key is the word and value is its frequency. Output: probs: A dictionary where keys are the words and the values are the probability that a word will occur. ''' probs = {} # return this variable correctly ### START CODE HERE ### M=0 for word in word_count_dict.keys(): M=M+word_count_dict[word] for word in word_count_dict.keys(): probs[word]=word_count_dict[word]/M ### END CODE HERE ### return probs #DO NOT MODIFY THIS CELL probs = get_probs(word_count_dict) print(f"Length of probs is {len(probs)}") print(f"P('thee') is {probs['thee']:.4f}") ###Output Length of probs is 6116 P('thee') is 0.0045 ###Markdown Expected Output```PythonLength of probs is 6116P('thee') is 0.0045``` Part 2: String ManipulationsNow, that you have computed $P(w_i)$ for all the words in the corpus, you will write a few functions to manipulate strings so that you can edit the erroneous strings and return the right spellings of the words. In this section, you will implement four functions: * `delete_letter`: given a word, it returns all the possible strings that have **one character removed**. 
* `switch_letter`: given a word, it returns all the possible strings that have **two adjacent letters switched**.* `replace_letter`: given a word, it returns all the possible strings that have **one character replaced by another different letter**.* `insert_letter`: given a word, it returns all the possible strings that have an **additional character inserted**. List comprehensionsString and list manipulation in python will often make use of a python feature called [list comprehensions](https://docs.python.org/3/tutorial/datastructures.htmllist-comprehensions). The routines below will be described as using list comprehensions, but if you would rather implement them in another way, you are free to do so as long as the result is the same. Further, the following section will provide detailed instructions on how to use list comprehensions and how to implement the desired functions. If you are a python expert, feel free to skip the python hints and move to implementing the routines directly. Python List Comprehensions embed a looping structure inside of a list declaration, collapsing many lines of code into a single line. If you are not familiar with them, they seem slightly out of order relative to for loops. Figure 2 The diagram above shows that the components of a list comprehension are the same components you would find in a typical for loop that appends to a list, but in a different order. With that in mind, we'll continue the specifics of this assignment. We will be very descriptive for the first function, `deletes()`, and less so in later functions as you become familiar with list comprehensions. Exercise 4**Instructions for delete_letter():** Implement a `delete_letter()` function that, given a word, returns a list of strings with one character deleted. For example, given the word **nice**, it would return the set: {'ice', 'nce', 'nic', 'nie'}. **Step 1:** Create a list of 'splits'. This is all the ways you can split a word into Left and Right: For example, 'nice is split into : `[('', 'nice'), ('n', 'ice'), ('ni', 'ce'), ('nic', 'e'), ('nice', '')]`This is common to all four functions (delete, replace, switch, insert). Figure 3 **Step 2:** This is specific to `delete_letter`. Here, we are generating all words that result from deleting one character. This can be done in a single line with a list comprehension. You can make use of this type of syntax: `[f(a,b) for a, b in splits if condition]` For our 'nice' example you get: ['ice', 'nce', 'nie', 'nic'] Figure 4 Levels of assistanceTry this exercise with these levels of assistance. - We hope that this will make it both a meaningful experience but also not a frustrating experience. - Start with level 1, then move onto level 2, and 3 as needed. - Level 1. Try to think this through and implement this yourself. - Level 2. Click on the "Level 2 Hints" section for some hints to get started. - Level 3. If you would prefer more guidance, please click on the "Level 3 Hints" cell for step by step instructions. - If you are still stuck, look at the images in the "list comprehensions" section above. Level 2 Hints Use array slicing like my_string[0:2] Use list comprehensions or for loops Level 3 Hints splits: Use array slicing, like my_str[0:2], to separate a string into two pieces. Do this in a loop or list comprehension, so that you have a list of tuples. For example, "cake" can get split into "ca" and "ke". They're stored in a tuple ("ca","ke"), and the tuple is appended to a list. 
We'll refer to these as L and R, so the tuple is (L,R) When choosing the range for your loop, if you input the word "cans" and generate the tuple ('cans',''), make sure to include an if statement to check the length of that right-side string (R) in the tuple (L,R) deletes: Go through the list of tuples and combine the two strings together. You can use the + operator to combine two strings When combining the tuples, make sure that you leave out a middle character. Use array slicing to leave out the first character of the right substring. ###Code # UNQ_C4 (UNIQUE CELL IDENTIFIER, DO NOT EDIT) # UNIT TEST COMMENT: Candidate for Table Driven Tests # GRADED FUNCTION: deletes def delete_letter(word, verbose=False): ''' Input: word: the string/word for which you will generate all possible words in the vocabulary which have 1 missing character Output: delete_l: a list of all possible strings obtained by deleting 1 character from word ''' delete_l = [] split_l = [] ### START CODE HERE ### split_l=[(word[:i],word[i:]) for i in range(len(word)+1)] delete_l=[L+R[1:] for (L,R) in split_l if R] ### END CODE HERE ### if verbose: print(f"input word {word}, \nsplit_l = {split_l}, \ndelete_l = {delete_l}") return delete_l delete_word_l = delete_letter(word="cans", verbose=True) ###Output input word cans, split_l = [('', 'cans'), ('c', 'ans'), ('ca', 'ns'), ('can', 's'), ('cans', '')], delete_l = ['ans', 'cns', 'cas', 'can'] ###Markdown Expected Output```CPPNote: You might get a slightly different result with split_linput word cans, split_l = [('', 'cans'), ('c', 'ans'), ('ca', 'ns'), ('can', 's')], delete_l = ['ans', 'cns', 'cas', 'can']``` Note 1- Notice how it has the extra tuple `('cans', '')`.- This will be fine as long as you have checked the size of the right-side substring in tuple (L,R).- Can you explain why this will give you the same result for the list of deletion strings (delete_l)?```CPPinput word cans, split_l = [('', 'cans'), ('c', 'ans'), ('ca', 'ns'), ('can', 's'), ('cans', '')], delete_l = ['ans', 'cns', 'cas', 'can']``` Note 2If you end up getting the same word as your input word, like this:```Pythoninput word cans, split_l = [('', 'cans'), ('c', 'ans'), ('ca', 'ns'), ('can', 's'), ('cans', '')], delete_l = ['ans', 'cns', 'cas', 'can', 'cans']```- Check how you set the `range`.- See if you check the length of the string on the right-side of the split. ###Code # test # 2 print(f"Number of outputs of delete_letter('at') is {len(delete_letter('at'))}") ###Output Number of outputs of delete_letter('at') is 2 ###Markdown Expected output```CPPNumber of outputs of delete_letter('at') is 2``` Exercise 5**Instructions for switch_letter()**: Now implement a function that switches two letters in a word. It takes in a word and returns a list of all the possible switches of two letters **that are adjacent to each other**. - For example, given the word 'eta', it returns {'eat', 'tea'}, but does not return 'ate'.**Step 1:** is the same as in delete_letter() **Step 2:** A list comprehension or for loop which forms strings by swapping adjacent letters. This is of the form: `[f(L,R) for L, R in splits if condition]` where 'condition' will test the length of R in a given iteration. See below. Figure 5 Levels of difficultyTry this exercise with these levels of difficulty. - Level 1. Try to think this through and implement this yourself.- Level 2. Click on the "Level 2 Hints" section for some hints to get started.- Level 3. 
If you would prefer more guidance, please click on the "Level 3 Hints" cell for step by step instructions. Level 2 Hints Use array slicing like my_string[0:2] Use list comprehensions or for loops To do a switch, think of the whole word as divided into 4 distinct parts. Write out 'cupcakes' on a piece of paper and see how you can split it into ('cupc', 'k', 'a', 'es') Level 3 Hints splits: Use array slicing, like my_str[0:2], to separate a string into two pieces. Splitting is the same as for delete_letter To perform the switch, go through the list of tuples and combine four strings together. You can use the + operator to combine strings The four strings will be the left substring from the split tuple, followed by the first (index 1) character of the right substring, then the zero-th character (index 0) of the right substring, and then the remaining part of the right substring. Unlike delete_letter, you will want to check that your right substring is at least a minimum length. To see why, review the previous hint bullet point (directly before this one). ###Code # UNQ_C5 (UNIQUE CELL IDENTIFIER, DO NOT EDIT) # UNIT TEST COMMENT: Candidate for Table Driven Tests # GRADED FUNCTION: switches def switch_letter(word, verbose=False): ''' Input: word: input string Output: switches: a list of all possible strings with one adjacent charater switched ''' switch_l = [] split_l = [] ### START CODE HERE ### split_l=[(word[:i],word[i:]) for i in range(len(word)+1)] switch_l= [L+R[1]+R[0]+R[2:] for (L,R) in split_l if len(R)>1] ### END CODE HERE ### if verbose: print(f"Input word = {word} \nsplit_l = {split_l} \nswitch_l = {switch_l}") return switch_l switch_word_l = switch_letter(word="eta", verbose=True) ###Output Input word = eta split_l = [('', 'eta'), ('e', 'ta'), ('et', 'a'), ('eta', '')] switch_l = ['tea', 'eat'] ###Markdown Expected output```PythonInput word = eta split_l = [('', 'eta'), ('e', 'ta'), ('et', 'a')] switch_l = ['tea', 'eat']``` Note 1You may get this:```PythonInput word = eta split_l = [('', 'eta'), ('e', 'ta'), ('et', 'a'), ('eta', '')] switch_l = ['tea', 'eat']```- Notice how it has the extra tuple `('eta', '')`.- This is also correct.- Can you think of why this is the case? Note 2If you get an error```PythonIndexError: string index out of range```- Please see if you have checked the length of the strings when switching characters. ###Code # test # 2 print(f"Number of outputs of switch_letter('at') is {len(switch_letter('at'))}") ###Output Number of outputs of switch_letter('at') is 1 ###Markdown Expected output```CPPNumber of outputs of switch_letter('at') is 1``` Exercise 6**Instructions for replace_letter()**: Now implement a function that takes in a word and returns a list of strings with one **replaced letter** from the original word. **Step 1:** is the same as in `delete_letter()`**Step 2:** A list comprehension or for loop which form strings by replacing letters. This can be of the form: `[f(a,b,c) for a, b in splits if condition for c in string]` Note the use of the second for loop. It is expected in this routine that one or more of the replacements will include the original word. For example, replacing the first letter of 'ear' with 'e' will return 'ear'.**Step 3:** Remove the original input letter from the output. Hints To remove a word from a list, first store its contents inside a set() Use set.discard('the_word') to remove a word in a set (if the word does not exist in the set, then it will not throw a KeyError. 
Using set.remove('the_word') throws a KeyError if the word does not exist in the set. ###Code # UNQ_C6 (UNIQUE CELL IDENTIFIER, DO NOT EDIT) # UNIT TEST COMMENT: Candidate for Table Driven Tests # GRADED FUNCTION: replaces def replace_letter(word, verbose=False): ''' Input: word: the input string/word Output: replaces: a list of all possible strings where we replaced one letter from the original word. ''' letters = 'abcdefghijklmnopqrstuvwxyz' replace_l = [] split_l = [] ### START CODE HERE ### split_l=[(word[:i],word[i:]) for i in range(len(word)+1)] replace_set=set([L+c+R[1:] for (L,R) in split_l if R for c in letters]) ### END CODE HERE ### # turn the set back into a list and sort it, for easier viewing replace_l = sorted(list(replace_set)) if verbose: print(f"Input word = {word} \nsplit_l = {split_l} \nreplace_l {replace_l}") return replace_l replace_l = replace_letter(word='can', verbose=True) ###Output Input word = can split_l = [('', 'can'), ('c', 'an'), ('ca', 'n'), ('can', '')] replace_l ['aan', 'ban', 'caa', 'cab', 'cac', 'cad', 'cae', 'caf', 'cag', 'cah', 'cai', 'caj', 'cak', 'cal', 'cam', 'can', 'cao', 'cap', 'caq', 'car', 'cas', 'cat', 'cau', 'cav', 'caw', 'cax', 'cay', 'caz', 'cbn', 'ccn', 'cdn', 'cen', 'cfn', 'cgn', 'chn', 'cin', 'cjn', 'ckn', 'cln', 'cmn', 'cnn', 'con', 'cpn', 'cqn', 'crn', 'csn', 'ctn', 'cun', 'cvn', 'cwn', 'cxn', 'cyn', 'czn', 'dan', 'ean', 'fan', 'gan', 'han', 'ian', 'jan', 'kan', 'lan', 'man', 'nan', 'oan', 'pan', 'qan', 'ran', 'san', 'tan', 'uan', 'van', 'wan', 'xan', 'yan', 'zan'] ###Markdown Expected Output**: ```PythonInput word = can split_l = [('', 'can'), ('c', 'an'), ('ca', 'n')] replace_l ['aan', 'ban', 'caa', 'cab', 'cac', 'cad', 'cae', 'caf', 'cag', 'cah', 'cai', 'caj', 'cak', 'cal', 'cam', 'cao', 'cap', 'caq', 'car', 'cas', 'cat', 'cau', 'cav', 'caw', 'cax', 'cay', 'caz', 'cbn', 'ccn', 'cdn', 'cen', 'cfn', 'cgn', 'chn', 'cin', 'cjn', 'ckn', 'cln', 'cmn', 'cnn', 'con', 'cpn', 'cqn', 'crn', 'csn', 'ctn', 'cun', 'cvn', 'cwn', 'cxn', 'cyn', 'czn', 'dan', 'ean', 'fan', 'gan', 'han', 'ian', 'jan', 'kan', 'lan', 'man', 'nan', 'oan', 'pan', 'qan', 'ran', 'san', 'tan', 'uan', 'van', 'wan', 'xan', 'yan', 'zan']```- Note how the input word 'can' should not be one of the output words. Note 1If you get something like this:```PythonInput word = can split_l = [('', 'can'), ('c', 'an'), ('ca', 'n'), ('can', '')] replace_l ['aan', 'ban', 'caa', 'cab', 'cac', 'cad', 'cae', 'caf', 'cag', 'cah', 'cai', 'caj', 'cak', 'cal', 'cam', 'cao', 'cap', 'caq', 'car', 'cas', 'cat', 'cau', 'cav', 'caw', 'cax', 'cay', 'caz', 'cbn', 'ccn', 'cdn', 'cen', 'cfn', 'cgn', 'chn', 'cin', 'cjn', 'ckn', 'cln', 'cmn', 'cnn', 'con', 'cpn', 'cqn', 'crn', 'csn', 'ctn', 'cun', 'cvn', 'cwn', 'cxn', 'cyn', 'czn', 'dan', 'ean', 'fan', 'gan', 'han', 'ian', 'jan', 'kan', 'lan', 'man', 'nan', 'oan', 'pan', 'qan', 'ran', 'san', 'tan', 'uan', 'van', 'wan', 'xan', 'yan', 'zan']```- Notice how split_l has an extra tuple `('can', '')`, but the output is still the same, so this is okay. 
Note 2If you get something like this:```PythonInput word = can split_l = [('', 'can'), ('c', 'an'), ('ca', 'n'), ('can', '')] replace_l ['aan', 'ban', 'caa', 'cab', 'cac', 'cad', 'cae', 'caf', 'cag', 'cah', 'cai', 'caj', 'cak', 'cal', 'cam', 'cana', 'canb', 'canc', 'cand', 'cane', 'canf', 'cang', 'canh', 'cani', 'canj', 'cank', 'canl', 'canm', 'cann', 'cano', 'canp', 'canq', 'canr', 'cans', 'cant', 'canu', 'canv', 'canw', 'canx', 'cany', 'canz', 'cao', 'cap', 'caq', 'car', 'cas', 'cat', 'cau', 'cav', 'caw', 'cax', 'cay', 'caz', 'cbn', 'ccn', 'cdn', 'cen', 'cfn', 'cgn', 'chn', 'cin', 'cjn', 'ckn', 'cln', 'cmn', 'cnn', 'con', 'cpn', 'cqn', 'crn', 'csn', 'ctn', 'cun', 'cvn', 'cwn', 'cxn', 'cyn', 'czn', 'dan', 'ean', 'fan', 'gan', 'han', 'ian', 'jan', 'kan', 'lan', 'man', 'nan', 'oan', 'pan', 'qan', 'ran', 'san', 'tan', 'uan', 'van', 'wan', 'xan', 'yan', 'zan']```- Notice how there are strings that are 1 letter longer than the original word, such as `cana`.- Please check for the case when there is an empty string `''`, and if so, do not use that empty string when setting replace_l. ###Code # test # 2 print(f"Number of outputs of switch_letter('at') is {len(switch_letter('at'))}") ###Output Number of outputs of switch_letter('at') is 1 ###Markdown Expected output```CPPNumber of outputs of switch_letter('at') is 1``` Exercise 7**Instructions for insert_letter()**: Now implement a function that takes in a word and returns a list with a letter inserted at every offset.**Step 1:** is the same as in `delete_letter()`**Step 2:** This can be a list comprehension of the form: `[f(a,b,c) for a, b in splits if condition for c in string]` ###Code # UNQ_C7 (UNIQUE CELL IDENTIFIER, DO NOT EDIT) # UNIT TEST COMMENT: Candidate for Table Driven Tests # GRADED FUNCTION: inserts def insert_letter(word, verbose=False): ''' Input: word: the input string/word Output: inserts: a set of all possible strings with one new letter inserted at every offset ''' letters = 'abcdefghijklmnopqrstuvwxyz' insert_l = [] split_l = [] ### START CODE HERE ### split_l=[(word[:i],word[i:]) for i in range(len(word)+1)] insert_l=[L+c+R for (L,R) in split_l for c in letters] ### END CODE HERE ### if verbose: print(f"Input word {word} \nsplit_l = {split_l} \ninsert_l = {insert_l}") return insert_l insert_l = insert_letter('at', True) print(f"Number of strings output by insert_letter('at') is {len(insert_l)}") ###Output Input word at split_l = [('', 'at'), ('a', 't'), ('at', '')] insert_l = ['aat', 'bat', 'cat', 'dat', 'eat', 'fat', 'gat', 'hat', 'iat', 'jat', 'kat', 'lat', 'mat', 'nat', 'oat', 'pat', 'qat', 'rat', 'sat', 'tat', 'uat', 'vat', 'wat', 'xat', 'yat', 'zat', 'aat', 'abt', 'act', 'adt', 'aet', 'aft', 'agt', 'aht', 'ait', 'ajt', 'akt', 'alt', 'amt', 'ant', 'aot', 'apt', 'aqt', 'art', 'ast', 'att', 'aut', 'avt', 'awt', 'axt', 'ayt', 'azt', 'ata', 'atb', 'atc', 'atd', 'ate', 'atf', 'atg', 'ath', 'ati', 'atj', 'atk', 'atl', 'atm', 'atn', 'ato', 'atp', 'atq', 'atr', 'ats', 'att', 'atu', 'atv', 'atw', 'atx', 'aty', 'atz'] Number of strings output by insert_letter('at') is 78 ###Markdown Expected output```PythonInput word at split_l = [('', 'at'), ('a', 't'), ('at', '')] insert_l = ['aat', 'bat', 'cat', 'dat', 'eat', 'fat', 'gat', 'hat', 'iat', 'jat', 'kat', 'lat', 'mat', 'nat', 'oat', 'pat', 'qat', 'rat', 'sat', 'tat', 'uat', 'vat', 'wat', 'xat', 'yat', 'zat', 'aat', 'abt', 'act', 'adt', 'aet', 'aft', 'agt', 'aht', 'ait', 'ajt', 'akt', 'alt', 'amt', 'ant', 'aot', 'apt', 'aqt', 'art', 'ast', 'att', 'aut', 'avt', 'awt', 'axt', 'ayt', 
'azt', 'ata', 'atb', 'atc', 'atd', 'ate', 'atf', 'atg', 'ath', 'ati', 'atj', 'atk', 'atl', 'atm', 'atn', 'ato', 'atp', 'atq', 'atr', 'ats', 'att', 'atu', 'atv', 'atw', 'atx', 'aty', 'atz']Number of strings output by insert_letter('at') is 78``` Note 1If you get a split_l like this:```PythonInput word at split_l = [('', 'at'), ('a', 't')] insert_l = ['aat', 'bat', 'cat', 'dat', 'eat', 'fat', 'gat', 'hat', 'iat', 'jat', 'kat', 'lat', 'mat', 'nat', 'oat', 'pat', 'qat', 'rat', 'sat', 'tat', 'uat', 'vat', 'wat', 'xat', 'yat', 'zat', 'aat', 'abt', 'act', 'adt', 'aet', 'aft', 'agt', 'aht', 'ait', 'ajt', 'akt', 'alt', 'amt', 'ant', 'aot', 'apt', 'aqt', 'art', 'ast', 'att', 'aut', 'avt', 'awt', 'axt', 'ayt', 'azt']Number of strings output by insert_letter('at') is 52```- Notice that split_l is missing the extra tuple ('at', ''). For insertion, we actually **WANT** this tuple.- The function is not creating all the desired output strings.- Check the range that you use for the for loop. Note 2If you see this:```PythonInput word at split_l = [('', 'at'), ('a', 't'), ('at', '')] insert_l = ['aat', 'bat', 'cat', 'dat', 'eat', 'fat', 'gat', 'hat', 'iat', 'jat', 'kat', 'lat', 'mat', 'nat', 'oat', 'pat', 'qat', 'rat', 'sat', 'tat', 'uat', 'vat', 'wat', 'xat', 'yat', 'zat', 'aat', 'abt', 'act', 'adt', 'aet', 'aft', 'agt', 'aht', 'ait', 'ajt', 'akt', 'alt', 'amt', 'ant', 'aot', 'apt', 'aqt', 'art', 'ast', 'att', 'aut', 'avt', 'awt', 'axt', 'ayt', 'azt']Number of strings output by insert_letter('at') is 52```- Even though you may have fixed the split_l so that it contains the tuple `('at', '')`, notice that you're still missing some output strings. - Notice that it's missing strings such as 'ata', 'atb', 'atc' all the way to 'atz'.- To fix this, make sure that when you set insert_l, you allow the use of the empty string `''`. ###Code # test # 2 print(f"Number of outputs of insert_letter('at') is {len(insert_letter('at'))}") ###Output Number of outputs of insert_letter('at') is 78 ###Markdown Expected output```CPPNumber of outputs of insert_letter('at') is 78``` Part 3: Combining the editsNow that you have implemented the string manipulations, you will create two functions that, given a string, will return all the possible single and double edits on that string. These will be `edit_one_letter()` and `edit_two_letters()`. 3.1 Edit one letter Exercise 8**Instructions**: Implement the `edit_one_letter` function to get all the possible edits that are one edit away from a word. The edits consist of the replace, insert, delete, and optionally the switch operation. You should use the previous functions you have already implemented to complete this function. The 'switch' function is a less common edit function, so its use will be selected by an "allow_switches" input argument.Note that those functions return *lists* while this function should return a *python set*. Utilizing a set eliminates any duplicate entries. Hints Each of the functions returns a list. You can combine lists using the `+` operator. To get unique strings (avoid duplicates), you can use the set() function. ###Code # UNQ_C8 (UNIQUE CELL IDENTIFIER, DO NOT EDIT) # UNIT TEST COMMENT: Candidate for Table Driven Tests # GRADED FUNCTION: edit_one_letter def edit_one_letter(word, allow_switches = True): """ Input: word: the string/word for which we will generate all possible wordsthat are one edit away. Output: edit_one_set: a set of words with one possible edit. Please return a set. and not a list. 
""" edit_one_set = set() ### START CODE HERE ### edit_one_list=[] edit_one_list=delete_letter(word) edit_one_list+=replace_letter(word) edit_one_list+=insert_letter(word) if allow_switches: edit_one_list+=switch_letter(word) if word in edit_one_list: edit_one_list.remove(word) edit_one_set=set(edit_one_list) ### END CODE HERE ### return edit_one_set tmp_word = "at" tmp_edit_one_set = edit_one_letter(tmp_word) # turn this into a list to sort it, in order to view it tmp_edit_one_l = sorted(list(tmp_edit_one_set)) print(f"input word {tmp_word} \nedit_one_l \n{tmp_edit_one_l}\n") print(f"The type of the returned object should be a set {type(tmp_edit_one_set)}") print(f"Number of outputs from edit_one_letter('at') is {len(edit_one_letter('at'))}") ###Output input word at edit_one_l ['a', 'aa', 'aat', 'ab', 'abt', 'ac', 'act', 'ad', 'adt', 'ae', 'aet', 'af', 'aft', 'ag', 'agt', 'ah', 'aht', 'ai', 'ait', 'aj', 'ajt', 'ak', 'akt', 'al', 'alt', 'am', 'amt', 'an', 'ant', 'ao', 'aot', 'ap', 'apt', 'aq', 'aqt', 'ar', 'art', 'as', 'ast', 'ata', 'atb', 'atc', 'atd', 'ate', 'atf', 'atg', 'ath', 'ati', 'atj', 'atk', 'atl', 'atm', 'atn', 'ato', 'atp', 'atq', 'atr', 'ats', 'att', 'atu', 'atv', 'atw', 'atx', 'aty', 'atz', 'au', 'aut', 'av', 'avt', 'aw', 'awt', 'ax', 'axt', 'ay', 'ayt', 'az', 'azt', 'bat', 'bt', 'cat', 'ct', 'dat', 'dt', 'eat', 'et', 'fat', 'ft', 'gat', 'gt', 'hat', 'ht', 'iat', 'it', 'jat', 'jt', 'kat', 'kt', 'lat', 'lt', 'mat', 'mt', 'nat', 'nt', 'oat', 'ot', 'pat', 'pt', 'qat', 'qt', 'rat', 'rt', 'sat', 'st', 't', 'ta', 'tat', 'tt', 'uat', 'ut', 'vat', 'vt', 'wat', 'wt', 'xat', 'xt', 'yat', 'yt', 'zat', 'zt'] The type of the returned object should be a set <class 'set'> Number of outputs from edit_one_letter('at') is 129 ###Markdown Expected Output```CPPinput word at edit_one_l ['a', 'aa', 'aat', 'ab', 'abt', 'ac', 'act', 'ad', 'adt', 'ae', 'aet', 'af', 'aft', 'ag', 'agt', 'ah', 'aht', 'ai', 'ait', 'aj', 'ajt', 'ak', 'akt', 'al', 'alt', 'am', 'amt', 'an', 'ant', 'ao', 'aot', 'ap', 'apt', 'aq', 'aqt', 'ar', 'art', 'as', 'ast', 'ata', 'atb', 'atc', 'atd', 'ate', 'atf', 'atg', 'ath', 'ati', 'atj', 'atk', 'atl', 'atm', 'atn', 'ato', 'atp', 'atq', 'atr', 'ats', 'att', 'atu', 'atv', 'atw', 'atx', 'aty', 'atz', 'au', 'aut', 'av', 'avt', 'aw', 'awt', 'ax', 'axt', 'ay', 'ayt', 'az', 'azt', 'bat', 'bt', 'cat', 'ct', 'dat', 'dt', 'eat', 'et', 'fat', 'ft', 'gat', 'gt', 'hat', 'ht', 'iat', 'it', 'jat', 'jt', 'kat', 'kt', 'lat', 'lt', 'mat', 'mt', 'nat', 'nt', 'oat', 'ot', 'pat', 'pt', 'qat', 'qt', 'rat', 'rt', 'sat', 'st', 't', 'ta', 'tat', 'tt', 'uat', 'ut', 'vat', 'vt', 'wat', 'wt', 'xat', 'xt', 'yat', 'yt', 'zat', 'zt']The type of the returned object should be a set Number of outputs from edit_one_letter('at') is 129``` Part 3.2 Edit two letters Exercise 9Now you can generalize this to implement to get two edits on a word. To do so, you would have to get all the possible edits on a single word and then for each modified word, you would have to modify it again. **Instructions**: Implement the `edit_two_letters` function that returns a set of words that are two edits away. Note that creating additional edits based on the `edit_one_letter` function may 'restore' some one_edits to zero or one edits. That is allowed here. This accounted for in get_corrections. Hints You will likely want to take the union of two sets. You can either use set.union() or use the '|' (or operator) to union two sets See the documentation Python sets for examples of using operators or functions of the Python set. 
###Code # UNQ_C9 (UNIQUE CELL IDENTIFIER, DO NOT EDIT) # UNIT TEST COMMENT: Candidate for Table Driven Tests # GRADED FUNCTION: edit_two_letters def edit_two_letters(word, allow_switches = True): ''' Input: word: the input string/word Output: edit_two_set: a set of strings with all possible two edits ''' edit_two_set = set() ### START CODE HERE ### edit_one_set=edit_one_letter(word) edit_two_list=[] for tmpword in edit_one_set: edit_two_list+=edit_one_letter(tmpword) edit_two_set=set(edit_two_list) ### END CODE HERE ### return edit_two_set tmp_edit_two_set = edit_two_letters("a") tmp_edit_two_l = sorted(list(tmp_edit_two_set)) print(f"Number of strings with edit distance of two: {len(tmp_edit_two_l)}") print(f"First 10 strings {tmp_edit_two_l[:10]}") print(f"Last 10 strings {tmp_edit_two_l[-10:]}") print(f"The data type of the returned object should be a set {type(tmp_edit_two_set)}") print(f"Number of strings that are 2 edit distances from 'at' is {len(edit_two_letters('at'))}") ###Output Number of strings with edit distance of two: 2654 First 10 strings ['', 'a', 'aa', 'aaa', 'aab', 'aac', 'aad', 'aae', 'aaf', 'aag'] Last 10 strings ['zv', 'zva', 'zw', 'zwa', 'zx', 'zxa', 'zy', 'zya', 'zz', 'zza'] The data type of the returned object should be a set <class 'set'> Number of strings that are 2 edit distances from 'at' is 7154 ###Markdown Expected Output```CPPNumber of strings with edit distance of two: 2654First 10 strings ['', 'a', 'aa', 'aaa', 'aab', 'aac', 'aad', 'aae', 'aaf', 'aag']Last 10 strings ['zv', 'zva', 'zw', 'zwa', 'zx', 'zxa', 'zy', 'zya', 'zz', 'zza']The data type of the returned object should be a set Number of strings that are 2 edit distances from 'at' is 7154``` Part 3-3: suggest spelling suggestionsNow you will use your `edit_two_letters` function to get a set of all the possible 2 edits on your word. You will then use those strings to get the most probable word you meant to type aka your typing suggestion. Exercise 10**Instructions**: Implement `get_corrections`, which returns a list of zero to n possible suggestion tuples of the form (word, probability_of_word). **Step 1:** Generate suggestions for a supplied word: You'll use the edit functions you have developed. The 'suggestion algorithm' should follow this logic: * If the word is in the vocabulary, suggest the word. * Otherwise, if there are suggestions from `edit_one_letter` that are in the vocabulary, use those. * Otherwise, if there are suggestions from `edit_two_letters` that are in the vocabulary, use those. * Otherwise, suggest the input word.* * The idea is that words generated from fewer edits are more likely than words with more edits.Note: - Edits of one or two letters may 'restore' strings to either zero or one edit. This algorithm accounts for this by preferentially selecting lower distance edits first. Short circuitIn Python, logical operations such as `and` and `or` have two useful properties. They can operate on lists and they have ['short-circuit' behavior](https://docs.python.org/3/library/stdtypes.html). 
Try these: ###Code # example of logical operation on lists or sets print( [] and ["a","b"] ) print( [] or ["a","b"] ) #example of Short circuit behavior val1 = ["Most","Likely"] or ["Less","so"] or ["least","of","all"] # selects first, does not evalute remainder print(val1) val2 = [] or [] or ["least","of","all"] # continues evaluation until there is a non-empty list print(val2) ###Output [] ['a', 'b'] ['Most', 'Likely'] ['least', 'of', 'all'] ###Markdown The logical `or` could be used to implement the suggestion algorithm very compactly. Alternately, if/then constructs could be used. **Step 2**: Create a 'best_words' dictionary where the 'key' is a suggestion and the 'value' is the probability of that word in your vocabulary. If the word is not in the vocabulary, assign it a probability of 0.**Step 3**: Select the n best suggestions. There may be fewer than n. Hints edit_one_letter and edit_two_letters return *python sets*. Sets have a handy set.intersection feature To find the keys that have the highest values in a dictionary, you can use the Counter dictionary to create a Counter object from a regular dictionary. Then you can use Counter.most_common(n) to get the n most common keys. To find the intersection of two sets, you can use set.intersection or the & operator. If you are not as familiar with short circuit syntax (as shown above), feel free to use if else statements instead. To use an if statement to check of a set is empty, use 'if not x:' syntax ###Code # UNQ_C10 (UNIQUE CELL IDENTIFIER, DO NOT EDIT) # UNIT TEST COMMENT: Candidate for Table Driven Tests # GRADED FUNCTION: get_corrections def get_corrections(word, probs, vocab, n=2, verbose = False): ''' Input: word: a user entered string to check for suggestions probs: a dictionary that maps each word to its probability in the corpus vocab: a set containing all the vocabulary n: number of possible word corrections you want returned in the dictionary Output: n_best: a list of tuples with the most probable n corrected words and their probabilities. ''' suggestions = [] n_best = [] ### START CODE HERE ### if(word in vocab): suggestions.append(word) n_best.append((word,probs[word])) else: tmp_edit_one_set = edit_one_letter(word) tmp_edit_one_l = sorted(list(tmp_edit_one_set)) if(len(tmp_edit_one_l)>0): for tmpword in tmp_edit_one_l: if(tmpword in vocab): suggestions.append(tmpword) n_best.append((tmpword,probs[tmpword])) else: if(n>1): tmp_edit_two_set = edit_two_letters(word) tmp_edit_two_l = sorted(list(tmp_edit_two_set)) if(len(tmp_edit_two_l)>0): for tmpword in tmp_edit_two_l: if(tmpword in vocab): suggestions.append(tmpword) n_best.append((tmpword,probs[tmpword])) if(len(suggestions)==0): suggestions.append(word) n_best.append(0) ### END CODE HERE ### if verbose: print("entered word = ", word, "\nsuggestions = ", suggestions) return n_best # Test your implementation - feel free to try other words in my word my_word = 'dys' tmp_corrections = get_corrections(my_word, probs, vocab, 2, verbose=True) # keep verbose=True for i, word_prob in enumerate(tmp_corrections): print(f"word {i}: {word_prob[0]}, probability {word_prob[1]:.6f}") # CODE REVIEW COMMENT: using "tmp_corrections" insteads of "cors". 
"cors" is not defined print(f"data type of corrections {type(tmp_corrections)}") ###Output entered word = dys suggestions = ['days', 'dye'] best = [('days', 0.0004103405826836274), ('dye', 1.865184466743761e-05)] word 0: days, probability 0.000410 word 1: dye, probability 0.000019 data type of corrections <class 'list'> ###Markdown Expected Output- Note: This expected output is for `my_word = 'dys'`. Also, keep `verbose=True````CPPentered word = dys suggestions = {'days', 'dye'}word 0: days, probability 0.000410word 1: dye, probability 0.000019data type of corrections ``` Part 4: Minimum Edit distanceNow that you have implemented your auto-correct, how do you evaluate the similarity between two strings? For example: 'waht' and 'what'Also how do you efficiently find the shortest path to go from the word, 'waht' to the word 'what'?You will implement a dynamic programming system that will tell you the minimum number of edits required to convert a string into another string. Part 4.1 Dynamic ProgrammingDynamic Programming breaks a problem down into subproblems which can be combined to form the final solution. Here, given a string source[0..i] and a string target[0..j], we will compute all the combinations of substrings[i, j] and calculate their edit distance. To do this efficiently, we will use a table to maintain the previously computed substrings and use those to calculate larger substrings.You have to create a matrix and update each element in the matrix as follows: $$\text{Initialization}$$\begin{align}D[0,0] &= 0 \\D[i,0] &= D[i-1,0] + del\_cost(source[i]) \tag{4}\\D[0,j] &= D[0,j-1] + ins\_cost(target[j]) \\\end{align} $$\text{Per Cell Operations}$$\begin{align} \\D[i,j] =min\begin{cases}D[i-1,j] + del\_cost\\D[i,j-1] + ins\_cost\\D[i-1,j-1] + \left\{\begin{matrix}rep\_cost; & if src[i]\neq tar[j]\\0 ; & if src[i]=tar[j]\end{matrix}\right.\end{cases}\tag{5}\end{align} So converting the source word **play** to the target word **stay**, using an input cost of one, a delete cost of 1, and replace cost of 2 would give you the following table: s t a y 0 1 2 3 4 p 1 2 3 4 5 l 2 3 4 5 6 a 3 4 5 4 5 y 4 5 6 5 4 The operations used in this algorithm are 'insert', 'delete', and 'replace'. These correspond to the functions that you defined earlier: insert_letter(), delete_letter() and replace_letter(). switch_letter() is not used here. The diagram below describes how to initialize the table. Each entry in D[i,j] represents the minimum cost of converting string source[0:i] to string target[0:j]. The first column is initialized to represent the cumulative cost of deleting the source characters to convert string "EER" to "". The first row is initialized to represent the cumulative cost of inserting the target characters to convert from "" to "NEAR". Figure 6 Initializing Distance Matrix Filling in the remainder of the table utilizes the 'Per Cell Operations' in the equation (5) above. Note, the diagram below includes in the table some of the 3 sub-calculations shown in light grey. Only 'min' of those operations is stored in the table in the `min_edit_distance()` function. Figure 7 Filling Distance Matrix Note that the formula for $D[i,j]$ shown in the image is equivalent to:\begin{align} \\D[i,j] =min\begin{cases}D[i-1,j] + del\_cost\\D[i,j-1] + ins\_cost\\D[i-1,j-1] + \left\{\begin{matrix}rep\_cost; & if src[i]\neq tar[j]\\0 ; & if src[i]=tar[j]\end{matrix}\right.\end{cases}\tag{5}\end{align}The variable `sub_cost` (for substitution cost) is the same as `rep_cost`; replacement cost. 
We will stick with the term "replace" whenever possible. Below are some examples of cells where replacement is used. This also shows the minimum path from the lower right final position where "EER" has been replaced by "NEAR" back to the start. This provides a starting point for the optional 'backtrace' algorithm below. Figure 8 Examples Distance Matrix Exercise 11Again, the word "substitution" appears in the figure, but think of this as "replacement". **Instructions**: Implement the function below to get the minimum amount of edits required given a source string and a target string. Hints The range(start, stop, step) function excludes 'stop' from its output words ###Code # UNQ_C11 (UNIQUE CELL IDENTIFIER, DO NOT EDIT) # GRADED FUNCTION: min_edit_distance def min_edit_distance(source, target, ins_cost = 1, del_cost = 1, rep_cost = 2): ''' Input: source: a string corresponding to the string you are starting with target: a string corresponding to the string you want to end with ins_cost: an integer setting the insert cost del_cost: an integer setting the delete cost rep_cost: an integer setting the replace cost Output: D: a matrix of len(source)+1 by len(target)+1 containing minimum edit distances med: the minimum edit distance (med) required to convert the source string to the target ''' # use deletion and insert cost as 1 m = len(source) n = len(target) #initialize cost matrix with zeros and dimensions (m+1,n+1) D = np.zeros((m+1, n+1), dtype=int) ### START CODE HERE (Replace instances of 'None' with your code) ### # Fill in column 0, from row 1 to row m, both inclusive for row in range(1,m+1): # Replace None with the proper range D[row,0] = D[row-1,0]+del_cost # Fill in row 0, for all columns from 1 to n, both inclusive for col in range(1,n+1): # Replace None with the proper range D[0,col] = D[0,col-1]+ins_cost # Loop through row 1 to row m, both inclusive for row in range(1,m+1): # Loop through column 1 to column n, both inclusive for col in range(1,n+1): # Intialize r_cost to the 'replace' cost that is passed into this function r_cost = rep_cost # Check to see if source character at the previous row # matches the target character at the previous column, if source[row-1]==target[col-1]: # Update the replacement cost to 0 if source and target are the same r_cost = 0 # Update the cost at row, col based on previous entries in the cost matrix # Refer to the equation calculate for D[i,j] (the minimum of three calculated costs) D[row,col] = min(D[row-1,col]+del_cost,D[row,col-1]+ins_cost,D[row-1,col-1]+r_cost) # Set the minimum edit distance with the cost found at row m, column n med = D[m,n] ### END CODE HERE ### return D, med #DO NOT MODIFY THIS CELL # testing your implementation source = 'play' target = 'stay' matrix, min_edits = min_edit_distance(source, target) print("minimum edits: ",min_edits, "\n") idx = list('#' + source) cols = list('#' + target) df = pd.DataFrame(matrix, index=idx, columns= cols) print(df) ###Output minimum edits: 4 # s t a y # 0 1 2 3 4 p 1 2 3 4 5 l 2 3 4 5 6 a 3 4 5 4 5 y 4 5 6 5 4 ###Markdown **Expected Results:** ```CPPminimum edits: 4 s t a y 0 1 2 3 4p 1 2 3 4 5l 2 3 4 5 6a 3 4 5 4 5y 4 5 6 5 4``` ###Code #DO NOT MODIFY THIS CELL # testing your implementation source = 'eer' target = 'near' matrix, min_edits = min_edit_distance(source, target) print("minimum edits: ",min_edits, "\n") idx = list(source) idx.insert(0, '#') cols = list(target) cols.insert(0, '#') df = pd.DataFrame(matrix, index=idx, columns= cols) print(df) ###Output minimum edits: 3 # n e a r # 
0 1 2 3 4 e 1 2 1 2 3 e 2 3 2 3 4 r 3 4 3 4 3 ###Markdown **Expected Results** ```CPPminimum edits: 3 n e a r 0 1 2 3 4e 1 2 1 2 3e 2 3 2 3 4r 3 4 3 4 3``` We can now test several of our routines at once: ###Code source = "eer" targets = edit_one_letter(source,allow_switches = False) #disable switches since min_edit_distance does not include them for t in targets: _, min_edits = min_edit_distance(source, t,1,1,1) # set ins, del, sub costs all to one if min_edits != 1: print(source, t, min_edits) ###Output _____no_output_____ ###Markdown **Expected Results** ```CPP(empty)```The 'replace()' routine utilizes all letters a-z one of which returns the original word. ###Code source = "eer" targets = edit_two_letters(source,allow_switches = False) #disable switches since min_edit_distance does not include them for t in targets: _, min_edits = min_edit_distance(source, t,1,1,1) # set ins, del, sub costs all to one if min_edits != 2 and min_edits != 1: print(source, t, min_edits) ###Output eer erfe 3 eer jre 3 eer erne 3 eer bre 3 eer sre 3 eer ore 3 eer erte 3 eer xre 3 eer erwe 3 eer fre 3 eer erle 3 eer erqe 3 eer erze 3 eer erve 3 eer erie 3 eer zre 3 eer erce 3 eer lre 3 eer erae 3 eer erde 3 eer eroe 3 eer hre 3 eer wre 3 eer erue 3 eer mre 3 eer kre 3 eer dre 3 eer rre 3 eer tre 3 eer erhe 3 eer erge 3 eer eer 0 eer are 3 eer ire 3 eer yre 3 eer cre 3 eer pre 3 eer ure 3 eer nre 3 eer erbe 3 eer erse 3 eer erke 3 eer erxe 3 eer gre 3 eer erje 3 eer vre 3 eer qre 3 eer erye 3 eer erpe 3 eer erme 3 ###Markdown **Expected Results** ```CPPeer eer 0```We have to allow single edits here because some two_edits will restore a single edit. SubmissionMake sure you submit your assignment before you modify anything below Part 5: Optional - BacktraceOnce you have computed your matrix using minimum edit distance, how would find the shortest path from the top left corner to the bottom right corner? Note that you could use backtrace algorithm. Try to find the shortest path given the matrix that your `min_edit_distance` function returned.You can use these [lecture slides on minimum edit distance](https://web.stanford.edu/class/cs124/lec/med.pdf) by Dan Jurafsky to learn about the algorithm for backtrace. ###Code # Experiment with back trace - insert your code here ###Output _____no_output_____
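###Markdown One possible sketch for the optional backtrace (not a graded solution): walk back from `D[m, n]` to `D[0, 0]`, at each cell choosing a neighbor whose cost plus the corresponding edit cost reproduces the current cell, and record the operation taken. The costs passed to the backtrace must match the ones used to fill `D`.
###Code
# Sketch: recover one minimum-cost edit path from the matrix returned by min_edit_distance.
def backtrace(source, target, D, ins_cost=1, del_cost=1, rep_cost=2):
    i, j = len(source), len(target)
    path = []
    while i > 0 or j > 0:
        if i > 0 and j > 0 and source[i-1] == target[j-1] and D[i, j] == D[i-1, j-1]:
            path.append(("keep", source[i-1]))      # characters match, no cost
            i, j = i - 1, j - 1
        elif i > 0 and j > 0 and D[i, j] == D[i-1, j-1] + rep_cost:
            path.append(("replace", source[i-1] + "->" + target[j-1]))
            i, j = i - 1, j - 1
        elif i > 0 and D[i, j] == D[i-1, j] + del_cost:
            path.append(("delete", source[i-1]))
            i = i - 1
        else:
            path.append(("insert", target[j-1]))    # only remaining option is an insert
            j = j - 1
    return path[::-1]

matrix, _ = min_edit_distance("eer", "near")
for op, chars in backtrace("eer", "near", matrix):
    print(op, chars)
###Output _____no_output_____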
examples/ike_simple.ipynb
###Markdown Cloud-optimized loading of NetCDF4/HDF5 in XarrayUsing the new Fsspec "ReferenceFileSystem" functionality ###Code import xarray as xr import fsspec mapper = fsspec.get_mapper("reference://", fo='s3://pangeo-data-uswest2/esip/adcirc/adcirc_01d_offsets.json', target_options={'requester_pays': True}, remote_protocol='s3', remote_options={'requester_pays': True}) ds = xr.open_dataset(mapper, engine="zarr", backend_kwargs={"consolidated": False}) ds.nbytes/1e9 ds.zeta.encoding ds.zeta ###Output _____no_output_____ ###Markdown Cloud-optimized loading of NetCDF4/HDF5 in XarrayUsing the new Fsspec "ReferenceFileSystem" functionality ###Code import xarray as xr import fsspec mapper = fsspec.get_mapper("reference://", references='s3://pangeo-data-uswest2/esip/adcirc/adcirc_01d_offsets.json', ref_storage_args={'requester_pays': True}, target_protocol='s3', target_options={'requester_pays': True}) ds = xr.open_zarr(mapper) ds.nbytes/1e9 ds.zeta.encoding ds.zeta ###Output _____no_output_____
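###Markdown The two cells above open the same dataset through a pre-built reference JSON, using two variants of the `reference://` keyword arguments. For completeness, a sketch of how such a reference file can be generated with the kerchunk package (the successor of fsspec-reference-maker) is below; the source URL, output filename, and exact argument names are illustrative assumptions and may differ between package versions.
###Code
# Sketch: build a reference JSON for one NetCDF4/HDF5 file (kerchunk assumed installed).
import json
import fsspec
from kerchunk.hdf import SingleHdf5ToZarr

nc_url = "s3://some-bucket/adcirc/fort.63.nc"  # hypothetical source file
with fsspec.open(nc_url, "rb", requester_pays=True) as f:
    refs = SingleHdf5ToZarr(f, nc_url, inline_threshold=300).translate()
with open("adcirc_offsets.json", "w") as out:
    json.dump(refs, out)
###Output _____no_output_____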
notebooks/zinb_regression.ipynb
###Markdown Regression ###Code import pandas as pd import numpy as np import os import statsmodels.api as sm from google.colab import drive from statsmodels.genmod import families import statsmodels.discrete.count_model as reg_models drive.mount('/content/drive') root_path = "/content/drive/MyDrive/University/Dissertation" regression_path = "/regression" regression_file = "/regression_table_with_persp.csv" reg_df = pd.read_csv(root_path + regression_path + regression_file, parse_dates=['date']) reg_df['hatebase_proportion'] = reg_df['tweets_containing_slurs'] / reg_df['total_tweets'] reg_df['perspective_proportion'] = reg_df['tweets_flagged_perspective'] / reg_df['total_perspective_tweets'] # Set to 0 where there's 0 tweets received reg_df['hatebase_proportion'].fillna(0.0, inplace=True) reg_df['perspective_proportion'].fillna(0.0, inplace=True) reg_df['player_rating'].fillna(0.0, inplace=True) reg_df['player_rating_in_previous_game'].fillna(0.0, inplace=True) reg_df['club_coefficient'].fillna(0.0, inplace=True) reg_df.head() reg_df['day_of_week'] = reg_df['date'].dt.day_name() reg_df["featured"] = reg_df["featured"].astype(int) reg_df["featured_in_previous_game"] = reg_df["featured_in_previous_game"].astype(int) reg_df["matchday"] = reg_df["matchday"].astype(int) reg_df["red_card"] = reg_df["red_card"].astype(int) reg_df["penalty"] = reg_df["penalty"].astype(int) reg_df["penalty_outcome"] = reg_df["penalty_outcome"].astype(int) reg_df = pd.get_dummies(reg_df, columns=['ethnicity']) reg_df = pd.get_dummies(reg_df, columns=['result']) reg_df = pd.get_dummies(reg_df, columns=['result_in_previous_game']) reg_df = pd.get_dummies(reg_df, columns=['day_of_week']) reg_df = pd.get_dummies(reg_df, columns=['country']) reg_df.drop(columns=['country_ranking_points', 'club', 'name', 'date', 'opponent', 'round', 'ethnicity_white', 'result_D', 'result_W'], inplace=True) print('Hatebase absolute: Mean='+str(np.mean(reg_df['tweets_containing_slurs'])) + ' Variance='+str(np.var(reg_df['tweets_containing_slurs']))) print('Hatebase proportion: Mean='+str(np.mean(reg_df['hatebase_proportion'])) + ' Variance='+str(np.var(reg_df['hatebase_proportion']))) print('Perspective absolute: Mean='+str(np.mean(reg_df['tweets_flagged_perspective'])) + ' Variance='+str(np.var(reg_df['tweets_flagged_perspective']))) print('Perspective proportion: Mean='+str(np.mean(reg_df['perspective_proportion'])) + ' Variance='+str(np.var(reg_df['perspective_proportion']))) num_obs = len(reg_df) h_zeroes_abs = len(reg_df[reg_df['tweets_containing_slurs'] == 0]) h_zeroes_prop = len(reg_df[reg_df['hatebase_proportion'] == 0]) p_zeroes_abs = len(reg_df[reg_df['tweets_flagged_perspective'] == 0]) p_zeroes_prop = len(reg_df[reg_df['perspective_proportion'] == 0]) print(f'Total observations: {num_obs}') print(f'Hatebase absolute: {h_zeroes_abs} zeroes, {100*(h_zeroes_abs / num_obs)}%') print(f'Hatebase proportion: {h_zeroes_prop} zeroes, {100*(h_zeroes_prop / num_obs)}%') print(f'Perspective absolute: {p_zeroes_abs} zeroes, {100*(p_zeroes_abs / num_obs)}%') print(f'Perspective proportion: {p_zeroes_prop} zeroes, {100*(p_zeroes_prop / num_obs)}%') X = reg_df[['club_coefficient', 'ethnicity_non_white', 'result_L', 'pen', 'day_of_week_Monday', 'day_of_week_Tuesday', 'day_of_week_Wednesday', 'day_of_week_Thursday', 'day_of_week_Friday', 'day_of_week_Saturday', 'day_of_week_Sunday']] # X = reg_df[['club_coefficient', 'ethnicity_non_white', 'result_L', 'pen']] X = sm.add_constant(X) # ZINB for Hatebase absolute number of tweets (since mostly 
zeroes, and overdispersed) h_abs_model=reg_models.ZeroInflatedNegativeBinomialP(reg_df['tweets_containing_slurs'], X) h_abs_res = h_abs_model.fit_regularized() print(h_abs_res.summary()) # ZIGP for Hatebase proportion of tweets (since mostly zeroes, underdispersed) h_prop_model=reg_models.ZeroInflatedGeneralizedPoisson(reg_df['hatebase_proportion'], X) h_prop_res = h_prop_model.fit_regularized() print(h_prop_res.summary()) # ZINB for Perspective absolute number of tweets (since mostly zeroes, and overdispersed) p_abs_model=reg_models.ZeroInflatedNegativeBinomialP(reg_df['tweets_flagged_perspective'], X) p_abs_res = p_abs_model.fit_regularized() print(p_abs_res.summary()) # ZIGP for Perspective proportion of tweets (since mostly zeroes, underdispersed) p_prop_model=reg_models.ZeroInflatedGeneralizedPoisson(reg_df['perspective_proportion'], X) p_prop_res = p_prop_model.fit_regularized() print(p_prop_res.summary()) ###Output /usr/local/lib/python3.7/dist-packages/statsmodels/discrete/discrete_model.py:1396: RuntimeWarning: invalid value encountered in log np.log(a1) - gammaln(endog + 1) - a2 / a1) /usr/local/lib/python3.7/dist-packages/statsmodels/discrete/discrete_model.py:1684: RuntimeWarning: overflow encountered in exp return np.exp(linpred) /usr/local/lib/python3.7/dist-packages/statsmodels/discrete/discrete_model.py:1394: RuntimeWarning: invalid value encountered in multiply a2 = mu + (a1 - 1) * endog /usr/local/lib/python3.7/dist-packages/statsmodels/discrete/discrete_model.py:1394: RuntimeWarning: invalid value encountered in add a2 = mu + (a1 - 1) * endog /usr/local/lib/python3.7/dist-packages/statsmodels/discrete/discrete_model.py:1393: RuntimeWarning: overflow encountered in multiply a1 = 1 + alpha * mu_p /usr/local/lib/python3.7/dist-packages/statsmodels/discrete/discrete_model.py:1396: RuntimeWarning: invalid value encountered in true_divide np.log(a1) - gammaln(endog + 1) - a2 / a1)
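###Markdown The model choices above (ZINB for the heavily zero-inflated, overdispersed counts and ZIGP for the zero-inflated proportions) rest on the mean/variance and zero-count printouts earlier in the notebook. The cell below is a minimal sketch that makes that check explicit in one table; `dispersion_summary` is just an illustrative helper name, and it assumes `reg_df` is the dataframe prepared above. ###Code
# Minimal sketch: summarise zero inflation and dispersion for each outcome,
# to make the ZINB (overdispersed) vs ZIGP (underdispersed) choice explicit.
# Assumes reg_df is the dataframe built in the cells above; the helper name
# dispersion_summary is illustrative, not part of the original analysis.
def dispersion_summary(series):
    mean = series.mean()
    var = series.var(ddof=0)  # population variance, matching np.var used above
    return {
        'mean': mean,
        'variance': var,
        'variance_to_mean': var / mean if mean != 0 else float('nan'),
        'zero_fraction': (series == 0).mean(),
    }

outcomes = ['tweets_containing_slurs', 'hatebase_proportion',
            'tweets_flagged_perspective', 'perspective_proportion']
print(pd.DataFrame({col: dispersion_summary(reg_df[col]) for col in outcomes}).T)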
lessons/5 ETLPipelines/11_duplicatedata_exercise/11_duplicatedata_exercise-solution.ipynb
###Markdown Duplicate DataA data set might have duplicate data: in other words, the same record is represented multiple times. Sometimes, it's easy to find and eliminate duplicate data like when two records are exactly the same. At other times, like what was discussed in the video, duplicate data is hard to spot. Exercise 1From the World Bank GDP data, count the number of countries that have had a project totalamt greater than 1 billion dollars (1,000,000,000). To get the count, you'll have to remove duplicate data rows. ###Code import pandas as pd # read in the projects data set and do some basic wrangling projects = pd.read_csv('../data/projects_data.csv', dtype=str) projects.drop('Unnamed: 56', axis=1, inplace=True) projects['totalamt'] = pd.to_numeric(projects['totalamt'].str.replace(',', '')) projects['countryname'] = projects['countryname'].str.split(';', expand=True)[0] projects['boardapprovaldate'] = pd.to_datetime(projects['boardapprovaldate']) # TODO: filter the data frame for projects over 1 billion dollars # TODO: count the number of unique countries in the results projects[projects['totalamt'] > 1000000000]['countryname'].nunique() ###Output _____no_output_____ ###Markdown Exercise 2 (challenge)This exercise is more challenging. The projects data set contains data about Yugoslavia, which was an Eastern European country until 1992. Yugoslavia eventually broke up into 7 countries: Bosnia and Herzegovina, Croatia, Kosovo, Macedonia, Montenegro, Serbia, and Slovenia.But the projects dataset has some ambiguity in how it treats Yugoslavia and the 7 countries that came from Yugoslavia. Your task is to find Yugoslavia projects that are probably represented multiple times in the data set. ###Code # TODO: output all projects for the 'Socialist Federal Republic of Yugoslavia' # HINT: You can use the exact country name or use the pandas str.contains() method to search for Yugoslavia projects[projects['countryname'].str.contains('Yugoslavia')] ###Output _____no_output_____ ###Markdown Yugoslavia officially ended on [April 27th, 1992](https://en.wikipedia.org/wiki/Yugoslavia). In the code cell below, filter for projects with a 'boardapprovaldate' prior to April 27th, 1992 **and** with 'countryname' Bosnia and Herzegovina, Croatia, Kosovo, Macedonia, Serbia **or** Slovenia. You'll see there are a total of 12 projects in the data set that match this criteria. Save the results in the republics variable ###Code import datetime # TODO: filter the projects data set for project boardapprovaldate prior to April 27th, 1992 AND with countryname # of either 'Bosnia and Herzegovina', 'Croatia', 'Kosovo', 'Macedonia', 'Serbia', or 'Sovenia'. Store the # results in the republics variable republics = projects[(projects['boardapprovaldate'] < '1992, 4, 27') & ((projects['countryname'].str.contains('Bosnia')) | (projects['countryname'].str.contains('Croatia')) | (projects['countryname'].str.contains('Kosovo')) | (projects['countryname'].str.contains('Macedonia')) | (projects['countryname'].str.contains('Montenegro')) | (projects['countryname'].str.contains('Serbia')) | (projects['countryname'].str.contains('Slovenia')))][['regionname', 'countryname', 'lendinginstr', 'totalamt', 'boardapprovaldate', 'location', 'GeoLocID', 'GeoLocName', 'Latitude', 'Longitude', 'Country', 'project_name']].sort_values('boardapprovaldate') republics republics.countryname.unique() ###Output _____no_output_____ ###Markdown Are these projects also represented in the data labeled Yugoslavia? 
In the code cell below, filter for Yugoslavia projects approved between February 1st, 1980 and May 23rd, 1989, which are the minimum and maximum dates in the results above. Store the results in the yugoslavia variable. The goal is to see if there are any projects represented more than once in the data set. ###Code # Return Yugoslavia projects that might overlap with the other country projects yugoslavia = projects[(projects['countryname'].str.contains('Yugoslavia')) & (projects['boardapprovaldate'] >= '1980, 2, 1') & (projects['boardapprovaldate'] <= '1989, 5, 23')][['regionname', 'countryname', 'lendinginstr', 'totalamt', 'boardapprovaldate', 'location', 'GeoLocID', 'GeoLocName', 'Latitude', 'Longitude', 'Country', 'project_name']].sort_values('boardapprovaldate') yugoslavia.shape ###Output _____no_output_____ ###Markdown And as a final step, try to see if there are any projects in the republics variable and yugoslavia variable that could be the same project. There are multiple ways to do that. As a suggestion, find unique dates in the republics variable. Then separately find unique dates in the yugoslavia variable. Concatenate (i.e., append) the results together. And then count the number of times each date occurs in this list. If a date occurs twice, that means the same boardapprovaldate appeared in both the Yugoslavia data as well as in the republics data. You should find that there are four suspicious cases:* July 26th, 1983* March 31st, 1987* October 13th, 1987* May 23rd, 1989 ###Code import numpy as np # TODO: find the unique dates in the republics variable republic_unique_dates = republics['boardapprovaldate'].unique() # TODO: find the unique dates in the yugoslavia variable yugoslavia_unique_dates = yugoslavia['boardapprovaldate'].unique() # TODO: make a list of the results appending one list to the other dates = np.append(republic_unique_dates, yugoslavia_unique_dates) # TODO: print out the dates that appeared twice in the results unique_dates, count = np.unique(dates, return_counts=True) for i in range(len(unique_dates)): if count[i] == 2: print(unique_dates[i]) ###Output 1983-07-26 00:00:00+00:00 1987-03-31 00:00:00+00:00 1987-10-13 00:00:00+00:00 1989-05-23 00:00:00+00:00 ###Markdown Conclusion On July 26th, 1983, for example, projects were approved for Bosnia and Herzegovina, Croatia, Macedonia, Slovenia, and Yugoslavia. The code below shows the projects for that date. You'll notice that Yugoslavia had two projects, one of which was called "Power Transmission Project (03) Energy Managem...". The projects in the other countries were all called "POWER TRANS.III". This looks like a case of duplicate data. What you end up doing with this knowledge would depend on the context. For example, if you wanted to get a true count for the total number of projects in the data set, should all of these projects be counted as one project? Run the code cell below to see the projects in question. ###Code import datetime # run this code cell to see the duplicate data pd.concat([yugoslavia[yugoslavia['boardapprovaldate'] == datetime.date(1983, 7, 26)], republics[republics['boardapprovaldate'] == datetime.date(1983, 7, 26)]]) ###Output _____no_output_____
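###Markdown One alternative way to inspect the overlap (a sketch, not the official solution) is to line the two dataframes up directly on boardapprovaldate with a merge, so each candidate duplicate pair can be viewed side by side. This assumes the republics and yugoslavia variables created above. ###Code
# Sketch: pair up republic and Yugoslavia projects approved on the same date.
# Each row of candidate_pairs is one potential duplicate to inspect manually.
candidate_pairs = republics.merge(
    yugoslavia,
    on='boardapprovaldate',
    suffixes=('_republic', '_yugoslavia')
)
candidate_pairs[['boardapprovaldate',
                 'countryname_republic', 'project_name_republic',
                 'countryname_yugoslavia', 'project_name_yugoslavia']]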
Deep Learning/Assignments/Assignment 6/homework6.ipynb
###Markdown `X` is a `NxK` float matrix where each row (`X[i]`) corresponds to a data point. ###Code def multivariate_gaussian(X, mean, cov): d = X.shape[0] left_term = (2*np.pi)**(-d/2)*np.linalg.det(cov)**(-0.5) right_term = np.exp(-0.5*np.dot(np.dot((X-mean).T, np.linalg.inv(cov)), X-mean)) return left_term * right_term def gmm(X, n_classes, n_iter): n, d = X.shape # Weights (pi) pi = np.array([1./ n_classes] * n_classes) # Mean (mu) rand_idx = np.random.choice(n, n_classes, replace = False) mean = np.array(X[rand_idx, :]) # Covariance (sigma) cov = np.array([np.eye(d)] * n_classes) # Responsiblities (gamma) res = np.zeros((n, n_classes)) for _ in range(n_iter): # E-Step for i in range(n_classes): for j in range(n): res[j, i] = pi[i] * multivariate_gaussian(X[j], mean[i], cov[i]) res = (res.T / np.sum(res, axis = 1)).T # normalize # M-Step N_K = np.sum(res, axis=0) for i in range(n_classes): # Update mean mean[i] = 1. / N_K[i] * np.sum(res[:, i].reshape(n, 1) * X , axis=0) # Update covariance cov[i] = (1. / N_K[i]) * np.dot((res[:, i].reshape(n,1) * (X - mean[i]) ).T, X- mean[i]) # Update weights pi = N_K / np.sum(res) # Pick prediction with largest probability class_assignments = np.argmax(res, axis=1) return class_assignments, mean, cov # Hyper parameters num_classes = 3 num_iteration = 2000 class_assignments, mean, cov = gmm(X, num_classes, num_iteration) # You may want to tune the number of iterations ###Output _____no_output_____ ###Markdown Visualization: a Cross Section ###Code plt.figure(figsize=(9,4)) plt.subplot(121) for k in range(3): plt.scatter(X[class_assignments==k, 2], X[class_assignments==k, 1], s=2) plt.subplot(122) for k, class_name in enumerate(np.unique(Y)): plt.scatter(X[Y==class_name, 2], X[Y==class_name, 1], s=2) plt.savefig(f"figures/cs_{num_classes}_{num_iteration}.png") ###Output _____no_output_____ ###Markdown Visualization: PCA Projection ###Code evals, evecs = np.linalg.eigh(np.cov(X.T)) to_crd = lambda x: ((x-x.mean(axis=0))@evecs)[:,-2:] crds = to_crd(X) plt.figure(figsize=(9,4)) plt.subplot(121) for k in range(3): plt.scatter(crds[class_assignments==k, 0], crds[class_assignments==k, 1], s=2) plt.scatter(to_crd(mean)[:,0], to_crd(mean)[:,1], s=30, marker='+') plt.subplot(122) for k in np.unique(Y): plt.scatter(crds[Y==k, 0], crds[Y==k, 1], s=2) plt.savefig(f"figures/pca_{num_classes}_{num_iteration}.png") ###Output _____no_output_____
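###Markdown A quick sanity check for the gmm function above (a minimal sketch; it assumes numpy is imported as np, as in the cells above): two well-separated synthetic blobs should be recovered almost perfectly, with the estimated means close to the true centres. ###Code
# Minimal sketch: run gmm() on two well-separated synthetic Gaussian blobs
# and check that the recovered means sit near (-5, -5) and (+5, +5).
rng = np.random.default_rng(0)
blob_a = rng.normal(loc=[-5.0, -5.0], scale=1.0, size=(200, 2))
blob_b = rng.normal(loc=[5.0, 5.0], scale=1.0, size=(200, 2))
X_toy = np.vstack([blob_a, blob_b])

toy_assignments, toy_mean, toy_cov = gmm(X_toy, n_classes=2, n_iter=50)
print("estimated means:\n", toy_mean)
print("cluster sizes:", np.bincount(toy_assignments))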
ingreso de datos.ipynb
###Markdown Validating arguments in a function ###Code def validacion(x=None, y=None): if x is None or y is None: print("you need to enter both values") return return x*y validacion() validacion(1) ###Output you need to enter both values ###Markdown When we don't know the length of a list in advance, we can do this.... ###Code def arg(*lista1): for i in lista1: print(i) x=arg(1,5,6,8,12,14) x def diccionario(**dic): for i in dic: print(i) w=diccionario(nombre="samir",edad=25,carrera="sistemas") w ###Output nombre edad carrera ###Markdown THINGS THAT NEED TO BE DONE WHEN STARTING A SYSTEM FOR THE USER ###Code try: c=float(input("enter your age")) print(c) except: print("you did not enter what was asked for") while True: try: c=int(input("enter your age")) print(c) except: print("ERROR: enter the requested value again") else: print("You logged in successfully") break ###Output _____no_output_____
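###Markdown The retry loop above can be wrapped into a small reusable helper so every numeric prompt gets the same validation; this is a minimal sketch, and ask_int is just an illustrative name, not something defined earlier in the notebook. ###Code
# Minimal sketch: reusable "keep asking until the input is a valid integer" helper.
def ask_int(prompt):
    while True:
        try:
            value = int(input(prompt))
        except ValueError:
            print("ERROR: that was not a whole number, please try again")
        else:
            print("Input accepted")
            return value

# Example usage (uncomment to run interactively):
# age = ask_int("enter your age: ")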
notebooks/Chapter_24/01_Bivariate_Normal_Distribution.ipynb
###Markdown Bivariate Normal Distribution The multivariate normal distribution is defined in terms of a mean vector and a covariance matrix.
The units of covariance are often hard to understand, as they are the product of the units of the two variables.Normalizing the covariance so that it is easier to interpret is a good idea. As you have seen in exercises, for jointly distributed random variables $X$ and $Y$ the *correlation* between $X$ and $Y$ is defined as$$r_{X,Y} ~ = ~ \frac{Cov(X, Y)}{\sigma_X\sigma_Y} ~ = ~ E\Big{(} \frac{X-\mu_X}{\sigma_X} \cdot \frac{Y-\mu_Y}{\sigma_Y} \Big{)}~ = ~ E(X^*Y^*)$$where $X^\*$ is $X$ in standard units and $Y^\*$ is $Y$ in standard units. Properties of Correlation You showed all of these in exercises.- $r_{X,Y}$ depends only on standard units and hence is a pure number with no units- $r_{X,Y} = r_{Y,X}$- $-1 \le r_{X,Y} \le 1$ - If $Y = aX + b$ then $r_{X,Y}$ is $1$ or $-1$ according to whether the sign of $a$ is positive or negative. We say that $r_{X,Y}$ measures the *linear association* between $X$ and $Y$. Variance of a Sum Rewrite the formula for correlation to see that $$Cov(X, Y) ~ = ~ r_{X,Y}\sigma_X\sigma_Y$$So the variance of $X+Y$ is$$\sigma_{X+Y}^2 ~ = ~ \sigma_X^2 + \sigma_Y^2 + 2r_{X,Y}\sigma_X\sigma_Y$$Notice the parallel with the formula for the length of the sum of two vectors, with correlation playing the role of the cosine of the angle between two vectors. If the angle is 90 degrees, the the cosine is 0. This corresponds to correlation being zero and hence the random variables being uncorrelated. We will visualize this idea in the case where the joint distribution of $X$ and $Y$ is bivariate normal. Standard Bivariate Normal Distribution Let $X$ and $Z$ be independent standard normal variables, that is, bivariate normal random variables with mean vector $\mathbf{0}$ and covariance matrix equal to the identity. Now fix a number $\rho$ (that's the Greek letter rho, the lower case r) so that $-1 < \rho < 1$, and let$$\mathbf{A} ~ = ~ \begin{bmatrix}1 & 0 \\\rho & \sqrt{1 - \rho^2}\end{bmatrix}$$Define a new random variable $Y = \rho X + \sqrt{1-\rho^2}Z$, and notice that$$\begin{bmatrix}X \\Y\end{bmatrix} ~ = ~\begin{bmatrix}1 & 0 \\\rho & \sqrt{1 - \rho^2}\end{bmatrix}\begin{bmatrix}X \\Z\end{bmatrix}~ = ~ \mathbf{A}\begin{bmatrix}X \\Z\end{bmatrix}$$So $X$ and $Y$ have the bivariate normal distribution with mean vector $\mathbf{0}$ and covariance matrix$$\mathbf{AIA}^T ~ = ~ \begin{bmatrix}1 & 0 \\\rho & \sqrt{1 - \rho^2}\end{bmatrix}\begin{bmatrix}1 & \rho \\0 & \sqrt{1 - \rho^2}\end{bmatrix}~ = ~ \begin{bmatrix}1 & \rho \\\rho & 1\end{bmatrix}$$We say that $X$ and $Y$ have the *standard bivariate normal distribution with correlation $\rho$*.The graph below shows the empirical distribution of 1000 $(X, Y)$ points in the case $\rho = 0.6$. You can change the value of $rho$ and see how the scatter diagram changes. It will remind you of numerous such simulations in Data 8. ###Code # Plotting parameters plt.figure(figsize=(5, 5)) plt.axes().set_aspect('equal') plt.xlabel('$X$') plt.ylabel('$Y$', rotation=0) plt.xticks(np.arange(-4, 4.1)) plt.yticks(np.arange(-4, 4.1)) # X, Z, and Y x = stats.norm.rvs(0, 1, size=1000) z = stats.norm.rvs(0, 1, size=1000) rho = 0.6 y = rho*x + np.sqrt((1-rho**2))*z plt.scatter(x, y, color='darkblue', s=10); ###Output _____no_output_____ ###Markdown Correlation as a Cosine We have defined$$Y ~ = ~ \rho X + \sqrt{1 - \rho^2} Z$$where $X$ and $Z$ are i.i.d. standard normal.Let's understand this construction geometrically. A good place to start is the joint density of $X$ and $Z$, which has circular symmetry. 
###Code # NO CODE Plot_bivariate_normal([0, 0], [[1, 0], [0, 1]]) plt.xlabel('$X$') plt.ylabel('$Z$') plt.gca().set_zlabel('$f(x, z)$') plt.title('Standard Bivariate Normal Distribution, Correlation = 0'); ###Output _____no_output_____ ###Markdown The $X$ and $Z$ axes are orthogonal. Let's see what happens if we twist them. Take any positive angle $\theta$ degrees and draw a new axis at angle $\theta$ to the original $X$ axis. Every point $(X, Z)$ has a *projection* onto this axis. The figure below shows the projection of the point $(X, Z) = (1, 2)$ onto the gold axis which is at an angle of $\theta$ degress to the $X$ axis. The blue segment is the value of $X$. You get that by dropping the perpendicular from $(1, 2)$ to the horizontal axis. That's called *projecting* $(1, 2)$ onto the horizontal axis. The red segment is the projection of $(1, 2)$ onto the gold axes, obtained by dropping the perpendicular from $(1, 2)$ to the gold axis.Vary the values of $\theta$ in the cell below to see how the projection changes as the gold axis rotates. ###Code theta = 20 projection_1_2(theta) ###Output _____no_output_____ ###Markdown Let $Y$ be the length of the red segment, and remember that $X$ is the length of the blue segment. When $\theta$ is very small, $Y$ is almost equal to $X$. When $\theta$ approaches 90 degrees, $Y$ is almost equal to $Z$.A little trigonometry shows that $Y ~ = ~ X \cos(\theta) + Z\sin(\theta)$. ###Code projection_trig() ###Output _____no_output_____ ###Markdown Thus$$Y ~ = ~ X\cos(\theta) + Z\sin(\theta) ~ = ~ \rho X + \sqrt{1 - \rho^2}Z$$where $\rho = \cos(\theta)$.The sequence of graphs below illustrates the transformation for $\theta = 30$ degrees. ###Code theta = 30 projection_1_2(theta) ###Output _____no_output_____ ###Markdown The bivariate normal distribution is the joint distribution of the blue and red lengths $X$ and $Y$ when the original point $(X, Z)$ has i.i.d. standard normal coordinates. This transforms the circular contours of the joint density surface of $(X, Z)$ into the elliptical contours of the joint density surface of $(X, Y)$. ###Code cos(theta), (3**0.5)/2 rho = cos(theta) Plot_bivariate_normal([0, 0], [[1, rho], [rho, 1]]) plt.title('Standard Bivariate Normal Distribution, Correlation = '+str(round(rho, 2))); ###Output _____no_output_____ ###Markdown Small $\theta$ As we observed earlier, when $\theta$ is very small there is hardly any change in the position of the axis. So $X$ and $Y$ are almost equal. ###Code theta = 2 projection_1_2(theta) ###Output _____no_output_____ ###Markdown The bivariate normal density of $X$ and $Y$, therefore, is essentially confined to the $X = Y$ line. The correlation $\cos(\theta)$ is large because $\theta$ is small; it is more than 0.999. You can see the plotting function having trouble rendering this joint density surface. ###Code rho = cos(theta) rho Plot_bivariate_normal([0, 0], [[1, rho], [rho, 1]]) ###Output _____no_output_____ ###Markdown Orthogonality and Independence When $\theta$ is 90 degrees, the gold axis is orthogonal to the $X$ axis and $Y$ is equal to $Z$ which is independent of $X$. ###Code theta = 90 projection_1_2(theta) ###Output _____no_output_____
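###Markdown A quick numerical check of the two identities used in this section (a minimal sketch; it assumes numpy is available as np, as in the simulation cell above): for the standard bivariate normal construction, the empirical correlation of X and Y should be close to rho, and the empirical variance of X + Y should be close to 2 + 2*rho. ###Code
# Minimal sketch: simulate the construction Y = rho*X + sqrt(1 - rho^2)*Z and
# confirm corr(X, Y) ~ rho and Var(X + Y) ~ 2 + 2*rho.
rho = 0.6
n = 100000
x = np.random.normal(0, 1, n)
z = np.random.normal(0, 1, n)
y = rho * x + np.sqrt(1 - rho**2) * z

print("empirical corr(X, Y):", np.corrcoef(x, y)[0, 1])
print("empirical Var(X + Y):", np.var(x + y), "  theoretical:", 2 + 2 * rho)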
DCGAN_model.ipynb
###Markdown Synthetic Image Generation with DCGANs in Keras 1. Import Libraries ###Code %matplotlib inline import tensorflow as tf from tensorflow import keras import numpy as np import plot_utils import matplotlib.pyplot as plt from tqdm import tqdm print('Tensorflow version:', tf.__version__) ###Output Tensorflow version: 2.2.0 ###Markdown 2. Loading data and Preprocessing the Data I'm using the built-in Fashion-MNIST dataset from tf.keras.datasets. ###Code (x_train, y_train), (x_test, y_test) = tf.keras.datasets.fashion_mnist.load_data() x_train = x_train.astype(np.float32) / 255.0 x_test = x_test.astype(np.float32) / 255.0 plt.figure(figsize=(10,10)) for i in range(25): plt.subplot(5,5,i+1) plt.xticks([]) plt.yticks([]) plt.grid(False) plt.imshow(x_train[i], cmap=plt.cm.binary) plt.show() ###Output _____no_output_____ ###Markdown 3. Creating Batches of Training Data For now, I'm experimenting with a batch size of 32. ###Code batch_size = 32 # This dataset fills a buffer with buffer_size elements, #then randomly samples elements from this buffer, replacing the selected elements with new elements. dataset = tf.data.Dataset.from_tensor_slices(x_train).shuffle(1000) #Combines consecutive elements of this dataset into batches. dataset = dataset.batch(batch_size, drop_remainder=True).prefetch(1) #Creates a Dataset that prefetches elements from this dataset ###Output _____no_output_____ ###Markdown 4. Building the Generator Network for the DCGAN using Keras Sequential ###Code num_features = 100 generator = keras.models.Sequential([ keras.layers.Dense(7 * 7 * 128, input_shape=[num_features]), keras.layers.Reshape([7, 7, 128]), keras.layers.BatchNormalization(), keras.layers.Conv2DTranspose(64, (5,5), (2,2), padding="same", activation="selu"), keras.layers.BatchNormalization(), keras.layers.Conv2DTranspose(1, (5,5), (2,2), padding="same", activation="tanh"), ]) # generate random noise noise = tf.random.normal(shape=[1, num_features]) generated_images = generator(noise, training=False) plot_utils.show(generated_images, 1) ###Output _____no_output_____ ###Markdown 5. Discriminator Network model for DCGAN ###Code discriminator = keras.models.Sequential([ keras.layers.Conv2D(64, (5,5), (2,2), padding="same", input_shape=[28, 28, 1]), keras.layers.LeakyReLU(0.2), keras.layers.Dropout(0.3), keras.layers.Conv2D(128, (5,5), (2,2), padding="same"), keras.layers.LeakyReLU(0.2), keras.layers.Dropout(0.3), keras.layers.Flatten(), keras.layers.Dense(1, activation='sigmoid') ]) decision = discriminator(generated_images) print(decision) ###Output tf.Tensor([[0.5008253]], shape=(1, 1), dtype=float32) ###Markdown 6. Compiling the Deep Convolutional Generative Adversarial Network (DCGAN) model built with Keras ###Code discriminator.compile(loss="binary_crossentropy", optimizer="rmsprop") discriminator.trainable = False gan = keras.models.Sequential([generator, discriminator]) gan.compile(loss="binary_crossentropy", optimizer="rmsprop") ###Output _____no_output_____ ###Markdown 7.
Training Procedure is given below ###Code from IPython import display from tqdm import tqdm seed = tf.random.normal(shape=[batch_size, 100]) from tqdm import tqdm def train_dcgan(gan, dataset, batch_size, num_features, epochs=5): generator, discriminator = gan.layers for epoch in tqdm(range(epochs)): print("Epoch {}/{}".format(epoch + 1, epochs)) for X_batch in dataset: noise = tf.random.normal(shape=[batch_size, num_features]) generated_images = generator(noise) X_fake_and_real = tf.concat([generated_images, X_batch], axis=0) y1 = tf.constant([[0.]] * batch_size + [[1.]] * batch_size) discriminator.trainable = True discriminator.train_on_batch(X_fake_and_real, y1) noise = tf.random.normal(shape=[batch_size, num_features]) y2 = tf.constant([[1.]] * batch_size) discriminator.trainable = False gan.train_on_batch(noise, y2) # Produce images for the GIF as we go display.clear_output(wait=True) generate_and_save_images(generator, epoch + 1, seed) display.clear_output(wait=True) generate_and_save_images(generator, epochs, seed) ## Source https://www.tensorflow.org/tutorials/generative/dcgan#create_a_gif def generate_and_save_images(model, epoch, test_input): # Notice `training` is set to False. # This is so all layers run in inference mode (batchnorm). predictions = model(test_input, training=False) fig = plt.figure(figsize=(10,10)) for i in range(25): plt.subplot(5, 5, i+1) plt.imshow(predictions[i, :, :, 0] * 127.5 + 127.5, cmap='binary') plt.axis('off') plt.savefig('image_at_epoch_{:04d}.png'.format(epoch)) plt.show() ###Output _____no_output_____ ###Markdown 8. Training a DCGAN model ###Code x_train_dcgan = x_train.reshape(-1, 28, 28, 1) * 2. - 1. batch_size = 32 dataset = tf.data.Dataset.from_tensor_slices(x_train_dcgan) dataset = dataset.shuffle(1000) dataset = dataset.batch(batch_size, drop_remainder=True).prefetch(1) %%time train_dcgan(gan, dataset, batch_size, num_features, epochs=10) ###Output _____no_output_____ ###Markdown 9. Generating Synthetic Images with DCGAN on Fation MNIST data ###Code noise = tf.random.normal(shape=[batch_size, num_features]) generated_images = generator(noise) plot_utils.show(generated_images, 8) ## Source: https://www.tensorflow.org/tutorials/generative/dcgan#create_a_gif import imageio import glob anim_file = '/content/dcgan.gif' with imageio.get_writer(anim_file, mode='I') as writer: filenames = glob.glob('image*.png') filenames = sorted(filenames) last = -1 for i,filename in enumerate(filenames): frame = 2*(i) if round(frame) > round(last): last = frame else: continue image = imageio.imread(filename) writer.append_data(image) image = imageio.imread(filename) writer.append_data(image) import IPython display.Image(filename=anim_file) ###Output _____no_output_____
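###Markdown As a quick qualitative check after training (a minimal sketch using only the objects already defined above: generator, discriminator, batch_size and num_features), we can score a fresh batch of generated images with the trained discriminator; scores near 1 mean the discriminator is fooled, scores near 0 mean it still spots the fakes. ###Code
# Minimal sketch: how convincing are freshly generated images to the discriminator?
noise = tf.random.normal(shape=[batch_size, num_features])
fake_images = generator(noise, training=False)
fake_scores = discriminator(fake_images, training=False)

print("mean discriminator score on generated images:", float(tf.reduce_mean(fake_scores)))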
Extract_tag.ipynb
###Markdown Test cases ###Code # test_case = ["I want a high school romance movie with a bit of fantasy adventure.", # "I want a sport theme drama.", # "I want a non-human Artificial Intelligence crime series.", # "I want a show with lots of actions and comedy and little romance.", # "I want a slice of life anime.", # "I want a show with lots of actions, comedy and Science Fiction.", # "I want an ojou-sama anime.", # "I want a action, romance, comedy show.", # "I want an action anime.", # "I want a horror anime.", test_case = ["I want a show with a lot of action and ninjas.", "I want a show with fighting robots.", "I want a show with crazy battles."] ###Output _____no_output_____ ###Markdown A method to get the tags we needed ###Code def seperate_genres(nlp): """ Takes a Spacy core language model, we use the model to process all the tag and category we have Arguments: nlp -- a spacy core language model Returns: {dict} -- a dictionary of category as the key and the set of tags associated with it as value. """ result = {"Genres": ["Action", "Adventure", "Comedy", "Drama", "Ecchi", "Fantasy", "Horror", "Mahou Shoujo", "Mecha", "Music", "Mystery", "Psychological" ,"Romance", "Science Fiction", "Slice Of Life", "Sports", "Supernatural", "Thriller"], "Cast-Main Cast": ["Anti-Hero", "Ensemble Cast", "Female Protagonist", "Male Protagonist", "Primarily Adult Cast", "Primarily Child Cast", "Primarily Female Cast", "Primarily Male Cast"], "Cast-Traits": ["Age Regression", "Agender", "Aliens", "Amnesia", "Angels", "Artificial Intelligence", "Asexual", "Butler", "Centaur", "Chimera", "Chuunibyou", "Cosplay", "Crossdressing", "Cyborg", "Delinquents", "Demons", "Detective", "Dinosaurs", "Dissociative Identities", "Dragons", "Dullahan", "Elf", "Ghost", "Goblin", "Gods", "Gyaru", "Hikikomori", "Idol", "Kemonomimi", "Kuudere", "Maids", "Mermaid", "Monster Boy", "Monster Girl", "Nekomimi", "Ninja", "Nudity", "Nun", "Office Lady", "Oiran", "Ojou-Sama", "Pirates", "Robots", "Samurai", "Shrine Maiden", "Skeleton", "Succubus", "Tanned Skin", "Teacher", "Tomboy" "Transgender", "Tsundere", "Twins", "Vampire", "Vikings", "Villainess", "VTuber", "Werewolf", "Witch", "Yandere", "Zombie"], "Demographic": ["Josei", "Kids", "Seinen", "Shoujo", "Shounen"], "Setting-Scene": ["Bar", "Circus", "College", "Dungeon", "Foreign", "Language Barrier", "Outdoor", "Rural", "School", "School Club", "UrbanWork"], "Setting-Time": ["Achronological Order", "Anachronism", "Dystopian", "Historical", "Time Skip"], "Setting-Universe": ["Afterlife", "Alternate Universe", "Augmented Reality", "Post-Apocalyptic", "Space", "Urban Fantasy", "Virtual World"], "Technical": ["4-Koma", "Achromatic", "Advertisement", "Anthology", "CGI", "Episodic", "Flash", "Full CGI", "Full Color", "No Dialogue", "POV", "Puppetry", "Rotoscoping", "Stop Motion"], "Theme-Action": ["Archery", "Battle Royale", "Espionage", "Fugitive", "Guns", "Martial Arts", "Swordplay"], "Theme-Arts": ["Acting", "Calligraphy", "Classic Literature", "Drawing", "Fashion", "Food", "Makeup", "Photography", "Rakugo", "Writing"], "Theme-Arts-Music": ["Band", "Dancing", "Musical"], "Theme-Comedy": ["Parody", "Satire", "Slapstick", "Surreal Comedy"], "Theme-Drama": ["Bullying", "Coming Of Age", "Conspiracy", "Rehabilitation", "Revenge", "Suicide", "Tragedy"], "Theme-Fantasy": ["Body Swapping", "Cultivation", "Fairy Tale", "Henshin", "Isekai", "Kaiju", "Magic", "Mythology", "Shapeshifting", "Steampunk", "Super Power", "Superhero", "Wuxia", "Youkai"], "Theme-Game": ["E-Sports", "Video Games"], 
"Theme-Game-Card & Board Game": ["Card Battle", "Go", "Karuta", "Mahjong", "Poker", "Shogi"], "Theme-Game-Sport": ["Airsoft", "American Football", "Athletics", "Badminton", "Baseball", "Basketball", "Boxing", "Cheerleading", "Cycling", "Fencing", "Fishing", "Fitness", "Football", "Golf", "Ice Skating", "Judo", "Lacrosse", "Parkour", "Rugby", "Scuba Diving", "Skateboarding", "Sumo", "Surfing", "Swimming", "Table Tennis", "Tennis", "Volleyball", "Wrestling"], "Theme-Other": ["Adoption", "Animals", "Astronomy", "Autobiographical", "Biographical", "Body Horror", "Cannibalism", "Chibi", "Cosmic Horror", "Crime", "Crossover", "Death Game", "Denpa", "Drugs", "Economics", "Educational", "Environmental", "Ero Guro", "Gambling", "Gender Bending", "Gore", "LGBTQ+ Themes", "Lost Civilization", "Medicine", "Memory Manipulation", "Meta", "Noir", "Otaku Culture", "Pandemic", "Philosophy", "Politics", "Reincarnation", "Religion", "Slavery", "Software Development", "Survival", "Terrorism", "Torture", "War"], "Theme-Other-Organisations": ["Assassins", "Cult", "Firefighters", "Gangs", "Mafia", "Military", "Police", "Triads", "Yakuza"], "Theme-Other-Vehicle": ["Aviation", "Cars", "Mopeds", "Motorcycles", "Ships", "Tanks", "Trains"], "Theme-Romance": ["Age Gap", "Bisexual", "Boys' Love", "Female Harem", "Heterosexual", "Love Triangle", "Male Harem", "Teens' Love", "Yuri"], "Theme-Sci Fi": ["Cyberpunk", "Space Opera", "Time Manipulation", "Tokusatsu"], "Theme-Sci Fi-Mecha": ["Real Robot", "Super Robot"], "Theme-Slice Of Life": ["Agriculture", "Cute Boys Doing Cute Things", "Cute Girls Doing Cute Things", "Family Life", "Iyashikei"]} # These tags are exception os that when we tokenize them and lemamtized them and combined them # back we don't add space in them exceptions = ["Anti-Hero", "Ojou-Sama", "Post-Apocalyptic", "4-Koma", "E-Sports"] exceptions_tag = [exception.lower() for exception in exceptions] # we go over each tag and we convert them to lower case and lemmatized them for category, tags in result.items(): new_list = [] for tag in tags: new_tag = "" # convert them to lower case tag = tag.lower() nlp_new_tag = nlp(tag) white_space = False # we lemmatized them for token in nlp_new_tag: if white_space is True: # the if and else statement is to make sure that we don't add space in the beginning # of the word new_tag += " " + token.lemma_ else: # we process the first word of the tag without adding the space new_tag += token.lemma_ if tag not in exceptions_tag: white_space = True # we add the lemmatized tag to the list and update the dict new_list.append(new_tag) result.update({category: new_list}) return result ###Output _____no_output_____ ###Markdown Printing the tag to check ###Code final_tags = seperate_genres(nlp) for key,value in final_tags.items(): print(key) print(value) ###Output Genres ['action', 'adventure', 'comedy', 'drama', 'ecchi', 'fantasy', 'horror', 'mahou shoujo', 'mecha', 'music', 'mystery', 'psychological', 'romance', 'science fiction', 'slice of life', 'sport', 'supernatural', 'thriller'] Cast-Main Cast ['anti-hero', 'ensemble cast', 'female protagonist', 'male protagonist', 'primarily adult cast', 'primarily child cast', 'primarily female cast', 'primarily male cast'] Cast-Traits ['age regression', 'agender', 'alien', 'amnesia', 'angel', 'artificial intelligence', 'asexual', 'butler', 'centaur', 'chimera', 'chuunibyou', 'cosplay', 'crossdressing', 'cyborg', 'delinquent', 'demon', 'detective', 'dinosaur', 'dissociative identity', 'dragon', 'dullahan', 'elf', 'ghost', 'goblin', 'god', 
'gyaru', 'hikikomori', 'idol', 'kemonomimi', 'kuudere', 'maid', 'mermaid', 'monster boy', 'monster girl', 'nekomimi', 'ninja', 'nudity', 'nun', 'office lady', 'oiran', 'ojou-sama', 'pirate', 'robot', 'samurai', 'shrine maiden', 'skeleton', 'succubus', 'tanned skin', 'teacher', 'tomboytransgender', 'tsundere', 'twin', 'vampire', 'viking', 'villainess', 'vtuber', 'werewolf', 'witch', 'yandere', 'zombie'] Demographic ['josei', 'kid', 'seinen', 'shoujo', 'shounen'] Setting-Scene ['bar', 'circus', 'college', 'dungeon', 'foreign', 'language barrier', 'outdoor', 'rural', 'school', 'school club', 'urbanwork'] Setting-Time ['achronological order', 'anachronism', 'dystopian', 'historical', 'time skip'] Setting-Universe ['afterlife', 'alternate universe', 'augment reality', 'post-apocalyptic', 'space', 'urban fantasy', 'virtual world'] Technical ['4-koma', 'achromatic', 'advertisement', 'anthology', 'cgi', 'episodic', 'flash', 'full cgi', 'full color', 'no dialogue', 'pov', 'puppetry', 'rotoscope', 'stop motion'] Theme-Action ['archery', 'battle royale', 'espionage', 'fugitive', 'gun', 'martial art', 'swordplay'] Theme-Arts ['act', 'calligraphy', 'classic literature', 'draw', 'fashion', 'food', 'makeup', 'photography', 'rakugo', 'write'] Theme-Arts-Music ['band', 'dancing', 'musical'] Theme-Comedy ['parody', 'satire', 'slapstick', 'surreal comedy'] Theme-Drama ['bully', 'come of age', 'conspiracy', 'rehabilitation', 'revenge', 'suicide', 'tragedy'] Theme-Fantasy ['body swap', 'cultivation', 'fairy tale', 'henshin', 'isekai', 'kaiju', 'magic', 'mythology', 'shapeshifte', 'steampunk', 'super power', 'superhero', 'wuxia', 'youkai'] Theme-Game ['e-sport', 'video game'] Theme-Game-Card & Board Game ['card battle', 'go', 'karuta', 'mahjong', 'poker', 'shogi'] Theme-Game-Sport ['airsoft', 'american football', 'athletic', 'badminton', 'baseball', 'basketball', 'boxing', 'cheerleading', 'cycling', 'fence', 'fishing', 'fitness', 'football', 'golf', 'ice skating', 'judo', 'lacrosse', 'parkour', 'rugby', 'scuba diving', 'skateboard', 'sumo', 'surf', 'swimming', 'table tennis', 'tennis', 'volleyball', 'wrestling'] Theme-Other ['adoption', 'animal', 'astronomy', 'autobiographical', 'biographical', 'body horror', 'cannibalism', 'chibi', 'cosmic horror', 'crime', 'crossover', 'death game', 'denpa', 'drug', 'economic', 'educational', 'environmental', 'ero guro', 'gamble', 'gender bending', 'gore', 'lgbtq+ theme', 'lose civilization', 'medicine', 'memory manipulation', 'meta', 'noir', 'otaku culture', 'pandemic', 'philosophy', 'politic', 'reincarnation', 'religion', 'slavery', 'software development', 'survival', 'terrorism', 'torture', 'war'] Theme-Other-Organisations ['assassin', 'cult', 'firefighter', 'gang', 'mafia', 'military', 'police', 'triad', 'yakuza'] Theme-Other-Vehicle ['aviation', 'car', 'moped', 'motorcycle', 'ship', 'tank', 'train'] Theme-Romance ['age gap', 'bisexual', "boy ' love", 'female harem', 'heterosexual', 'love triangle', 'male harem', "teen ' love", 'yuri'] Theme-Sci Fi ['cyberpunk', 'space opera', 'time manipulation', 'tokusatsu'] Theme-Sci Fi-Mecha ['real robot', 'super robot'] Theme-Slice Of Life ['agriculture', 'cute boy do cute thing', 'cute girl do cute thing', 'family life', 'iyashikei'] ###Markdown Modifying nlp tokenizer ###Code # we add the tag to the tokenizer so that it process # the tag as one token for tags in final_tags.values(): for tag in tags: special_case = [{"ORTH": tag}] nlp.tokenizer.add_special_case(tag, special_case) def extract_keywords(nlp, sequence): """ Takes a 
Spacy core language model, a text that we need to process we use the model to process the sequence of text that was pass in Arguments: nlp -- a spacy core language model sequence -- the text that we want to process Returns: {dict} -- a dictionary of keyword as the key and a set of adjective or adverb use to quantify it. (Note theat the set contains words that we do not need so we'll process it afterward) """ result = {} pos_tag = ["PROPN", "NOUN", "ADJ", "VERB"] descriptive_tag = ["ADJ", "ADV"] doc = nlp(sequence.lower()) custom_stop_list = [ 'theme' ] for token in doc: # we ignore puctuation, stop_words(common word such as "is", "are", "what"...), # and word in our custom_stop_list if token in nlp.Defaults.stop_words or token.pos_ == "PUNCT" or token.text in custom_stop_list: continue # the word might be a keyword so we process it if token.pos_ in pos_tag: # we keep track of a list of adjective or adverb describing the keyword we're # looking at children_list = set() for child in token.children: # we're looking at the other token associate with the keyword we're # currently looking at if (child.pos_ in descriptive_tag): # if the other token is an adverb or an adjective we add it to the set # we also combine it to form a new word children_list.add(child.text) new_text = child.lemma_ + " " + token.lemma_ result.update({new_text: set()}) # this is to take into account for phrase such as "lot of" and "bit of" if token.dep_ == "pobj": if token.head.dep_ == "prep": children_list.add(token.head.head.text + " " + token.head.text) # we process words connected by conjuction such as and if token.dep_ == "conj": if len(children_list) == 0: # words that are connected by conjuction shouldn't have any # adjective or adverb describing it. If it does that meant # that it was an entirely different phrase and we shouldn't add it children_list.update(result[token.head.lemma_]) result.update({token.lemma_: children_list}) # print(token.text + ", " + token.pos_, children_list) for token in doc: if token.dep_ == "compound": if result.get(token.text) is not None: updated_value = result.get(token.text) if result.get(token.head.text) is not None: updated_value.update(result.get(token.head.text)) result.update({token.text: updated_value}) # from spacy import displacy # displacy.render(doc, style="dep") return result # we print out the keyword that was extracted from the test_case to see if # it's what we expected # for test in test_case: # print(extract_keywords(nlp, test)) def extract_tags(nlp, text, special_tags : dict = None): """ Takes a Spacy core language model, a text that we need to process, and a dictionary of tags we use the model to process the sequence of text that was pass in and go through the tags to see which are similar Arguments: nlp -- a spacy core language model sequence -- the text that we want to process special_tags -- the list of tags that we're looking for Returns: {dict} -- a dictionary of tags as the key and a value associated with it so that we only look if that tag has a rank of the value or above """ result_tag = {} result_media_type = [] keywords = extract_keywords(nlp, text) custom_stop_list = [ "anime", "manga", "movie", "ova", "ona" ] quatifier_descriptions = { nlp("a lot of"): 1, nlp("lots of"): 1, nlp("many") : 1, nlp("some"): 25, nlp("average"): 50, nlp("little"): 75, nlp("bit of"): 75, nlp("no"): 0, } for keyword, quantifiers in keywords.items(): # if the keyword is in the list # it's describing what type of media we want # such as manga, anime or other if keyword in 
custom_stop_list: result_media_type.append(keyword) continue # we use nlp to process the keyword nlp_keyword = nlp(keyword) search_rank_filter = 1; for quantifier in quantifiers: nlp_quantifier = nlp(quantifier) for description in quatifier_descriptions: if nlp_quantifier.has_vector and nlp_quantifier.similarity(description) > 0.9: search_rank_filter = quatifier_descriptions[description] # we loop through all the tags and see which one are similar for category, tags in special_tags.items(): for tag in tags: # we process the tag to be able to compare the word vector nlp_tag = nlp(tag) # we have a boolean value to see if the word we're comparing # have a word vector if yes we compare them should_compare = nlp_keyword.has_vector and nlp_tag.has_vector if should_compare and nlp_keyword.similarity(nlp_tag) > 0.8: result_tag.update({tag: search_rank_filter}) print("tag: ", search_rank_filter, nlp_tag.text, " - ", "text: ", nlp_keyword.text) return set(result_tag); special_tags = seperate_genres(nlp); for test in test_case: print(test) extract_tags(nlp, test, special_tags) print(nlp("bit of").similarity(nlp("little"))) print(nlp("a lot of").similarity(nlp("lots of"))) print(nlp("average").similarity(nlp("high"))) print(nlp("fight").similarity(nlp("action"))) ###Output 0.7395083689922443 0.8639847133169465 0.509374629883784 0.47812068532229546
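###Markdown extract_tags silently skips a comparison whenever has_vector is False, so multi-word or rare tags without word vectors can never be matched. The cell below is a minimal sketch of an audit for that, assuming the nlp model and the final_tags dictionary defined earlier in the notebook. ###Code
# Minimal sketch: list tags that have no word vector and therefore can never
# be matched by the similarity check in extract_tags().
missing_vectors = []
for category, tags in final_tags.items():
    for tag in tags:
        if not nlp(tag).has_vector:
            missing_vectors.append((category, tag))

print(len(missing_vectors), "tags without vectors")
for category, tag in missing_vectors[:20]:
    print(category, "-", tag)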
data_preprocessing/5/Data_Preprocessing_Notebook.ipynb
###Markdown Data Preprocessing for Social Anxiety Detection : Participant 5 *** Participant Details__Gender:__ female __Ethnicity:__ asian __Age:__ 21 __Self-reported Liebowitz social anxiety score:__ 80 __Anxiety category:__ 2 *** Contents__1.Introduction __1.1. Nature of the dataset 1.2. Description of the ML experiments __2.Import packages ____3.Import data __3.1. Import HR data and resample 3.2. Import ST data 3.3. Import EDA data __4.Combine data ____5.Data labelling __5.1. Labelling for experiment (1) and (3) 5.2. Labelling for experiment (2) __6.Data visualisation and export__*** 1. IntroductionThis notebook preprocesses the physiological data needed for the supervised machine learning (ML) experiments that investigate whether subclinical social anxiety in young adults can be detected using physiological data obtained from wearable sensors. 1.1. Nature of the datasetThe dataset consists of Heart Rate (HR) data, Skin Temperature (ST) data and Electrodermal Activity (EDA) data. This physiological data was collected using an E4 Empatica wearable device. Using the default sampling rates of the E4, EDA was measured in microSiemens (μS) at 4 Hz using stainless steel electrodes positioned on the inner side of the wrist. HR was measured in Beats Per Minute (BPM) at 1 Hz using data derived from a Photoplethysmography sensor. ST was measured in degrees Celsius (°C) at 4 Hz using an infrared thermophile. 1.2. Description of the ML experiments__Experiment (1)__ investigates whether models can be trained to classify between baseline and socially anxious states. The data is either labelled '0' during the baseline period and '1' during the anxiety period (during anticipation and reactive anxiety).__Experiment (2)__ investigates whether models can be trained to differentiate between baseline, anticipation anxiety and reactive anxiety states. The data is labelled in three ways, '0' during the baseline period, '1' during the anticipation anxiety period and '2' during the reactive anxiety period.__Experiment (3)__ investigates whether models can be trained to classify between social anxiety experienced by individuals with differing social anxiety severity. The data was segregated based on scores reported using the self-reported version of Liebowitz Social Anxiety Scale (LSAS-SR), the data was is either labelled as '0' for individuals in anxiety category 1 (LSAS-SR:50-64) or labelled as '1' for individuals in anxiety category 2 (LSAS-SR:65-80).*** 2.Import packages ###Code import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns ###Output _____no_output_____ ###Markdown 3.Import and combine data 3.1. Import HR data and upsampleHR is imported and upsampled to 4Hz similar to ST and EDA. The data is then cleaned using a moving average filter in order to remove noise to reduce the risk of overfitting. 
###Code hr = pd.read_csv("HR.csv") hr.index = pd.date_range('2020-03-04', periods = len(hr), freq='1S') #resampling HR to 4Hz hr_resample = hr.resample('0.25S').ffill() #Applying moving average filter rolling = hr_resample.rolling(window=9) hr_filtered = rolling.mean() #Plotting the comparison fig, (ax1, ax2) = plt.subplots(2, 1) hr_resample[2600:2700].plot( ax=ax1, legend=False, color = 'indigo') ax1.yaxis.set_label_text("HR (BPM)") ax1.xaxis.set_label_text('Time(min)') ax1.set_title("Resampled HR") ax1.grid(which='both', alpha=2) hr_filtered[2600:2700].plot( ax=ax2, legend=False, color = 'indigo') ax2.yaxis.set_label_text("HR (BPM)") ax2.xaxis.set_label_text('Time(min)') ax2.set_title("Resampled HR After Filtering") ax2.grid(which='both', alpha=2) fig.set_size_inches(15, 5) fig.subplots_adjust(hspace=0.7) plt.show() ###Output _____no_output_____ ###Markdown 3.2. Import ST data The ST data is imported and then cleaned using a moving average filter in order to remove noise to reduce the risk of overfitting. ###Code st = pd.read_csv("ST.csv") st.index = pd.date_range('2020-03-04', periods = len(st), freq='0.25S') #Applying moving average filter rolling = st.rolling(window=15) st_filtered = rolling.mean() #Plotting the comparison fig, (ax1, ax2) = plt.subplots(2, 1) st[2600:2700].plot( ax=ax1, legend=False, color = 'indigo') ax1.yaxis.set_label_text("ST (°C)") ax1.xaxis.set_label_text('Time(min)') ax1.set_title("Raw ST") ax1.grid(which='both', alpha=2) st_filtered[2600:2700].plot( ax=ax2, legend=False, color = 'indigo') ax2.yaxis.set_label_text("ST (°C)") ax2.xaxis.set_label_text('Time(min)') ax2.set_title("ST After Filtering") ax2.grid(which='both', alpha=2) fig.set_size_inches(15, 5) fig.subplots_adjust(hspace=0.7) plt.show() ###Output _____no_output_____ ###Markdown 3.3. Import EDA data The EDA data is imported and then cleaned using a moving average filter in order to remove noise to reduce the risk of overfitting. The EDA data is also range corrected in order to remove inter-individual differences, more details about the range correction method can be found in the paper. 
###Code eda = pd.read_csv("EDA.csv") eda.index = pd.date_range('2020-03-04', periods = len(eda), freq='0.25S') #Applying moving average filter rolling = eda.rolling(window=15) eda_filtered = rolling.mean() #Range corrected EDA - value - min/max-min eda_corrected = (eda_filtered - 1.015)/(4.983-1.015) #Plotting the comparison fig, (ax1, ax2, ax3) = plt.subplots(3, 1) eda[2600:2800].plot( ax=ax1, legend=False, color = 'indigo') ax1.yaxis.set_label_text("EDA (μS)") ax1.xaxis.set_label_text('Time(min)') ax1.set_title("Raw EDA") ax1.grid(which='both', alpha=2) eda_filtered[2600:2800].plot( ax=ax2, legend=False, color = 'indigo') ax2.yaxis.set_label_text("EDA (μS)") ax2.xaxis.set_label_text('Time(min)') ax2.set_title("EDA After Filtering") ax2.grid(which='both', alpha=2) eda_corrected[2600:2800].plot( ax=ax3, legend=False, color = 'indigo') ax3.yaxis.set_label_text("EDA (μS)") ax3.xaxis.set_label_text('Time(min)') ax3.set_title("Range corrected EDA") ax3.grid(which='both', alpha=2) fig.set_size_inches(15, 6) fig.subplots_adjust(hspace=1.3) eda_filtered=eda_corrected plt.show() #eda[480:5846].min() #eda[480:5846].max() ###Output _____no_output_____ ###Markdown 4.Combine data ###Code df = pd.concat([hr_filtered, st_filtered, eda_filtered], ignore_index=True, axis = 1 ) df = df.T.reset_index(drop=True).T display(df.describe()) ###Output _____no_output_____ ###Markdown 5.Data labellingThe data was labelled for three different experiments. The anxiety duration in data cells etc. was calculated using a spreadsheet and the timestamps recorded during the experiments. ###Code #insert column specifically for labels df.insert(3,3,0) display(df.describe()) ###Output _____no_output_____ ###Markdown 5.1. Labelling for experiment (1) and (3)For experiment (1) the data was labelled '1' (allocated to the social anxiety class) from when the task was announced to when the task was finished. The first 2 minutes from the baseline period were also discarded to account for acclimisation, the data after the task was also discarded.For experiment (3) only the data in the anxious period (from task announcement to task end) was extracted and labelled. This individual falls into anxiety catergory 2 based on their LSAS-SR scores therefore their anxious data is labelled '1'. Data is then shuffled and a certain number of samples is taken. 
###Code experiment_df = df #duration (labels) of anxiety duration (both anticipation and reactive, labelled '1') experiment_df[3][2647:5846] = 1 display(experiment_df[3].value_counts()) #removing the data after the task had ended experiment_df = experiment_df.drop(experiment_df.index[5846:]) #experiment 1 - removing the first 2 mins of the baseline period to account for acclimisation experiment1_df = experiment_df.drop(experiment_df.index[:480]) display(experiment1_df[3].value_counts()) experiment1_df.to_csv("experiment_1.csv") #experiment 3 - removing baseline period experiment3_df = experiment_df.drop(experiment_df.index[:2647]) display(experiment3_df[3].value_counts()) #shuffling and extracting a set number of samples idx = np.random.permutation(experiment3_df.index) shuffled = experiment3_df.reindex(idx, axis=0) shuffled = shuffled.reset_index(drop=True) shuffled = shuffled.drop(shuffled.index[1667:]) shuffled.to_csv("experiment_3.csv") ###Output C:\Users\Ruksana\Anaconda3\lib\site-packages\ipykernel_launcher.py:4: SettingWithCopyWarning: A value is trying to be set on a copy of a slice from a DataFrame See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy after removing the cwd from sys.path. ###Markdown 5.2. Labelling for experiment (2) For experiment (2) the data was labelled '1' during the anticipation anxiety stage (task announcement to task start) and labelled '2' during the reactive anxiety stage (task start to task end). The first 2 minutes from the baseline period were also discarded to account for acclimisation, the data after the task was also discarded. ###Code experiment2_df = df #duration (labels) of task prep (anticipation anxiety duration, labelled '1') experiment2_df[3][2647:4766] = 1 #duration (labels) of task execution (reactive anxiety duration, labelled '2') experiment2_df[3][4766:5846] = 2 display(experiment2_df[3].value_counts()) #removing the data after the task had ended experiment2_df = experiment2_df.drop(experiment2_df.index[5846:]) #removing the first 2 mins of the baseline period to account for acclimisation experiment2_df = experiment2_df.drop(experiment2_df.index[:528]) display(experiment2_df[3].value_counts()) experiment2_df.to_csv("experiment_2.csv") ###Output C:\Users\Ruksana\Anaconda3\lib\site-packages\ipykernel_launcher.py:4: SettingWithCopyWarning: A value is trying to be set on a copy of a slice from a DataFrame See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy after removing the cwd from sys.path. C:\Users\Ruksana\Anaconda3\lib\site-packages\ipykernel_launcher.py:6: SettingWithCopyWarning: A value is trying to be set on a copy of a slice from a DataFrame See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy ###Markdown 6.Data visualisationThe physiological data and experiment (1) and (2) labels were plotted. Pearson correlation matrices were also formulated for the dataset used in experiment (1) and (2). 
###Code fig, (ax1, ax2, ax3, ax4, ax5) = plt.subplots(5, 1) ax1.set_title('Combined Physiological Data and Experiment Labels (1 & 2)', fontsize = 15) experiment1_df[0].plot(ax=ax1, legend=False, color='indigo') ax1.yaxis.set_label_text("HR (BPM)") ax1.xaxis.set_label_text('Time(min)') ax1.grid(which='both', alpha=2) experiment1_df[1].plot(ax=ax2, legend=False, color='indigo') ax2.yaxis.set_label_text("ST (°C)") ax2.xaxis.set_label_text('Time(min)') ax2.grid(which='both', alpha=2) experiment1_df[2].plot(ax=ax3, legend=False, color='indigo') ax3.yaxis.set_label_text("Range Corrected EDA (μS)") ax3.xaxis.set_label_text('Time(min)') ax3.grid(which='both', alpha=2) experiment1_df[3].plot(ax=ax4, legend=False, color='indigo') ax4.yaxis.set_label_text("Experiment (1) labels") ax4.xaxis.set_label_text('Time(min)') ax4.grid(which='both', alpha=2) experiment2_df[3].plot(ax=ax5, legend=False, color='indigo') ax5.yaxis.set_label_text("Experiment (2) labels") ax5.xaxis.set_label_text('Time(min)') ax5.grid(which='both', alpha=2) fig.set_size_inches(15, 14) fig.subplots_adjust(hspace=0.4) plt.show() #Correlation matrix with Experiment 1 (binary labels) labeldata = ['HR', 'ST', 'EDA','Labels'] sns.heatmap(experiment1_df.corr(method = 'pearson'), vmin=0, vmax=1, annot=True, cmap="YlGnBu", yticklabels = labeldata, xticklabels =labeldata) fig = plt.gcf() #Correlation matrix with Experiment 2 (Mult-class labels) sns.heatmap(experiment2_df.corr(method = 'pearson'), vmin=0, vmax=1, annot=True, cmap="YlGnBu", yticklabels = labeldata, xticklabels =labeldata) fig = plt.gcf() ###Output _____no_output_____
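Beyond the full heatmaps above, it can help to read off just the feature-versus-label correlations as a ranked list. This is a small optional sketch, assuming experiment1_df still holds the HR/ST/EDA columns 0–2 and the binary labels in column 3:

```python
# Pearson correlation of each physiological signal with the experiment (1) labels
corr_with_label = (
    experiment1_df.corr(method="pearson")[3]     # correlations against the label column
    .drop(labels=3)                              # remove the label's correlation with itself
    .rename(index={0: "HR", 1: "ST", 2: "EDA"})  # human-readable names
    .sort_values(ascending=False)
)
print(corr_with_label)
```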
data_visualization/01-exercise-hello-seaborn.ipynb
###Markdown **This notebook is an exercise in the [Data Visualization](https://www.kaggle.com/learn/data-visualization) course. You can reference the tutorial at [this link](https://www.kaggle.com/alexisbcook/hello-seaborn).**--- In this exercise, you will write your first lines of code and learn how to use the coding environment for the course! SetupFirst, you'll learn how to run code, and we'll start with the code cell below. (Remember that a **code cell** in a notebook is just a gray box containing code that we'd like to run.)- Begin by clicking inside the code cell. - Click on the blue triangle (in the shape of a "Play button") that appears to the left of the code cell.- If your code was run sucessfully, you will see `Setup Complete` as output below the cell.![ex0_run_code](https://i.imgur.com/4NzqJ7G.png) The code cell below imports and configures the Python libraries that you need to complete the exercise.Click on the cell and run it. ###Code import pandas as pd pd.plotting.register_matplotlib_converters() import matplotlib.pyplot as plt %matplotlib inline import seaborn as sns # Set up code checking import os if not os.path.exists("../input/fifa.csv"): os.symlink("../input/data-for-datavis/fifa.csv", "../input/fifa.csv") from learntools.core import binder binder.bind(globals()) from learntools.data_viz_to_coder.ex1 import * print("Setup Complete") ###Output _____no_output_____ ###Markdown The code you just ran sets up the system to give you feedback on your work. You'll learn more about the feedback system in the next step. Step 1: Explore the feedback systemEach exercise lets you test your new skills with a real-world dataset. Along the way, you'll receive feedback on your work. You'll see if your answer is right, get customized hints, and see the official solution (_if you'd like to take a look!_).To explore the feedback system, we'll start with a simple example of a coding problem. Follow the following steps in order:1. Run the code cell below without making any edits. It will show the following output: > Check: When you've updated the starter code, `check()` will tell you whether your code is correct. You need to update the code that creates variable `one` This means you need to change the code to set the variable `one` to something other than the blank provided below (`____`).2. Replace the underline with a `2`, so that the line of code appears as `one = 2`. Then, run the code cell. This should return the following output:> Incorrect: Incorrect value for `one`: `2` This means we still have the wrong answer to the question.3. Now, change the `2` to `1`, so that the line of code appears as `one = 1`. Then, run the code cell. The answer should be marked as Correct. You have now completed this problem! ###Code # Fill in the line below one = 1 # Check your answer step_1.check() ###Output _____no_output_____ ###Markdown In this exercise, you were responsible for filling in the line of code that sets the value of variable `one`. **Don't edit the code that checks your answer.** You'll need to run the lines of code like `step_1.check()` and `step_2.check()` just as they are provided.This problem was relatively straightforward, but for more difficult problems, you may like to receive a hint or view the official solution. Run the code cell below now to receive both for this problem. ###Code step_1.hint() step_1.solution() ###Output _____no_output_____ ###Markdown Step 2: Load the dataYou are ready to get started with some data visualization! 
You'll begin by loading the dataset from the previous tutorial. The code you need is already provided in the cell below. Just run that cell. If it shows Correct result, you're ready to move on! ###Code # Path of the file to read fifa_filepath = "../input/fifa.csv" # Read the file into a variable fifa_data fifa_data = pd.read_csv(fifa_filepath, index_col="Date", parse_dates=True) # Check your answer step_2.check() ###Output _____no_output_____ ###Markdown Next, recall the difference between comments and executable code:- **Comments** are preceded by a pound sign (``) and contain text that appear faded and italicized. They are completely ignored by the computer when the code is run.- **Executable code** is code that is run by the computer.In the code cell below, every line is a comment:```python Uncomment the line below to receive a hintstep_2.hint()step_2.solution()```If you run the code cell below without making any changes, it won't return any output. Try this now! ###Code # Uncomment the line below to receive a hint #step_2.hint() # Uncomment the line below to see the solution #step_2.solution() ###Output _____no_output_____ ###Markdown Next, remove the pound sign before `step_2.hint()` so that the code cell above appears as follows:```python Uncomment the line below to receive a hintstep_2.hint()step_2.solution()```When we remove the pound sign before a line of code, we say we **uncomment** the line. This turns the comment into a line of executable code that is run by the computer. Run the code cell now, which should return the Hint as output.Finally, uncomment the line to see the solution, so the code cell appears as follows:```python Uncomment the line below to receive a hintstep_2.hint()step_2.solution()```Then, run the code cell. You should receive both a Hint and the Solution.If at any point you're having trouble with coming up with the correct answer to a problem, you are welcome to obtain either a hint or the solution before completing the cell. (So, you don't need to get a Correct result before running the code that gives you a Hint or the Solution.) Step 3: Plot the dataNow that the data is loaded into the notebook, you're ready to visualize it! Run the next code cell without changes to make a line chart. The code may not make sense yet - you'll learn all about it in the next tutorial! ###Code # Set the width and height of the figure plt.figure(figsize=(16,6)) # Line chart showing how FIFA rankings evolved over time sns.lineplot(data=fifa_data) # Check your answer step_3.a.check() ###Output _____no_output_____ ###Markdown Some questions won't require you to write any code. Instead, you'll interpret visualizations.As an example, consider the question: Considering only the years represented in the dataset, which countries spent at least 5 consecutive years in the 1 ranked spot?To receive a Hint, uncomment the line below, and run the code cell. ###Code #step_3.b.hint() ###Output _____no_output_____ ###Markdown Once you have an answer, check the Solution to get credit for completing the problem and to ensure your interpretation is right. ###Code # Check your answer (Run this code cell to receive credit!) step_3.b.solution() ###Output _____no_output_____
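For the Step 3 interpretation question, the answer can also be cross-checked programmatically. This is only a sketch and rests on the assumption (as in the tutorial) that fifa_data has one column per country holding its FIFA rank, indexed by date, with rank 1 being the #1 spot:

```python
# Country occupying the #1 rank on each ranking date
top_ranked = fifa_data.idxmin(axis=1)

# Group consecutive dates with the same #1 country and report runs of 5+ years
blocks = (top_ranked != top_ranked.shift()).cumsum()
for _, block in top_ranked.groupby(blocks):
    years = (block.index[-1] - block.index[0]).days / 365.25
    if years >= 5:
        print(block.iloc[0], block.index[0].date(), "->", block.index[-1].date())
```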
ML_Retraining_Pipeline/Clean_Training_Data.ipynb
###Markdown Copyright (c) Microsoft Corporation. Licensed under the MIT license. Clean Training Data This notebook will clean the training dataset and load the cleaned data into a spark database for training the models. ###Code paths = ['abfss://{FILE_SYSTEM_NAME}@{DATA_LAKE_NAME}.dfs.core.windows.net/synapse/workspaces/full_dataset/', 'abfss://{FILE_SYSTEM_NAME}@{DATA_LAKE_NAME}.dfs.core.windows.net/synapse/workspaces/clickstream/processed/'] full_dataset = spark.read.parquet(*paths) # remove all null values from category and brand filtered_df = full_dataset.filter((full_dataset.category_code != 'null') & (full_dataset.brand != 'null')) #filter on construction and remove misplaced brands construction_df = filtered_df.filter((filtered_df.category_code.contains('construction')) & (filtered_df.brand != 'apple') & (filtered_df.brand != 'philips') & (filtered_df.brand != 'oystercosmetics')& (filtered_df.brand != 'tefal') & (filtered_df.brand != 'hyundai') & (filtered_df.brand != 'polaris') & (filtered_df.brand != 'puma') & (filtered_df.brand != 'samsung') & (filtered_df.brand != 'maybellinenewyork') & (filtered_df.brand != 'lg') & (filtered_df.brand != 'sony') & (filtered_df.brand != 'nokia') & (filtered_df.brand != 'nike') & (filtered_df.brand != 'fila') & (filtered_df.brand != 'milanicosmetics') & (filtered_df.brand != 'shoesrepublic') &(filtered_df.brand != 'hp')&(filtered_df.brand != 'jbl')) #filter on electronics and remove misplaced brands electronic_df = filtered_df.filter((filtered_df.category_code.contains('electronics'))& (filtered_df.brand != 'houseofseasons') & (filtered_df.brand != 'jaguar') & (filtered_df.brand != 'shoesrepublic') & (filtered_df.brand != 'tefal') & (filtered_df.brand != 'nike') & (filtered_df.brand != 'hyundai') & (filtered_df.brand != 'puma')) #filter on apparel and remove misplaced brands apparel_df = filtered_df.filter((filtered_df.category_code.contains('apparel')) & (filtered_df.brand != 'toyota') & (filtered_df.brand != 'canon')& (filtered_df.brand != 'samsung') & (filtered_df.brand != 'hp')& (filtered_df.brand != 'nikon') & (filtered_df.brand != 'jbl') & (filtered_df.brand != 'apple') & (filtered_df.brand != 'x-digital') & (filtered_df.brand != 'tefal') & (filtered_df.brand != 'fujifilm') & (filtered_df.brand != 'toysmax') & (filtered_df.brand != 'houseofseasons') & (filtered_df.brand != 'toshiba') & (filtered_df.brand != 'playdoh') & (filtered_df.brand != 'jaguar') & (filtered_df.brand != 'microsoft') & (filtered_df.brand != 'tv-shop') & (filtered_df.brand != 'xp-pen') & (filtered_df.brand != 'philips') & (filtered_df.brand != 'logitech') & (filtered_df.brand != 'm-audio') & (filtered_df.brand != 'sony') & (filtered_df.brand != 'lg') & (filtered_df.brand != 'hyundai')) #filtered on computers and removed misplaced brands computer_df = filtered_df.filter((filtered_df.category_code.contains('computers')) & (filtered_df.brand != 'fila') & (filtered_df.brand != 'moosetoys') & (filtered_df.brand != 'tefal') & (filtered_df.brand != 'hotwheels') & (filtered_df.brand != 'taftoys') & (filtered_df.brand != 'barbi') & (filtered_df.brand != 'fitbit') & (filtered_df.brand != 'nike')) #filtered on appliances and removed misplaced brands appliance_df = filtered_df.filter((filtered_df.category_code.contains('appliances')) & (filtered_df.brand != 'fila')& (filtered_df.brand != 'shoesrepublic') & (filtered_df.brand != 'toshiba')& (filtered_df.brand != 'hp')& (filtered_df.brand != 'nokia')&(filtered_df.brand != 'hyundai')& (filtered_df.brand != 'moosetoys') & 
(filtered_df.brand != 'jaguar') & (filtered_df.brand != 'colorkid') & (filtered_df.brand != 'apple') & (filtered_df.brand != 'jbl') & (filtered_df.brand != 'toyota') & (filtered_df.brand != 'nike') & (filtered_df.brand != 'logitech')) #filtered on auto and removed misplaced brands auto_df = filtered_df.filter((filtered_df.category_code.contains('auto')) & (filtered_df.brand != 'philips')& (filtered_df.brand != 'sony') & (filtered_df.brand != 'toshiba') & (filtered_df.brand != 'fujifilm') & (filtered_df.brand != 'nikon') & (filtered_df.brand != 'canon') & (filtered_df.brand != 'samsung') & (filtered_df.brand != 'hp')) #filtered on furniture and removed misplaced brands furniture_df = filtered_df.filter((filtered_df.category_code.contains('furniture')) & (filtered_df.brand != 'philips')& (filtered_df.brand != 'lg')& (filtered_df.brand != 'samsung') & (filtered_df.brand != 'hyundai')& (filtered_df.brand != 'sony') & (filtered_df.brand != 'logitech') & (filtered_df.brand != 'microsoft') & (filtered_df.brand != 'toshiba') & (filtered_df.brand != 'fujifilm') & (filtered_df.brand != 'tefal') & (filtered_df.brand != 'apple') & (filtered_df.brand != 'nikon') & (filtered_df.brand != 'dell') & (filtered_df.brand != 'nike') & (filtered_df.brand != 'newsuntoys') & (filtered_df.brand != 'canon') & (filtered_df.brand != 'puma') & (filtered_df.brand != 'hp') ) #filtered on kids and removed misplaced brands kids_df = filtered_df.filter((filtered_df.category_code.contains('kids')) & (filtered_df.brand != 'tefal')& (filtered_df.brand != 'puma') & (filtered_df.brand != 'hp') & (filtered_df.brand != 'apple') & (filtered_df.brand != 'nike') & (filtered_df.brand != 'canon') & (filtered_df.brand != 'lg') & (filtered_df.brand != 'sony') & (filtered_df.brand != 'samsung')) #filtered on sports and removed misplaced brands sports_df = filtered_df.filter((filtered_df.category_code.contains('sport')) & (filtered_df.brand != 'philips')& (filtered_df.brand != 'hp') & (filtered_df.brand != 'canon') & (filtered_df.brand != 'logitech') & (filtered_df.brand != 'microsoft') & (filtered_df.brand != 'apple') & (filtered_df.brand != 'jbl') & (filtered_df.brand != 'nikon') & (filtered_df.brand != 'mersedes-benz') & (filtered_df.brand != 'toyland') & (filtered_df.brand != 'lg') & (filtered_df.brand != 'samsung') & (filtered_df.brand != 'ikea') & (filtered_df.brand != 'logitech') & (filtered_df.brand != 'bmw') & (filtered_df.brand != 'jeep') & (filtered_df.brand != 'sony') & (filtered_df.brand != 'asus') & (filtered_df.brand != 'hyundai')) #filtered on country_yard and removed misplaced brands country_df = filtered_df.filter((filtered_df.category_code.contains('country_yard')) & (filtered_df.brand != 'nike')& (filtered_df.brand != 'samsung') & (filtered_df.brand != 'sony') & (filtered_df.brand != 'vans') & (filtered_df.brand != 'hyundai') & (filtered_df.brand != 'puma') & (filtered_df.brand != 'columbia') & (filtered_df.brand != 'adidas')& (filtered_df.brand != 'apple')) #filtered on stationary and removed misplaced brands stationery_df = filtered_df.filter((filtered_df.category_code.contains('stationery')) & (filtered_df.brand !='hyundai') & (filtered_df.brand !='puma') & (filtered_df.brand !='nike') & (filtered_df.brand !='jeep') & (filtered_df.brand !='jaguar') & (filtered_df.brand !='toyota') & (filtered_df.brand !='shoesrepublic') & (filtered_df.brand !='tefal') & (filtered_df.brand !='fila')) #filtered on accessories and removed misplaced brands accessories_df = filtered_df.filter((filtered_df.category_code == 
'accessories.umbrella') |(filtered_df.category_code == 'accessories.wallet') |(filtered_df.category_code == 'accessories.bag') &(filtered_df.brand != 'hyundai')) medicine_df = filtered_df.filter((filtered_df.category_code.contains('medicine')) & (filtered_df.brand != 'ikea')) # combine all the separated DataFrames into one to load into a table. df = medicine_df.union(accessories_df) df = df.union(stationery_df) df = df.union(country_df) df = df.union(sports_df) df = df.union(kids_df) df = df.union(furniture_df) df = df.union(auto_df) df = df.union(appliance_df) df = df.union(computer_df) df = df.union(apparel_df) df = df.union(electronic_df) df = df.union(construction_df) # load the cleaned data to a spark database df.write.saveAsTable("retailaidb.cleaned_dataset") ###Output _____no_output_____
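The per-category cells above repeat long chains of (filtered_df.brand != '...') comparisons. As an aside, the same exclusions can be expressed more compactly with a lookup of misplaced brands per category and ~Column.isin(...); the dictionary below is only a hypothetical illustration, not the full brand lists used in this notebook:

```python
from functools import reduce

# Hypothetical example mapping: category keyword -> brands to exclude from it
misplaced_brands = {
    "construction": ["apple", "philips", "samsung", "nike"],
    "electronics": ["houseofseasons", "jaguar", "tefal"],
}

parts = []
for keyword, brands in misplaced_brands.items():
    parts.append(
        filtered_df.filter(
            filtered_df.category_code.contains(keyword) & ~filtered_df.brand.isin(brands)
        )
    )

# Union the per-category DataFrames back together, as the notebook does explicitly
df_clean = reduce(lambda left, right: left.union(right), parts)
```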
Control_Ops.ipynb
###Markdown Control Ops TutorialIn this tutorial we show how to use control flow operators in Caffe2 and give some details about their underlying implementations. Conditional Execution Using NetBuilderLet's start with conditional operator. We will demonstrate how to use it in two Caffe2 APIs used for building nets: `NetBuilder` and `brew`. ###Code from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals from caffe2.python import workspace from caffe2.python.core import Plan, to_execution_step, Net from caffe2.python.net_builder import ops, NetBuilder ###Output WARNING:root:This caffe2 python run does not have GPU support. Will run in CPU only mode. WARNING:root:Debug message: No module named caffe2_pybind11_state_gpu ###Markdown In the first example, we define several blobs and then use the 'If' operator to set the value of one of them conditionally depending on values of other blobs.The pseudocode for the conditional examples we will implement is as follows: if (x > 0): y = 1 else: y = 0 ###Code with NetBuilder() as nb: # Define our constants ops.Const(0.0, blob_out="zero") ops.Const(1.0, blob_out="one") ops.Const(0.5, blob_out="x") ops.Const(0.0, blob_out="y") # Define our conditional sequence with ops.IfNet(ops.GT(["x", "zero"])): ops.Copy("one", "y") with ops.Else(): ops.Copy("zero", "y") ###Output _____no_output_____ ###Markdown Note the usage of `NetBuilder`'s `ops.IfNet` and `ops.Else` calls: `ops.IfNet` accepts a blob reference or blob name as an input, it expects an input blob to have a scalar value convertible to bool. Note that the optional `ops.Else` is at the same level as `ops.IfNet` and immediately follows the corresponding `ops.IfNet`. Let's execute the resulting net (execution step) and check the values of the blobs.Note that since x = 0.5, which is indeed greater than 0, we should expect y = 1 after execution. ###Code # Initialize a Plan plan = Plan('if_net_test') # Add the NetBuilder definition above to the Plan plan.AddStep(to_execution_step(nb)) # Initialize workspace for blobs ws = workspace.C.Workspace() # Run the Plan ws.run(plan) # Fetch some blobs and print print('x = ', ws.blobs["x"].fetch()) print('y = ', ws.blobs["y"].fetch()) ###Output x = 0.5 y = 1.0 ###Markdown Before going further, it's important to understand the semantics of execution blocks ('then' and 'else' branches in the example above), i.e. handling of reads and writes into global (defined outside of the block) and local (defined inside the block) blobs.`NetBuilder` uses the following set of rules: - In `NetBuilder`'s syntax, a blob's declaration and definition occur at the same time - we define an operator which writes its output into a blob with a given name. - `NetBuilder` keeps track of all operators seen before the current execution point in the same block and up the stack in parent blocks. - If an operator writes into a previously unseen blob, it creates a **local** blob that is visible only within the current block and the subsequent children blocks. Local blobs created in a given block are effectively deleted when we exit the block. Any write into previously defined (in the same block or in the parent blocks) blob updates an originally created blob and does not result in the redefinition of a blob. - An operator's input blobs have to be defined earlier in the same block or in the stack of parent blocks. 
As a result, in order to see the values computed by a block after its execution, the blobs of interest have to be defined outside of the block. This rule effectively forces visible blobs to always be correctly initialized.To illustrate concepts of block semantics and provide a more sophisticated example, let's consider the following net: ###Code with NetBuilder() as nb: # Define our constants ops.Const(0.0, blob_out="zero") ops.Const(1.0, blob_out="one") ops.Const(2.0, blob_out="two") ops.Const(1.5, blob_out="x") ops.Const(0.0, blob_out="y") # Define our conditional sequence with ops.IfNet(ops.GT(["x", "zero"])): ops.Copy("x", "local_blob") # create local_blob using Copy -- this is not visible outside of this block with ops.IfNet(ops.LE(["local_blob", "one"])): ops.Copy("one", "y") with ops.Else(): ops.Copy("two", "y") with ops.Else(): ops.Copy("zero", "y") # Note that using local_blob would fail here because it is outside of the block in # which it was created ###Output _____no_output_____ ###Markdown When we execute this, we expect that y == 2.0, and that `local_blob` will not exist in the workspace. ###Code # Initialize a Plan plan = Plan('if_net_test_2') # Add the NetBuilder definition above to the Plan plan.AddStep(to_execution_step(nb)) # Initialize workspace for blobs ws = workspace.C.Workspace() # Run the Plan ws.run(plan) # Fetch some blobs and print print('x = ', ws.blobs["x"].fetch()) print('y = ', ws.blobs["y"].fetch()) # Assert that the local_blob does not exist in the workspace # It should have been destroyed because of its locality assert "local_blob" not in ws.blobs ###Output x = 1.5 y = 2.0 ###Markdown Conditional Execution Using Brew ModuleBrew is another Caffe2 interface used to construct nets. Unlike `NetBuilder`, `brew` does not track the hierarchy of blocks and, as a result, we need to specify which blobs are considered local and which blobs are considered global when passing 'then' and 'else' models to an API call.Let's start by importing the necessary items for the `brew` API. ###Code from caffe2.python import brew from caffe2.python.workspace import FeedBlob, RunNetOnce, FetchBlob from caffe2.python.model_helper import ModelHelper ###Output _____no_output_____ ###Markdown We will use the Caffe2's `ModelHelper` class to define and represent our models, as well as contain the parameter information about the models. Note that a `ModelHelper` object has two underlying nets: (1) param_init_net: Responsible for parameter initialization (2) net: Contains the main network definition, i.e. the graph of operators that the data flows throughNote that `ModelHelper` is similar to `NetBuilder` in that we define the operator graph first, and actually run later. With that said, let's define some models to act as conditional elements, and use the `brew` module to form the conditional statement that we want to run. We will construct the same statement used in the first example above. 
###Code # Initialize model, which will represent our main conditional model for this test model = ModelHelper(name="test_if_model") # Add variables and constants to our conditional model; notice how we add them to the param_init_net model.param_init_net.ConstantFill([], ["zero"], shape=[1], value=0.0) model.param_init_net.ConstantFill([], ["one"], shape=[1], value=1.0) model.param_init_net.ConstantFill([], ["x"], shape=[1], value=0.5) model.param_init_net.ConstantFill([], ["y"], shape=[1], value=0.0) # Add Greater Than (GT) conditional operator to our model # which checks if "x" > "zero", and outputs the result in the "cond" blob model.param_init_net.GT(["x", "zero"], "cond") # Initialize a then_model, and add an operator which we will set to be # executed if the conditional model returns True then_model = ModelHelper(name="then_test_model") then_model.net.Copy("one", "y") # Initialize an else_model, and add an operator which we will set to be # executed if the conditional model returns False else_model = ModelHelper(name="else_test_model") else_model.net.Copy("zero", "y") # Use the brew module's handy cond operator to facilitate the construction of the operator graph brew.cond( model=model, # main conditional model cond_blob="cond", # blob with condition value external_blobs=["x", "y", "zero", "one"], # data blobs used in execution of conditional then_model=then_model, # pass then_model else_model=else_model) # pass else_model ###Output _____no_output_____ ###Markdown Before we run the model, let's use Caffe2's graph visualization tool `net_drawer` to check if the operator graph makes sense. ###Code from caffe2.python import net_drawer from IPython import display graph = net_drawer.GetPydotGraph(model.net, rankdir="LR") display.Image(graph.create_png(), width=800) ###Output _____no_output_____ ###Markdown Now let's run the net! When using `ModelHelper`, we must first run the `param_init_net` to initialize paramaters, then we execute the main `net`. ###Code # Run param_init_net once RunNetOnce(model.param_init_net) # Run main net (once in this case) RunNetOnce(model.net) # Fetch and examine some blobs print("x = ", FetchBlob("x")) print("y = ", FetchBlob("y")) ###Output x = [0.5] y = [1.] ###Markdown Loops Using NetBuilderAnother important control flow operator is 'While', which allows repeated execution of a fragment of net. Let's consider `NetBuilder`'s version first.The pseudocode for this example is: i = 0 y = 0 while (i <= 7): y = i + y i += 1 ###Code with NetBuilder() as nb: # Define our variables ops.Const(0, blob_out="i") ops.Const(0, blob_out="y") # Define loop code and conditions with ops.WhileNet(): with ops.Condition(): ops.Add(["i", ops.Const(1)], ["i"]) ops.LE(["i", ops.Const(7)]) ops.Add(["i", "y"], ["y"]) ###Output _____no_output_____ ###Markdown As with the 'If' operator, standard block semantic rules apply. Note the usage of `ops.Condition` clause that should immediately follow `ops.WhileNet` and contains code that is executed before each iteration. 
The last operator in the condition clause is expected to have a single boolean output that determines whether another iteration is executed. In the example above we increment the counter ("i") before each iteration and accumulate its values in the "y" blob; the loop's body is executed 7 times, giving the resulting blob values: ###Code # Initialize a Plan plan = Plan('while_net_test') # Add the NetBuilder definition above to the Plan plan.AddStep(to_execution_step(nb)) # Initialize workspace for blobs ws = workspace.C.Workspace() # Run the Plan ws.run(plan) # Fetch blobs and print print("i = ", ws.blobs["i"].fetch()) print("y = ", ws.blobs["y"].fetch()) ###Output i = 8 y = 28 ###Markdown Loops Using Brew Module Now let's take a look at how to replicate the loop above using the `ModelHelper`+`brew` interface. ###Code # Initialize model, which will represent our main conditional model for this test model = ModelHelper(name="test_while_model") # Add variables and constants to our model model.param_init_net.ConstantFill([], ["i"], shape=[1], value=0) model.param_init_net.ConstantFill([], ["one"], shape=[1], value=1) model.param_init_net.ConstantFill([], ["seven"], shape=[1], value=7) model.param_init_net.ConstantFill([], ["y"], shape=[1], value=0) # Initialize a loop_model that represents the code to run inside of the loop loop_model = ModelHelper(name="loop_test_model") loop_model.net.Add(["i", "y"], ["y"]) # Initialize cond_model that represents the conditional test that the loop # abides by, as well as the incrementation step cond_model = ModelHelper(name="cond_test_model") cond_model.net.Add(["i", "one"], "i") cond_model.net.LE(["i", "seven"], "cond") # Use brew's loop operator to facilitate the creation of the loop's operator graph brew.loop( model=model, # main model that contains data cond_blob="cond", # explicitly specifying condition blob external_blobs=["cond", "i", "one", "seven", "y"], # data blobs used in execution of the loop loop_model=loop_model, # pass loop_model cond_model=cond_model # pass condition model (optional) ) ###Output _____no_output_____ ###Markdown Once again, let's visualize the net using the `net_drawer`. ###Code graph = net_drawer.GetPydotGraph(model.net, rankdir="LR") display.Image(graph.create_png(), width=800) ###Output _____no_output_____ ###Markdown Finally, we'll run the `param_init_net` and `net` and print our final blob values. ###Code RunNetOnce(model.param_init_net) RunNetOnce(model.net) print("i = ", FetchBlob("i")) print("y = ", FetchBlob("y")) ###Output i = [8] y = [28] ###Markdown Backpropagation Both 'If' and 'While' operators support backpropagation. To illustrate how backpropagation with control ops works, let's consider the following examples in which we construct the operator graph using `NetBuilder` and calculate gradients using the `AddGradientOperators` function.
The first example shows the following conditional statement: x = 1-D numpy float array y = 4 z = 0 if (x > 0): z = y^2 else: z = y^3 ###Code import numpy as np # Feed blob called x, which is simply a 1-D numpy array [0.5] FeedBlob("x", np.array(0.5, dtype='float32')) # _use_control_ops=True forces NetBuilder to output single net as a result # x is external for NetBuilder, so we let nb know about it through initial_scope param with NetBuilder(_use_control_ops=True, initial_scope=["x"]) as nb: ops.Const(0.0, blob_out="zero") ops.Const(1.0, blob_out="one") ops.Const(4.0, blob_out="y") ops.Const(0.0, blob_out="z") with ops.IfNet(ops.GT(["x", "zero"])): ops.Pow("y", "z", exponent=2.0) with ops.Else(): ops.Pow("y", "z", exponent=3.0) # we should get a single net as output assert len(nb.get()) == 1, "Expected a single net produced" net = nb.get()[0] # add gradient operators for 'z' blob grad_map = net.AddGradientOperators(["z"]) ###Output _____no_output_____ ###Markdown In this case$$x = 0.5$$$$z = y^2 = 4^2 = 16$$We will fetch the blob `y_grad`, which was generated by the `AddGradientOperators` call above. This blob contains the gradient of blob z with respect to y. According to basic calculus:$$y\_grad = \frac{\partial{z}}{\partial{y}}y^2 = 2y = 2(4) = 8$$ ###Code # Run the net RunNetOnce(net) # Fetch blobs and print print("x = ", FetchBlob("x")) print("y = ", FetchBlob("y")) print("z = ", FetchBlob("z")) print("y_grad = ", FetchBlob("y_grad")) ###Output x = 0.5 y = 4.0 z = 16.0 y_grad = 8.0 ###Markdown Now, let's change value of blob "x" to -0.5 and rerun net: ###Code # To re-run net with different input, simply feed new blob FeedBlob("x", np.array(-0.5, dtype='float32')) RunNetOnce(net) print("x = ", FetchBlob("x")) print("y = ", FetchBlob("y")) print("z = ", FetchBlob("z")) print("y_grad = ", FetchBlob("y_grad")) ###Output x = -0.5 y = 4.0 z = 64.0 y_grad = 48.0 ###Markdown The next and final example illustrates backpropagation on the following loop: x = 2 y = 3 z = 2 i = 0 while (i <= 2): x = x^2 if (i < 2): y = y^2 else: z = z^3 i += 1 s = x + y + z Note that this code essentially computes the sum of x^4 (by squaring x twice), y^2, and z^3. ###Code with NetBuilder(_use_control_ops=True) as nb: # Define variables and constants ops.Copy(ops.Const(0), "i") ops.Copy(ops.Const(1), "one") ops.Copy(ops.Const(2), "two") ops.Copy(ops.Const(2.0), "x") ops.Copy(ops.Const(3.0), "y") ops.Copy(ops.Const(2.0), "z") # Define loop statement # Computes x^4, y^2, z^3 with ops.WhileNet(): with ops.Condition(): ops.Add(["i", "one"], "i") ops.LE(["i", "two"]) ops.Pow("x", "x", exponent=2.0) with ops.IfNet(ops.LT(["i", "two"])): ops.Pow("y", "y", exponent=2.0) with ops.Else(): ops.Pow("z", "z", exponent=3.0) # Sum s = x + y + z ops.Add(["x", "y"], "x_plus_y") ops.Add(["x_plus_y", "z"], "s") assert len(nb.get()) == 1, "Expected a single net produced" net = nb.get()[0] # Add gradient operators to output blob 's' grad_map = net.AddGradientOperators(["s"]) workspace.RunNetOnce(net) print("x = ", FetchBlob("x")) print("x_grad = ", FetchBlob("x_grad")) # derivative: 4x^3 print("y = ", FetchBlob("y")) print("y_grad = ", FetchBlob("y_grad")) # derivative: 2y print("z = ", FetchBlob("z")) print("z_grad = ", FetchBlob("z_grad")) # derivative: 3z^2 ###Output x = 16.0 x_grad = 32.0 y = 9.0 y_grad = 6.0 z = 8.0 z_grad = 12.0
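As a quick sanity check on the printed gradients, the same numbers can be reproduced with plain Python, independently of Caffe2. The sketch below just evaluates s = x0^4 + y0^2 + z0^3 and its analytic derivatives at the initial values x0 = 2, y0 = 3, z0 = 2 used above:

```python
x0, y0, z0 = 2.0, 3.0, 2.0

# Forward values after the loop: x is squared twice, y squared once, z cubed once
x_final, y_final, z_final = x0 ** 4, y0 ** 2, z0 ** 3   # 16.0, 9.0, 8.0

# Analytic gradients of s = x0**4 + y0**2 + z0**3 with respect to the inputs
x_grad = 4 * x0 ** 3   # 32.0
y_grad = 2 * y0        # 6.0
z_grad = 3 * z0 ** 2   # 12.0

print(x_final, x_grad, y_final, y_grad, z_final, z_grad)
```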
dataset/dataset/KNN/KNN-Predict Diabetes_final.ipynb
###Markdown KNN - Predict whether a person will have diabetes or not ###Code import pandas as pd import numpy as np from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler from sklearn.neighbors import KNeighborsClassifier from sklearn.metrics import confusion_matrix from sklearn.metrics import f1_score from sklearn.metrics import accuracy_score dataset = pd.read_csv('../Downloads/diabetes.csv') len(dataset) dataset.head() # Replace zeroes zero_not_accepted = ['Glucose', 'BloodPressure', 'SkinThickness', 'BMI', 'Insulin'] for column in zero_not_accepted: dataset[column] = dataset[column].replace(0, np.NaN) mean = int(dataset[column].mean(skipna=True)) dataset[column] = dataset[column].replace(np.NaN, mean) # split dataset X = dataset.iloc[:, 0:8] y = dataset.iloc[:, 8] X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0, test_size=0.2) print(len(X_train)) print(len(y_train)) print(len(X_test)) print(len(y_test)) #Feature scaling sc_X = StandardScaler() X_train = sc_X.fit_transform(X_train) X_test = sc_X.transform(X_test) # Define the model: Init K-NN classifier = KNeighborsClassifier(n_neighbors=11, p=2,metric='euclidean') # Fit Model classifier.fit(X_train, y_train) # Predict the test set results y_pred = classifier.predict(X_test) y_pred # Evaluate Model cm = confusion_matrix(y_test, y_pred) print (cm) print(f1_score(y_test, y_pred)) print(accuracy_score(y_test, y_pred)) ###Output 0.8181818181818182
Nocoes_de_Probabilidade_e_Estatistica.ipynb
###Markdown **Title:** Noções de Probabilidade e Estatística (Notions of Probability and Statistics) - 7th Edition - 2015 **Authors:** Marcos Nascimento Magalhães and Antônio Carlos Pedroso de Lima **Publisher:** Editora da Universidade de São Paulo Introduction to Exploratory Data Analysis In the first chapter the author briefly covers some applications of statistics and presents a few everyday examples of where it is used. Broadly speaking, Statistics is divided into 3 large areas, namely:* Descriptive Statistics* Probability* Statistical Inference **Descriptive Statistics** is, in a sense, a set of techniques intended to describe and summarize data so that we can draw conclusions about the characteristics of interest. **Probability** can be described as the mathematical theory used to study the uncertainty arising from phenomena of a random nature. **Statistical Inference** is the area that studies the techniques that make it possible to extrapolate information and conclusions obtained from a subset of values. The set of data that contains the characteristic of interest is called the **Population**. The population contains all the individuals of a given group. Examples of populations are therefore:1. All the inhabitants of a given city/state/country...;2. All the tables produced by a furniture factory;3. All the blood in a person's body... However, sometimes we cannot reach every individual in a population, and it becomes necessary to use a small portion of it to collect the data. This portion is called a **Sample**. Defining the sampled portion is called sampling, and it tries to provide a subset of values that is as similar as possible to the population from which it is drawn. ###Code Image(filename='images/populacao_amostra.jpg',width = 300, height = 150) ###Output _____no_output_____
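To make the population/sample distinction concrete, here is a small illustrative sketch (the numbers are entirely made up and are not from the book): simulate a population, draw a simple random sample from it, and compare the two means.

```python
import numpy as np

rng = np.random.default_rng(42)

# Hypothetical population: heights (cm) of 100,000 inhabitants of a city
population = rng.normal(loc=170, scale=8, size=100_000)

# Simple random sample of 500 inhabitants, drawn without replacement
sample = rng.choice(population, size=500, replace=False)

print(f"population mean: {population.mean():.2f} cm")
print(f"sample mean:     {sample.mean():.2f} cm")
```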
master/_downloads/be71c9935575a01822eb555cbfbbb1a1/plot_debiased_barycenter.ipynb
###Markdown Debiased Sinkhorn barycenter demoThis example illustrates the computation of the debiased Sinkhorn barycenteras proposed in [37]_... [37] Janati, H., Cuturi, M., Gramfort, A. Proceedings of the 37th International Conference on Machine Learning, PMLR 119:4692-4701, 2020 ###Code # Author: Hicham Janati <[email protected]> # # License: MIT License # sphinx_gallery_thumbnail_number = 3 import os from pathlib import Path import numpy as np import matplotlib.pyplot as plt import ot from ot.bregman import (barycenter, barycenter_debiased, convolutional_barycenter2d, convolutional_barycenter2d_debiased) ###Output _____no_output_____ ###Markdown Debiased barycenter of 1D Gaussians ###Code n = 100 # nb bins # bin positions x = np.arange(n, dtype=np.float64) # Gaussian distributions a1 = ot.datasets.make_1D_gauss(n, m=20, s=5) # m= mean, s= std a2 = ot.datasets.make_1D_gauss(n, m=60, s=8) # creating matrix A containing all distributions A = np.vstack((a1, a2)).T n_distributions = A.shape[1] # loss matrix + normalization M = ot.utils.dist0(n) M /= M.max() alpha = 0.2 # 0<=alpha<=1 weights = np.array([1 - alpha, alpha]) epsilons = [5e-3, 1e-2, 5e-2] bars = [barycenter(A, M, reg, weights) for reg in epsilons] bars_debiased = [barycenter_debiased(A, M, reg, weights) for reg in epsilons] labels = ["Sinkhorn barycenter", "Debiased barycenter"] colors = ["indianred", "gold"] f, axes = plt.subplots(1, len(epsilons), tight_layout=True, sharey=True, figsize=(12, 4), num=1) for ax, eps, bar, bar_debiased in zip(axes, epsilons, bars, bars_debiased): ax.plot(A[:, 0], color="k", ls="--", label="Input data", alpha=0.3) ax.plot(A[:, 1], color="k", ls="--", alpha=0.3) for data, label, color in zip([bar, bar_debiased], labels, colors): ax.plot(data, color=color, label=label, lw=2) ax.set_title(r"$\varepsilon = %.3f$" % eps) plt.legend() plt.show() ###Output _____no_output_____ ###Markdown Debiased barycenter of 2D images ###Code this_file = os.path.realpath('__file__') data_path = os.path.join(Path(this_file).parent.parent.parent, 'data') f1 = 1 - plt.imread(os.path.join(data_path, 'heart.png'))[:, :, 2] f2 = 1 - plt.imread(os.path.join(data_path, 'duck.png'))[:, :, 2] A = np.asarray([f1, f2]) + 1e-2 A /= A.sum(axis=(1, 2))[:, None, None] ###Output _____no_output_____ ###Markdown Display the input images ###Code fig, axes = plt.subplots(1, 2, figsize=(7, 4), num=2) for ax, img in zip(axes, A): ax.imshow(img, cmap="Greys") ax.axis("off") fig.tight_layout() plt.show() ###Output _____no_output_____ ###Markdown Barycenter computation and visualization ###Code bars_sinkhorn, bars_debiased = [], [] epsilons = [5e-3, 7e-3, 1e-2] for eps in epsilons: bar = convolutional_barycenter2d(A, eps) bar_debiased, log = convolutional_barycenter2d_debiased(A, eps, log=True) bars_sinkhorn.append(bar) bars_debiased.append(bar_debiased) titles = ["Sinkhorn", "Debiased"] all_bars = [bars_sinkhorn, bars_debiased] fig, axes = plt.subplots(2, 3, figsize=(8, 6), num=3) for jj, (method, ax_row, bars) in enumerate(zip(titles, axes, all_bars)): for ii, (ax, img, eps) in enumerate(zip(ax_row, bars, epsilons)): ax.imshow(img, cmap="Greys") if jj == 0: ax.set_title(r"$\varepsilon = %.3f$" % eps, fontsize=13) ax.set_xticks([]) ax.set_yticks([]) ax.spines['top'].set_visible(False) ax.spines['right'].set_visible(False) ax.spines['bottom'].set_visible(False) ax.spines['left'].set_visible(False) if ii == 0: ax.set_ylabel(method, fontsize=15) fig.tight_layout() plt.show() ###Output _____no_output_____
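A compact way to see what the debiasing buys, using only the helpers already imported above (this is a sketch, not part of the original example): when every input measure is identical, the barycenter should coincide with that common measure, and the debiased solver stays much closer to it than the plain entropic one, which blurs it.

```python
import numpy as np
import ot
from ot.bregman import barycenter, barycenter_debiased

n = 100
a = ot.datasets.make_1D_gauss(n, m=50, s=5)
A = np.vstack((a, a)).T        # two identical input histograms

M = ot.utils.dist0(n)
M /= M.max()

bar = barycenter(A, M, 1e-2)                   # entropic Sinkhorn barycenter
bar_debiased = barycenter_debiased(A, M, 1e-2) # debiased Sinkhorn barycenter

print("entropic L1 error:", np.abs(bar - a).sum())
print("debiased L1 error:", np.abs(bar_debiased - a).sum())
```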
Sk_Regresion_Lineal_Simple.ipynb
###Markdown ###Code !python --version import sklearn sklearn.__version__ ###Output _____no_output_____ ###Markdown Simple Linear Regression: here our model only has to learn two constants, b_0 = the intercept and b_1 = the slope. 1) Training our model $y= b_0 + b_1x$ 2) Importing our file, which is stored in Google Drive ###Code from google.colab import drive drive.mount('/content/drive') ###Output Mounted at /content/drive ###Markdown 3) Importing libraries into the project: numpy, pandas, matplotlib. ###Code import pandas as pd import numpy as np import matplotlib.pyplot as plt from sklearn.linear_model import LinearRegression ###Output _____no_output_____ ###Markdown 4) Loading our dataset, stored as an Excel file in Drive. ###Code data = pd.read_excel("/content/drive/MyDrive/Colab_Notebooks/DataSets/ReduccionSolidosDemandaOxigeno.xlsx") ###Output _____no_output_____ ###Markdown 5) Inspecting the contents of the data variable. ###Code data #Viewing the full contents of the data variable. ###Output _____no_output_____ ###Markdown 6) The "Reduccion de solidos" column of data will be stored in a variable X ###Code x = data[["Reduccion de solidos"]] x #Viewing the data in X ###Output _____no_output_____ ###Markdown 7) The "Reduccion de la demanda de oxigeno" column of data will be stored in a variable Y ###Code y = data[["Reduccion de la demanda de oxigeno"]] y #Viewing the data in Y ###Output _____no_output_____ ###Markdown 8) Creating the scatter plot of the variables in our dataset. ###Code plt.scatter(x,y)#Creating the plot with the data variables X,Y plt.xlabel("Reduccion de solidos") #Name of the X variable plt.ylabel("Reduccion de la demanda de oxigeno") #Name of the Y variable plt.grid()#Setting the grid style plt.show()#Showing the plot ###Output _____no_output_____ ###Markdown 9) Converting our dataframe to a numpy data matrix ###Code matriz = data.to_numpy()#Creating the data matrix with numpy matriz #Inspecting our data matrix. ###Output _____no_output_____ ###Markdown 10) Computing the value of n and the sums from the data matrix. ###Code n = len(matriz) #Getting the value of n, which is the number of rows in the matrix sumatoria_x = np.sum(matriz[:,0]) sumatoria_y = np.sum(matriz[:,1]) sumatoria_producto = np.sum(matriz[:,0]*matriz[:,1]) sumatoria_cuadrado_x = np.sum(matriz[:,0]*matriz[:,0]) print("n:", n) print("sumatoria x:", sumatoria_x) print("sumatoria y:", sumatoria_y) print("sumatoria xy:", sumatoria_producto) print("sumatoria x^2:", sumatoria_cuadrado_x) ###Output n: 33 sumatoria x: 1104 sumatoria y: 1124 sumatoria xy: 41355 sumatoria x^2: 41086 ###Markdown 11) Substituting the values obtained from the matrix into the least-squares equations. ###Code b1 = (n*sumatoria_producto-sumatoria_x*sumatoria_y) / (n*sumatoria_cuadrado_x-sumatoria_x*sumatoria_x) b0 = (sumatoria_y-b1*sumatoria_x)/n print("b1:", b1) print("b0:", b0) ###Output b1: 0.9036432105793231 b0: 3.829633197588709 ###Markdown 12) Creating the model in scikit-learn ###Code clf = LinearRegression() #Creating our regression model with scikit-learn. ###Output _____no_output_____ ###Markdown 13) Training the created model. ###Code clf.fit(x,y) # Training our model with the X and Y data ###Output _____no_output_____ ###Markdown 14) Retrieving the coefficient learned by our model.
###Code clf.coef_ #This gives us the value of b1 ###Output _____no_output_____ ###Markdown 15) Getting the value of the intercept, b0 ###Code clf.intercept_ #This gives us the value of b0, which is the intercept ###Output _____no_output_____ ###Markdown 16) Making a prediction for a value from the dataset ###Code clf.predict([[7]]) #We make the prediction for a value taken from the dataset, in this case 7, to see nearby values. clf.predict([[100]]) #We make the prediction for a value taken from the dataset, in this case 100, to see nearby values. ###Output /usr/local/lib/python3.7/dist-packages/sklearn/base.py:451: UserWarning: X does not have valid feature names, but LinearRegression was fitted with feature names "X does not have valid feature names, but" ###Markdown 17) Plotting the values so that the linear regression can be seen. ###Code plt.plot(x,y) #Plotting the values of x,y plt.plot(x, clf.predict(x))#We predict over the whole x matrix plt.title("Regresion Lineal Simple") plt.xlabel("Reduccion de solidos") #Name of the X variable plt.ylabel("Reduccion de la demanda de oxigeno") #Name of the Y variable plt.legend(["y", "Predicciones"]) #y are the target values and Predicciones are the predictions plt.grid()#Setting the grid style plt.show()#Showing the plot ###Output _____no_output_____
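As a follow-up to steps 11–17 (a sketch that assumes x, y, b0, b1 and clf from the cells above are still in scope): the hand-derived coefficients can be compared against scikit-learn's, and the quality of the fit summarized with the coefficient of determination R².

```python
from sklearn.metrics import r2_score

# scikit-learn's slope/intercept should match the least-squares formulas from step 11
print("manual : b1 =", round(b1, 4), " b0 =", round(b0, 4))
print("sklearn: b1 =", round(float(clf.coef_[0][0]), 4), " b0 =", round(float(clf.intercept_[0]), 4))

# R^2 of the fitted line over the training data
print("R^2 =", round(r2_score(y, clf.predict(x)), 4))
```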
DB_OpenData/Explore_With_py2neo_NetworkX.ipynb
###Markdown Explore DB-OpenData with py2neo and NetworkX In this notebook we explore the carsharing data from Deutsche Bahn, which has been migrated to a Neo4J graph database. For this we use two libraries: [py2neo](http://py2neo.org/v3/index.html) and [NetworkX](https://networkx.github.io). ###Code seperatingLine = "\n########################################################################################################\n" ###Output _____no_output_____ ###Markdown Access the object information over the py2neo API A single record in a cursor, returned as the result of a Cypher query execution, contains the information about a single node/relationship (or a list of them), depending on the type of the query's result objects. For example, take the following query:```MATCH (v:VEHICLE:MITSUBISHI) RETURN *```In this case the result data (a Cursor) is a set of node objects. Each row in this cursor is a record. At the top level a record contains a key/value map whose key carries the same variable name that was used for the object in the query (here "v" as the variable name of a vehicle object). The value of this item represents the node information and is of type Node:```('v': (ac742de:AUTO:MITSUBISHI:STROM:VEHICLE {fuelType:"Strom",kw:35,modelDetails:"ELEKTRO 35kW Automatik 4-Sitzer",modelName:"i-Miev",ownershipType:"Langzeitmiete",registrationPlate:"F-R 8009",vehicleID:148221,vin:"JMBLDHA3WBU000341"}))```By accessing the value in the top-level map, we obtain an object of type Node:```pythonsub = record["v"]``````(ac742de:AUTO:MITSUBISHI:STROM:VEHICLE {fuelType:"Strom",kw:35,modelDetails:"ELEKTRO 35kW Automatik 4-Sitzer",modelName:"i-Miev",ownershipType:"Langzeitmiete",registrationPlate:"F-R 8009",vehicleID:148221,vin:"JMBLDHA3WBU000341"})```Because of this object type we can also access the labels of the node:```pythonrecord["v"].labels()``````SetView({'AUTO', 'MITSUBISHI', 'VEHICLE', 'STROM'})```By converting the node to a dictionary it is possible to select the attributes of the node:```pythondict(record["v"])``````{'ownershipType': 'Langzeitmiete', 'modelName': 'i-Miev', 'modelDetails': 'ELEKTRO 35kW Automatik 4-Sitzer', 'fuelType': 'Strom', 'vin': 'JMBLDHA3WBU000341', 'registrationPlate': 'F-R 8009', 'vehicleID': 148221, 'kw': 35}```Alternatively, we can access the attributes in the following way:```pythonprint(record["v"]["modelName"])``````i-Miev```For the query `MATCH (v:VEHICLE:MITSUBISHI)-[r:WAS_BOOKED_IN]->(s:STATION) RETURN v, r, s` three objects are returned, one of which is of type Relationship (the variable r). For a relationship it is possible to access the following information over the API:- Type of relationship `relationship.type()` `-> WAS_BOOKED_IN` - All nodes `relationship.nodes()` ``` ((a34e56f:AUTO:MITSUBISHI:STROM:VEHICLE {bordComputerType:"Invers BCSA 2006 GPRS",fuelType:"Strom",kw:35,modelDetails:"ELEKTRO 35kW NAVI Automatik
4-Sitzer",modelName:"i-Miev",ownershipType:"Langzeitmiete",registrationPlate:"F-R 8011",vehicleID:148261,vin:"JMBLDHA3WBU000344"}) ``` `relationship.end_node()` ```(f55f604:INACTIVE:STATION:STATIONBASED {city:"Stuttgart",code:"STG",latitude:48.780357360839844,longtitude:9.186469078063965,name:"Parkgarage Staatsgalerie",poiAirport:"Nein",poiLongDistanceTrains:"Nein",poiSuburbanTrains:"Nein",poiUnderground:"Nein",rentalZoneID:401727,type:"stationbased"}) ``` - Attributes of the relationship `relationship[times]` `28` ###Code def printOutNodeInformations(node, singleAttributeName): print('keys of the node') print(node.keys()) print('labels of the node') print(node.labels()) print('single attribute access') print(node[singleAttributeName]) def printOutRelationshipInformations(relationship, singleAttributeName): print('type of the relationship') print(relationship.type()) print('single attribute access') print(relationship[singleAttributeName]) print('all nodes of relationship') print(relationship.nodes()) print('start node of relationship') print(relationship.start_node()) print('end node of relationship') print(relationship.end_node()) from py2neo import Graph, Path, Subgraph, Node, PropertyDict, Relationship, Walkable, walk graph = Graph("http://neo4j:neo4jj@localhost:7474/db/data") query = """ MATCH (v:VEHICLE:MITSUBISHI) RETURN * """ cursor = graph.run(query) for record in cursor: print('raw view of a record:') print(record) print('value in the records top level map:') print(record["v"]) print('dictionary representation of the node attributes') print(dict(record["v"])) print('dictionary keys (node attribute names)') print('%s Node informations (VEHICLE) %s' %(seperatingLine, seperatingLine)) node = record["v"] printOutNodeInformations(node, "modelName") query = """ MATCH (v:VEHICLE:MITSUBISHI)-[r:WAS_BOOKED_IN]->(s:STATION) RETURN v, r, s """ cursor = graph.run(query) # print(cursor.data()) for record in cursor: print('%s Node informations (VEHICLE) %s' %(seperatingLine, seperatingLine)) vehicle = record["v"] printOutNodeInformations(vehicle, "modelName") print('%s Node informations (STATION) %s' %(seperatingLine, seperatingLine)) station = record["s"] printOutNodeInformations(station, "name") print('%s Relationship informations (WAS_BOOKED_IN) %s' %(seperatingLine, seperatingLine)) station = record["r"] printOutRelationshipInformations(station, "times") ###Output ######################################################################################################## Node informations (VEHICLE) ######################################################################################################## keys of the node dict_keys(['ownershipType', 'modelName', 'modelDetails', 'fuelType', 'vin', 'registrationPlate', 'vehicleID', 'kw']) labels of the node SetView({'AUTO', 'MITSUBISHI', 'VEHICLE', 'STROM'}) single attribute access i-Miev ######################################################################################################## Node informations (STATION) ######################################################################################################## keys of the node dict_keys(['code', 'poiAirport', 'city', 'rentalZoneID', 'poiSuburbanTrains', 'latitude', 'name', 'longtitude', 'type', 'poiLongDistanceTrains', 'poiUnderground']) labels of the node SetView({'ACTIVE', 'STATION', 'STATIONBASED'}) single attribute access Wilhelmstraße-ELEKTRO ######################################################################################################## Relationship informations 
(WAS_BOOKED_IN) ######################################################################################################## type of the relationship WAS_BOOKED_IN single attribute access 791 all nodes of relationship ((ac742de:AUTO:MITSUBISHI:STROM:VEHICLE {fuelType:"Strom",kw:35,modelDetails:"ELEKTRO 35kW Automatik 4-Sitzer",modelName:"i-Miev",ownershipType:"Langzeitmiete",registrationPlate:"F-R 8009",vehicleID:148221,vin:"JMBLDHA3WBU000341"}), (b227752:ACTIVE:STATION:STATIONBASED {city:"Ludwigsburg",code:"WIL-ELEKTRO",latitude:48.8958625793457,longtitude:9.191786766052246,name:"Wilhelmstraße-ELEKTRO",poiAirport:"Nein",poiLongDistanceTrains:"Nein",poiSuburbanTrains:"Nein",poiUnderground:"Nein",rentalZoneID:403352,type:"stationbased"})) start node of relationship (ac742de:AUTO:MITSUBISHI:STROM:VEHICLE {fuelType:"Strom",kw:35,modelDetails:"ELEKTRO 35kW Automatik 4-Sitzer",modelName:"i-Miev",ownershipType:"Langzeitmiete",registrationPlate:"F-R 8009",vehicleID:148221,vin:"JMBLDHA3WBU000341"}) end node of relationship (b227752:ACTIVE:STATION:STATIONBASED {city:"Ludwigsburg",code:"WIL-ELEKTRO",latitude:48.8958625793457,longtitude:9.191786766052246,name:"Wilhelmstraße-ELEKTRO",poiAirport:"Nein",poiLongDistanceTrains:"Nein",poiSuburbanTrains:"Nein",poiUnderground:"Nein",rentalZoneID:403352,type:"stationbased"}) ######################################################################################################## Node informations (VEHICLE) ######################################################################################################## keys of the node dict_keys(['ownershipType', 'modelName', 'modelDetails', 'fuelType', 'vin', 'registrationPlate', 'bordComputerType', 'vehicleID', 'kw']) labels of the node SetView({'AUTO', 'MITSUBISHI', 'VEHICLE', 'STROM'}) single attribute access i-Miev ######################################################################################################## Node informations (STATION) ######################################################################################################## keys of the node dict_keys(['code', 'poiAirport', 'city', 'rentalZoneID', 'poiSuburbanTrains', 'latitude', 'name', 'longtitude', 'type', 'poiLongDistanceTrains', 'poiUnderground']) labels of the node SetView({'STATIONBASED', 'STATION', 'INACTIVE'}) single attribute access Bahnhof-ELEKTRO ######################################################################################################## Relationship informations (WAS_BOOKED_IN) ######################################################################################################## type of the relationship WAS_BOOKED_IN single attribute access 245 all nodes of relationship ((a34e56f:AUTO:MITSUBISHI:STROM:VEHICLE {bordComputerType:"Invers BCSA 2006 GPRS",fuelType:"Strom",kw:35,modelDetails:"ELEKTRO 35kW NAVI Automatik 4-Sitzer",modelName:"i-Miev",ownershipType:"Langzeitmiete",registrationPlate:"F-R 8011",vehicleID:148261,vin:"JMBLDHA3WBU000344"}), (dfa6823:INACTIVE:STATION:STATIONBASED {city:"Ludwigsburg",code:"BF-Elektro",latitude:48.891685485839844,longtitude:9.183795928955078,name:"Bahnhof-ELEKTRO",poiAirport:"Nein",poiLongDistanceTrains:"Nein",poiSuburbanTrains:"Nein",poiUnderground:"Nein",rentalZoneID:404993,type:"stationbased"})) start node of relationship (a34e56f:AUTO:MITSUBISHI:STROM:VEHICLE {bordComputerType:"Invers BCSA 2006 GPRS",fuelType:"Strom",kw:35,modelDetails:"ELEKTRO 35kW NAVI Automatik 4-Sitzer",modelName:"i-Miev",ownershipType:"Langzeitmiete",registrationPlate:"F-R 
8011",vehicleID:148261,vin:"JMBLDHA3WBU000344"}) end node of relationship (dfa6823:INACTIVE:STATION:STATIONBASED {city:"Ludwigsburg",code:"BF-Elektro",latitude:48.891685485839844,longtitude:9.183795928955078,name:"Bahnhof-ELEKTRO",poiAirport:"Nein",poiLongDistanceTrains:"Nein",poiSuburbanTrains:"Nein",poiUnderground:"Nein",rentalZoneID:404993,type:"stationbased"}) ######################################################################################################## Node informations (VEHICLE) ######################################################################################################## keys of the node dict_keys(['ownershipType', 'modelName', 'modelDetails', 'fuelType', 'vin', 'registrationPlate', 'bordComputerType', 'vehicleID', 'kw']) labels of the node SetView({'AUTO', 'MITSUBISHI', 'VEHICLE', 'STROM'}) single attribute access i-Miev ######################################################################################################## Node informations (STATION) ######################################################################################################## keys of the node dict_keys(['code', 'poiAirport', 'city', 'rentalZoneID', 'poiSuburbanTrains', 'latitude', 'name', 'longtitude', 'type', 'poiLongDistanceTrains', 'poiUnderground']) labels of the node SetView({'STATIONBASED', 'STATION', 'INACTIVE'}) single attribute access Parkgarage Staatsgalerie ######################################################################################################## Relationship informations (WAS_BOOKED_IN) ######################################################################################################## type of the relationship WAS_BOOKED_IN single attribute access 28 all nodes of relationship ((a34e56f:AUTO:MITSUBISHI:STROM:VEHICLE {bordComputerType:"Invers BCSA 2006 GPRS",fuelType:"Strom",kw:35,modelDetails:"ELEKTRO 35kW NAVI Automatik 4-Sitzer",modelName:"i-Miev",ownershipType:"Langzeitmiete",registrationPlate:"F-R 8011",vehicleID:148261,vin:"JMBLDHA3WBU000344"}), (f55f604:INACTIVE:STATION:STATIONBASED {city:"Stuttgart",code:"STG",latitude:48.780357360839844,longtitude:9.186469078063965,name:"Parkgarage Staatsgalerie",poiAirport:"Nein",poiLongDistanceTrains:"Nein",poiSuburbanTrains:"Nein",poiUnderground:"Nein",rentalZoneID:401727,type:"stationbased"})) start node of relationship (a34e56f:AUTO:MITSUBISHI:STROM:VEHICLE {bordComputerType:"Invers BCSA 2006 GPRS",fuelType:"Strom",kw:35,modelDetails:"ELEKTRO 35kW NAVI Automatik 4-Sitzer",modelName:"i-Miev",ownershipType:"Langzeitmiete",registrationPlate:"F-R 8011",vehicleID:148261,vin:"JMBLDHA3WBU000344"}) end node of relationship (f55f604:INACTIVE:STATION:STATIONBASED {city:"Stuttgart",code:"STG",latitude:48.780357360839844,longtitude:9.186469078063965,name:"Parkgarage Staatsgalerie",poiAirport:"Nein",poiLongDistanceTrains:"Nein",poiSuburbanTrains:"Nein",poiUnderground:"Nein",rentalZoneID:401727,type:"stationbased"}) ###Markdown Draw informations as graph over NetworkXIn the library networkX is every graph a set of edges, where each one of them connects two nodes. Every edge or node can also have attributes. 
In the following we prepare the data in the dataframe - as result of a trivial query - for this view about the rental-zone/vehicle data:```pythondfn['VEHICLE_ID'] = dfn.apply({'v' : lambda x: x["vehicleID"]})dfn['RENTALZONE_ID'] = dfn.apply({'s' : lambda x: x["rentalZoneID"]})dfn['vModelName'] = dfn.apply({'v' : lambda x: x["modelName"]})dfn['sName'] = dfn.apply({'s' : lambda x: x["name"]})dfn['TIMES'] = dfn.apply({'r' : lambda x: x["times"]})```Alternatively we can also access to the needed informations in this way:```pythondfn["v"]["vehicleID"]dfn["s"]["rentalZoneID"]dfn["r"]["times"]```NetworkX provides an easy way to import the data in a dataframe as edges and nodes:```pythonG2=nx.from_pandas_dataframe(dfn, 'VEHICLE_ID', 'RENTALZONE_ID', ['TIMES'])```The disadvantage of this approach is, that we can't import node attributes over this interface. This will also don't work, if we try to import the nodes over a explicit function, ```pythondef addVRZNodesToGraph(row, graph): graph.add_node(row["RENTALZONE_ID"],code=str(row["s"]["code"])) graph.add_node(row["VEHICLE_ID"],vin=str(row["v"]["vin"])) return graph...```... and import on this basis the data to the graph:```pythonG2=nx.Graph()dfn.apply(addVRZNodesToGraph, axis=1, graph=G2)print (G2.nodes(data=True))G2=nx.from_pandas_dataframe(dfn, 'VEHICLE_ID', 'RENTALZONE_ID', ['TIMES'])print (G.edges(data=True))print (G.nodes(data=True))```If we look at the result of the above code, we find out, that the nodes were overridden within the import operation:``` Node informations in the graph before import [(403352, {'code': 'WIL-ELEKTRO'}), (148221, {'vin': 'JMBLDHA3WBU000341'}), (404993, {'code': 'BF-Elektro'}), (148261, {'vin': 'JMBLDHA3WBU000344'}), (401727, {'code': 'STG'})] Node informations in the graph after import [(148221, {}), (403352, {}), (148261, {}), (404993, {}), (401727, {})] Edge informations in the graph after import [(148221, 403352, {'TIMES': '791'}), (148261, 404993, {'TIMES': '245'}), (148261, 401727, {'TIMES': '28'})]```Because of this side effect we use only the way to add nodes and edges manually to the graph:```pythondef addVRZEdgesToGraph(row, graph, relationshipType): graph.add_edge(row["VEHICLE_ID"], row["RENTALZONE_ID"],{'type': relationshipType, 'times': row["TIMES"]}) return graph...G=nx.Graph()dfn.apply(addVRZNodesToGraph, axis=1, graph=G)dfn.apply(addVRZEdgesToGraph, axis=1, graph=G, relationshipType='WAS_BOOKED_IN')...print('%s Node informations in the graph after import %s' %(seperatingLine, seperatingLine)) print (G.nodes(data=True))print('%s Edge informations in the graph after import %s' %(seperatingLine, seperatingLine)) print (G.edges(data=True))```The output of the code above is:``` Node informations in the graph after manual import [(403352, {'code': 'WIL-ELEKTRO'}), (148221, {'vin': 'JMBLDHA3WBU000341'}), (404993, {'code': 'BF-Elektro'}), (148261, {'vin': 'JMBLDHA3WBU000344'}), (401727, {'code': 'STG'})] Edge informations in the graph after manual import [(403352, 148221, {'type': 'WAS_BOOKED_IN', 'times': '791'}), (404993, 148261, {'type': 'WAS_BOOKED_IN', 'times': '245'}), (148261, 401727, {'type': 'WAS_BOOKED_IN', 'times': '28'})]```In our case we'll draw also labels for the nodes and edges. NetworkX provides in the functionality a way to pass the labels of nodes and edges as parameter. For the node labels most them exist as dict and for edges as simple list.Therefore we prepare the labels on the basis of the existing data as follows.We've two different types of nodes: Vehicle and rental zone. 
For this reason we must collect different informations as values for keys (IDs) from both entities. The id-namespace of both entities aren't overlapping in our case. We bring the IDs of both data in the first step together: ```pythondfnnl = dfn.drop(['r', 's', 'v', 'TIMES', 'vModelName', 'sName'], axis=1).copy(True)dfnnl = dfnnl.reset_index()dfnnl = dfnnl.drop("index", axis=1)dfnnl = dfnnl.stack()dfnnl = dfnnl.reset_index()dfnnl = dfnnl.rename_axis({"level_0": "levelName", "level_1": "columnName", 0: "ID"}, axis="columns")dfnnl["columnName"] = dfnnl["columnName"].astype(str)```In the second step we append a label column to the data frame with the specific label-information for each entity (vehicle or rental zone):```pythondef produceLabelInformation(row, orgData): label = " " if str(row["columnName"]) == 'VEHICLE_ID': label = orgData.loc[(orgData['VEHICLE_ID'] == row["ID"])] .drop_duplicates(subset=["vModelName"],keep="first")["vModelName"].values[0] else: label = orgData.loc[(orgData['RENTALZONE_ID'] == row["ID"])] .drop_duplicates(subset=["sName"],keep="first")["sName"].values[0] return label...dfnnl["LABEL"] = dfnnl.apply(produceLabelInformation, axis=1, orgData=dfn)print('%s IDs from vehicles and rental zones with label informations %s' %(seperatingLine, seperatingLine)) print(dfnnl)```The code above produces following output:``` IDs from vehicles and rental zones with label informations levelName columnName ID LABEL0 0 VEHICLE_ID 148221 i-Miev1 0 RENTALZONE_ID 403352 Wilhelmstraße-ELEKTRO2 1 VEHICLE_ID 148261 i-Miev3 1 RENTALZONE_ID 404993 Bahnhof-ELEKTRO5 2 RENTALZONE_ID 401727 Parkgarage Staatsgalerie```Because of the uniformity in the relationships between the nodes, we need only a constant label for each edge. Therefore it's enough to produce a list containing string members for a single label:```pythondef prepareEdgeLabelsForGraph(dfn): dfnel = dfn.apply({'r' : lambda x: 'WAS_BOOKED_IN'}) print('%s Edge labels %s' %(seperatingLine, seperatingLine)) dfnel = dfnel.reset_index() dfnel = dfnel.drop("index", axis=1) dfnel = dfnel.rename_axis({"r": "edgeLabel"}, axis="columns") print(dfnel) return dfnel ...dfnel = prepareEdgeLabelsForGraph(dfn)```The code above produces following output:``` Edge labels edgeLabel0 WAS_BOOKED_IN1 WAS_BOOKED_IN2 WAS_BOOKED_IN```We can now pass all informations together (graph and labels) to a simple function, that draws a graph as image:```pythonimport matplotlib.pyplot as pltimport networkx as nximport pandas as pd%matplotlib inlinedef draw_graph(graph, layout, edgeLabels, nodeLabels, name): edge_labels = dict(zip(graph.edges(), edgeLabels)) G = graph graph_pos = layout plt.figure(3,figsize=(30,30)) draw nodes, edges and labels nx.draw_networkx_nodes(G, graph_pos, node_size=15000, node_color='blue', alpha=0.3) we can now added edge thickness and edge color nx.draw_networkx_edges(G, graph_pos, width=5, alpha=0.3, edge_color='green') nx.draw_networkx_labels(G, graph_pos, nodeLabels, font_size=16, font_family='sans-serif') nx.draw_networkx_edge_labels(G, graph_pos, font_size=16, edge_labels=edge_labels) plt.savefig("graph_" + name + ".png", dpi=100, facecolor='w', edgecolor='w', orientation='portrait', papertype=None, format=None, transparent=False, bbox_inches=None, pad_inches=0.1) plt.show()...draw_graph(G, nx.spring_layout(G, 2, 1), edgeLabels, nodeLabels, "spring")``` ###Code import matplotlib.pyplot as plt import networkx as nx import pandas as pd %matplotlib inline def draw_graph(graph, layout, edgeLabels, nodeLabels, name): edge_labels = 
dict(zip(graph.edges(), edgeLabels)) G = graph graph_pos = layout plt.figure(3,figsize=(30,30)) # draw nodes, edges and labels nx.draw_networkx_nodes(G, graph_pos, node_size=15000, node_color='blue', alpha=0.3) # we can now added edge thickness and edge color nx.draw_networkx_edges(G, graph_pos, width=5, alpha=0.3, edge_color='green') nx.draw_networkx_labels(G, graph_pos, nodeLabels, font_size=16, font_family='sans-serif') nx.draw_networkx_edge_labels(G, graph_pos, font_size=16, edge_labels=edge_labels) plt.savefig("graph_" + name + ".png", dpi=100, facecolor='w', edgecolor='w', orientation='portrait', papertype=None, format=None, transparent=False, bbox_inches=None, pad_inches=0.1) plt.show() def addVRZNodesToGraph(row, graph): graph.add_node(row["RENTALZONE_ID"],code=str(row["s"]["code"])) graph.add_node(row["VEHICLE_ID"],vin=str(row["v"]["vin"])) return graph def addVRZEdgesToGraph(row, graph, relationshipType): graph.add_edge(row["VEHICLE_ID"], row["RENTALZONE_ID"],{'type': relationshipType, 'times': row["TIMES"]}) return graph def produceLabelInformation(row, orgData): label = " " if str(row["columnName"]) == 'VEHICLE_ID': label = orgData.loc[(orgData['VEHICLE_ID'] == row["ID"])]\ .drop_duplicates(subset=["vModelName"],keep="first")["vModelName"].values[0] else: label = orgData.loc[(orgData['RENTALZONE_ID'] == row["ID"])]\ .drop_duplicates(subset=["sName"],keep="first")["sName"].values[0] return label def prepareDataForGraph(df): dfn = df.reset_index() dfn = dfn.drop("index", axis=1) print('%s Original dataframe without index %s' %(seperatingLine, seperatingLine)) print(dfn.head()) dfn['VEHICLE_ID'] = dfn.apply({'v' : lambda x: x["vehicleID"]}) dfn['RENTALZONE_ID'] = dfn.apply({'s' : lambda x: x["rentalZoneID"]}) dfn['vModelName'] = dfn.apply({'v' : lambda x: x["modelName"]}) dfn['sName'] = dfn.apply({'s' : lambda x: x["name"]}) dfn['TIMES'] = dfn.apply({'r' : lambda x: x["times"]}) print('%s Extended dataframe %s' %(seperatingLine, seperatingLine)) print(dfn) return dfn def prepareNodeLabelsForGraph(dfn): dfnnl = dfn.drop(['r', 's', 'v', 'TIMES', 'vModelName', 'sName'], axis=1).copy(True) dfnnl = dfnnl.reset_index() dfnnl = dfnnl.drop("index", axis=1) dfnnl = dfnnl.stack() dfnnl = dfnnl.reset_index() dfnnl = dfnnl.rename_axis({"level_0": "levelName", "level_1": "columnName", 0: "ID"}, axis="columns") dfnnl["columnName"] = dfnnl["columnName"].astype(str) dfnnl = dfnnl.drop_duplicates(subset=["ID"],keep="first") # dfnnl.append(dfn.get(['RENTALZONE_ID']).copy(True), ignore_index=True) print('%s Stacked ids from vehicles and rental zones %s' %(seperatingLine, seperatingLine)) print(dfnnl) dfnnl["LABEL"] = dfnnl.apply(produceLabelInformation, axis=1, orgData=dfn) print('%s IDs from vehicles and rental zones with label informations %s' %(seperatingLine, seperatingLine)) print(dfnnl) return dfnnl def prepareEdgeLabelsForGraph(dfn): dfnel = dfn.apply({'r' : lambda x: 'WAS_BOOKED_IN'}) print('%s Edge labels %s' %(seperatingLine, seperatingLine)) dfnel = dfnel.reset_index() dfnel = dfnel.drop("index", axis=1) dfnel = dfnel.rename_axis({"r": "edgeLabel"}, axis="columns") print(dfnel) return dfnel # # Not recommend way to hold data to a graph # def importDataToGraph(dfn): G2=nx.Graph() dfn.apply(addVRZNodesToGraph, axis=1, graph=G2) print('%s Node informations in the graph before import %s' %(seperatingLine, seperatingLine)) print (G2.nodes(data=True)) G2=nx.from_pandas_dataframe(dfn, 'VEHICLE_ID', 'RENTALZONE_ID', ['TIMES']) print('%s Node informations in the graph after import %s' 
%(seperatingLine, seperatingLine)) print (G2.nodes(data=True)) print('%s Edge informations in the graph after import %s' %(seperatingLine, seperatingLine)) print (G2.edges(data=True)) return G2 # # Recommend way to hold data to a graph # def addDataToGraph(dfn): G=nx.Graph() dfn.apply(addVRZNodesToGraph, axis=1, graph=G) dfn.apply(addVRZEdgesToGraph, axis=1, graph=G, relationshipType='WAS_BOOKED_IN') print('%s Node informations in the graph after manual import %s' %(seperatingLine, seperatingLine)) print (G.nodes(data=True)) print('%s Edge informations in the graph after manual import %s' %(seperatingLine, seperatingLine)) print (G.edges(data=True)) return G data = graph.data(query) df = pd.DataFrame(data) dfn = prepareDataForGraph(df) dfnnl = prepareNodeLabelsForGraph(dfn) dfnel = prepareEdgeLabelsForGraph(dfn) # # Recommend way to hold data to a graph # G = addDataToGraph(dfn) # # Not recommend way to hold data to a graph # G2 = importDataToGraph(dfn) nodeLabels = dict(zip(dfnnl["ID"], dfnnl["LABEL"])) edgeLabels = dfnel["edgeLabel"].astype(str) print(nodeLabels) print(edgeLabels) draw_graph(G, nx.spring_layout(G, 2, 1), edgeLabels, nodeLabels, "spring") ###Output {148221: 'i-Miev', 403352: 'Wilhelmstraße-ELEKTRO', 148261: 'i-Miev', 404993: 'Bahnhof-ELEKTRO', 401727: 'Parkgarage Staatsgalerie'} 0 WAS_BOOKED_IN 1 WAS_BOOKED_IN 2 WAS_BOOKED_IN Name: edgeLabel, dtype: object
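A note on library versions: the helpers used above (`nx.from_pandas_dataframe`, the dictionary form of `add_edge`, and `DataFrame.rename_axis` with a column mapping) belong to older NetworkX 1.x and pandas releases. If you run this notebook on NetworkX 2.x, a rough equivalent of the edge import plus node labelling could look like the sketch below. It is illustrative only and assumes the `dfn` dataframe with the `VEHICLE_ID`, `RENTALZONE_ID`, `TIMES`, `vModelName` and `sName` columns prepared above.

```python
import networkx as nx

# Illustrative only: the same edge import on NetworkX 2.x, where
# from_pandas_dataframe() was replaced by from_pandas_edgelist()
# and node/edge attributes are passed as keyword arguments.
G3 = nx.from_pandas_edgelist(dfn, source='VEHICLE_ID', target='RENTALZONE_ID',
                             edge_attr=['TIMES'])

# Attach the node labels afterwards so they are not lost during the import.
vehicle_labels = dict(zip(dfn['VEHICLE_ID'], dfn['vModelName']))
station_labels = dict(zip(dfn['RENTALZONE_ID'], dfn['sName']))
nx.set_node_attributes(G3, {**vehicle_labels, **station_labels}, name='label')

print(G3.nodes(data=True))
print(G3.edges(data=True))
```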
playbook/tactics/defense-evasion/T1070.001.ipynb
###Markdown T1070.001 - Indicator Removal on Host: Clear Windows Event LogsAdversaries may clear Windows Event Logs to hide the activity of an intrusion. Windows Event Logs are a record of a computer's alerts and notifications. There are three system-defined sources of events: System, Application, and Security, with five event types: Error, Warning, Information, Success Audit, and Failure Audit.The event logs can be cleared with the following utility commands:* wevtutil cl system* wevtutil cl application* wevtutil cl securityThese logs may also be cleared through other mechanisms, such as the event viewer GUI or [PowerShell](https://attack.mitre.org/techniques/T1059/001). Atomic Tests ###Code #Import the Module before running the tests. # Checkout Jupyter Notebook at https://github.com/cyb3rbuff/TheAtomicPlaybook to run PS scripts. Import-Module /Users/0x6c/AtomicRedTeam/atomics/invoke-atomicredteam/Invoke-AtomicRedTeam.psd1 - Force ###Output _____no_output_____ ###Markdown Atomic Test 1 - Clear LogsUpon execution this test will clear Windows Event Logs. Open the System.evtx logs at C:\Windows\System32\winevt\Logs and verify that it is now empty.**Supported Platforms:** windowsElevation Required (e.g. root or admin) Attack Commands: Run with `command_prompt````command_promptwevtutil cl System``` ###Code Invoke-AtomicTest T1070.001 -TestNumbers 1 ###Output _____no_output_____ ###Markdown Atomic Test 2 - Delete System Logs Using Clear-EventLogClear event logs using built-in PowerShell commands.Upon successful execution, you should see the list of deleted event logsUpon execution, open the Security.evtx logs at C:\Windows\System32\winevt\Logs and verify that it is now empty or has very few logs in it.**Supported Platforms:** windowsElevation Required (e.g. root or admin) Attack Commands: Run with `powershell````powershell$logs = Get-EventLog -List | ForEach-Object {$_.Log}$logs | ForEach-Object {Clear-EventLog -LogName $_ }Get-EventLog -list``` ###Code Invoke-AtomicTest T1070.001 -TestNumbers 2 ###Output _____no_output_____
use-cases/healthcare/breast_cancer/autopilot_xgboost_breast_cancer.ipynb
###Markdown Breast Cancer prediction with Amazon SageMaker Autopilot------ Contents1. [Introduction](Introduction)1. [Prerequisites](Prerequisites)1. [Downloading the dataset](Downloading)1. [Upload the dataset to Amazon S3](Uploading)1. [Setting up the SageMaker Autopilot Job](Settingup)1. [Launching the SageMaker Autopilot Job](Launching)1. [Tracking Sagemaker Autopilot Job Progress](Tracking)1. [Results](Results)1. [Cleanup](Cleanup) IntroductionAmazon SageMaker Autopilot is an automated machine learning (commonly referred to as AutoML) solution for tabular datasets. You can use SageMaker Autopilot in different ways: on autopilot (hence the name) or with human guidance, without code through SageMaker Studio, or using the AWS SDKs. This notebook, as a first glimpse, will use the AWS SDKs to simply create and deploy a machine learning model.The model lifecycle can be viewed below:![`SageMaker` Model Lifecycle](images/ml-concepts.png) add information about the datasetThis notebook demonstrates how you can use Autopilot on this dataset to get the most accurate ML pipeline through exploring a number of potential options, or "candidates". Each candidate generated by Autopilot consists of two steps. The first step performs automated feature engineering on the dataset and the second step trains and tunes an algorithm to produce a model. When you deploy this model, it follows similar steps. Feature engineering followed by inference, to decide whether the lead is worth pursuing or not. The notebook contains instructions on how to train the model as well as to deploy the model to perform breast cancer malignancy predictions. PrerequisitesBefore you start the tasks in this tutorial, do the following:- The Amazon Simple Storage Service (Amazon S3) bucket and prefix that you want to use for training and model data. This should be within the same Region as Amazon SageMaker training. The code below will create, or if it exists, use, the default bucket.- The IAM role to give Autopilot access to your data. See the Amazon SageMaker documentation for more information on IAM roles: https://docs.aws.amazon.com/sagemaker/latest/dg/security-iam.html ###Code import sagemaker # Define IAM role import boto3 import re from sagemaker import get_execution_role region = boto3.Session().region_name # Define IAM role role = get_execution_role() session = sagemaker.Session() #bucket = '' # <uncomment and change to your own bucket if you don't want to use the default bucket> bucket = session.default_bucket() print(sagemaker.Session().default_bucket()) prefix = 'sagemaker/bc/autopilot' # modify to your own path if desired sm = boto3.Session().client(service_name='sagemaker',region_name=region) # Import Libraries import numpy as np # For matrix operations and numerical processing import pandas as pd # For munging tabular data import matplotlib.pyplot as plt # For charts and visualizations from IPython.display import Image # For displaying images in the notebook from IPython.display import display # For displaying outputs in the notebook import time # For labeling SageMaker models, endpoints, etc. from time import gmtime, strftime # For labeling SageMaker models, endpoints, etc. 
import sys # For writing outputs to notebook import math # For ceiling function import json # For parsing hosting outputs import os # For manipulating filepath names import zipfile # Amazon SageMaker's Python SDK provides many helper functions ###Output _____no_output_____ ###Markdown --- Data WranglingFor this illustration, we will continue using UCI'S breast cancer diagnostic data set available at https://archive.ics.uci.edu/ml/datasets/Breast+Cancer+Wisconsin+%28Diagnostic%29. The data set is also available on Kaggle at https://www.kaggle.com/uciml/breast-cancer-wisconsin-data. The purpose here is to use this data set to build a predictve model of whether a breast mass image indicates benign or malignant tumor. Upload the dataset to Amazon S3Before you run Autopilot on the dataset, first perform a check of the dataset to make sure that it has no obvious errors. The Autopilot process can take long time, and it's generally a good practice to inspect the dataset before you start a job. This particular dataset is small, so you can inspect it in the notebook instance itself. If you have a larger dataset that will not fit in a notebook instance memory, inspect the dataset offline using a big data analytics tool like Apache Spark. [Deequ](https://github.com/awslabs/deequ) is a library built on top of Apache Spark that can be helpful for performing checks on large datasets. Autopilot is capable of handling datasets up to 5 GB. ###Code !wget https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/wdbc.data ###Output --2021-10-13 06:52:32-- https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/wdbc.data Resolving archive.ics.uci.edu (archive.ics.uci.edu)... 128.195.10.252 Connecting to archive.ics.uci.edu (archive.ics.uci.edu)|128.195.10.252|:443... connected. HTTP request sent, awaiting response... 200 OK Length: 124103 (121K) [application/x-httpd-php] Saving to: ‘wdbc.data’ wdbc.data 100%[===================>] 121.19K 273KB/s in 0.4s 2021-10-13 06:52:34 (273 KB/s) - ‘wdbc.data’ saved [124103/124103] ###Markdown Read the data into a Pandas data frame and take a look. ###Code col_names = ["id","diagnosis","radius_mean","texture_mean","perimeter_mean","area_mean","smoothness_mean", "compactness_mean","concavity_mean","concave points_mean","symmetry_mean","fractal_dimension_mean", "radius_se","texture_se","perimeter_se","area_se","smoothness_se","compactness_se","concavity_se", "concave points_se","symmetry_se","fractal_dimension_se","radius_worst","texture_worst", "perimeter_worst","area_worst","smoothness_worst","compactness_worst","concavity_worst", "concave points_worst","symmetry_worst","fractal_dimension_worst"] breastcancer = pd.read_csv('./wdbc.data', header=None, names=col_names) pd.set_option('display.max_columns', 50) # Make sure we can see all of the columns pd.set_option('display.max_rows', 10) # Keep the output on one page breastcancer ###Output _____no_output_____ ###Markdown Note that there are 20 features to help predict the target column 'y'.Amazon SageMaker Autopilot takes care of preprocessing your data for you. You do not need to perform conventional data preprocssing techniques such as handling missing values, converting categorical features to numeric features, scaling data, and handling more complicated data types.Moreover, splitting the dataset into training and validation splits is not necessary. Autopilot takes care of this for you. You may, however, want to split out a test set. 
That's next, although you use it for batch inference at the end instead of testing the model. We will drop the Id Column ###Code breastcancer = breastcancer.drop(['id'], axis=1) breastcancer breastcancer.diagnosis = pd.Categorical(breastcancer.diagnosis).codes breastcancer.head() ###Output _____no_output_____ ###Markdown Reserve some data for calling batch inference on the modelDivide the data into training and testing splits. The training split is used by SageMaker Autopilot. The testing split is reserved to perform inference using the suggested model. ###Code train_data = breastcancer.sample(frac=0.9,random_state=200) test_data = breastcancer.drop(train_data.index) test_data_no_target = breastcancer.drop(columns=['diagnosis']) ###Output _____no_output_____ ###Markdown Upload the dataset to Amazon S3Copy the file to Amazon Simple Storage Service (Amazon S3) in a .csv format for Amazon SageMaker training to use. ###Code train_file = 'train_data.csv'; train_data.to_csv(train_file, index=False, header=True) # train_data_s3_path = session.upload_data(path=train_file, key_prefix=prefix + "/train") # print('Train data uploaded to: ' + train_data_s3_path) autopilot_train_s3_uri = session.upload_data(bucket=bucket, key_prefix=prefix, path=train_file) autopilot_train_s3_uri test_file = 'test_data.csv'; test_data_no_target.to_csv(test_file, index=False, header=False) test_data_s3_path = session.upload_data(path=test_file, key_prefix=prefix + "/test") print('Test data uploaded to: ' + test_data_s3_path) from IPython.core.display import display, HTML display(HTML('<b>Review <a target="blank" href="https://s3.console.aws.amazon.com/s3/buckets/{}/sagemaker/bc/autopilot/">uploaded files</a> in S3 bucket</b>'.format(bucket, prefix, {}))) ###Output _____no_output_____ ###Markdown Setting up the SageMaker Autopilot JobAfter uploading the dataset to Amazon S3, you can invoke Autopilot to find the best ML pipeline to train a model on this dataset. The required inputs for invoking a Autopilot job are:* Amazon S3 location for input dataset and for all output artifacts* Name of the column of the dataset you want to predict (`y` in this case) * An IAM roleCurrently Autopilot supports only tabular datasets in CSV format. Either all files should have a header row, or the first file of the dataset, when sorted in alphabetical/lexical order, is expected to have a header row. ###Code input_data_config = [{ 'DataSource': { 'S3DataSource': { 'S3DataType': 'S3Prefix', 'S3Uri': 's3://{}/{}/train'.format(bucket,prefix) } }, 'TargetAttributeName': 'diagnosis' } ] output_data_config = { 'S3OutputPath': 's3://{}/{}/output'.format(bucket,prefix) } autoMLJobConfig={ 'CompletionCriteria': { 'MaxCandidates': 5 } } autoMLJobObjective = { "MetricName": "Accuracy" } from IPython.core.display import display, HTML display(HTML('<b>Review <a target="blank" href="https://s3.console.aws.amazon.com/s3/buckets/{}/sagemaker/bc/autopilot/">No output bucket created yet</a> in S3 bucket</b>'.format(bucket, prefix, {}))) ###Output _____no_output_____ ###Markdown You can also specify the type of problem you want to solve with your dataset (`Regression, MulticlassClassification, BinaryClassification`). In case you are not sure, SageMaker Autopilot will infer the problem type based on statistics of the target column (the column you want to predict). 
You have the option to limit the running time of a SageMaker Autopilot job by providing either the maximum number of pipeline evaluations or candidates (one pipeline evaluation is called a `Candidate` because it generates a candidate model) or providing the total time allocated for the overall Autopilot job. Under default settings, this job takes about four hours to run. This varies between runs because of the nature of the exploratory process Autopilot uses to find optimal training parameters. Launching the SageMaker Autopilot JobYou can now launch the Autopilot job by calling the `create_auto_ml_job` API. https://docs.aws.amazon.com/cli/latest/reference/sagemaker/create-auto-ml-job.html ###Code from time import gmtime, strftime, sleep timestamp_suffix = strftime('%d-%H-%M-%S', gmtime()) print(autopilot_train_s3_uri) model_output_s3_uri = 's3://{}/{}/output'.format(bucket, prefix) print(model_output_s3_uri) print(input_data_config) auto_ml_job_name = 'automl-xgboost-bc' + timestamp_suffix print('AutoMLJobName: ' + auto_ml_job_name) max_candidates = 2 automl = sagemaker.automl.automl.AutoML( target_attribute_name='diagnosis', base_job_name=auto_ml_job_name, output_path=model_output_s3_uri, max_candidates=max_candidates, sagemaker_session=session, role=role, max_runtime_per_training_job_in_seconds=600, total_job_runtime_in_seconds=3000 ) print(max_candidates) automl.fit( ### BEGIN SOLUTION - DO NOT delete this comment for grading purposes inputs=autopilot_train_s3_uri, # Replace None ### END SOLUTION - DO NOT delete this comment for grading purposes job_name=auto_ml_job_name, wait=False, logs=False ) from IPython.core.display import display, HTML display(HTML('<b>Review <a target="blank" href="https://s3.console.aws.amazon.com/s3/buckets/{}/sagemaker/bc/autopilot/">output folder has been created</a> in S3 bucket</b>'.format(bucket, prefix, {}))) ###Output _____no_output_____ ###Markdown Tracking SageMaker Autopilot job progressSageMaker Autopilot job consists of the following high-level steps : * Analyzing Data, where the dataset is analyzed and Autopilot comes up with a list of ML pipelines that should be tried out on the dataset. The dataset is also split into train and validation sets.* Feature Engineering, where Autopilot performs feature transformation on individual features of the dataset as well as at an aggregate level.* Model Tuning, where the top performing pipeline is selected along with the optimal hyperparameters for the training algorithm (the last stage of the pipeline). ###Code # This step takes about 33 minutes print ('JobStatus - Secondary Status') print('------------------------------') describe_response = sm.describe_auto_ml_job(AutoMLJobName=auto_ml_job_name) print (describe_response['AutoMLJobStatus'] + " - " + describe_response['AutoMLJobSecondaryStatus']) job_run_status = describe_response['AutoMLJobStatus'] while job_run_status not in ('Failed', 'Completed', 'Stopped'): describe_response = sm.describe_auto_ml_job(AutoMLJobName=auto_ml_job_name) job_run_status = describe_response['AutoMLJobStatus'] print (describe_response['AutoMLJobStatus'] + " - " + describe_response['AutoMLJobSecondaryStatus']) sleep(30) ###Output JobStatus - Secondary Status ------------------------------ Completed - Completed ###Markdown SageMaker processing jobsThe Autopilot creates required SageMaker processing jobs during the run:* First processing job (data splitter) checks the data sanity, performs stratified shuffling and splits the data into training and validation. 
* Second processing job (candidate generator) first streams through the data to compute statistics for the dataset. Then, uses these statistics to identify the problem type, and possible types of every column-predictor: numeric, categorical, natural language, etc. ###Code from IPython.core.display import display, HTML display(HTML('<b>Review <a target="blank" href="https://console.aws.amazon.com/sagemaker/home?region={}#/processing-jobs/">processing jobs</a></b>'.format(region))) ###Output _____no_output_____ ###Markdown Review the Output in S3Once data analysis is complete, SageMaker AutoPilot generates two notebooks: * Data exploration* Candidate definitionNotebooks are included in the AutoML job artifacts generated during the run. Before checking the existence of the notebooks, you can check if the artifacts have been generated.```data-processor-models/ "models" learned to transform raw data into features documentation/ explainability and other documentation about your modelpreprocessed-data/ data for train and validationsagemaker-automl-candidates/ candidate models which autopilot comparestransformed-data/ candidate-specific data for train and validationtuning/ candidate-specific tuning resultsvalidations/ validation results``` ###Code display( HTML( '<b>Review all <a target="blank" href="https://s3.console.aws.amazon.com/s3/buckets/{}?region={}&prefix=sagemaker/bc/autopilot/">output in S3</a></b>'.format( bucket, region, auto_ml_job_name ) ) ) ###Output _____no_output_____ ###Markdown Model training and tuningWhen you launched the Autopilot job, you requested that 3 model candidates are generated and compared.Therefore, you should see three (3) SageMaker training jobs below. from IPython.core.display import display, HTMLdisplay(HTML('Review hyper-parameter tuning jobs'.format(region))) ResultsNow use the describe_auto_ml_job API to look up the best candidate selected by the SageMaker Autopilot job. 
###Code best_candidate = sm.describe_auto_ml_job(AutoMLJobName=auto_ml_job_name)["BestCandidate"] best_candidate_name = best_candidate["CandidateName"] print(best_candidate) print("\n") print("CandidateName: " + best_candidate_name) print( "FinalAutoMLJobObjectiveMetricName: " + best_candidate["FinalAutoMLJobObjectiveMetric"]["MetricName"] ) print( "FinalAutoMLJobObjectiveMetricValue: " + str(best_candidate["FinalAutoMLJobObjectiveMetric"]["Value"]) ) ###Output {'CandidateName': 'automl-xgboost-bc13-06-53-18T1rM-002-2c8903b3', 'FinalAutoMLJobObjectiveMetric': {'MetricName': 'validation:f1_binary', 'Value': 0.9598100185394287}, 'ObjectiveStatus': 'Succeeded', 'CandidateSteps': [{'CandidateStepType': 'AWS::SageMaker::ProcessingJob', 'CandidateStepArn': 'arn:aws:sagemaker:ap-southeast-2:745084241526:processing-job/automl-xgboost-bc13-06-53-18-db-1-2cf2c8486b1142f2ad1ecd482d9ce', 'CandidateStepName': 'automl-xgboost-bc13-06-53-18-db-1-2cf2c8486b1142f2ad1ecd482d9ce'}, {'CandidateStepType': 'AWS::SageMaker::TrainingJob', 'CandidateStepArn': 'arn:aws:sagemaker:ap-southeast-2:745084241526:training-job/automl-xgboost-bc13-06-53-18-dpp0-1-f507133ee41b4068a5fba1f24f5', 'CandidateStepName': 'automl-xgboost-bc13-06-53-18-dpp0-1-f507133ee41b4068a5fba1f24f5'}, {'CandidateStepType': 'AWS::SageMaker::TransformJob', 'CandidateStepArn': 'arn:aws:sagemaker:ap-southeast-2:745084241526:transform-job/automl-xgboost-bc13-06-53-18-dpp0-csv-1-7fea98f662f140459e796e3', 'CandidateStepName': 'automl-xgboost-bc13-06-53-18-dpp0-csv-1-7fea98f662f140459e796e3'}, {'CandidateStepType': 'AWS::SageMaker::TrainingJob', 'CandidateStepArn': 'arn:aws:sagemaker:ap-southeast-2:745084241526:training-job/automl-xgboost-bc13-06-53-18t1rm-002-2c8903b3', 'CandidateStepName': 'automl-xgboost-bc13-06-53-18T1rM-002-2c8903b3'}], 'CandidateStatus': 'Completed', 'InferenceContainers': [{'Image': '783357654285.dkr.ecr.ap-southeast-2.amazonaws.com/sagemaker-sklearn-automl:2.2.1-1-cpu-py3', 'ModelDataUrl': 's3://sagemaker-ap-southeast-2-745084241526/sagemaker/bc/autopilot/output/automl-xgboost-bc13-06-53-18/data-processor-models/automl-xgboost-bc13-06-53-18-dpp0-1-f507133ee41b4068a5fba1f24f5/output/model.tar.gz', 'Environment': {'AUTOML_TRANSFORM_MODE': 'feature-transform', 'SAGEMAKER_DEFAULT_INVOCATIONS_ACCEPT': 'application/x-recordio-protobuf', 'SAGEMAKER_PROGRAM': 'sagemaker_serve', 'SAGEMAKER_SUBMIT_DIRECTORY': '/opt/ml/model/code'}}, {'Image': '783357654285.dkr.ecr.ap-southeast-2.amazonaws.com/sagemaker-xgboost:1.2-2-cpu-py3', 'ModelDataUrl': 's3://sagemaker-ap-southeast-2-745084241526/sagemaker/bc/autopilot/output/automl-xgboost-bc13-06-53-18/tuning/automl-xgb-dpp0-xgb/automl-xgboost-bc13-06-53-18T1rM-002-2c8903b3/output/model.tar.gz', 'Environment': {'MAX_CONTENT_LENGTH': '20971520', 'SAGEMAKER_DEFAULT_INVOCATIONS_ACCEPT': 'text/csv', 'SAGEMAKER_INFERENCE_OUTPUT': 'predicted_label', 'SAGEMAKER_INFERENCE_SUPPORTED': 'predicted_label,probability,probabilities'}}, {'Image': '783357654285.dkr.ecr.ap-southeast-2.amazonaws.com/sagemaker-sklearn-automl:2.2.1-1-cpu-py3', 'ModelDataUrl': 's3://sagemaker-ap-southeast-2-745084241526/sagemaker/bc/autopilot/output/automl-xgboost-bc13-06-53-18/data-processor-models/automl-xgboost-bc13-06-53-18-dpp0-1-f507133ee41b4068a5fba1f24f5/output/model.tar.gz', 'Environment': {'AUTOML_TRANSFORM_MODE': 'inverse-label-transform', 'SAGEMAKER_DEFAULT_INVOCATIONS_ACCEPT': 'text/csv', 'SAGEMAKER_INFERENCE_INPUT': 'predicted_label', 'SAGEMAKER_INFERENCE_OUTPUT': 'predicted_label', 'SAGEMAKER_INFERENCE_SUPPORTED': 
'predicted_label,probability,labels,probabilities', 'SAGEMAKER_PROGRAM': 'sagemaker_serve', 'SAGEMAKER_SUBMIT_DIRECTORY': '/opt/ml/model/code'}}], 'CreationTime': datetime.datetime(2021, 10, 13, 7, 12, 28, tzinfo=tzlocal()), 'EndTime': datetime.datetime(2021, 10, 13, 7, 14, 6, tzinfo=tzlocal()), 'LastModifiedTime': datetime.datetime(2021, 10, 13, 7, 15, 4, 976000, tzinfo=tzlocal()), 'CandidateProperties': {'CandidateArtifactLocations': {'Explainability': 's3://sagemaker-ap-southeast-2-745084241526/sagemaker/bc/autopilot/output/automl-xgboost-bc13-06-53-18/documentation/explainability/output'}}} CandidateName: automl-xgboost-bc13-06-53-18T1rM-002-2c8903b3 FinalAutoMLJobObjectiveMetricName: validation:f1_binary FinalAutoMLJobObjectiveMetricValue: 0.9598100185394287 ###Markdown Deploy The ModelNow that you have successfully completed the SageMaker Autopilot job on the dataset, create a model from any of the candidates by using [Inference Pipelines](https://docs.aws.amazon.com/sagemaker/latest/dg/inference-pipelines.html). Now we are going to deploy the best model ###Code timestamp_suffix = strftime("%d-%H-%M-%S", gmtime()) model_name = best_candidate_name + timestamp_suffix + "-model" #Create the model model_arn = sm.create_model( Containers=best_candidate["InferenceContainers"], ModelName=model_name, ExecutionRoleArn=role ) #Configure the model epc_name = best_candidate_name + timestamp_suffix + "-epc" ep_config = sm.create_endpoint_config( EndpointConfigName=epc_name, ProductionVariants=[ { "InstanceType": "ml.m5.2xlarge", "InitialInstanceCount": 1, "ModelName": model_name, "VariantName": "main", } ], ) #Deploy the model ep_name = best_candidate_name + timestamp_suffix + "-ep" create_endpoint_response = sm.create_endpoint(EndpointName=ep_name, EndpointConfigName=epc_name) sm.get_waiter("endpoint_in_service").wait(EndpointName=ep_name) ###Output _____no_output_____ ###Markdown EvaluateNow that we have a hosted endpoint running, we can make real-time predictions from our model by calling the predict method. But first, we'll need to setup serializers and deserializers for passing our test_data NumPy arrays to the model behind the endpoint. ###Code from sagemaker.predictor import Predictor from sagemaker.serializers import CSVSerializer from sagemaker.deserializers import CSVDeserializer predictor = Predictor( endpoint_name=ep_name, sagemaker_session=session, serializer=CSVSerializer(), deserializer=CSVDeserializer(), ) # Remove the target column from the test data and reset the index for the ground truth data test_data_inference = test_data.drop("diagnosis", axis=1) actual=test_data.iloc[:,0] actual=actual.reset_index(drop=True) # Obtain predictions from SageMaker endpoint prediction = predictor.predict(test_data_inference.to_csv(sep=",", header=False, index=False)) # Load prediction in pandas and compare to ground truth prediction_df = pd.DataFrame(prediction) # pd.crosstab(index=actual, columns=prediction_df[0], rownames=['actual'], colnames=['predictions']) ###Output _____no_output_____ ###Markdown Data Exploration NotebookSagemaker Autopilot also auto-generates a Data Exploration notebook, which can be downloaded from the following Amazon S3 location: ###Code sm.describe_auto_ml_job(AutoMLJobName=auto_ml_job_name)['AutoMLJobArtifacts']['DataExplorationNotebookLocation'] ###Output _____no_output_____ ###Markdown Candidate Generation Notebook Sagemaker AutoPilot also auto-generates a Candidate Definitions notebook. 
This notebook can be used to interactively step through the various steps taken by the Sagemaker Autopilot to arrive at the best candidate. This notebook can also be used to override various runtime parameters like parallelism, hardware used, algorithms explored, feature extraction scripts and more. The notebook can be downloaded from the following Amazon S3 location: ###Code sm.describe_auto_ml_job(AutoMLJobName=auto_ml_job_name)['AutoMLJobArtifacts']['CandidateDefinitionNotebookLocation'] ###Output _____no_output_____ ###Markdown CleanupThe Autopilot job creates many underlying artifacts such as dataset splits, preprocessing scripts, or preprocessed data, etc. This code, when un-commented, deletes them. This operation deletes all the generated models and the auto-generated notebooks as well. ###Code #s3 = boto3.resource('s3') #bucket = s3.Bucket(bucket) #job_outputs_prefix = '{}/output/{}'.format(prefix,auto_ml_job_name) #bucket.objects.filter(Prefix=job_outputs_prefix).delete() ###Output _____no_output_____
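Note that the cleanup cell above only removes the Amazon S3 artifacts. The real-time endpoint created in the deployment step keeps incurring charges until it is explicitly deleted. A minimal sketch of that extra cleanup, left commented out like the block above and assuming `ep_name`, `epc_name` and `model_name` are still defined in this session:

```python
# Illustrative only: tear down the hosted endpoint to stop incurring charges.
# Assumes ep_name, epc_name and model_name are still defined in this session.
#sm.delete_endpoint(EndpointName=ep_name)
#sm.delete_endpoint_config(EndpointConfigName=epc_name)
#sm.delete_model(ModelName=model_name)
```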
lectures/notebooks/Lecture 03 - Linear methods.ipynb
###Markdown IntroductionThis notebook demonstrates some basic data handling using the Pandas package and the application of linear methods to identify relationships in materials data. We will be creating a rudimentary prediction model for the bulk modulus of an element from various basic elemental properties. For the purposes of this exercise, we will assume that a linear relationship does indeed exist. ###Code import pandas as pd import seaborn as sns import numpy as np import matplotlib as mpl import matplotlib.pyplot as plt params = {'legend.fontsize': 20, 'figure.figsize': (12, 8), 'axes.labelsize': 20, 'axes.titlesize': 24, 'xtick.labelsize':16, 'ytick.labelsize': 16} mpl.rcParams.update(params) %matplotlib inline ###Output _____no_output_____ ###Markdown Load in the elemental dataset using pandas' read_csv method. This data was obtained from the Materials Project for the ground state structure of each element only. The columns are:- K: Bulk modulus in GPa- MP: Melting point in K- BP: Boiling point in K- Z: Atomic number- X: Pauling electronegativity- r: Atomic radius in angstroms ###Code data = pd.read_csv("element_data.csv", index_col=0) print(data) ###Output K MP BP Z X r Element Ac 29.0 1323.00 3573.0 89 1.10 1.95 Ag 88.0 1234.93 2435.0 47 1.93 1.60 Al 83.0 933.47 2792.0 13 1.61 1.25 As 40.0 1090.00 887.0 33 2.18 1.15 Au 137.0 1337.33 3129.0 79 2.54 1.35 ... ... ... ... .. ... ... W 304.0 3695.00 5828.0 74 2.36 1.35 Y 41.0 1799.00 3609.0 39 1.22 1.80 Yb 15.0 1097.00 1469.0 70 1.10 1.75 Zn 67.0 692.68 1180.0 30 1.65 1.35 Zr 94.0 2128.00 4682.0 40 1.33 1.55 [83 rows x 6 columns] ###Markdown Since a strict linear relationship may not exist between K and the other variables, we will create a few additional features based on simple transformations of some of the inputs, namely the electronegativity and the atomic radius. ###Code data["X^2"] = data["X"] ** 2 data["sqrt(X)"] = data["X"] ** 0.5 data["r^2"] = data["r"] ** 2 data["sqrt(r)"] = data["r"] ** 0.5 print(data) ###Output K MP BP Z X r X^2 sqrt(X) r^2 \ Element Ac 29.0 1323.00 3573.0 89 1.10 1.95 1.2100 1.048809 3.8025 Ag 88.0 1234.93 2435.0 47 1.93 1.60 3.7249 1.389244 2.5600 Al 83.0 933.47 2792.0 13 1.61 1.25 2.5921 1.268858 1.5625 As 40.0 1090.00 887.0 33 2.18 1.15 4.7524 1.476482 1.3225 Au 137.0 1337.33 3129.0 79 2.54 1.35 6.4516 1.593738 1.8225 ... ... ... ... .. ... ... ... ... ... W 304.0 3695.00 5828.0 74 2.36 1.35 5.5696 1.536229 1.8225 Y 41.0 1799.00 3609.0 39 1.22 1.80 1.4884 1.104536 3.2400 Yb 15.0 1097.00 1469.0 70 1.10 1.75 1.2100 1.048809 3.0625 Zn 67.0 692.68 1180.0 30 1.65 1.35 2.7225 1.284523 1.8225 Zr 94.0 2128.00 4682.0 40 1.33 1.55 1.7689 1.153256 2.4025 sqrt(r) Element Ac 1.396424 Ag 1.264911 Al 1.118034 As 1.072381 Au 1.161895 ... ... W 1.161895 Y 1.341641 Yb 1.322876 Zn 1.161895 Zr 1.244990 [83 rows x 10 columns] ###Markdown For ease of interpretation, let's define our X and y. ###Code features = [c for c in data.columns if c != "K"] x = data[features] y = data["K"] ###Output _____no_output_____ ###Markdown We will now perform a standard multiple linear regression using scikit-learn. 
###Code from sklearn import linear_model from sklearn.metrics import mean_squared_error reg = linear_model.LinearRegression() reg.fit(x, y) r2 = reg.score(x, y) equation = ["%.2e %s" % (v, f) for v, f in zip(reg.coef_, features)] print("K = %.1f + %s" % (reg.intercept_, " + ".join(equation))) f, ax = plt.subplots(figsize=(12, 8)) yhat = reg.predict(data[features]) sns.scatterplot(x=y, y=yhat) plt.ylabel(r"$K_{predicted}$ (GPa)") plt.xlabel(r"$K$ (GPa)") plt.annotate(r"$R^2$ = %.3f, MSE = %.1f" % (r2, mean_squared_error(y, yhat)), (200, 0), fontsize=18); ###Output K = -243.8 + 4.25e-02 MP + 2.76e-02 BP + -2.48e-01 Z + 3.92e+02 X + -3.55e+02 r + -4.96e+01 X^2 + -3.85e+02 sqrt(X) + 5.40e+01 r^2 + 5.00e+02 sqrt(r) ###Markdown Now, it may seem that this model performs very well. But in actuality, we have used the entire dataset to perform the regression. A proper fit should be conducted using cross-validation. Here, we will use a five-fold cross-validation to assess the performance of this highly overspecified model. ###Code from sklearn.model_selection import cross_val_predict, KFold from sklearn.metrics import r2_score kfold = KFold(n_splits=5, shuffle=True, random_state=42) mlr = linear_model.LinearRegression() yhat_mlr = cross_val_predict(mlr, x, y, cv=kfold) r2_mlr = r2_score(y, yhat_mlr) mse_mlr = mean_squared_error(y, yhat_mlr) label_mlr = "MLR: $R^2$ = %.3f, MSE = %.1f" % (r2_mlr, mse_mlr) f, ax = plt.subplots(figsize=(8, 8)) plt.plot(y, yhat_mlr, 'o', label=label_mlr) plt.ylabel(r"$K_{predicted}$ (GPa)") plt.xlabel(r"$K$ (GPa)") plt.legend() plt.xlim([0, 410]) plt.ylim([0, 410]) plt.plot([0, 410], [0, 410], 'k--'); ###Output _____no_output_____ ###Markdown Correlations between featuresHere, we will look at correlations between features. First, we do a pair plot between features. ###Code grid = sns.pairplot(data[features]) ###Output _____no_output_____ ###Markdown From the plot, it is clear that MP and BP are correlated with each other. And X is inversely related to r in some way. Obviously, X and $X^2$ are correlated. Another way to plot this is using a correlation plot. ###Code f, ax = plt.subplots(figsize=(8, 6)) sns.heatmap(x.corr(), cmap="coolwarm", vmin=-1, vmax=1, ax=ax); ###Output _____no_output_____ ###Markdown Subset selection Sometimes, the input variables may not be directly related to the interested target. Hence, a feature selection step is necessary. There are many different methods for selecting features. Here we will go over a simple implementation in scikit-learn, ###Code from sklearn.feature_selection import SelectKBest, f_regression def identify_columns(x_new, nrows=10): columns = x.columns xvalues = x.values dist = np.linalg.norm(xvalues[:nrows, :, None] - x_new[:nrows, None, :], axis=0) return columns[np.argmin(dist, axis=0)].values sel = SelectKBest(f_regression, k=3) x_new = sel.fit_transform(x, y) print(f"Selected features {identify_columns(x_new)}") s = ', '.join(['%s: %.3e' % (i, j) for i, j in zip(x.columns, sel.pvalues_)]) print("The p values for the variables are " + s) ###Output The p values for the variables are MP: 5.017e-17, BP: 3.285e-14, Z: 1.382e-01, X: 1.760e-01, r: 9.118e-02, X^2: 6.985e-01, sqrt(X): 6.706e-02, r^2: 1.346e-02, sqrt(r): 2.524e-01 ###Markdown Apparently, the most significant variables are MP and BP, followed by r^2 (p values < 0.05). Let's redo the regression using only these variables. 
###Code mlr_best = linear_model.LinearRegression() yhat_mlr_best = cross_val_predict(mlr_best, x_new, y, cv=kfold) r2_mlr_best = r2_score(y, yhat_mlr_best) mse_mlr_best = mean_squared_error(y, yhat_mlr_best) label_mlr_best = "MLR: $R^2$ = %.3f, MSE = %.1f" % (r2_mlr_best, mse_mlr_best) f, ax = plt.subplots(figsize=(8, 8)) plt.plot(y, yhat_mlr, 'o', label=label_mlr) plt.plot(y, yhat_mlr_best, 'o', label=label_mlr_best) l = plt.ylabel(r"$K_{predicted}$ (GPa)") l = plt.xlabel(r"$K$ (GPa)") plt.legend() plt.xlim([0, 410]) plt.ylim([0, 410]) plt.plot([0, 410], [0, 410], 'k--'); ###Output _____no_output_____ ###Markdown We can see that the best subset model has substantially reduced MSE and improved R2. ShrinkageHere, we will use shrinkage methods to shrink the feature coefficients. It is a best practice to first center the inputs and scale to unit variance prior to performing shrinkage. We will use scikit-learn's StandardScaler which performs this scaling. ###Code from sklearn.preprocessing import StandardScaler scaler = StandardScaler() scaler.fit(x) means_ = scaler.mean_ stds_ = scaler.scale_ z = scaler.transform(x) ###Output _____no_output_____ ###Markdown Ridge regressionUnlike the simple MLR example, we will do our ridge regression properly using k-fold cross-validation to identify the best shrinkage factor (denoted as the argument alpha in scikit-learn's implementation). We will use the MSE as the criterion in which to determine the best alpha. ###Code from sklearn.model_selection import cross_validate, KFold cv_results = [] coeffs = [] alphas = np.logspace(-2, 2, 71) kfold = KFold(n_splits=10, shuffle=True, random_state=42) for alpha in alphas: ridge = linear_model.Ridge(alpha=alpha, max_iter=10000) ridge.fit(z, y) scores = cross_validate(ridge, z, y, cv=kfold, scoring="neg_mean_squared_error") cv_results.append([alpha, -np.mean(scores["test_score"])] + list(ridge.coef_)) cv_results = pd.DataFrame(cv_results, columns=["alpha", "score"] + features) f, ax = plt.subplots(figsize=(12, 8)) plt.plot(cv_results["alpha"], cv_results["score"], '-x') plt.xlim([1e-2, 10**1.8]) plt.ylim((4000, 4800)) plt.xscale(r'log') plt.xlabel(r'$\alpha$') plt.ylabel(r'MSE') plt.title(r'Ridge regression') best_alpha = cv_results["alpha"][cv_results["score"].idxmin()] plt.annotate(r"Best $\alpha$ = %.3f" % best_alpha, (best_alpha, cv_results["score"].min()), fontsize=16); ###Output _____no_output_____ ###Markdown Here, we will take a look at the effect of alpha on the coefficients. Note that these are for the scaled coefficients, i.e., the coefficients that map the scaled inputs to the output, and not the unscaled inputs. ###Code f, ax = plt.subplots(figsize=(12, 8)) for f in features: plt.plot(cv_results["alpha"], cv_results[f], '-x', label=f) plt.xscale('log') plt.xlabel(r'$\alpha$') plt.ylabel('Coefficient') plt.title(r'Scaled coefficients change with $\alpha$') plt.legend() plt.xlim([1e-2, 10**1.8]) plt.ylim([-100, 100]); ###Output _____no_output_____ ###Markdown Using the best alpha, we will now regenerate the final relationship. Note that we have to rescale the intercepts and coefficients back to the unnormalized inputs (multiply all coefficients by the respective standard deviations of the inputs, and shift all means). 
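Concretely, if the `StandardScaler` maps $z_j = (x_j - \mu_j)/\sigma_j$ and the ridge fit on the scaled inputs is $\hat{y} = b + \sum_j \beta_j z_j$, then substituting gives

$$\hat{y} = \Big(b - \sum_j \frac{\beta_j \mu_j}{\sigma_j}\Big) + \sum_j \frac{\beta_j}{\sigma_j}\, x_j,$$

so the unscaled coefficients are $\beta_j / \sigma_j$ and the intercept is shifted by $-\sum_j \mu_j \beta_j / \sigma_j$. This is exactly what the division by `stds_` and the `means_.dot(real_coef)` correction in the next cell compute.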
###Code reg = linear_model.Ridge(alpha=best_alpha, max_iter=10000) reg.fit(z, y) real_coef = reg.coef_ / stds_ # convert back to unnormalized inputs real_interp = reg.intercept_ - means_.dot(real_coef) # convert back to unnormalized inputs equation = ["%.2e %s" % (v, f) for v, f in zip(real_coef, features)] print("K = %.1f + %s" % (real_interp, " + ".join(equation))) ###Output K = -196.3 + 4.44e-02 MP + 2.43e-02 BP + -4.78e-02 Z + 1.98e+01 X + -2.00e+00 r + -4.27e+00 X^2 + 9.56e+01 sqrt(X) + -6.84e+00 r^2 + 3.33e+01 sqrt(r) ###Markdown Here, we will redo our MLR with cross validation and compare with the ridge regression. ###Code from sklearn.model_selection import cross_val_predict from sklearn.metrics import r2_score ridge = linear_model.Ridge(alpha=best_alpha, max_iter=10000) yhat_ridge = cross_val_predict(ridge, z, y, cv=kfold) r2_ridge = r2_score(y, yhat_ridge) mse_ridge = mean_squared_error(y, yhat_ridge) label_ridge = "Ridge: $R^2$ = %.3f, MSE = %.1f" % (r2_ridge, mse_ridge) f, ax = plt.subplots(figsize=(8, 8)) plt.plot(y, yhat_mlr, 'o', label=label_mlr) plt.plot(y, yhat_ridge, 'o', label=label_ridge) l = plt.ylabel("$K_{predicted}$ (GPa)") l = plt.xlabel("$K$ (GPa)") plt.legend() plt.xlim([0, 410]) plt.ylim([0, 410]) plt.plot([0, 410], [0, 410], 'k--'); ###Output _____no_output_____ ###Markdown LASSOHere, we will perform a LASSO regression using the same process as the ridge regression. ###Code alphas = np.logspace(-2, 1.2, 20) cv_results = [] coeffs = [] for alpha in alphas: lasso = linear_model.Lasso(alpha=alpha, max_iter=100000) lasso.fit(z, y) scores = cross_validate(lasso, z, y, cv=kfold, scoring='neg_mean_squared_error') cv_results.append([alpha, -np.mean(scores["test_score"])] + list(lasso.coef_)) cv_results = pd.DataFrame(cv_results, columns=["alpha", "score"] + features) f, ax = plt.subplots(figsize=(12, 8)) plt.plot(cv_results["alpha"], cv_results["score"], '-x') plt.xlim([1e-2, 10**1.2]) plt.ylim((3500, 7000)) plt.xscale('log') plt.xlabel(r'$\alpha$') plt.ylabel('MSE') plt.title('LASSO') best_alpha = cv_results["alpha"][cv_results["score"].idxmin()] plt.annotate(r"Best $\alpha$ = %.3f" % best_alpha, (best_alpha, cv_results["score"].min()), fontsize=16); ###Output _____no_output_____ ###Markdown At a certain shrinkage factor, several of the coefficients have been shrunk to zero. ###Code f, ax = plt.subplots(figsize=(12, 8)) for f in features: plt.plot(cv_results["alpha"], cv_results[f], '-x', label=f) plt.xscale('log') plt.xlabel(r'$\alpha$') plt.ylabel('Coefficient') plt.title(r'Scaled coefficients change with $\alpha$') plt.legend() plt.xlim([1e-2, 10**1.2]); ###Output _____no_output_____ ###Markdown We will now retrieve the final equation, ignoring the coefficients that are zero. We note that the atomic number Z no longer appears in the equation. This is somewhat in line with intuition since we do not expect atomic number to have a significant relationship with the bulk modulus. As you may recall, the electronegativity and atomic radius are inversely correlated with each other. So we would expect only one of these parameters to be needed to describe the bulk modulus. Furthermore, it seems that the bulk modulus should be related to sqrt(X) and not X. 
###Code
reg = linear_model.Lasso(alpha=best_alpha, max_iter=10000)
reg.fit(z, y)
real_coef = reg.coef_ / stds_ # convert back to unnormalized inputs
real_interp = reg.intercept_ - means_.dot(real_coef) # convert back to unnormalized inputs
equation = ["%.2e %s" % (v, f) for v, f in zip(real_coef, features) if abs(v) > 1e-4]
print("K = %.1f + %s" % (real_interp, " + ".join(equation)))

lasso = linear_model.Lasso(alpha=best_alpha, max_iter=10000)
yhat_lasso = cross_val_predict(lasso, z, y, cv=kfold)
r2_lasso = r2_score(y, yhat_lasso)
mse_lasso = mean_squared_error(y, yhat_lasso)
label_lasso = "Lasso: $R^2$ = %.3f, MSE = %.1f" % (r2_lasso, mse_lasso)

f, ax = plt.subplots(figsize=(8, 8))
plt.plot(y, yhat_mlr, 'o', label=label_mlr)
plt.plot(y, yhat_lasso, 'o', label=label_lasso)
l = plt.ylabel("$K_{predicted}$ (GPa)")
l = plt.xlabel("$K$ (GPa)")
plt.legend()
plt.xlim([0, 410])
plt.ylim([0, 410])
plt.plot([0, 410], [0, 410], 'k--');
###Output _____no_output_____
###Markdown Partial Least Squares
Here, we will do a 2-component PLS regression.
###Code
from sklearn.cross_decomposition import PLSRegression

pls = PLSRegression(n_components=2)
pls.fit(x, y)

yhat_pls = cross_val_predict(pls, x, y, cv=kfold)
r2_pls = r2_score(y, yhat_pls)
mse_pls = mean_squared_error(y, yhat_pls)
label_pls = "PLS (2 components): $R^2$ = %.3f, MSE = %.1f" % (r2_pls, mse_pls)

f, ax = plt.subplots(figsize=(8, 8))
plt.plot(y, yhat_mlr, 'o', label=label_mlr)
plt.plot(y, yhat_pls, 'o', label=label_pls)
l = plt.ylabel("$K_{predicted}$ (GPa)")
l = plt.xlabel("$K$ (GPa)")
plt.legend()
plt.xlim([0, 410])
plt.ylim([0, 410])
plt.plot([0, 410], [0, 410], 'k--');
###Output _____no_output_____
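###Markdown As a final note, scikit-learn also ships `RidgeCV` and `LassoCV` estimators that fold the alpha search we performed manually into a single `fit` call. The cell below is only a minimal sketch of that alternative (it assumes the same scaled inputs `z`, targets `y`, `kfold` splitter, and alpha grids used above); the manual loops remain useful when you want to inspect how the coefficients evolve with $\alpha$.
###Code
from sklearn.linear_model import RidgeCV, LassoCV

# RidgeCV cross-validates every alpha on the grid and keeps the one with the best score
ridge_cv = RidgeCV(alphas=np.logspace(-2, 2, 71), scoring="neg_mean_squared_error", cv=kfold)
ridge_cv.fit(z, y)
print("RidgeCV selected alpha = %.3f" % ridge_cv.alpha_)

# LassoCV does the same for the L1-penalized model
lasso_cv = LassoCV(alphas=np.logspace(-2, 1.2, 20), cv=kfold, max_iter=100000)
lasso_cv.fit(z, y)
print("LassoCV selected alpha = %.3f" % lasso_cv.alpha_)
###Output _____no_output_____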
week1/Basic Python Operations for Working with Text (Completed Class Copy).ipynb
###Markdown Table of Contents0.0.1&nbsp;&nbsp;Installing Required Libraries0.0.1.1&nbsp;&nbsp;Getting Familiar With Jupyter Notebooks1&nbsp;&nbsp;Week 1: Basic Python Operations for Working with Text2&nbsp;&nbsp;The Scale of Data in the 21st Century2.1&nbsp;&nbsp;Overview2.1.0.1&nbsp;&nbsp;Text Analytics2.1.0.2&nbsp;&nbsp;Data Engineering2.1.0.3&nbsp;&nbsp;Statistics / Machine Learning2.2&nbsp;&nbsp;Loading Text into Memory2.2.0.1&nbsp;&nbsp;Opening Files2.2.1&nbsp;&nbsp;An Aside: List Comprehension2.2.2&nbsp;&nbsp;Visualizing Summary Metrics Using Matplotlib2.2.3&nbsp;&nbsp;First Method: Create a Dictionary to Store Word Count2.2.4&nbsp;&nbsp;Using Python's Built-In Counter2.2.5&nbsp;&nbsp;In-Class Question2.3&nbsp;&nbsp;Zipf's Law2.3.1&nbsp;&nbsp;General Definition2.3.2&nbsp;&nbsp;Approximation in NLP3&nbsp;&nbsp;Regular Expressions3.0.1&nbsp;&nbsp;Match the first time a capital letter appears in the tweet3.0.2&nbsp;&nbsp;Match all capital letters that appears in the tweet3.0.3&nbsp;&nbsp;Match all words that are at least 3 characters long3.0.4&nbsp;&nbsp;Word Boundaries3.0.5&nbsp;&nbsp;Removing Stopwords Using Regex3.0.5.1&nbsp;&nbsp;Exercises4&nbsp;&nbsp;Homework 1 (Due Monday March 23rd, 2020 at 11:59pm PST)4.1&nbsp;&nbsp;Next Week (March 24th)4.1.1&nbsp;&nbsp;Check for Understanding Installing Required Libraries ###Code !pip3 install matplotlib !pip3 install pandas ###Output _____no_output_____ ###Markdown Getting Familiar With Jupyter Notebooks Jupyter keyboard shortcuts:- Press `Esc` to go into **Command Mode**. Your cell should turn from green highlights to blue highlights.- In **Command Mode**, press `M` to go into `Markdown` mode. This turns your cell into Markdown text so you can type text.- Press `Y` to go into `Code` mode. This then allows you to begin typing Python code.- Press `A` to insert a cell above your current cell.- Press `B` to insert a cell below your current cell.- Press `D` twice to delete your current cell.- Press `Shift` + `Enter` to save your cell. Week 1: Basic Python Operations for Working with Text The Scale of Data in the 21st Century ASCII table converting numbers to characters.(Wikipedia) OverviewBy the end of this week, you should be able to perform the following operations: Text Analytics- **load a text file into memory** using Python's built-in streaming libraries- **visualize word count and line length distributions** as histograms using Matplotlib Data Engineering- **read strings from a text input/output stream** using `readline()` and `readlines()`- **use both native Python dictionaries and `collections.Counter` objects** to produce word counts for a text corpus- perform basic search/replace operations using **regular expressions**- encode/decode text from bytes to support internationalization and digital-native characters (such as **emojis**). Statistics / Machine Learning- **create a word transition matrix using Numpy arrays**, which can be used for probabilistic inference and text generation (we will cover Week 2) Loading Text into MemoryThere are a variety of ways to hold data within memory. For text analytics and natural language processing purposes, we'll be most concerned with the following:- **list**- **set**- **dictionary**- **tuple**- **Numpy array**Imagine that we would like to find the most commonly used words in ***A Tale of Two Cities***, by the famed English novelist Charles Dickens, stored in a text file called **`tale-of-two-cities.txt`**, in the same directory as this Jupyter notebook. 
Later on, we'll use 3rd-party libraries to automate much of the processing, but for now, we'll explore Python's built-in functions for text processing. Opening Files The **`open()`** function takes *two* parameters; **filename**, and **mode**. In our case, `mode` is set to `r` for **read**, since we plan to read the file's contents, as opposed to `w` (write), or `a` (append). ###Code # Open Tale of Two Cities text_file = open("tale-of-two-cities.txt", "r") print(text_file) ###Output <_io.TextIOWrapper name='tale-of-two-cities.txt' mode='r' encoding='UTF-8'> ###Markdown Typically, a text character is **1 byte** in size. One byte is equal to **8 bits**. This means conceptually, the size of a string should be $N$ bytes, where $N$ is the number of characters. However, you'll see that in Python, the size of a string is larger: ###Code import sys EMPTY_STRING = "" ONE_CHAR_STRING = "a" TWO_CHAR_STRING = "ab" print(f"The size of EMPTY_STRING is {sys.getsizeof(EMPTY_STRING)} bytes.") print(f"The size of ONE_CHAR_STRING is {sys.getsizeof(ONE_CHAR_STRING)} bytes.") print(f"The size of TWO_CHAR_STRING is {sys.getsizeof(TWO_CHAR_STRING)} bytes.") ###Output The size of EMPTY_STRING is 49 bytes. The size of ONE_CHAR_STRING is 50 bytes. The size of TWO_CHAR_STRING is 51 bytes. ###Markdown The **`open()`** function returns a **`TextIOWrapper`** object from Python's `io` module, which handles common input/output streaming operations. A **stream** is a potentially infinite sequence of elements (in our case, characters) arriving over time. You'll use streams to model data that is **unbounded** (it's undetermined the volume, the length, and frequency of the data). A stream has a pointer to its current position within the sequence. This object has an extremely helpful **`readline()`** method that reads from a text file until encountering an **`EOF`** marker or a new line symbol. ###Code text_file.readline() ###Output _____no_output_____ ###Markdown You can pass in a parameter to **`readline()`** to control how many bytes of input stream data you'll receive. For instance, **`readline(2)`** returns at most 2 bytes of text input data. You might use this, for instance, if your Python application is reading not from a flat text file, but from a socket, which supplies a continuous stream of data with fixed length (ie., the messages all have the same number of characters).**In-Class Question**: *Assume you just opened the text file with **`open()`**. What output is returned when **`text_file.readline(5)`** is called the **second** time?*- **A)** The entire first line of the novel- **B)** The first 5 characters of the second line- **C)** The entire second line of the novel- **D)** The first 5 characters of the first line- **E)** The 6th-10th characters of the first line ###Code text_file.seek(0) #reset the stream position to the start of the text file for i in range(2): # repeat the below line twice print(f"Iteration {i + 1}: {text_file.readline(5)}") ###Output Iteration 1: IT Iteration 2: WAS t ###Markdown Each time that you call **`readline()`**, a position marker within **`TextIOWrapper`** is moved forward:We typically will use **`readlines()`** instead to read text files line by line. This returns a Python **list**: ###Code text_file.seek(0) # reset the stream position to the start of the file lines = text_file.readlines() # read all the lines and return a list of strings ###Output _____no_output_____ ###Markdown We see that there are **12870** lines of text in the novel. 
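One caveat worth noting: `readlines()` loads the entire file into memory at once, which is fine for a novel but can be wasteful for very large files. A minimal sketch of the lazier alternative (using the same file path; not needed for the rest of this notebook) is to iterate over the file object itself, which yields one line at a time.
###Code
# count lines without holding the whole file in memory
with open("tale-of-two-cities.txt", "r") as f:
    num_lines = sum(1 for _ in f)  # the file object is an iterator over lines
###Output _____no_output_____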
###Code print(f"There are {len(lines)} lines in the novel.") total_num_chars = 0 for line in lines: # iterate through each line total_num_chars += len(line) # add the number of characters in a line to the total count of characters avg_chars = round(total_num_chars / len(lines),1) # divide total character count by number of lines to get average print(f"On average, each line has {avg_chars} characters.") import matplotlib.pyplot as plt # we are importing the pyplot module from matplotlib, and naming it as plt ###Output _____no_output_____ ###Markdown An Aside: List ComprehensionSometimes, we need to iterate through a list and perform some sort of operation (sum all the elements, or remove a certain character). The traditional way to do this is using a for loop:```Pythonlengths = [] declare an empty listfor line in lines: iterate through each line lengths.append(len(line)) add the length of each line to the list```A slightly less verbose way, called **list comprehension**, to write this is```Pythonlengths = [len(line) for line in lines]```List comprehension is **typically slightly faster**, since it avoids the additional `append()` call for each iteration of the for loop. See this example from StackOverflow:```Pythondef slower(): using traditional iteration result = [] for elem in some_iterable: result.append(elem) return result``````Pythondef faster(): using list comprehension return [elem for elem in some_iterable]```Within the Python REPL **(read-eval-print-loop)**:```Python>>> some_iterable = range(1000)>>> import timeit>>> timeit.timeit('f()', 'from __main__ import slower as f', number=10000)1.4456570148468018>>> timeit.timeit('f()', 'from __main__ import faster as f', number=10000)0.49323201179504395``` Visualizing Summary Metrics Using Matplotlib ###Code NUM_BINS = 30 # increase this number to make the visualization more granular plt.rcParams["figure.figsize"] = (15,6) plt.hist([len(line) for line in lines], bins=NUM_BINS) plt.title("Distribution of Line Lengths in Tale of Two Cities") # give the plot a title plt.xlabel("Number of Characters in Line") # label the X axis plt.ylabel("Count of Lines") # label the Y axis plt.show() ###Output _____no_output_____ ###Markdown What if now we want to visualize how many times each word appears in the entire novel (for now, we won't worry about **stemming / lemmatization** and other preprocessing steps)? First Method: Create a Dictionary to Store Word CountDictionaries in Python have **keys** and **values**. The keys must be unique (no duplicate keys). They can be accessed via the **`keys()`** and **`values()`** methods of a dictionary object. ###Code words = [] # create a list of all words word_count = {} # create a dictionary to store word counts for line in lines: # for each line in the novel for word in line.split(" "): # for each word in the line words.append(word) # add the word to the list of words if word not in word_count.keys(): # if the word has not been seen before, add it to the dictionary with initial count of 1 word_count[word] = 1 else: word_count[word] += 1 # if the word has been seen before, increment its count by 1 print(f"There's an estimated {len(words)} words in the novel.") print(f"There's {len(word_count.keys())} unique words in the novel.") ###Output _____no_output_____ ###Markdown Let's use Python **`sets`** to check that our dictionary's keys are unique. Remember that a set is a collection of **unique elements**, so calling **`set(words)`** will return only the unique words in our text file. 
###Code assert len(word_count.keys()) == len(set(words)), "This error message will be printed if the assertion to the left is not true." ###Output _____no_output_____ ###Markdown Using Python's Built-In CounterSince the task of building a count using a dictionary is a common operation, Python provides a built-in object called `Counter` that we can use: ###Code from collections import Counter def count_words(lines, delimiter=" "): words = Counter() # instantiate a Counter object called words for line in lines: for word in line.split(delimiter): words[word] += 1 # increment count for word return words ###Output _____no_output_____ ###Markdown A core principle of software engineering and programming is **DRY**: Don't Repeat Yourself. Since we are likely going to be making many histograms throughout this course, it's best that we create a reusable function. ###Code def make_histogram(values, title=None,xlabel=None,ylabel=None, bins=30, x_size=15, y_size=6): plt.rcParams["figure.figsize"] = (x_size,y_size) plt.hist(values, bins=bins) if title: plt.title(title) # give the plot a title if xlabel: plt.xlabel(xlabel) # label the X axis if ylabel: plt.ylabel(ylabel) # label the Y axis plt.show() make_histogram(word_count.values(), title="Distribution of Word Count", xlabel="Number of Times Word Appears", ylabel="Number of Unique Words") ###Output _____no_output_____ ###Markdown In-Class Question- Why does this distribution look the way it does? - What additional steps could be taken to make the results more meaningful? ###Code import pandas as pd # output the results to a dataframe word_count_df = pd.DataFrame(columns=["word", "frequency"]) # create a dataframe with two columns, word and frequency word_count_df["word"] = list(word_count.keys()) word_count_df["frequency"] = list(word_count.values()) word_count_df.to_csv("dickens_word_count.csv") # saves to an outputs folder - if you don't have one, Python will throw an error ###Output _____no_output_____ ###Markdown Zipf's Law General DefinitionZipf's Law states that for `N` words, the `k`th most frequent word will appear with a normalized frequency equal to The parameter $s$ is an exponent that defines the behavior of the distribution. Traditionally, in natural language, $s = 1$. Stefan Evert, http://zipfr.r-forge.r-project.org/materials/LREC2018/tutorial_lrec2018.handout.pdf Approximation in NLPIf $t_1$ is the most common word in a collection of text, and $t_2$ is the next most common word, then the frequency of the $i$th most common word is proportional to $\frac{1}{i}$. The approximation we'll use specifically for natural languages is$$f(t_i) = \frac{0.1}{i^\alpha}$$$\alpha = 1$.To represent the frequency of a word in a body of text.In human language, there are **a few high-frequency words and many low-frequency words**. What does this mean in terms of machine learning / data modelling?* In many cases, the high frequency words do not carry much value in terms of predictive power or signal. These are frequently **stopwords** that must be removed / otherwise feature-engineered. Regular Expressions ###Code # get the top stopwords word_count_df.sort_values(by=["frequency"], ascending=False).head(5) import re SAMPLE_TWEET = ''' #wolfram Alpha SUCKS! Even for researchers the information provided is less than you can get from #google or #wikipedia, totally useless! 
Avoid Wolfram at all costs, #ScrewWolframProducts" ''' # create a dataframe version of Dickens' novel dickens_text_df = pd.DataFrame( open("tale-of-two-cities.txt", "r"), columns=["line"]) dickens_text_df["line"] = dickens_text_df["line"].str.replace("\n", "") ###Output _____no_output_____ ###Markdown Match the first time a capital letter appears in the tweet ###Code match = re.search("[A-Z]", SAMPLE_TWEET) match.group() ###Output _____no_output_____ ###Markdown Match all capital letters that appears in the tweet ###Code # re re.findall("[A-Z]", SAMPLE_TWEET) # pandas dickens_text_df["results"] = dickens_text_df["line"].str.extract(r'([A-Z])') ###Output _____no_output_____ ###Markdown Match all words that are at least 3 characters long ###Code # re re.findall("[a-zA-Z]{3,}", SAMPLE_TWEET)[:5] # show only the first 5 # pandas dickens_text_df["results"] = dickens_text_df["line"].str.extract(r'([a-zA-Z]{3,})') dickens_text_df["results"] = dickens_text_df["line"].str.findall(r'([a-zA-Z]{3,})') dickens_text_df.head(5) ###Output _____no_output_____ ###Markdown Word BoundariesConsider the sentence:*A thorough examination of the movie shows Thor was a thorn in the side of the villains, both then and now. thor.*What happens if you try to parse out all `Thor` references? What happens if you want to remove `A` or `a`, or `the` to clean up the text? ###Code text = "A thorough examination of the movie shows Thor was a thorn in the side of the villains, both then and now. thor." # re text = "A thorough examination of the movie shows Thor was a thorn in the side of the villains, both then and now. thor." text = re.sub(r'(a|A)', '', text) text text = "A thorough examination of the movie shows Thor was a thorn in the side of the villains, both then and now. thor." re.findall(r'\b(thor|Thor)\b', text) # notice the use of the r string prefix! 
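# an added variant (same `text` as above, shown only as a sketch): the re.IGNORECASE
# flag makes the word-boundary match case-insensitive, so every capitalization of
# "thor" is caught without listing each one by hand
re.findall(r'\bthor\b', text, flags=re.IGNORECASE)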
# pandas dickens_text_df["results"] = dickens_text_df["line"].str.findall(r'\bthe\b', case=False) dickens_text_df.head(5) ###Output _____no_output_____ ###Markdown Removing Stopwords Using Regex ###Code # re text = re.sub('(the|The)', '', text, flags=re.IGNORECASE) text # pandas dickens_text_df["results"] = dickens_text_df["line"].str.replace(r'\bthe\b', '', case=False) dickens_text_df.head() ###Output _____no_output_____ ###Markdown Table of Contents0.0.1&nbsp;&nbsp;Installing Required Libraries0.0.1.1&nbsp;&nbsp;Getting Familiar With Jupyter Notebooks1&nbsp;&nbsp;Week 1: Basic Python Operations for Working with Text2&nbsp;&nbsp;The Scale of Data in the 21st Century2.1&nbsp;&nbsp;Overview2.1.0.1&nbsp;&nbsp;Text Analytics2.1.0.2&nbsp;&nbsp;Data Engineering2.1.0.3&nbsp;&nbsp;Statistics / Machine Learning2.2&nbsp;&nbsp;Loading Text into Memory2.2.0.1&nbsp;&nbsp;Opening Files2.2.1&nbsp;&nbsp;An Aside: List Comprehension2.2.2&nbsp;&nbsp;Visualizing Summary Metrics Using Matplotlib2.2.3&nbsp;&nbsp;First Method: Create a Dictionary to Store Word Count2.2.4&nbsp;&nbsp;Using Python's Built-In Counter2.2.5&nbsp;&nbsp;In-Class Question2.3&nbsp;&nbsp;Zipf's Law2.3.1&nbsp;&nbsp;General Definition2.3.2&nbsp;&nbsp;Approximation in NLP3&nbsp;&nbsp;Regular Expressions3.0.1&nbsp;&nbsp;Match the first time a capital letter appears in the tweet3.0.2&nbsp;&nbsp;Match all capital letters that appears in the tweet3.0.3&nbsp;&nbsp;Match all words that are at least 3 characters long3.0.4&nbsp;&nbsp;Word Boundaries3.0.5&nbsp;&nbsp;Removing Stopwords Using Regex3.0.5.1&nbsp;&nbsp;Exercises4&nbsp;&nbsp;Homework 1 (Due Monday March 23rd, 2020 at 11:59pm PST)4.1&nbsp;&nbsp;Next Week (March 24th)4.1.1&nbsp;&nbsp;Check for Understanding Installing Required Libraries ###Code !pip3 install matplotlib !pip3 install pandas ###Output Requirement already satisfied: matplotlib in /Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages Requirement already satisfied: numpy>=1.7.1 in /Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages (from matplotlib) Requirement already satisfied: kiwisolver>=1.0.1 in /Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages (from matplotlib) Requirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages (from matplotlib) Requirement already satisfied: python-dateutil>=2.1 in /Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages (from matplotlib) Requirement already satisfied: six>=1.10 in /Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages (from matplotlib) Requirement already satisfied: pytz in /Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages (from matplotlib) Requirement already satisfied: cycler>=0.10 in /Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages (from matplotlib) Requirement already satisfied: setuptools in /Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages (from kiwisolver>=1.0.1->matplotlib) You are using pip version 9.0.3, however version 20.0.2 is available. You should consider upgrading via the 'pip install --upgrade pip' command. 
Requirement already satisfied: pandas in /Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages Requirement already satisfied: pytz>=2017.2 in /Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages (from pandas) Requirement already satisfied: numpy>=1.13.3 in /Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages (from pandas) Requirement already satisfied: python-dateutil>=2.6.1 in /Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages (from pandas) Requirement already satisfied: six>=1.5 in /Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages (from python-dateutil>=2.6.1->pandas) You are using pip version 9.0.3, however version 20.0.2 is available. You should consider upgrading via the 'pip install --upgrade pip' command. ###Markdown Getting Familiar With Jupyter Notebooks Jupyter keyboard shortcuts:- Press `Esc` to go into **Command Mode**. Your cell should turn from green highlights to blue highlights.- In **Command Mode**, press `M` to go into `Markdown` mode. This turns your cell into Markdown text so you can type text.- Press `Y` to go into `Code` mode. This then allows you to begin typing Python code.- Press `A` to insert a cell above your current cell.- Press `B` to insert a cell below your current cell.- Press `D` twice to delete your current cell.- Press `Shift` + `Enter` to save your cell. Week 1: Basic Python Operations for Working with Text The Scale of Data in the 21st Century ASCII table converting numbers to characters.(Wikipedia) OverviewBy the end of this week, you should be able to perform the following operations: Text Analytics- **load a text file into memory** using Python's built-in streaming libraries- **visualize word count and line length distributions** as histograms using Matplotlib Data Engineering- **read strings from a text input/output stream** using `readline()` and `readlines()`- **use both native Python dictionaries and `collections.Counter` objects** to produce word counts for a text corpus- perform basic search/replace operations using **regular expressions**- encode/decode text from bytes to support internationalization and digital-native characters (such as **emojis**). Statistics / Machine Learning- **create a word transition matrix using Numpy arrays**, which can be used for probabilistic inference and text generation (we will cover Week 2) Loading Text into MemoryThere are a variety of ways to hold data within memory. For text analytics and natural language processing purposes, we'll be most concerned with the following:- **list**- **set**- **dictionary**- **tuple**- **Numpy array**Imagine that we would like to find the most commonly used words in ***A Tale of Two Cities***, by the famed English novelist Charles Dickens, stored in a text file called **`tale-of-two-cities.txt`**, in the same directory as this Jupyter notebook. Later on, we'll use 3rd-party libraries to automate much of the processing, but for now, we'll explore Python's built-in functions for text processing. Opening Files The **`open()`** function takes *two* parameters; **filename**, and **mode**. In our case, `mode` is set to `r` for **read**, since we plan to read the file's contents, as opposed to `w` (write), or `a` (append). 
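(A small aside, sketched here rather than used below: when a file only needs to be read once, the idiomatic pattern is the `with` statement, which closes the file automatically even if an error occurs. In this notebook we keep the file handle open instead, so that we can keep calling its methods in later cells.)
###Code
# sketch only: context-manager form of the same open() call
with open("tale-of-two-cities.txt", "r") as f:
    contents = f.read()  # the file is closed automatically when the block ends
###Output _____no_output_____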
###Code # Open Tale of Two Cities text_file = open("tale-of-two-cities.txt", "r") print(text_file) ###Output <_io.TextIOWrapper name='tale-of-two-cities.txt' mode='r' encoding='UTF-8'> ###Markdown Typically, a text character is **1 byte** in size. One byte is equal to **8 bits**. This means conceptually, the size of a string should be $N$ bytes, where $N$ is the number of characters. However, you'll see that in Python, the size of a string is larger: ###Code import sys EMPTY_STRING = "" ONE_CHAR_STRING = "a" TWO_CHAR_STRING = "ab" print(f"The size of EMPTY_STRING is {sys.getsizeof(EMPTY_STRING)} bytes.") print(f"The size of ONE_CHAR_STRING is {sys.getsizeof(ONE_CHAR_STRING)} bytes.") print(f"The size of TWO_CHAR_STRING is {sys.getsizeof(TWO_CHAR_STRING)} bytes.") ###Output The size of EMPTY_STRING is 49 bytes. The size of ONE_CHAR_STRING is 50 bytes. The size of TWO_CHAR_STRING is 51 bytes. ###Markdown The **`open()`** function returns a **`TextIOWrapper`** object from Python's `io` module, which handles common input/output streaming operations. A **stream** is a potentially infinite sequence of elements (in our case, characters) arriving over time. You'll use streams to model data that is **unbounded** (it's undetermined the volume, the length, and frequency of the data). A stream has a pointer to its current position within the sequence. This object has an extremely helpful **`readline()`** method that reads from a text file until encountering an **`EOF`** marker or a new line symbol. ###Code text_file.readline() ###Output _____no_output_____ ###Markdown You can pass in a parameter to **`readline()`** to control how many bytes of input stream data you'll receive. For instance, **`readline(2)`** returns at most 2 bytes of text input data. You might use this, for instance, if your Python application is reading not from a flat text file, but from a socket, which supplies a continuous stream of data with fixed length (ie., the messages all have the same number of characters).**In-Class Question**: *Assume you just opened the text file with **`open()`**. What output is returned when **`text_file.readline(5)`** is called the **second** time?*- **A)** The entire first line of the novel- **B)** The first 5 characters of the second line- **C)** The entire second line of the novel- **D)** The first 5 characters of the first line- **E)** The 6th-10th characters of the first line ###Code text_file.seek(0) #reset the stream position to the start of the text file for i in range(2): # repeat the below line twice print(f"Iteration {i + 1}: {text_file.readline(5)}") ###Output Iteration 1: IT Iteration 2: WAS t ###Markdown Each time that you call **`readline()`**, a position marker within **`TextIOWrapper`** is moved forward:We typically will use **`readlines()`** instead to read text files line by line. This returns a Python **list**: ###Code text_file.seek(0) # reset the stream position to the start of the file lines = text_file.readlines() # read all the lines and return a list of strings ###Output _____no_output_____ ###Markdown We see that there are **12870** lines of text in the novel. 
###Code print(f"There are {len(lines)} lines in the novel.") total_num_chars = 0 for line in lines: # iterate through each line total_num_chars += len(line) # add the number of characters in a line to the total count of characters avg_chars = round(total_num_chars / len(lines),1) # divide total character count by number of lines to get average print(f"On average, each line has {avg_chars} characters.") import matplotlib.pyplot as plt # we are importing the pyplot module from matplotlib, and naming it as plt ###Output _____no_output_____ ###Markdown An Aside: List ComprehensionSometimes, we need to iterate through a list and perform some sort of operation (sum all the elements, or remove a certain character). The traditional way to do this is using a for loop:```Pythonlengths = [] declare an empty listfor line in lines: iterate through each line lengths.append(len(line)) add the length of each line to the list```A slightly less verbose way, called **list comprehension**, to write this is```Pythonlengths = [len(line) for line in lines]```List comprehension is **typically slightly faster**, since it avoids the additional `append()` call for each iteration of the for loop. See this example from StackOverflow:```Pythondef slower(): using traditional iteration result = [] for elem in some_iterable: result.append(elem) return result``````Pythondef faster(): using list comprehension return [elem for elem in some_iterable]```Within the Python REPL **(read-eval-print-loop)**:```Python>>> some_iterable = range(1000)>>> import timeit>>> timeit.timeit('f()', 'from __main__ import slower as f', number=10000)1.4456570148468018>>> timeit.timeit('f()', 'from __main__ import faster as f', number=10000)0.49323201179504395``` Visualizing Summary Metrics Using Matplotlib ###Code NUM_BINS = 30 # increase this number to make the visualization more granular plt.rcParams["figure.figsize"] = (15,6) plt.hist([len(line) for line in lines], bins=NUM_BINS) plt.title("Distribution of Line Lengths in Tale of Two Cities") # give the plot a title plt.xlabel("Number of Characters in Line") # label the X axis plt.ylabel("Count of Lines") # label the Y axis plt.show() ###Output _____no_output_____ ###Markdown What if now we want to visualize how many times each word appears in the entire novel (for now, we won't worry about **stemming / lemmatization** and other preprocessing steps)? First Method: Create a Dictionary to Store Word CountDictionaries in Python have **keys** and **values**. The keys must be unique (no duplicate keys). They can be accessed via the **`keys()`** and **`values()`** methods of a dictionary object. ###Code words = [] # create a list of all words word_count = {} # create a dictionary to store word counts for line in lines: # for each line in the novel for word in line.split(" "): # for each word in the line words.append(word) # add the word to the list of words if word not in word_count.keys(): # if the word has not been seen before, add it to the dictionary with initial count of 1 word_count[word] = 1 else: word_count[word] += 1 # if the word has been seen before, increment its count by 1 print(f"There's an estimated {len(words)} words in the novel.") print(f"There's {len(word_count.keys())} unique words in the novel.") ###Output There's an estimated 143345 words in the novel. There's 21683 unique words in the novel. ###Markdown Let's use Python **`sets`** to check that our dictionary's keys are unique. 
Remember that a set is a collection of **unique elements**, so calling **`set(words)`** will return only the unique words in our text file. ###Code assert len(word_count.keys()) == len(set(words)), "This error message will be printed if the assertion to the left is not true." ###Output _____no_output_____ ###Markdown Using Python's Built-In CounterSince the task of building a count using a dictionary is a common operation, Python provides a built-in object called `Counter` that we can use: ###Code from collections import Counter def count_words(lines, delimiter=" "): words = Counter() # instantiate a Counter object called words for line in lines: for word in line.split(delimiter): words[word] += 1 # increment count for word return words ###Output _____no_output_____ ###Markdown A core principle of software engineering and programming is **DRY**: Don't Repeat Yourself. Since we are likely going to be making many histograms throughout this course, it's best that we create a reusable function. ###Code def make_histogram(values, title=None,xlabel=None,ylabel=None, bins=30, x_size=15, y_size=6): plt.rcParams["figure.figsize"] = (x_size,y_size) plt.hist(values, bins=bins) if title: plt.title(title) # give the plot a title if xlabel: plt.xlabel(xlabel) # label the X axis if ylabel: plt.ylabel(ylabel) # label the Y axis plt.show() make_histogram(word_count.values(), title="Distribution of Word Count", xlabel="Number of Times Word Appears", ylabel="Number of Unique Words") ###Output _____no_output_____ ###Markdown In-Class Question- Why does this distribution look the way it does? - What additional steps could be taken to make the results more meaningful? ###Code import pandas as pd # output the results to a dataframe word_count_df = pd.DataFrame(columns=["word", "frequency"]) # create a dataframe with two columns, word and frequency word_count_df["word"] = list(word_count.keys()) word_count_df["frequency"] = list(word_count.values()) word_count_df.to_csv("dickens_word_count.csv") # saves to an outputs folder - if you don't have one, Python will throw an error ###Output _____no_output_____ ###Markdown Zipf's Law General DefinitionZipf's Law states that for `N` words, the `k`th most frequent word will appear with a normalized frequency equal to The parameter $s$ is an exponent that defines the behavior of the distribution. Traditionally, in natural language, $s = 1$. Stefan Evert, http://zipfr.r-forge.r-project.org/materials/LREC2018/tutorial_lrec2018.handout.pdf Approximation in NLPIf $t_1$ is the most common word in a collection of text, and $t_2$ is the next most common word, then the frequency of the $i$th most common word is proportional to $\frac{1}{i}$. The approximation we'll use specifically for natural languages is$$f(t_i) = \frac{0.1}{i^\alpha}$$$\alpha = 1$.To represent the frequency of a word in a body of text.In human language, there are **a few high-frequency words and many low-frequency words**. What does this mean in terms of machine learning / data modelling?* In many cases, the high frequency words do not carry much value in terms of predictive power or signal. These are frequently **stopwords** that must be removed / otherwise feature-engineered. Regular Expressions ###Code # get the top stopwords word_count_df.sort_values(by=["frequency"], ascending=False).head(5) import re SAMPLE_TWEET = ''' #wolfram Alpha SUCKS! Even for researchers the information provided is less than you can get from #google or #wikipedia, totally useless! 
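Written out, the normalized frequency referred to above takes its standard form (with $k$ the rank of the word and $N$ the number of words):

$$f(k; s, N) = \frac{1/k^{s}}{\sum_{n=1}^{N} 1/n^{s}}$$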
Avoid Wolfram at all costs, #ScrewWolframProducts" ''' # create a dataframe version of Dickens' novel dickens_text_df = pd.DataFrame( open("tale-of-two-cities.txt", "r"), columns=["line"]) dickens_text_df["line"] = dickens_text_df["line"].str.replace("\n", "") ###Output _____no_output_____ ###Markdown Match the first time a capital letter appears in the tweet ###Code match = re.search("[A-Z]", SAMPLE_TWEET) match.group() ###Output _____no_output_____ ###Markdown Match all capital letters that appears in the tweet ###Code # re re.findall("[A-Z]", SAMPLE_TWEET) # pandas dickens_text_df["results"] = dickens_text_df["line"].str.extract(r'([A-Z])') ###Output _____no_output_____ ###Markdown Match all words that are at least 3 characters long ###Code # re re.findall("[a-zA-Z]{3,}", SAMPLE_TWEET)[:5] # show only the first 5 # pandas dickens_text_df["results"] = dickens_text_df["line"].str.extract(r'([a-zA-Z]{3,})') dickens_text_df["results"] = dickens_text_df["line"].str.findall(r'([a-zA-Z]{3,})') dickens_text_df.head(5) ###Output _____no_output_____ ###Markdown Word BoundariesConsider the sentence:*A thorough examination of the movie shows Thor was a thorn in the side of the villains, both then and now. thor.*What happens if you try to parse out all `Thor` references? What happens if you want to remove `A` or `a`, or `the` to clean up the text? ###Code text = "A thorough examination of the movie shows Thor was a thorn in the side of the villains, both then and now. thor." # re text = "A thorough examination of the movie shows Thor was a thorn in the side of the villains, both then and now. thor." text = re.sub(r'(a|A)', '', text) text text = "A thorough examination of the movie shows Thor was a thorn in the side of the villains, both then and now. thor." re.findall(r'\b(thor|Thor)\b', text) # notice the use of the r string prefix! 
# pandas dickens_text_df["results"] = dickens_text_df["line"].str.findall(r'\bthe\b', case=False) dickens_text_df.head(5) ###Output _____no_output_____ ###Markdown Removing Stopwords Using Regex ###Code # re text = re.sub('(the|The)', '', text, flags=re.IGNORECASE) text # pandas dickens_text_df["results"] = dickens_text_df["line"].str.replace(r'\bthe\b', '', case=False) dickens_text_df.head() ###Output _____no_output_____ ###Markdown Table of Contents0.0.1&nbsp;&nbsp;Installing Required Libraries0.0.1.1&nbsp;&nbsp;Getting Familiar With Jupyter Notebooks1&nbsp;&nbsp;Week 1: Basic Python Operations for Working with Text2&nbsp;&nbsp;The Scale of Data in the 21st Century2.1&nbsp;&nbsp;Overview2.1.0.1&nbsp;&nbsp;Text Analytics2.1.0.2&nbsp;&nbsp;Data Engineering2.1.0.3&nbsp;&nbsp;Statistics / Machine Learning2.2&nbsp;&nbsp;Loading Text into Memory2.2.0.1&nbsp;&nbsp;Opening Files2.2.1&nbsp;&nbsp;An Aside: List Comprehension2.2.2&nbsp;&nbsp;Visualizing Summary Metrics Using Matplotlib2.2.3&nbsp;&nbsp;First Method: Create a Dictionary to Store Word Count2.2.4&nbsp;&nbsp;Using Python's Built-In Counter2.2.5&nbsp;&nbsp;In-Class Question2.3&nbsp;&nbsp;Zipf's Law2.3.1&nbsp;&nbsp;General Definition2.3.2&nbsp;&nbsp;Approximation in NLP3&nbsp;&nbsp;Regular Expressions3.0.1&nbsp;&nbsp;Match the first time a capital letter appears in the tweet3.0.2&nbsp;&nbsp;Match all capital letters that appears in the tweet3.0.3&nbsp;&nbsp;Match all words that are at least 3 characters long3.0.4&nbsp;&nbsp;Word Boundaries3.0.5&nbsp;&nbsp;Removing Stopwords Using Regex3.0.5.1&nbsp;&nbsp;Exercises4&nbsp;&nbsp;Homework 1 (Due Monday March 23rd, 2020 at 11:59pm PST)4.1&nbsp;&nbsp;Next Week (March 24th)4.1.1&nbsp;&nbsp;Check for Understanding Installing Required Libraries ###Code !pip3 install matplotlib !pip3 install pandas ###Output _____no_output_____ ###Markdown Getting Familiar With Jupyter Notebooks Jupyter keyboard shortcuts:- Press `Esc` to go into **Command Mode**. Your cell should turn from green highlights to blue highlights.- In **Command Mode**, press `M` to go into `Markdown` mode. This turns your cell into Markdown text so you can type text.- Press `Y` to go into `Code` mode. This then allows you to begin typing Python code.- Press `A` to insert a cell above your current cell.- Press `B` to insert a cell below your current cell.- Press `D` twice to delete your current cell.- Press `Shift` + `Enter` to save your cell. Week 1: Basic Python Operations for Working with Text The Scale of Data in the 21st Century ASCII table converting numbers to characters.(Wikipedia) OverviewBy the end of this week, you should be able to perform the following operations: Text Analytics- **load a text file into memory** using Python's built-in streaming libraries- **visualize word count and line length distributions** as histograms using Matplotlib Data Engineering- **read strings from a text input/output stream** using `readline()` and `readlines()`- **use both native Python dictionaries and `collections.Counter` objects** to produce word counts for a text corpus- perform basic search/replace operations using **regular expressions**- encode/decode text from bytes to support internationalization and digital-native characters (such as **emojis**). Statistics / Machine Learning- **create a word transition matrix using Numpy arrays**, which can be used for probabilistic inference and text generation (we will cover Week 2) Loading Text into MemoryThere are a variety of ways to hold data within memory. 
For text analytics and natural language processing purposes, we'll be most concerned with the following:- **list**- **set**- **dictionary**- **tuple**- **Numpy array**Imagine that we would like to find the most commonly used words in ***A Tale of Two Cities***, by the famed English novelist Charles Dickens, stored in a text file called **`tale-of-two-cities.txt`**, in the same directory as this Jupyter notebook. Later on, we'll use 3rd-party libraries to automate much of the processing, but for now, we'll explore Python's built-in functions for text processing. Opening Files The **`open()`** function takes *two* parameters; **filename**, and **mode**. In our case, `mode` is set to `r` for **read**, since we plan to read the file's contents, as opposed to `w` (write), or `a` (append). ###Code # Open Tale of Two Cities text_file = open("tale-of-two-cities.txt", "r", encoding='utf8') print(text_file) ###Output <_io.TextIOWrapper name='tale-of-two-cities.txt' mode='r' encoding='utf8'> ###Markdown Typically, a text character is **1 byte** in size. One byte is equal to **8 bits**. This means conceptually, the size of a string should be $N$ bytes, where $N$ is the number of characters. However, you'll see that in Python, the size of a string is larger: ###Code import sys EMPTY_STRING = "" ONE_CHAR_STRING = "a" TWO_CHAR_STRING = "ab" print(f"The size of EMPTY_STRING is {sys.getsizeof(EMPTY_STRING)} bytes.") print(f"The size of ONE_CHAR_STRING is {sys.getsizeof(ONE_CHAR_STRING)} bytes.") print(f"The size of TWO_CHAR_STRING is {sys.getsizeof(TWO_CHAR_STRING)} bytes.") ###Output The size of EMPTY_STRING is 49 bytes. The size of ONE_CHAR_STRING is 50 bytes. The size of TWO_CHAR_STRING is 51 bytes. ###Markdown The **`open()`** function returns a **`TextIOWrapper`** object from Python's `io` module, which handles common input/output streaming operations. A **stream** is a potentially infinite sequence of elements (in our case, characters) arriving over time. You'll use streams to model data that is **unbounded** (it's undetermined the volume, the length, and frequency of the data). A stream has a pointer to its current position within the sequence. This object has an extremely helpful **`readline()`** method that reads from a text file until encountering an **`EOF`** marker or a new line symbol. ###Code text_file.readline() ###Output _____no_output_____ ###Markdown You can pass in a parameter to **`readline()`** to control how many bytes of input stream data you'll receive. For instance, **`readline(2)`** returns at most 2 bytes of text input data. You might use this, for instance, if your Python application is reading not from a flat text file, but from a socket, which supplies a continuous stream of data with fixed length (ie., the messages all have the same number of characters).**In-Class Question**: *Assume you just opened the text file with **`open()`**. 
What output is returned when **`text_file.readline(5)`** is called the **second** time?*- **A)** The entire first line of the novel- **B)** The first 5 characters of the second line- **C)** The entire second line of the novel- **D)** The first 5 characters of the first line- **E)** The 6th-10th characters of the first line ###Code text_file.seek(0) #reset the stream position to the start of the text file for i in range(2): # repeat the below line twice print(f"Iteration {i + 1}: {text_file.readline(5)}") ###Output Iteration 1: IT Iteration 2: WAS t ###Markdown Each time that you call **`readline()`**, a position marker within **`TextIOWrapper`** is moved forward:We typically will use **`readlines()`** instead to read text files line by line. This returns a Python **list**: ###Code text_file.seek(0) # reset the stream position to the start of the file lines = text_file.readlines() # read all the lines and return a list of strings ###Output _____no_output_____ ###Markdown We see that there are **12870** lines of text in the novel. ###Code print(f"There are {len(lines)} lines in the novel.") total_num_chars = 0 for line in lines: # iterate through each line total_num_chars += len(line) # add the number of characters in a line to the total count of characters avg_chars = round(total_num_chars / len(lines),1) # divide total character count by number of lines to get average print(f"On average, each line has {avg_chars} characters.") import matplotlib.pyplot as plt # we are importing the pyplot module from matplotlib, and naming it as plt ###Output _____no_output_____ ###Markdown An Aside: List ComprehensionSometimes, we need to iterate through a list and perform some sort of operation (sum all the elements, or remove a certain character). The traditional way to do this is using a for loop:```Pythonlengths = [] declare an empty listfor line in lines: iterate through each line lengths.append(len(line)) add the length of each line to the list```A slightly less verbose way, called **list comprehension**, to write this is```Pythonlengths = [len(line) for line in lines]```List comprehension is **typically slightly faster**, since it avoids the additional `append()` call for each iteration of the for loop. See this example from StackOverflow:```Pythondef slower(): using traditional iteration result = [] for elem in some_iterable: result.append(elem) return result``````Pythondef faster(): using list comprehension return [elem for elem in some_iterable]```Within the Python REPL **(read-eval-print-loop)**:```Python>>> some_iterable = range(1000)>>> import timeit>>> timeit.timeit('f()', 'from __main__ import slower as f', number=10000)1.4456570148468018>>> timeit.timeit('f()', 'from __main__ import faster as f', number=10000)0.49323201179504395``` Visualizing Summary Metrics Using Matplotlib ###Code NUM_BINS = 30 # increase this number to make the visualization more granular plt.rcParams["figure.figsize"] = (15,6) plt.hist([len(line) for line in lines], bins=NUM_BINS) plt.title("Distribution of Line Lengths in Tale of Two Cities") # give the plot a title plt.xlabel("Number of Characters in Line") # label the X axis plt.ylabel("Count of Lines") # label the Y axis plt.show() ###Output _____no_output_____ ###Markdown What if now we want to visualize how many times each word appears in the entire novel (for now, we won't worry about **stemming / lemmatization** and other preprocessing steps)? First Method: Create a Dictionary to Store Word CountDictionaries in Python have **keys** and **values**. 
The keys must be unique (no duplicate keys). They can be accessed via the **`keys()`** and **`values()`** methods of a dictionary object. ###Code words = [] # create a list of all words word_count = {} # create a dictionary to store word counts for line in lines: # for each line in the novel for word in line.split(" "): # for each word in the line words.append(word) # add the word to the list of words if word not in word_count.keys(): # if the word has not been seen before, add it to the dictionary with initial count of 1 word_count[word] = 1 else: word_count[word] += 1 # if the word has been seen before, increment its count by 1 print(f"There's an estimated {len(words)} words in the novel.") print(f"There's {len(word_count.keys())} unique words in the novel.") ###Output There's an estimated 143345 words in the novel. There's 21683 unique words in the novel. ###Markdown Let's use Python **`sets`** to check that our dictionary's keys are unique. Remember that a set is a collection of **unique elements**, so calling **`set(words)`** will return only the unique words in our text file. ###Code assert len(word_count.keys()) == len(set(words)), "This error message will be printed if the assertion to the left is not true." ###Output _____no_output_____ ###Markdown Using Python's Built-In CounterSince the task of building a count using a dictionary is a common operation, Python provides a built-in object called `Counter` that we can use: ###Code from collections import Counter def count_words(lines, delimiter=" "): words = Counter() # instantiate a Counter object called words for line in lines: for word in line.split(delimiter): words[word] += 1 # increment count for word return words ###Output _____no_output_____ ###Markdown A core principle of software engineering and programming is **DRY**: Don't Repeat Yourself. Since we are likely going to be making many histograms throughout this course, it's best that we create a reusable function. ###Code def make_histogram(values, title=None,xlabel=None,ylabel=None, bins=30, x_size=15, y_size=6): plt.rcParams["figure.figsize"] = (x_size,y_size) plt.hist(values, bins=bins) if title: plt.title(title) # give the plot a title if xlabel: plt.xlabel(xlabel) # label the X axis if ylabel: plt.ylabel(ylabel) # label the Y axis plt.show() make_histogram(word_count.values(), title="Distribution of Word Count", xlabel="Number of Times Word Appears", ylabel="Number of Unique Words") ###Output _____no_output_____ ###Markdown In-Class Question- Why does this distribution look the way it does? - What additional steps could be taken to make the results more meaningful? ###Code import pandas as pd # output the results to a dataframe word_count_df = pd.DataFrame(columns=["word", "frequency"]) # create a dataframe with two columns, word and frequency word_count_df["word"] = list(word_count.keys()) word_count_df["frequency"] = list(word_count.values()) word_count_df.to_csv("dickens_word_count.csv") # saves to an outputs folder - if you don't have one, Python will throw an error ###Output _____no_output_____ ###Markdown Zipf's Law General DefinitionZipf's Law states that for `N` words, the `k`th most frequent word will appear with a normalized frequency equal to The parameter $s$ is an exponent that defines the behavior of the distribution. Traditionally, in natural language, $s = 1$. 
Stefan Evert, http://zipfr.r-forge.r-project.org/materials/LREC2018/tutorial_lrec2018.handout.pdf Approximation in NLPIf $t_1$ is the most common word in a collection of text, and $t_2$ is the next most common word, then the frequency of the $i$th most common word is proportional to $\frac{1}{i}$. The approximation we'll use specifically for natural languages is$$f(t_i) = \frac{0.1}{i^\alpha}$$$\alpha = 1$.To represent the frequency of a word in a body of text.In human language, there are **a few high-frequency words and many low-frequency words**. What does this mean in terms of machine learning / data modelling?* In many cases, the high frequency words do not carry much value in terms of predictive power or signal. These are frequently **stopwords** that must be removed / otherwise feature-engineered. Regular Expressions ###Code # get the top stopwords word_count_df.sort_values(by=["frequency"], ascending=False).head(5) import re SAMPLE_TWEET = ''' #wolfram Alpha SUCKS! Even for researchers the information provided is less than you can get from #google or #wikipedia, totally useless! Avoid Wolfram at all costs, #ScrewWolframProducts" ''' # create a dataframe version of Dickens' novel dickens_text_df = pd.DataFrame( open("tale-of-two-cities.txt", "r", encoding='utf8').readlines(), columns=["line"]) dickens_text_df["line"] = dickens_text_df["line"].str.replace("\n", "") ###Output _____no_output_____ ###Markdown Match the first time a capital letter appears in the tweet ###Code match = re.search("[A-Z]", SAMPLE_TWEET) match.group() ###Output _____no_output_____ ###Markdown Match all capital letters that appears in the tweet ###Code # re re.findall("[A-Z]", SAMPLE_TWEET) # pandas dickens_text_df["results"] = dickens_text_df["line"].str.extract(r'([A-Z])') ###Output _____no_output_____ ###Markdown Match all words that are at least 3 characters long ###Code # re re.findall("[a-zA-Z]{3,}", SAMPLE_TWEET)[:5] # show only the first 5 # pandas dickens_text_df["results"] = dickens_text_df["line"].str.extract(r'([a-zA-Z]{3,})') dickens_text_df["results"] = dickens_text_df["line"].str.findall(r'([a-zA-Z]{3,})') dickens_text_df.head(5) ###Output _____no_output_____ ###Markdown Word BoundariesConsider the sentence:*A thorough examination of the movie shows Thor was a thorn in the side of the villains, both then and now. thor.*What happens if you try to parse out all `Thor` references? What happens if you want to remove `A` or `a`, or `the` to clean up the text? ###Code text = "A thorough examination of the movie shows Thor was a thorn in the side of the villains, both then and now. thor." # re text = "A thorough examination of the movie shows Thor was a thorn in the side of the villains, both then and now. thor." text = re.sub(r'(a|A)', '', text) text text = "A thorough examination of the movie shows Thor was a thorn in the side of the villains, both then and now. thor." re.findall(r'\b(thor|Thor)\b', text) # notice the use of the r string prefix! # pandas dickens_text_df["results"] = dickens_text_df["line"].str.findall(r'\bthe\b', flags=re.IGNORECASE) dickens_text_df.head(5) ###Output _____no_output_____ ###Markdown Removing Stopwords Using Regex ###Code # re text = re.sub('(the|The)', '', text, flags=re.IGNORECASE) text # pandas dickens_text_df["results"] = dickens_text_df["line"].str.replace(r'\bthe\b', '', case=False) dickens_text_df.head() ###Output _____no_output_____ ###Markdown Exercises1. One of the main characters in A Tale of Two Cities is `Sydney Carton`. 
How many times is the word `Carton` used?
2. How many times does the word `the` appear in the novel?
3. How would you find and replace the stopword `the` using regex in `Tale of Two Cities`?
4. What percentage of lines in Dickens' text contain adverbs? For now, you can classify an adverb as a word that ends in `ly`.
5. How many times does Charles Dickens use the pattern `WORD, WORD, and WORD` in this novel (for example `red, bluff, and free`)?

Homework 1 (Due Monday March 23rd, 2020 at 11:59pm PST)
Every day late is -10%.

You are a business analyst working for a major US toy retailer:

* A manager in the marketing department wants to find out the most frequently used words in positive reviews (five stars) and negative reviews (one star) in order to determine what occasion the toys are purchased for (Christmas, birthdays, and anniversaries). He would like your opinion on **which gift occasions (Christmas, birthdays, or anniversaries) tend to have the most positive reviews** to focus marketing budget on those days.
* One of your product managers suspects that **toys purchased for male recipients (husbands, sons, etc.)** tend to be much more likely to be reviewed poorly. She would like to see some data points confirming or rejecting her hypothesis.
* Use **regular expressions to parse out all references to recipients and gift occasions**, and account for the possibility that people may write words such as "son" / "children" / "Christmas" as both singular and plural, upper or lower-cased.
* Explain what some of the pitfalls/limitations are of using only a word count analysis to make these inferences. What additional research/steps would you need to do to verify your conclusions?

Perform the same word count analysis using the reviews received from Amazon to answer your marketing manager's question. They are stored in two files, `poor_amazon_toy_reviews.txt` and `good-amazon-toy-reviews.txt`. **Provide a few sentences with your findings and business recommendations.** Make any assumptions you'd like to; this is a fictitious company, after all. I just want you to get into the habit of "finishing" your analysis: avoid delivering raw technical numbers to a non-technical manager.

**Submit everything as a new notebook, sent to me (Yu Chen) as an attachment via Slack direct message.**

`NOTE`: Name the notebook `lastname_firstname_HW1.ipynb`.

Next Week (March 24th)
* `scikit-learn`, `nltk`, and `scipy` libraries for NLP (make sure to install each of these libraries)
* encoding schemes
* Bayes Rule, Naive Bayes, probability theory for text classification
* Similarity/distance measures
* N-Grams
* Tokenization, lemmatization, stemming
* Basic word vectors: Count, TF-IDF, One-Hot encoding
* Dimensionality Reduction

Check for Understanding
1. Which of the encodings below will be able to encode this text: `사업`
2. **True or False**: the word `dog` will have the same binary representation regardless of whether it is `ASCII`, `latin1`, or `utf8`.
3. According to the Zipf Law approximation, approximately what frequency (expressed as a percent) would the 3rd most popular word in a generic piece of text appear with?
4. **True or False**: what is considered a stopword changes depending on the business context and dataset you are working with. If true, provide an example. If false, explain why it is false. 
###Code good_reviews = open('good_amazon_toy_reviews.txt', "r", encoding='utf8').read() poor_reviews = open('poor_amazon_toy_reviews.txt', "r", encoding='utf8').read() good_reviews_df = pd.read_csv('good_amazon_toy_reviews.txt', sep="\n", header=None, names=["line"]) poor_reviews_df = pd.read_csv('poor_amazon_toy_reviews.txt', sep="\n", header=None, names=["line"]) word_list = re.findall(r'\b[A-z]+\b', good_reviews+poor_reviews) word_list = [word.lower() for word in word_list] word_dict = Counter(word_list) # sorted(word_dict.items(), key=lambda x: x[1], reverse=True) from fuzzywuzzy import fuzz occasion_re = {} christmas_word = [word for word in word_dict.keys() if fuzz.ratio(r'christmas',word) >= 80] christmas_word occasion_re['christmas'] = r'\b(xmas|' + '|'.join([word for word in christmas_word if word not in ['christ','christians','charisma']]) + r')\b' birthday_word = [word for word in word_dict.keys() if fuzz.ratio(r'birthday',word) >= 80] birthday_word occasion_re['birthday'] = r'\b(' + '|'.join([word for word in birthday_word if word not in ['birthed']]) + r')\b' anniversary_word = [word for word in word_dict.keys() if fuzz.ratio(r'anniversary',word) >= 80] anniversary_word occasion_re['anniversary']= r'\b(' + '|'.join([word for word in anniversary_word if word not in ['adversary']]) + r')\b' occasion_re['valentine'] = r'\b(' + '|'.join([word for word in word_dict.keys() if fuzz.ratio(r'valentine',word) >= 80]) + r')\b' occasion_re['thanksgiving'] = r'\b(' + '|'.join([word for word in word_dict.keys() if fuzz.ratio(r'thanksgiving',word) >= 90]) + r')\b' occasion_re['halloween'] = r'\b(' + '|'.join([word for word in word_dict.keys() if fuzz.ratio(r'halloween',word) >= 85]) + r')\b' occasion_re['easter'] = r'\b(' + '|'.join([word for word in word_dict.keys() if fuzz.ratio(r'easter',word) >= 95]) + r')\b' occasion_re occasion_df = pd.DataFrame(columns=['good_word','good_review','poor_word','poor_review'] , index=['christmas','birthday','anniversary','valentine','thanksgiving','halloween','easter','total']) occasion_df = occasion_df.fillna(0) for line in good_reviews_df['line']: for occasion in occasion_df.index: if occasion == 'total': occasion_df.loc[occasion,'good_word'] += len(re.findall(r'\b[A-z]+\b',line)) occasion_df.loc[occasion,'good_review'] += 1 else: if re.findall(occasion_re[occasion],line,flags=re.IGNORECASE): occasion_df.loc[occasion,'good_word'] += len(re.findall(occasion_re[occasion],line,flags=re.IGNORECASE)) occasion_df.loc[occasion,'good_review'] += 1 for line in poor_reviews_df['line']: for occasion in occasion_df.index: if occasion == 'total': occasion_df.loc[occasion,'poor_word'] += len(re.findall(r'\b[A-z]+\b',line)) occasion_df.loc[occasion,'poor_review'] += 1 else: if re.findall(occasion_re[occasion],line,flags=re.IGNORECASE): occasion_df.loc[occasion,'poor_word'] += len(re.findall(occasion_re[occasion],line,flags=re.IGNORECASE)) occasion_df.loc[occasion,'poor_review'] += 1 occasion_df male_word = ['son','sons'] male_word.extend([word for word in word_dict.keys() if fuzz.ratio(r'husband',word) >= 85]) male_word.extend(['father','fathers']) male_word.extend([word for word in word_dict.keys() if fuzz.ratio(r'dad',word) >= 85]) male_word.extend([word for word in word_dict.keys() if fuzz.ratio(r'granda',word) >= 85]) male_word.remove('dead') male_word.remove('grand') male_word.remove('grandma') male_word.remove('grandman') male_word.remove('grandmas') male_word_re = r'\b(' + '|'.join([word for word in male_word]) + r')\b' male_word_re male_df = 
pd.DataFrame(columns=['good_review','poor_review'],index=['male','total']) male_df = male_df.fillna(0) for line in good_reviews_df['line']: for i in male_df.index: if i == 'total': male_df.loc[i,'good_review'] += 1 else: if re.findall(male_word_re,line,flags=re.IGNORECASE): male_df.loc[i,'good_review'] += 1 for line in poor_reviews_df['line']: for i in male_df.index: if i == 'total': male_df.loc[i,'poor_review'] += 1 else: if re.findall(male_word_re,line,flags=re.IGNORECASE): male_df.loc[i,'poor_review'] += 1 male_df def make_sorted_dict(word_list): word_dict = {k:round(v/len(word_list),4) for k,v in Counter(word_list).items()} return dict(sorted(word_dict.items(), key=lambda x: x[1], reverse=True)) son_word = re.findall(r'\bsons?\b', good_reviews+poor_reviews, flags=re.IGNORECASE) make_sorted_dict(son_word) child_word = re.findall(r'\bchild(?:ren)?\b', good_reviews+poor_reviews, flags=re.IGNORECASE) make_sorted_dict(child_word) christmas_word = Counter(re.findall(r'\bchristmas(?:es)?\b', good_reviews+poor_reviews, flags=re.IGNORECASE)) make_sorted_dict(christmas_word) ###Output _____no_output_____ ###Markdown Table of Contents0.0.1&nbsp;&nbsp;Installing Required Libraries0.0.1.1&nbsp;&nbsp;Getting Familiar With Jupyter Notebooks1&nbsp;&nbsp;Week 1: Basic Python Operations for Working with Text2&nbsp;&nbsp;The Scale of Data in the 21st Century2.1&nbsp;&nbsp;Overview2.1.0.1&nbsp;&nbsp;Text Analytics2.1.0.2&nbsp;&nbsp;Data Engineering2.1.0.3&nbsp;&nbsp;Statistics / Machine Learning2.2&nbsp;&nbsp;Loading Text into Memory2.2.0.1&nbsp;&nbsp;Opening Files2.2.1&nbsp;&nbsp;An Aside: List Comprehension2.2.2&nbsp;&nbsp;Visualizing Summary Metrics Using Matplotlib2.2.3&nbsp;&nbsp;First Method: Create a Dictionary to Store Word Count2.2.4&nbsp;&nbsp;Using Python's Built-In Counter2.2.5&nbsp;&nbsp;In-Class Question2.3&nbsp;&nbsp;Zipf's Law2.3.1&nbsp;&nbsp;General Definition2.3.2&nbsp;&nbsp;Approximation in NLP3&nbsp;&nbsp;Regular Expressions3.0.1&nbsp;&nbsp;Match the first time a capital letter appears in the tweet3.0.2&nbsp;&nbsp;Match all capital letters that appears in the tweet3.0.3&nbsp;&nbsp;Match all words that are at least 3 characters long3.0.4&nbsp;&nbsp;Word Boundaries3.0.5&nbsp;&nbsp;Removing Stopwords Using Regex3.0.5.1&nbsp;&nbsp;Exercises4&nbsp;&nbsp;Homework 1 (Due Monday March 23rd, 2020 at 11:59pm PST)4.1&nbsp;&nbsp;Next Week (March 24th)4.1.1&nbsp;&nbsp;Check for Understanding Installing Required Libraries ###Code !pip3 install matplotlib !pip3 install pandas ###Output _____no_output_____ ###Markdown Getting Familiar With Jupyter Notebooks Jupyter keyboard shortcuts:- Press `Esc` to go into **Command Mode**. Your cell should turn from green highlights to blue highlights.- In **Command Mode**, press `M` to go into `Markdown` mode. This turns your cell into Markdown text so you can type text.- Press `Y` to go into `Code` mode. This then allows you to begin typing Python code.- Press `A` to insert a cell above your current cell.- Press `B` to insert a cell below your current cell.- Press `D` twice to delete your current cell.- Press `Shift` + `Enter` to save your cell. 
Week 1: Basic Python Operations for Working with Text The Scale of Data in the 21st Century ASCII table converting numbers to characters.(Wikipedia) OverviewBy the end of this week, you should be able to perform the following operations: Text Analytics- **load a text file into memory** using Python's built-in streaming libraries- **visualize word count and line length distributions** as histograms using Matplotlib Data Engineering- **read strings from a text input/output stream** using `readline()` and `readlines()`- **use both native Python dictionaries and `collections.Counter` objects** to produce word counts for a text corpus- perform basic search/replace operations using **regular expressions**- encode/decode text from bytes to support internationalization and digital-native characters (such as **emojis**). Statistics / Machine Learning- **create a word transition matrix using Numpy arrays**, which can be used for probabilistic inference and text generation (we will cover Week 2) Loading Text into MemoryThere are a variety of ways to hold data within memory. For text analytics and natural language processing purposes, we'll be most concerned with the following:- **list**- **set**- **dictionary**- **tuple**- **Numpy array**Imagine that we would like to find the most commonly used words in ***A Tale of Two Cities***, by the famed English novelist Charles Dickens, stored in a text file called **`tale-of-two-cities.txt`**, in the same directory as this Jupyter notebook. Later on, we'll use 3rd-party libraries to automate much of the processing, but for now, we'll explore Python's built-in functions for text processing. Opening Files The **`open()`** function takes *two* parameters; **filename**, and **mode**. In our case, `mode` is set to `r` for **read**, since we plan to read the file's contents, as opposed to `w` (write), or `a` (append). ###Code # Open Tale of Two Cities text_file = open("tale-of-two-cities.txt", "r") print(text_file) ###Output <_io.TextIOWrapper name='tale-of-two-cities.txt' mode='r' encoding='UTF-8'> ###Markdown Typically, a text character is **1 byte** in size. One byte is equal to **8 bits**. This means conceptually, the size of a string should be $N$ bytes, where $N$ is the number of characters. However, you'll see that in Python, the size of a string is larger: ###Code import sys EMPTY_STRING = "" ONE_CHAR_STRING = "a" TWO_CHAR_STRING = "ab" print(f"The size of EMPTY_STRING is {sys.getsizeof(EMPTY_STRING)} bytes.") print(f"The size of ONE_CHAR_STRING is {sys.getsizeof(ONE_CHAR_STRING)} bytes.") print(f"The size of TWO_CHAR_STRING is {sys.getsizeof(TWO_CHAR_STRING)} bytes.") ###Output The size of EMPTY_STRING is 49 bytes. The size of ONE_CHAR_STRING is 50 bytes. The size of TWO_CHAR_STRING is 51 bytes. ###Markdown The **`open()`** function returns a **`TextIOWrapper`** object from Python's `io` module, which handles common input/output streaming operations. A **stream** is a potentially infinite sequence of elements (in our case, characters) arriving over time. You'll use streams to model data that is **unbounded** (it's undetermined the volume, the length, and frequency of the data). A stream has a pointer to its current position within the sequence. This object has an extremely helpful **`readline()`** method that reads from a text file until encountering an **`EOF`** marker or a new line symbol. 
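The same streaming interface works for any file-like object, not only files on disk. The short sketch below uses an in-memory `io.StringIO` buffer (with an illustrative two-line string, not the novel itself) to show the stream position advancing after each read:

```Python
import io

buffer = io.StringIO("It was the best of times,\nit was the worst of times.\n")
print(buffer.readline())    # reads up to and including the first newline
print(buffer.tell())        # the stream position is now past the first line
buffer.seek(0)              # rewind the position marker back to the start
print(buffer.readline(7))   # read at most 7 characters of the first line
```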
###Code text_file.readline() ###Output _____no_output_____ ###Markdown You can pass in a parameter to **`readline()`** to control how many bytes of input stream data you'll receive. For instance, **`readline(2)`** returns at most 2 bytes of text input data. You might use this, for instance, if your Python application is reading not from a flat text file, but from a socket, which supplies a continuous stream of data with fixed length (ie., the messages all have the same number of characters).**In-Class Question**: *Assume you just opened the text file with **`open()`**. What output is returned when **`text_file.readline(5)`** is called the **second** time?*- **A)** The entire first line of the novel- **B)** The first 5 characters of the second line- **C)** The entire second line of the novel- **D)** The first 5 characters of the first line- **E)** The 6th-10th characters of the first line ###Code text_file.seek(0) #reset the stream position to the start of the text file for i in range(2): # repeat the below line twice print(f"Iteration {i + 1}: {text_file.readline(5)}") ###Output Iteration 1: IT Iteration 2: WAS t ###Markdown Each time that you call **`readline()`**, a position marker within **`TextIOWrapper`** is moved forward:We typically will use **`readlines()`** instead to read text files line by line. This returns a Python **list**: ###Code text_file.seek(0) # reset the stream position to the start of the file lines = text_file.readlines() # read all the lines and return a list of strings ###Output _____no_output_____ ###Markdown We see that there are **12870** lines of text in the novel. ###Code print(f"There are {len(lines)} lines in the novel.") total_num_chars = 0 for line in lines: # iterate through each line total_num_chars += len(line) # add the number of characters in a line to the total count of characters avg_chars = round(total_num_chars / len(lines),1) # divide total character count by number of lines to get average print(f"On average, each line has {avg_chars} characters.") import matplotlib.pyplot as plt # we are importing the pyplot module from matplotlib, and naming it as plt ###Output _____no_output_____ ###Markdown An Aside: List ComprehensionSometimes, we need to iterate through a list and perform some sort of operation (sum all the elements, or remove a certain character). The traditional way to do this is using a for loop:```Pythonlengths = [] declare an empty listfor line in lines: iterate through each line lengths.append(len(line)) add the length of each line to the list```A slightly less verbose way, called **list comprehension**, to write this is```Pythonlengths = [len(line) for line in lines]```List comprehension is **typically slightly faster**, since it avoids the additional `append()` call for each iteration of the for loop. 
See this example from StackOverflow:```Pythondef slower(): using traditional iteration result = [] for elem in some_iterable: result.append(elem) return result``````Pythondef faster(): using list comprehension return [elem for elem in some_iterable]```Within the Python REPL **(read-eval-print-loop)**:```Python>>> some_iterable = range(1000)>>> import timeit>>> timeit.timeit('f()', 'from __main__ import slower as f', number=10000)1.4456570148468018>>> timeit.timeit('f()', 'from __main__ import faster as f', number=10000)0.49323201179504395``` Visualizing Summary Metrics Using Matplotlib ###Code NUM_BINS = 30 # increase this number to make the visualization more granular plt.rcParams["figure.figsize"] = (15,6) plt.hist([len(line) for line in lines], bins=NUM_BINS) plt.title("Distribution of Line Lengths in Tale of Two Cities") # give the plot a title plt.xlabel("Number of Characters in Line") # label the X axis plt.ylabel("Count of Lines") # label the Y axis plt.show() ###Output _____no_output_____ ###Markdown What if now we want to visualize how many times each word appears in the entire novel (for now, we won't worry about **stemming / lemmatization** and other preprocessing steps)? First Method: Create a Dictionary to Store Word CountDictionaries in Python have **keys** and **values**. The keys must be unique (no duplicate keys). They can be accessed via the **`keys()`** and **`values()`** methods of a dictionary object. ###Code words = [] # create a list of all words word_count = {} # create a dictionary to store word counts for line in lines: # for each line in the novel for word in line.split(" "): # for each word in the line words.append(word) # add the word to the list of words if word not in word_count.keys(): # if the word has not been seen before, add it to the dictionary with initial count of 1 word_count[word] = 1 else: word_count[word] += 1 # if the word has been seen before, increment its count by 1 print(f"There's an estimated {len(words)} words in the novel.") print(f"There's {len(word_count.keys())} unique words in the novel.") ###Output _____no_output_____ ###Markdown Let's use Python **`sets`** to check that our dictionary's keys are unique. Remember that a set is a collection of **unique elements**, so calling **`set(words)`** will return only the unique words in our text file. ###Code assert len(word_count.keys()) == len(set(words)), "This error message will be printed if the assertion to the left is not true." ###Output _____no_output_____ ###Markdown Using Python's Built-In CounterSince the task of building a count using a dictionary is a common operation, Python provides a built-in object called `Counter` that we can use: ###Code from collections import Counter def count_words(lines, delimiter=" "): words = Counter() # instantiate a Counter object called words for line in lines: for word in line.split(delimiter): words[word] += 1 # increment count for word return words ###Output _____no_output_____ ###Markdown A core principle of software engineering and programming is **DRY**: Don't Repeat Yourself. Since we are likely going to be making many histograms throughout this course, it's best that we create a reusable function. 
###Code def make_histogram(values, title=None,xlabel=None,ylabel=None, bins=30, x_size=15, y_size=6): plt.rcParams["figure.figsize"] = (x_size,y_size) plt.hist(values, bins=bins) if title: plt.title(title) # give the plot a title if xlabel: plt.xlabel(xlabel) # label the X axis if ylabel: plt.ylabel(ylabel) # label the Y axis plt.show() make_histogram(word_count.values(), title="Distribution of Word Count", xlabel="Number of Times Word Appears", ylabel="Number of Unique Words") ###Output _____no_output_____ ###Markdown In-Class Question- Why does this distribution look the way it does? - What additional steps could be taken to make the results more meaningful? ###Code import pandas as pd # output the results to a dataframe word_count_df = pd.DataFrame(columns=["word", "frequency"]) # create a dataframe with two columns, word and frequency word_count_df["word"] = list(word_count.keys()) word_count_df["frequency"] = list(word_count.values()) word_count_df.to_csv("dickens_word_count.csv") # saves to an outputs folder - if you don't have one, Python will throw an error ###Output _____no_output_____ ###Markdown Zipf's Law General DefinitionZipf's Law states that for `N` words, the `k`th most frequent word will appear with a normalized frequency equal to The parameter $s$ is an exponent that defines the behavior of the distribution. Traditionally, in natural language, $s = 1$. Stefan Evert, http://zipfr.r-forge.r-project.org/materials/LREC2018/tutorial_lrec2018.handout.pdf Approximation in NLPIf $t_1$ is the most common word in a collection of text, and $t_2$ is the next most common word, then the frequency of the $i$th most common word is proportional to $\frac{1}{i}$. The approximation we'll use specifically for natural languages is$$f(t_i) = \frac{0.1}{i^\alpha}$$$\alpha = 1$.To represent the frequency of a word in a body of text.In human language, there are **a few high-frequency words and many low-frequency words**. What does this mean in terms of machine learning / data modelling?* In many cases, the high frequency words do not carry much value in terms of predictive power or signal. These are frequently **stopwords** that must be removed / otherwise feature-engineered. Regular Expressions ###Code # get the top stopwords word_count_df.sort_values(by=["frequency"], ascending=False).head(5) import re SAMPLE_TWEET = ''' #wolfram Alpha SUCKS! Even for researchers the information provided is less than you can get from #google or #wikipedia, totally useless! 
Avoid Wolfram at all costs, #ScrewWolframProducts" ''' # create a dataframe version of Dickens' novel dickens_text_df = pd.DataFrame( open("tale-of-two-cities.txt", "r"), columns=["line"]) dickens_text_df["line"] = dickens_text_df["line"].str.replace("\n", "") ###Output _____no_output_____ ###Markdown Match the first time a capital letter appears in the tweet ###Code match = re.search("[A-Z]", SAMPLE_TWEET) match.group() ###Output _____no_output_____ ###Markdown Match all capital letters that appears in the tweet ###Code # re re.findall("[A-Z]", SAMPLE_TWEET) # pandas dickens_text_df["results"] = dickens_text_df["line"].str.extract(r'([A-Z])') ###Output _____no_output_____ ###Markdown Match all words that are at least 3 characters long ###Code # re re.findall("[a-zA-Z]{3,}", SAMPLE_TWEET)[:5] # show only the first 5 # pandas dickens_text_df["results"] = dickens_text_df["line"].str.extract(r'([a-zA-Z]{3,})') dickens_text_df["results"] = dickens_text_df["line"].str.findall(r'([a-zA-Z]{3,})') dickens_text_df.head(5) ###Output _____no_output_____ ###Markdown Word BoundariesConsider the sentence:*A thorough examination of the movie shows Thor was a thorn in the side of the villains, both then and now. thor.*What happens if you try to parse out all `Thor` references? What happens if you want to remove `A` or `a`, or `the` to clean up the text? ###Code text = "A thorough examination of the movie shows Thor was a thorn in the side of the villains, both then and now. thor." # re text = "A thorough examination of the movie shows Thor was a thorn in the side of the villains, both then and now. thor." text = re.sub(r'(a|A)', '', text) text text = "A thorough examination of the movie shows Thor was a thorn in the side of the villains, both then and now. thor." re.findall(r'\b(thor|Thor)\b', text) # notice the use of the r string prefix! # pandas dickens_text_df["results"] = dickens_text_df["line"].str.findall(r'\bthe\b', case=False) dickens_text_df.head(5) ###Output _____no_output_____ ###Markdown Removing Stopwords Using Regex ###Code # re text = re.sub('(the|The)', '', text, flags=re.IGNORECASE) text # pandas dickens_text_df["results"] = dickens_text_df["line"].str.replace(r'\bthe\b', '', case=False) dickens_text_df.head() ###Output _____no_output_____
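One small detail when doing case-insensitive matching with the pandas string methods: `str.findall` does not accept a `case` argument (unlike `str.contains` and `str.replace`), so the regex flag is passed through `flags` instead. A short sketch reusing the `dickens_text_df` built above, writing into throwaway columns named here purely for illustration:

```Python
import re

# case-insensitive findall goes through the flags argument
dickens_text_df["the_matches"] = dickens_text_df["line"].str.findall(r'\bthe\b', flags=re.IGNORECASE)

# str.replace takes case=False directly; regex=True makes the pattern explicit on newer pandas
dickens_text_df["line_no_the"] = dickens_text_df["line"].str.replace(r'\bthe\b', '', case=False, regex=True)
dickens_text_df.head()
```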
issa/assignment.ipynb
###Markdown Cluster Assignments ###Code def cluster_assignment(cluster_res, data_name='user_id'): """ Converts the dictionary containing user_id and user_cluster assignment to a pandas DataFrame. cluster_res : dictionary Result from clustering function with keys being the user_id and values their cluster membership col : string Column name of the user or item Returns ------- result : pandas DataFrame Two columns representing the user/item and their corresponding cluster assignments """ import pandas as pd if data_name == 'user_id': cluster_name = 'ucluster' else: cluster_name = 'icluster' c_assignment = pd.DataFrame(list(cluster_res.items()), columns=[data_name, cluster_name]) c_assignment.set_index(data_name, inplace=True) return c_assignment uc_assignment = cluster_assignment(y_u, data_name='user_id') ic_assignment = cluster_assignment(y_i, data_name='item_id') ic_assignment ###Output _____no_output_____ ###Markdown Unit Test ###Code import unittest import pandas as pd from pandas._testing import assert_frame_equal class Test_cluster_assign(unittest.TestCase): def test_cluster_assignment(self): dict_cluster_i = {0: 2, 1: 1, 2: 1, 3: 2, 4: 1, 5: 1, 6: 2, 7: 1, 8: 3, 9: 3} dict_cluster_u = {0: 1, 1: 1, 2: 1, 3: 2, 4: 3, 5: 2, 6: 2, 7: 3, 8: 1, 9: 2} df_ex_u = pd.DataFrame(list(dict_cluster_u.items()), columns=['user_id', 'ucluster']) df_ex_u.set_index('user_id', inplace=True) df_ex_i = pd.DataFrame(list(dict_cluster_i.items()), columns=['item_id', 'icluster']) df_ex_i.set_index('item_id', inplace=True) df_assignment_u = cluster_assignment(dict_cluster_u, data_name='user_id') df_assignment_i = cluster_assignment(dict_cluster_i, data_name='item_id') assert_frame_equal(df_ex_u, df_assignment_u) assert_frame_equal(df_ex_i, df_assignment_i) unittest.main(argv=[''], verbosity=2, exit=False) ###Output test_cluster_assignment (__main__.Test_cluster_assign) ... ok ---------------------------------------------------------------------- Ran 1 test in 0.010s OK
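An equivalent construction, if you prefer to skip the intermediate list of items, is to build the frame straight from a `pandas.Series`. This is only a sketch of an alternative, not part of the graded solution:

```Python
import pandas as pd

def cluster_assignment_from_series(cluster_res, data_name='user_id'):
    # hypothetical alternative to cluster_assignment: let pandas index the Series by the ids
    cluster_name = 'ucluster' if data_name == 'user_id' else 'icluster'
    return pd.Series(cluster_res, name=cluster_name).rename_axis(data_name).to_frame()

# e.g. cluster_assignment_from_series({0: 2, 1: 1, 2: 3}, data_name='item_id')
```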
day5/task2.ipynb
###Markdown Assignment 2: Improving the discretized solutionIn this exercise you will make several improvements to the cakeeating code in the lecture 7, part 2, to make the solution evenmore accurate. Task 1. Base solutionCopy the version of the cake eating code (with discretized choices)from the lecture slides and modify if needed to to ensure:- There are two separate grids for state $ W_t $ and choice $ c_t $. - The grids for states and choices are initialized at the time the object is created, and do not change when the Bellman equation is solved. Debug the code and produce the convergence plots as in the lecture. ###Code # write your code here # come up with a test of your own import numpy as np import matplotlib.pyplot as plt %matplotlib inline from scipy import interpolate class cake_discretized(): def __init__(self,beta=.9, Wbar=10, ngrid_state=50, ngrid_choice=100): self.beta = beta # Discount factor self.Wbar = Wbar # Upper bound on cake size self.ngrid_state = ngrid_state # Number of grid points for the size of cake self.ngrid_choice = ngrid_choice # Number of grid points for how much of cake to consume self.epsilon = np.finfo(float).eps # smallest positive float number self.grid_state = np.linspace(self.epsilon,Wbar,ngrid_state) # grid for state space self.grid_choice = np.linspace(self.epsilon,Wbar,ngrid_choice) # grid for decision space self.interpolation = 'linear' # interpolation type for Bellman equation self.choice_bound = False # impose the state bound on the trial values of choice or not def bellman(self,V0): #Bellman operator, V0 is one-dim vector of values on grid matW = np.repeat(np.reshape(self.grid_state,(1,-1)),self.ngrid_choice,0) # matrix with state space repeated in rows c = np.repeat(np.reshape(self.grid_choice,(-1,1)),self.ngrid_state,1) # decisions grid repeated by columns if self.choice_bound: c *= np.reshape(self.grid_state,(1,-1)) /self.Wbar # normalize max choice to current wealth matWpr = matW-c # size of cake in the next period matWpr[matWpr==0] = self.epsilon # add small quantity to avoid log(0) mask = matWpr>0 # mask off infeasible choices # interpolation kind if self.interpolation=='linear': interfunc = interpolate.interp1d(self.grid_state,V0,kind='slinear',fill_value="extrapolate") elif self.interpolation=='quadratic': interfunc = interpolate.interp1d(self.grid_state,V0,kind='quadratic',fill_value="extrapolate") elif self.interpolation=='cubic': interfunc = interpolate.interp1d(self.grid_state,V0,kind='cubic',fill_value="extrapolate") elif self.interpolation=='polynomial': p = np.polynomial.polynomial.polyfit(self.grid_state,V0,self.ngrid_state-1) interfunc = lambda x: np.polynomial.polynomial.polyval(x,p) else: print('Unknown interpolation type') return None # INPERPOLATE values of next period value at next period case sizes matV1 = interfunc(matWpr) preV1 = np.full((self.ngrid_choice,self.ngrid_state),-np.inf) # init V with -inf preV1[mask] = np.log(c[mask]) + self.beta*matV1[mask] # maximand of the Bellman equation V1 = np.amax(preV1,0,keepdims=False) # maximum in every column c1 = c[np.argmax(preV1,axis=0),range(self.ngrid_state)] # choose the max attaining levels of c return V1, c1 def solve(self, maxiter=1000, tol=1e-4, callback=None, interpolation='linear', choice_bound = False): '''Solves the model using successive approximations''' self.interpolation = interpolation # update solver settings self.choice_bound = choice_bound V0=np.log(self.grid_state) # on first iteration assume consuming everything for iter in range(maxiter): 
V1,c1=self.bellman(V0) if callback: callback(iter,self.grid_state,V1,c1) # callback for making plots if np.all(abs(V1-V0) < tol): break V0=V1 else: # when i went up to maxiter print('No convergence: maximum number of iterations achieved!') return V1,c1 m = cake_discretized(beta=0.92,Wbar=10,ngrid_state=50,ngrid_choice=50) V,c = m.solve() ###Output _____no_output_____ ###Markdown Task 2. Accuracy measureModify the function that compares the numerical solution to the analytical sothat it outputs a measure of accuracy equal to the average of squared deviations overa fixed dense grid. Also provide an argument to disable the plot for Task 5. ###Code # write your code here def accuracy(model,V=None,policy=None,title='',npoints=1000,plot=True): '''Check the cake eating numerical solution against the analytic solution''' # analytic solution aV = lambda w: np.log(w)/(1 - model.beta) + np.log(1 - model.beta)/(1 - model.beta) + model.beta* np.log(model.beta)/((1 - model.beta)**2) aP = lambda w: (1 - model.beta) * w if 'cake_ongrid' in str(type(model)): grid = model.grid else: grid = model.grid_state # solve if needed if V is None or policy is None: V,policy = model.solve() # accuracy measure xd = np.linspace(grid[0],grid[-1],npoints) # dense grid for accuracy measure ac = ((aV(xd)-np.interp(xd,grid,V))**2).mean() # make plots if plot: fig1, (ax1,ax2) = plt.subplots(1,2,figsize=(14,8)) ax1.grid(b=True, which='both', color='0.65', linestyle='-') ax2.grid(b=True, which='both', color='0.65', linestyle='-') ax1.set_title('Value functions') ax2.set_title('Policy functionas') ax1.set_xlabel('Cake size, W') ax2.set_xlabel('Cake size, W') ax1.set_ylabel('Value function') ax2.set_ylabel('Policy function') ax1.plot(grid[1:],V[1:],linewidth=1.5,label='Numerical') ax1.plot(grid[1:],aV(grid[1:]),linewidth=1.5,label='Analytical') ax2.plot(grid,policy,linewidth=1.5,label='Numerical') ax2.plot(grid,aP(grid),linewidth=1.5,label='Analytical') ax1.legend() ax2.legend() fig1.suptitle(title) plt.show() return ac ac0=accuracy(m,V=V,policy=c) print('Accuracy of the basic solution is',ac0) ###Output _____no_output_____ ###Markdown Task 3. Bounding the choice gridModify the Bellman equation code to make the choice discretization grid dependenton the point of the state space where it is applied to, namely make the grid withthe same number of points going from $ 0 $ to $ \vec{W}_j $ when solvingat the point $ \vec{W}_j $, instead of from $ 0 $ to $ \bar{W} $.Repeat the accuracy check in Task 2 with the new specification. ###Code # write your code here V,c = m.solve(choice_bound=True) ac1=accuracy(m,V=V,policy=c) print('Accuracy of the method with bound on choices is',ac1,'instead of',ac0) ###Output _____no_output_____ ###Markdown Task 4. Improving interpolation methodWe could utilize more advanced interpolation schemes for the value function itself.Replace linear interpolation of the value function with quadratic and cubic splines, and approximatingpolynomials.Compare the accuracy of the new two versions to the original solution and the solutionwith the improvement from task 3.What is the most accurate solution algorithm? ###Code # write your code here for knd in 'linear','quadratic','cubic','polynomial': V,c = m.solve(choice_bound=True,interpolation=knd) ac=accuracy(m,V=V,policy=c) print('Accuracy with '+knd+' interpolation is',ac) ###Output _____no_output_____ ###Markdown Task 5. 
Convergence to true solutionMake a plot of the accuracy measure as function of number of grid points (assumingthe number of grid points on choice grid is 2 times that of the states) for each of the fourinterpolation schemes, with and without the bounding of the choicesOn the separate axes plot the same curve under the assumption that the number of gridpoints on the choice grid is 10 time larger than the state grid, for each of the 4interpolation schemes, with and without the bounding of the choicesWhat is the best way to improve the accuracy of the solution? ###Code # write your code here fig1, (ax1,ax2) = plt.subplots(1,2,figsize=(14,8)) fig1.suptitle('Same grids for states and choices') ax1.set_title('Basic solver') ax2.set_title('With bounding of the choices') ax1.set_xlabel('Number of grid points') ax2.set_xlabel('Number of grid points') ax1.set_ylabel('Accuracy') ax2.set_ylabel('Accuracy') grids = np.arange(50,551,100,dtype='int') for knd in 'linear','quadratic','cubic','polynomial': line = np.empty(grids.size) for i in range(grids.size): K = grids[i] m = cake_discretized(beta=0.92,Wbar=10,ngrid_state=K,ngrid_choice=2*K) V,c = m.solve(choice_bound=False,interpolation=knd) line[i]=accuracy(m,V=V,policy=c,plot=False) print('.',end='') ax1.plot(grids,line,label=knd+' interpolation') print('|',end='') for i in range(grids.size): K = grids[i] m = cake_discretized(beta=0.92,Wbar=10,ngrid_state=K,ngrid_choice=2*K) V,c = m.solve(choice_bound=True,interpolation=knd) line[i]=accuracy(m,V=V,policy=c,plot=False) print('.',end='') ax2.plot(grids,line,label=knd+' interpolation') print('|',end='') ax1.legend() ax2.legend() plt.show() ###Output _____no_output_____
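The second set of axes asked for in the task (a choice grid 10 times denser than the state grid) can be produced with the same loop. Below is a sketch under the same setup, reusing `grids`, `cake_discretized` and `accuracy` from the cells above and only changing `ngrid_choice`:

```Python
fig2, (ax1, ax2) = plt.subplots(1, 2, figsize=(14, 8))
fig2.suptitle('Choice grid 10x denser than the state grid')
for ax, bound in zip((ax1, ax2), (False, True)):
    ax.set_title('With bounding of the choices' if bound else 'Basic solver')
    ax.set_xlabel('Number of grid points')
    ax.set_ylabel('Accuracy')
    for knd in 'linear', 'quadratic', 'cubic', 'polynomial':
        line = np.empty(grids.size)
        for i, K in enumerate(grids):
            m = cake_discretized(beta=0.92, Wbar=10, ngrid_state=K, ngrid_choice=10*K)
            V, c = m.solve(choice_bound=bound, interpolation=knd)
            line[i] = accuracy(m, V=V, policy=c, plot=False)
        ax.plot(grids, line, label=knd + ' interpolation')
    ax.legend()
plt.show()
```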
data-wrangling/DS_Data_Munging.ipynb
###Markdown Data Munging Relational DataThe simplest type of data we have see might consist a single table with a some columns and some rows. This sort of data is easy to analyze and compute and we generally want to reduce our data to a single table before we start running machine learning algorithms. Yet, real world data doesn't necessarily fit into this paradigm. Most real world data is messy and complicated which doesn't fit well into a tabular format and we will have to do some work to reduce this complexity. Additionally, in many case we can reduce our memory cost by not keeping data in a single table, but instead in a set of data structures with defined relations between them. Here we will explore a bit of data and see how combining different sets of data can help us generate useful features.First we need some data. We will make use of some data from Wikipedia and we will use the pandas `read_html` function to scrape the data from a particular webpage. We will study the top 10 companies in the Fortune Global 500 which conveniently have [their own Wikipedia page](https://en.wikipedia.org/w/index.php?title=Fortune_Global_500&oldid=855890446).We will download the data in tabular form, but work with it as a list of dictionaries, this will allow us to get used to working with unstructured data. ###Code import pandas as pd import json df = pd.read_html('https://en.wikipedia.org/w/index.php?title=Fortune_Global_500&oldid=855890446', header=0)[0] fortune_500 = json.loads(df.to_json(orient="records")) df ###Output _____no_output_____ ###Markdown Lets look at the data. ###Code fortune_500 ###Output _____no_output_____ ###Markdown This is a great start to our analysis, however, there really isn't that much information here, we will need to bring in additional data sources to get any further understanding of these companies.The first question we might want to ask is how many employees does it take to get that revenue, in other words, what is the revenue per employee? Luckily, we can use Wikipedia to get that data as well, we have scraped this data manually (all from Wikipedia) and created the following dictionary. ###Code other_data = [ {"name": "Walmart", "employees": 2300000, "year founded": 1962 }, {"name": "State Grid Corporation of China", "employees": 927839, "year founded": 2002}, {"name": "China Petrochemical Corporation", "employees":358571, "year founded": 1998 }, {"name": "China National Petroleum Corporation", "employees": 1636532, "year founded": 1988}, {"name": "Toyota Motor Corporation", "employees": 364445, "year founded": 1937}, {"name": "Volkswagen AG", "employees": 642292, "year founded": 1937}, {"name": "Royal Dutch Shell", "employees": 92000, "year founded": 1907}, {"name": "Berkshire Hathaway Inc.", "employees":377000, "year founded": 1839}, {"name": "Apple Inc.", "employees": 123000, "year founded": 1976}, {"name": "Exxon Mobile Corporation", "employees": 69600, "year founded": 1999}, {"name": "BP plc", "employees": 74000, "year founded": 1908} ] ###Output _____no_output_____ ###Markdown Some data have a slightly different name than in our original set, so we will keep a dictionary of mappings between the two. Notice, we only include the mapping in the dictionary if there is a difference. 
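Before moving on, a quick aside on the earlier **Question** about displaying a small piece of a dictionary: dictionaries have no `.head()`, but slicing an iterator over `.items()` with `itertools.islice` gives the same effect. A minimal sketch using the `dict_data` built above:

```Python
from itertools import islice

# show the first 2 (company, record) pairs without building the whole list of items
dict(islice(dict_data.items(), 2))
```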
###Code mapping = { 'Apple': 'Apple Inc.', 'BP': 'BP plc', 'Berkshire Hathaway': 'Berkshire Hathaway Inc.', 'China National Petroleum': 'China National Petroleum Corporation', 'Exxon Mobil': 'Exxon Mobile Corporation', 'Sinopec Group': 'China Petrochemical Corporation', 'State Grid': 'State Grid Corporation of China', 'Toyota Motor': 'Toyota Motor Corporation', 'Volkswagen': 'Volkswagen AG' } ###Output _____no_output_____ ###Markdown This data is one to one, meaning the data contained in one source only aligns with a single element in the other source, thus we should be able to put these together. However, we know that the data isn't in a great form to be joined at the moment. This is for two reasons1. All the names will not align (we need to use our mapping)2. The `list` structure is not optimized for looking through elements. While for 10 elements the second reason won't really matter, for larger data sets such performance considerations are extremely important. We can turn this list of dictionaries into a dictionary of dictionaries, so we can quickly access each element of the data. ###Code dict_data = {k["name"] : k for k in other_data} dict_data ###Output _____no_output_____ ###Markdown **Question:** If we had many entries in `other_data`, we could display a small piece by printing `other_data[:5]`. With dataframes we might use `df.head()`. Can you think of a way to print out a small piece of a dictionary? Now we can easily compute the revenue per employee, we need to map the "Company" value in our original data with the "name" column of this other data, but we also need to use the mapping to ensure the columns will line up. We in general don't want to mutate our original data, so lets make a new list of dictionaries with this new feature (revenue per employee). On the course of doing this, we will need to handle converting some numbers like `$500 Billion` to a numeric value. Lets create a function to do this. ###Code def convert_revenue(x): return float(x.lstrip('$').rstrip('billion')) * 1e9 assert convert_revenue('$500 billion') == 500e9 ###Output _____no_output_____ ###Markdown Now we should be able to create a few functions to compute this revenue per employee and create a data list. ###Code def rev_per_emp(company): name = company[u'Company'] n_employees = dict_data[mapping.get(name, name)].get('employees') company['rev per emp'] = convert_revenue(company[u'Revenue in USD'])/n_employees return company def compute_copy(d, func): return func({k:v for k,v in d.items()}) data = list(map(lambda x : compute_copy(x, rev_per_emp), fortune_500)) ###Output _____no_output_____ ###Markdown Lets take a look at our new data and also the old data to ensure we didn't mutate anything. ###Code data[:2] fortune_500[:2] ###Output _____no_output_____ ###Markdown Now we can sort these values. We first can select out on the elements we care about and then sort that list. ###Code rev_per_emp = sorted([(i[u'Company'], i['rev per emp']) for i in data], key=lambda x : x[1], reverse=True) rev_per_emp ###Output _____no_output_____ ###Markdown This results in a much different order. What does this tell us about the companies?Now lets pull in some other data (this is data science, more data is always better!). We can see that these companies are in a few different industries, let find out which ones. 
###Code from collections import Counter Counter(i[u'Industry'] for i in data) ###Output _____no_output_____ ###Markdown One thing we might want to know is what sort of market share they have of the specific industry to which they belong. Let's look at the two industries that categorize the 6 of the top 10, `Automobiles` and `Petroleum`. We can select only those elements of our data to work with. ###Code sub_data = [i for i in data if i[u'Industry'] in [u'Automobiles', u'Petroleum']] sub_data ###Output _____no_output_____ ###Markdown It might be the case that the each particular category has a different relevant metric for market share. For example, we could look at total revenue for a car company or we could look at cars produced. So for the automobile industry we will look at the percent total of cars produced. We can get this data again from Wikipedia. ###Code df_list = pd.read_html("https://en.wikipedia.org/w/index.php?title=Automotive_industry&oldid=875776152", header=0) car_totals = json.loads(df_list[0].to_json(orient="records")) car_by_man = json.loads(df_list[2].to_json(orient='records')) car_totals[:2] car_by_man[:2] ###Output _____no_output_____ ###Markdown Now lets get only the groups we care about and divide by the total production which we will take as the latest year. ###Code total_prod = sorted((i[u"Year"], i[u'Production']) for i in car_totals)[-1][1] total_prod ###Output _____no_output_____ ###Markdown Now we can find the market share for each of the car companies. We will keep track of a market share dictionary. We will again need to keep track of some slight name differences. ###Code car_by_man_dict = {i[u'Group']:i[u'Vehicles'] for i in car_by_man} market_share = {} for name, orig_name in zip(['Toyota', 'Volkswagen Group'], ['Toyota', 'Volkswagen']): market_share[orig_name] = car_by_man_dict[name]/ float(total_prod) market_share ###Output _____no_output_____ ###Markdown Now we can do the same for the Petroleum industry, but in this case, lets compute the market share by revenue. On Wikipedia, we can find a list of oil companies by revenue. Although its not a complete list, it has enough companies that we don't expect the companies left off the list to contribute greatly to our analysis. ###Code rev = pd.read_html("https://en.wikipedia.org/w/index.php?title=List_of_largest_oil_and_gas_companies_by_revenue&oldid=871711850", header=1)[0] rev = rev.iloc[:, 1:3] rev.columns = ['Company', 'Revenue'] rev = rev[~(rev['Company'] == 'Company name')] oil_data = json.loads(rev.to_json(orient="records")) oil_data[:2] ###Output _____no_output_____ ###Markdown Now we can compute the totals and market share. Since the data here might be slightly different (perhaps older) than our original data, we will compute the market share of each company within this data set, then pull out the numbers we care about. ###Code total = sum([float(i[u'Revenue'].rstrip('*')) for i in oil_data]) shares = {i[u'Company']:float(i[u'Revenue'].rstrip('*'))/total for i in oil_data} print(total) ###Output _____no_output_____ ###Markdown Now we can pull out the companies we care about in the petroleum industry. ###Code petro_companies = [i[u'Company'] for i in data if i['Industry'] == u'Petroleum'] petro_companies ###Output _____no_output_____ ###Markdown Lets check if these are all in the our shares dictionary. 
###Code [(i, i in shares) for i in petro_companies] ###Output _____no_output_____ ###Markdown Some of these companies are directly there, and looking through our dictionary, we can see the others are there without exact names. ###Code shares.keys() ###Output _____no_output_____ ###Markdown So lets make a fuzzy match, this will be a pretty simple one where it will try to match words in a name and take the maximum number of matches. ###Code def fuzzy_match(word, s): words = set(word.split(' ')) overlaps = [(k, len(v.intersection(words))) for k, v in s.items()] return max(overlaps, key=lambda x : x[1])[0] split_names = {i: set(i.split(' ')) for i in shares.keys()} for i in petro_companies: match = fuzzy_match(i, split_names) print("matched {} to {}".format(i, match)) market_share[i] = shares[match] market_share ###Output _____no_output_____ ###Markdown By industryWe have some nice examples of data munging, now lets see an example of keeping data in a relational fashion. Lets say we want to add another feature which is the growth of each industry. If we were to store this data as a single quantity, we would be saving a bunch of extra information, we would be much better off extracting this information and keeping it in a single table so we are not replicating by industry. With PandasNow we can also perform these same computations with Pandas, lets see how this compares. ###Code df = pd.read_html('https://en.wikipedia.org/w/index.php?title=Fortune_Global_500&oldid=855890446', header=0)[0] df df['rev'] = df['Revenue in USD'].apply(convert_revenue) df['employees'] = df['Company'].apply(lambda x : dict_data[mapping.get(x, x)].get('employees')) df['rev_per_employee'] = df['rev'] / df['employees'].astype(float) df.sort_values(by='rev_per_employee', ascending=False) df_list = pd.read_html("https://en.wikipedia.org/w/index.php?title=Automotive_industry&oldid=875776152", header=0) df_totals = df_list[0] df_by_man = df_list[2] total_prod = df_totals.sort_values(by='Year').iloc[-1]['Production'] total_prod df_by_man['share'] = df_by_man['Vehicles'].astype(float) / total_prod market_share = df_by_man.set_index('Group')['share'][['Toyota', 'Volkswagen Group']] market_share rev = pd.read_html("https://en.wikipedia.org/w/index.php?title=List_of_largest_oil_and_gas_companies_by_revenue&oldid=871711850", header=1)[0] rev = rev.iloc[:, 1:3] rev.columns = ['Company', 'Revenue'] rev = rev[~(rev['Company'] == 'Company name')] rev rev['rev_clean'] = rev['Revenue'].apply(lambda x : float(x.rstrip('*'))) total = rev['rev_clean'].sum() total rev['share'] = rev['rev_clean'] / total rev rev = rev[rev['Company'].isin(['Exxon Mobil', 'Sinopec', 'China National Petroleum Corporation', 'Royal Dutch Shell'])].copy() rev # do fuzzy search split_names = {i: set(i.split(' ')) for i in df['Company']} def fuzzy(word): return fuzzy_match(word, split_names) rev['name'] = rev['Company'].apply(fuzzy) rev ms2 = df.merge(rev[['share', 'name']], left_on='Company', right_on='name') ###Output _____no_output_____ ###Markdown Now we want to put these together and get only the company and the market share. ###Code ms = market_share.reset_index()[['Group','share']] ms.columns = ['Company', 'share'] pd.concat([ms, ms2[['Company', 'share']]]) ###Output _____no_output_____
Movie_Notebook.ipynb
###Markdown data cleaningThe main data cleaning task is related to missing values. Typical reasons why data go missing include someone forgetting to fill in a field, errors when transferring records from a legacy database, a programming error, or simply a user choosing not to fill in a field because of how they interpret it. Most of these sources are simple random mistakes. In our dataset some missing values are filled in with placeholder strings. We know that Pandas will recognise “NA” as a missing value, but what about the others? When multiple users enter data manually, this is a common problem: maybe I like to use “n/a” but you like to use “unknown”. We will also drop columns that are not used, like US DVD Sales, MPAA Rating, etc. We will then turn the "Worldwide Gross" column into a numeric type so pandas can make a histogram out of it. We will also remove all NaN rows and re-index the dataframe. This is done specifically for computing the p-value through the stats.pearsonr method, which does not support NaNs and infinities. ###Code import matplotlib.pyplot as plt import numpy.ma as ma import pandas as pd import numpy as np import math from scipy import stats %matplotlib inline missing_values = ["n/a", "na", "--","unknown","Unknown","0"] movie_data = pd.read_csv("movies.csv",na_values = missing_values) to_drop =['US DVD Sales', 'MPAA Rating', 'Running Time (min)', 'Distributor', 'Source', 'Creative Type'] new_names = {'Worldwide Gross': 'w_gross', 'Production Budget':'p_budget', 'Major Genre': 'm_genre', 'IMDB Votes':'imdb_votes', 'IMDB Rating':'imdb_rating', 'Rotten Tomatoes Rating': 'tomatoes', 'US Gross': 'us_gross', 'Release Date': 'r_date'} #drop unnecessary columns movie_data.drop(to_drop, inplace=True, axis=1) #remove all NaN rows and re-index the dataframe #this is done specifically for computing the p-value through the stats.pearsonr method #that does not support NaNs and infinities. movie_data = movie_data.dropna() movie_data = movie_data.reset_index(drop=True) #rename columns, some commands can't handle variables with spaces movie_data.rename(columns=new_names, inplace=True) #transform strings to numeric movie_data["w_gross"] = pd.to_numeric(movie_data["w_gross"]) ###Output _____no_output_____ ###Markdown plotting the histogramsFrom the first histograms we can see that Worldwide Gross and the number of IMDB Votes have very similar shapes: both follow a heavily right-skewed, roughly geometric distribution, which already hints at a relationship between the two. ###Code movie_data.plot.hist(y='w_gross') movie_data.plot.hist(y='imdb_votes') movie_data.plot.hist(y='tomatoes') ###Output _____no_output_____ ###Markdown The "Rotten Tomatoes Rating" somehow resembles a uniform distribution, with a peak at the value 90 and a bottom at the value 10. The "IMDB Rating" follows a left-skewed distribution: the mean is to the left of the peak. This is the main idea behind “skewness”, which is technically a measure of the asymmetry of the distribution of values around the mean. ###Code movie_data.plot.hist(y='imdb_rating') ###Output _____no_output_____ ###Markdown counting the number of movies for every major genreIt's clear that the most popular genres are drama and comedy. 
###Code # Count unique values in column 'm_genre' of the dataframe genres_count_values = movie_data['m_genre'].value_counts() # Value table print(genres_count_values) # Make a pandas dataframe out of the pandas series genres_count_dataframe = pd.DataFrame(genres_count_values) # Plot the graph genres_count_dataframe.plot.bar() ###Output Drama 392 Comedy 310 Action 230 Adventure 134 Thriller/Suspense 127 Horror 71 Romantic Comedy 68 Musical 21 Western 20 Black Comedy 19 Documentary 7 Concert/Performance 1 Name: m_genre, dtype: int64 ###Markdown log-log plot We will define some bins with exponential increasing size. We will use the numpy.logspace that returns an evenly space logarithmic sequence of numbers. Using the min() function we can find where the minimum value lies and with log(max)/log(min) where the values will end. That is enough for the logspace input to generate exponential numbers. ###Code min_value_imdb_votes = movie_data['imdb_votes'].min() max_value_imdb_votes = movie_data['imdb_votes'].max() print('minimum value of the imdb_votes:'+str(min_value_imdb_votes)) print('maximum value of the imdb_votes:'+str(max_value_imdb_votes)) #generate sequence of exponentially increasing numbers product = min_value_imdb_votes exponential_bins = [product] while product < max_value_imdb_votes: exponential_bins.append(product) product = product*2 exponential_bins = list(exponential_bins) print('Exponential increasing bins to include the data range:',exponential_bins) plt.plot(exponential_bins) plt.show() #empty list to hold the mean values mean_values = [] #length of the list that holds the bin ranges bin_ranges_list_length = len(exponential_bins) #find the mean values of the bins for i,obj in enumerate(exponential_bins): if i < (bin_ranges_list_length - 1): current = exponential_bins[i] next_ = exponential_bins[i + 1] mean_value_of_the_bin = ((current+next_)/2) mean_values.append(int(mean_value_of_the_bin)) exponential_bins = list(exponential_bins) mean_values = list(mean_values) print('Mean values:',mean_values) #mean values as input for bucket limits plt.hist(movie_data['imdb_votes'], bins=exponential_bins) plt.show() #in a log-log scale plt.hist(movie_data['imdb_votes'], bins=exponential_bins) plt.yscale('log') plt.xscale('log') plt.xticks(mean_values,[np.log(i) for i in mean_values], rotation='vertical') plt.show() ###Output minimum value of the imdb_votes:33.0 maximum value of the imdb_votes:519541.0 Exponential increasing bins to include the data range: [33.0, 33.0, 66.0, 132.0, 264.0, 528.0, 1056.0, 2112.0, 4224.0, 8448.0, 16896.0, 33792.0, 67584.0, 135168.0, 270336.0] ###Markdown doing the same for the w_gross column ###Code min_value_w_gross = movie_data['w_gross'].min() max_value_w_gross = movie_data['w_gross'].max() print('minimum value of the w_gross:'+str(min_value_w_gross)) print('maximum value of the w_gross:'+str(max_value_w_gross)) #generate sequence of exponentially increasing numbers product = min_value_w_gross exponential_bins = [product] while product < max_value_w_gross: exponential_bins.append(product) product = product*3 exponential_bins = list(exponential_bins) print('Exponential increasing bins to include the data range:',exponential_bins) plt.plot(exponential_bins) plt.show() #empty list to hold the mean values mean_values = [] #length of the list that holds the bin ranges bin_ranges_list_length = len(exponential_bins) #find the mean values of the bins for i,obj in enumerate(exponential_bins): if i < (bin_ranges_list_length - 1): current = exponential_bins[i] next_ = 
exponential_bins[i + 1] mean_value_of_the_bin = ((current+next_)/2) mean_values.append(int(mean_value_of_the_bin)) mean_values = list(mean_values) print('Mean values:',mean_values) #mean values as input for bucket limits plt.hist(movie_data['w_gross'], bins=mean_values) plt.show() #in a log-log scale plt.hist(movie_data['w_gross'], bins=mean_values) plt.yscale('log') plt.xscale('log') plt.xticks(mean_values,[np.log(i) for i in mean_values], rotation='vertical') plt.show() ###Output minimum value of the w_gross:20987.0 maximum value of the w_gross:2767891499.0 Exponential increasing bins to include the data range: [20987.0, 20987.0, 62961.0, 188883.0, 566649.0, 1699947.0, 5099841.0, 15299523.0, 45898569.0, 137695707.0, 413087121.0, 1239261363.0] ###Markdown scatter plot of w_gross and imdb_votes ###Code fig = plt.figure() ax = plt.gca() ax.plot(movie_data['w_gross'] ,movie_data['imdb_votes'] ,'o', c='red', alpha=0.05, markeredgecolor='none') ax.set_yscale('log') ax.set_xscale('log') ax.set_xlabel('gross') ax.set_ylabel('imdb votes') ###Output _____no_output_____ ###Markdown We can see that there is a direct correlation between those two columns. As the worldwide gross gets higher the IMDB votes also get higher. attributes correlation ###Code fig, ax = plt.subplots(3, 2) #increase figure size fig = plt.gcf() fig.set_size_inches(18.5, 10.5) ax[0, 0].plot(movie_data['w_gross'] ,movie_data['tomatoes'] ,'o', c='red', alpha=0.05, markeredgecolor='none') ax[0, 0].set_xlabel('Worldwide Gross') ax[0, 0].set_ylabel('Rotten Tomatoes Rating') ax[0, 0].set_xscale('log') ax[0, 0].set_yscale('log') ax[1, 0].plot(movie_data['w_gross'] ,movie_data['imdb_rating'] ,'o', c='red', alpha=0.05, markeredgecolor='none') ax[1, 0].set_xlabel('Worldwide Gross') ax[1, 0].set_ylabel('IMDB Rating') ax[1, 0].set_xscale('log') ax[1, 0].set_yscale('log') ax[2, 0].plot(movie_data['w_gross'] ,movie_data['imdb_votes'] ,'o', c='red', alpha=0.05, markeredgecolor='none') ax[2, 0].set_xlabel('Worldwide Gross') ax[2, 0].set_ylabel('IMDB Votes') ax[2, 0].set_xscale('log') ax[2, 0].set_yscale('log') ax[0, 1].plot(movie_data['tomatoes'] ,movie_data['imdb_votes'] ,'o', c='red', alpha=0.05, markeredgecolor='none') ax[0, 1].set_xlabel('Rotten Tomatoes Rating') ax[0, 1].set_ylabel('IMDB Votes') ax[0, 1].set_xscale('log') ax[0, 1].set_yscale('log') ax[1, 1].plot(movie_data['tomatoes'] ,movie_data['imdb_rating'] ,'o', c='red', alpha=0.05, markeredgecolor='none') ax[1, 1].set_xlabel('Rotten Tomatoes Rating') ax[1, 1].set_ylabel('IMDB Rating') ax[0, 1].set_xscale('log') ax[0, 1].set_yscale('log') ax[2, 1].plot(movie_data['imdb_votes'] ,movie_data['imdb_rating'] ,'o', c='red', alpha=0.05, markeredgecolor='none') ax[2, 1].set_xlabel('IMDB Votes') ax[2, 1].set_ylabel('IMDB Rating') ax[2, 1].set_xscale('log') ax[2, 1].set_yscale('log') plt.show() ###Output _____no_output_____ ###Markdown 1) As the worldwide gross get close to 10^8 the rotten tomatoes and imdb rating improves. 2) There is a clear correlation between the worldwide gross and the number of imdb votes. 3) There is a clear correlation between the imdb votes and the imdb rating, as the one gets higher the other one gets too. 4) The same as above applies between the rotten tomatoes rating and the imdb rating.5) When the imdb votes number gets higher so does the rotten tomatoes rating does. 
Pearson corelationFor example w_gross with US Gross have a corellation of 0.939742.Lower values mean less correlation ###Code print(movie_data.corr(method='pearson')) ###Output _____no_output_____ ###Markdown Spearman corelation ###Code print(movie_data.corr(method='spearman')) numeric_columns = ['us_gross','w_gross','p_budget','tomatoes','imdb_rating','imdb_votes'] corr = [] p_values = pd.DataFrame() # Matrix of p-values for x in movie_data.columns: for y in movie_data.columns: #return pearson correlation coefficient and p-value for testing non-correlation if(x in numeric_columns and y in numeric_columns): corr = stats.pearsonr(movie_data[x], movie_data[y]) p_values.loc[x,y] = corr[1] print(p_values) ###Output _____no_output_____ ###Markdown We know from a previous calculation that the least popular genres are the following: Musical 21 Western 20 Black Comedy 19 Documentary 7 Concert/Performance 1 Bar plots with ErrorsAt first we will choose the desired confidence interval.The most commonly used confidence levels are 90 percent, 95 percent and 99 percent. Then we will calculate the margin error Za/2 * σ/√(n). Where Za/2 the the confidence coefficient, a the confidence level, σ the standard deviation and n the sample size. To find the critical value, or Za/2 with a confidence level of 95% we will convert the percentage to a decimal, .95, and divide it by 2 to get .475. Then, we will check out the z table to find the corresponding value that goes with .475. We can see that the closest value is 1.96, at the intersection of row 1.9 and the column of .06The final interval of the error is x̅ ± Za/2 * σ/√(n) where x̅ is the mean value ###Code unique_genres = movie_data['m_genre'].unique() not_popular_genres = ['Musical','Western','Black Comedy','Documentary','Concert/Performance'] popular_genres = list(filter(lambda x: x not in not_popular_genres, unique_genres)) #find the mean, count and std of the values mean_values_dataframe = pd.DataFrame(movie_data.groupby('m_genre', as_index=False)['w_gross'].agg([np.mean, 'count', np.std])) #reset the index from aggregating the mean, count and standard deviation mean_values_dataframe = mean_values_dataframe.reset_index() #drop unnecessary rows according to unpopular genres mean_values_dataframe = mean_values_dataframe[~mean_values_dataframe['m_genre'].isin(not_popular_genres)] print(mean_values_dataframe) labels = popular_genres x_pos = np.arange(len(labels)) mean_values = [] std_values = [] lower_error_list = [] upper_error_list = [] #for 95% confidence confidence_value = 1.96 for x in popular_genres: row = mean_values_dataframe.loc[mean_values_dataframe['m_genre'] == x] mean = row.iloc[0]['mean'] std = row.iloc[0]['std'] sample_size = row.iloc[0]['count'] mean_values.append(mean) std_values.append(std) lower = mean - confidence_value*(std/math.sqrt(sample_size)) upper = mean + confidence_value*(std/math.sqrt(sample_size)) lower_error_list.append(lower) upper_error_list.append(upper) error_bars = [lower_error_list,upper_error_list] print("\nError bars values") print("------------------------------") print("0 is the lower, 1 is the upper") print(pd.DataFrame(error_bars)) fig, ax = plt.subplots() #yerr takes as input two a two size list that contains the upper and lower bounds of the error ax.bar(x_pos, mean_values, yerr=error_bars, align='center', alpha=0.5, ecolor='black', capsize=10) ax.set_ylabel('Mean value') ax.set_xticks(x_pos) ax.set_xticklabels(labels,rotation='vertical') ax.yaxis.grid(True) plt.show() ###Output _____no_output_____ ###Markdown 
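Since Matplotlib interprets `yerr` as an offset from the bar height rather than as the absolute endpoints of the interval, the margin of error Za/2 * σ/√(n) itself is what gets passed to `yerr`. A compact sketch reusing the `popular_genres`, `mean_values_dataframe`, `confidence_value`, `x_pos` and `mean_values` objects from the cell above:

```Python
import math

margins = []
for genre in popular_genres:
    row = mean_values_dataframe.loc[mean_values_dataframe['m_genre'] == genre].iloc[0]
    margins.append(confidence_value * row['std'] / math.sqrt(row['count']))  # Za/2 * sigma / sqrt(n)

fig, ax = plt.subplots()
ax.bar(x_pos, mean_values, yerr=margins, align='center', alpha=0.5, ecolor='black', capsize=10)
ax.set_xticks(x_pos)
ax.set_xticklabels(popular_genres, rotation='vertical')
ax.set_ylabel('Mean worldwide gross')
plt.show()
```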
T-testingThe t score is a ratio between the difference between two groups and the difference within the groups. The larger the t score, the more difference there is between groups. The smaller the t score, the more similarity there is between groups. When runninG a t-test, the bigger the t-value, the more likely it is that the results are repeatable. Every t-value has a p-value to go with it. A p-value is the probability that the results from the sample data occurred by chance. P-values are from 0% to 100%. They are usually written as a decimal. For example, a p value of 5% is 0.05. Low p-values are good. They indicate that the data did not occur by chance. For example, a p-value of .01 means there is only a 1% probability that the results of the experiment happened by chance. In most cases, a p-value of 0.05 (5%) is accepted to mean the data is valid. ###Code for x,obj in enumerate(popular_genres): first_dataframe = movie_data[movie_data["m_genre"].str.contains(popular_genres[x])] first_array = first_dataframe['w_gross'] for y,objj in enumerate(popular_genres): second_dataframe = movie_data[movie_data["m_genre"].str.contains(popular_genres[y])] second_array = second_dataframe['w_gross'] t2, p2 = stats.ttest_ind(first_array.to_numpy(),second_array.to_numpy(),equal_var=False) print("--------------Comparing pairs ---------------") print(popular_genres[x]," t = " + str(t2)) print(popular_genres[y]," p = " + str(p2)) ###Output _____no_output_____ ###Markdown They stoped making good movies anymore!(data ratings say thats true, but i think were biased over the 'old things are good' and nostalgia) ###Code movie_data['r_date'] = pd.to_datetime(movie_data['r_date']) #groupby year and take the mean values a1 = pd.DataFrame(movie_data.groupby(movie_data['r_date'].dt.strftime('%Y'))['imdb_rating'].mean()) a2 = pd.DataFrame(movie_data.groupby(movie_data['r_date'].dt.strftime('%Y'))['tomatoes'].mean()) y_averages = (a1.join(a2)).reset_index() print(y_averages) ###Output _____no_output_____ ###Markdown It looks like movies in the future are going to be better!We can see that values after 2010 are from the future. We're not going to need them. Also values before 1970 have sparse data for every decade. We will keep the years from 1970 to 2010. ###Code y_averages = y_averages.astype({"r_date": int}) y_averages = y_averages.drop(y_averages[(y_averages.r_date < 1970) | (y_averages.r_date > 2010)].index) y_averages = y_averages.reset_index(drop=True) y_averages = y_averages.set_index('r_date') print(y_averages) ###Output _____no_output_____ ###Markdown The cleaned series have no year in-between missing so we can computer the average decades by averaging every 10 rows into one. ###Code y_averages_10_yr = y_averages.set_index(np.arange(len(y_averages)) // 10).mean(level=0) print(y_averages_10_yr) print('\ntomatoes graph') y_averages_10_yr['tomatoes'].plot.line() plt.show() print('imdb graph') y_averages_10_yr['imdb_rating'].plot.line() ###Output _____no_output_____ ###Markdown As we expected we have 4 decades from 1970 to 2010. According to the IMDB the rating of every decade has fallen about 0.72 in 4 decades. Thats not a big difference but according to the rotten tomatoes movie index the rating has fallen for about 30 units or 3 units in the IMDB scale. For the IMDB the numerical rating is based off of votes from users on a 1-10 star scale. 
The ratings are then normalized using a Bayesian filtering formula that strips out "outlier" overly negative or positive ratings, since IMDB has to deal with "spam" votes due to its open registration system. Rotten Tomatoes also offers user voting, but its primary rating metric, the "Tomatometer", is a measure of the number of "FRESH" reviews as a percentage of overall reviews. The reviews counted into the Tomatometer come from a fixed list of selected critics/publications that remains uniform across the whole site (usually professional, and always pre-screened based on specific criteria, including the requirement to have reviewed at least 100 films over the last two years). As for the saying that "they don’t make such good movies anymore": according to their votes, the users don't seem to agree with it, while the more "professional" critics behind the Tomatometer do seem to confirm it in our dataset.

Make your own question about the data
Which of the top directors made the most money (worldwide gross)? And as the number of movies a director makes grows, does the money per movie rise as well? By "top" we mean the directors that made more than 10 movies.
###Code
directors = movie_data['Director'].unique()
director_counts = pd.DataFrame(movie_data['Director'].value_counts())

#find the mean, count and sum of the worldwide gross per director
director_values = pd.DataFrame(movie_data.groupby('Director')['w_gross'].agg([np.mean, 'count', np.sum]))
#reset the index from the aggregation
director_values = director_values.reset_index()

money_per_movie = []
#iterate over rows
for index, row in director_values.iterrows():
    money_per_movie.append(row['sum']/row['count'])
#add column to the dataframe
director_values['money_per_movie'] = money_per_movie

#drop all directors that made 10 or fewer movies
director_values = director_values.astype({"count": int})
director_values = director_values.drop(director_values[director_values['count'] <= 10].index)
director_values = director_values.reset_index(drop=True)

#sort them by the money they made per movie
director_values = director_values.sort_values(by=['money_per_movie'], ascending=False)
print(director_values)
###Output
            Director          mean  count           sum  money_per_movie
8   Steven Spielberg  3.382371e+08     17  5.750030e+09     3.382371e+08
6    Robert Zemeckis  3.199067e+08     11  3.518973e+09     3.199067e+08
5       Ridley Scott  1.843963e+08     12  2.212755e+09     1.843963e+08
4     Richard Donner  1.281672e+08     11  1.409839e+09     1.281672e+08
2    Joel Schumacher  1.181810e+08     12  1.418172e+09     1.181810e+08
3    Martin Scorsese  1.001857e+08     15  1.502785e+09     1.001857e+08
1     Clint Eastwood  9.065373e+07     11  9.971910e+08     9.065373e+07
0     Brian De Palma  4.127582e+07     11  4.540340e+08     4.127582e+07
9        Woody Allen  3.027158e+07     14  4.238022e+08     3.027158e+07
7          Spike Lee  2.898311e+07     15  4.347467e+08     2.898311e+07
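###Markdown
The second half of the question (does making more movies go hand in hand with earning more per movie?) is not answered by the table above. A quick sketch of one way to look at it, reusing the director_values dataframe built in the previous cell: correlate the movie count with the per-movie gross for the top directors.
###Code
#rough check of the second question: does a larger movie count go with a larger per-movie gross?
#uses the director_values dataframe from the previous cell (directors with more than 10 movies)
corr_count_vs_money = director_values['count'].corr(director_values['money_per_movie'], method='pearson')
print("Pearson correlation between number of movies and money per movie:", corr_count_vs_money)

#simple scatter plot of the same relationship
director_values.plot.scatter(x='count', y='money_per_movie')
plt.show()
###Output
_____no_output_____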
01.getting-started/10.register-model-create-image-deploy-service/10.register-model-create-image-deploy-service.ipynb
###Markdown
Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License.

10. Register Model, Create Image and Deploy Service
This example shows how to deploy a web service in step-by-step fashion:
1. Register model
2. Query versions of models and select one to deploy
3. Create Docker image
4. Query versions of images
5. Deploy the image as web service

**IMPORTANT**:
* This notebook requires you to first complete the "01.SDK-101-Train-and-Deploy-to-ACI.ipynb" Notebook

The 101 Notebook taught you how to deploy a web service directly from a model in one step. This Notebook shows a more advanced approach that gives you more control over model versions and Docker image versions.

Prerequisites
Make sure you go through the [00. Installation and Configuration](00.configuration.ipynb) Notebook first if you haven't.
###Code
# Check core SDK version number
import azureml.core

print("SDK version:", azureml.core.VERSION)
###Output
_____no_output_____
###Markdown
Initialize Workspace
Initialize a workspace object from persisted configuration.
###Code
from azureml.core import Workspace

ws = Workspace.from_config()
print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep = '\n')
###Output
_____no_output_____
###Markdown
Register Model
You can add tags and descriptions to your models. Note you need to have a `sklearn_regression_model.pkl` file in the current directory. This file is generated by the 01 notebook. The call below registers that file as a model with the same name `sklearn_regression_model.pkl` in the workspace.
Using tags, you can track useful information such as the name and version of the machine learning library used to train the model. Note that tags must be alphanumeric.
###Code
from azureml.core.model import Model
import sklearn

library_version = "sklearn"+sklearn.__version__.replace(".","x")
model = Model.register(model_path = "sklearn_regression_model.pkl",
                       model_name = "sklearn_regression_model.pkl",
                       tags = {'area': "diabetes", 'type': "regression", 'version': library_version},
                       description = "Ridge regression model to predict diabetes",
                       workspace = ws)
###Output
_____no_output_____
###Markdown
You can explore the registered models within your workspace and query by tag. Models are versioned. If you call the register_model command many times with the same model name, you will get multiple versions of the model with increasing version numbers.
###Code
regression_models = Model.list(workspace=ws, tags=['area'])
for m in regression_models:
    print("Name:", m.name, "\tVersion:", m.version, "\tDescription:", m.description, m.tags)
###Output
_____no_output_____
###Markdown
You can pick a specific model to deploy
###Code
print(model.name, model.description, model.version, sep = '\t')
###Output
_____no_output_____
###Markdown
Create Docker Image
Show `score.py`. Note that the `sklearn_regression_model.pkl` in the `get_model_path` call refers to the model of that name registered under the workspace. It is NOT referencing the local file.
###Code
%%writefile score.py
import pickle
import json
import numpy
from sklearn.externals import joblib
from sklearn.linear_model import Ridge
from azureml.core.model import Model

def init():
    global model
    # note here "sklearn_regression_model.pkl" is the name of the model registered under the workspace
    # this is a different behavior than before when the code is run locally, even though the code is the same.
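    # get_model_path resolves the location of the registered model file inside the deployed container,
    # where the file is copied when the image is built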
    model_path = Model.get_model_path('sklearn_regression_model.pkl')
    # deserialize the model file back into a sklearn model
    model = joblib.load(model_path)

# note you can pass in multiple rows for scoring
def run(raw_data):
    try:
        data = json.loads(raw_data)['data']
        data = numpy.array(data)
        result = model.predict(data)
        return json.dumps({"result": result.tolist()})
    except Exception as e:
        # if scoring fails, return the error message instead
        return json.dumps({"result": str(e)})

from azureml.core.conda_dependencies import CondaDependencies

myenv = CondaDependencies.create(conda_packages=['numpy','scikit-learn'])

with open("myenv.yml","w") as f:
    f.write(myenv.serialize_to_string())
###Output
_____no_output_____
###Markdown
Note that the following command can take a few minutes. You can add tags and descriptions to images. Also, an image can contain multiple models.
###Code
from azureml.core.image import Image, ContainerImage

image_config = ContainerImage.image_configuration(runtime = "python",
                                                  execution_script = "score.py",
                                                  conda_file = "myenv.yml",
                                                  tags = {'area': "diabetes", 'type': "regression"},
                                                  description = "Image with ridge regression model")

image = Image.create(name = "myimage1",
                     # this is the model object
                     models = [model],
                     image_config = image_config,
                     workspace = ws)

image.wait_for_creation(show_output = True)
###Output
_____no_output_____
###Markdown
List images by tag and find out the detailed build log for debugging.
###Code
for i in Image.list(workspace = ws, tags = ["area"]):
    print('{}(v.{} [{}]) stored at {} with build log {}'.format(i.name, i.version, i.creation_state, i.image_location, i.image_build_log_uri))
###Output
_____no_output_____
###Markdown
Deploy image as web service on Azure Container Instance
Note that the service creation can take a few minutes.
###Code
from azureml.core.webservice import AciWebservice

aciconfig = AciWebservice.deploy_configuration(cpu_cores = 1,
                                               memory_gb = 1,
                                               tags = {'area': "diabetes", 'type': "regression"},
                                               description = 'Predict diabetes using regression model')

from azureml.core.webservice import Webservice

aci_service_name = 'my-aci-service-2'
print(aci_service_name)
aci_service = Webservice.deploy_from_image(deployment_config = aciconfig,
                                           image = image,
                                           name = aci_service_name,
                                           workspace = ws)
aci_service.wait_for_deployment(True)
print(aci_service.state)
###Output
_____no_output_____
###Markdown
Test web service
Call the web service with some dummy input data to get a prediction.
###Code
import json

test_sample = json.dumps({'data': [
    [1,2,3,4,5,6,7,8,9,10],
    [10,9,8,7,6,5,4,3,2,1]
]})
test_sample = bytes(test_sample, encoding = 'utf8')

prediction = aci_service.run(input_data = test_sample)
print(prediction)
###Output
_____no_output_____
###Markdown
Delete ACI to clean up
###Code
aci_service.delete()
###Output
_____no_output_____
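###Markdown
For completeness: while the service was still deployed (i.e., before the delete call above), it could also have been called over plain HTTP instead of through the SDK, since the Webservice object exposes a scoring_uri. A minimal sketch, assuming the `aci_service` object from the deployment step is still available and the `requests` package is installed:
###Code
import json
import requests

# the deployed Webservice exposes its REST endpoint via scoring_uri
headers = {'Content-Type': 'application/json'}
payload = json.dumps({'data': [[1,2,3,4,5,6,7,8,9,10]]})

response = requests.post(aci_service.scoring_uri, data=payload, headers=headers)
print(response.status_code)
print(response.text)
###Output
_____no_output_____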
statistics/pvalues_analysis-from-CWoLa.ipynb
###Markdown Calculation of p-valuesThis notebook is for generating figures 8, 13, 14 of arXiv:1805.02664 Import and initialize some functions ###Code from scipy.optimize import minimize import numpy as np import matplotlib.pyplot as plt from scipy.optimize import curve_fit from scipy.stats import poisson, norm, kstest import numdifftools from numpy.linalg import inv import matplotlib.gridspec as gridspec sigaeloss_bb = np.load('../data_strings/sigae_2prong_loss_bb1.npy') sigae3ploss_bb = np.load('../data_strings/sigae_3prong_loss_bb1.npy') #sigaeloss_bkg = np.load('../data/sigaeloss_bkg.npy') mass_bb = np.load('../data/mass_bb1.npy') #mass_bkg = np.load('../data/mass_bkg.npy') bkgaeloss_bb = np.load('../data_strings/bkgaeloss_bb1.npy') #bkgaeloss_bkg = np.load('../data/bkgaeloss_bkg.npy') #sigaeloss_bb = np.load('../data_strings/sigae_2prong_loss_bb1.npy') #sigae3ploss_bb = np.load('../data_strings/sigae_3prong_loss_bb1.npy') bkgae0loss_bb = np.load('../data_strings/bkgae_rndbkg_loss_bb1.npy') bkgae1loss_bb = np.load('../data_strings/bkgae_purebkg_loss_bb1.npy') import pandas as pd f_bb = pd.read_hdf('/data/t3home000/spark/QUASAR/preprocessing/conventional_tau_BB1_rnd.h5') dt = f_bb.values dt.shape f_bb.columns correct = (dt[:,3]>0) &(dt[:,19]>0) dt = dt[correct] mass_bb = mass_bb[correct] mass_bb.shape dt.shape sigaeloss_bb.shape for i in range(13,19): dt[:,i] = dt[:,i]/dt[:,3] for i in range(29,35): dt[:,i] = dt[:,i]/(dt[:,19]) correct = (dt[:,29]>=0) &(dt[:,29]<=1)&(dt[:,30]>=0) &(dt[:,30]<=1)&(dt[:,31]>=0) &(dt[:,31]<=1)&(dt[:,32]>=0) &(dt[:,32]<=1)&(dt[:,33]>=0) &(dt[:,33]<=1)&(dt[:,34]>=-0.01) &(dt[:,34]<=1) dt = dt[correct] mass_bb = mass_bb[correct] for i in range(13,19): dt[:,i] = dt[:,i]/dt[:,3] for i in range(29,35): dt[:,i] = dt[:,i]/(dt[:,19]) bins = np.linspace(-2,2,101) plt.hist(dt[:,23],bins,alpha=0.2,color='r'); #plt.hist(dt[bkg_idx,14],bins,alpha=0.2,color='b'); cuts = {'bb1':[10, 10, 1.3, 1.3],'bb2':[0.4,0.4, .7], 'bb3':[3,3,1.8]} #'bb1':[8, 8, 2.1] #'bb1':[1000000, 10000, 2.1] #'bb2':[0.4,0.4, .7] #'bb3':[3,3,1.76] ############### Black BOX 1 Default CUT ############## #sigae_wp = 10000 #bkgae_wp = 2.1 sigae_wp, sigae3p_wp, bkgae0_wp, bkgae1_wp = cuts['bb1'] print(sigae_wp, sigae3p_wp, bkgae_wp) print(sigaeloss_bb) bkgae0loss_bb.shape index_bb = np.where((bkgae0loss_bb>bkgae0_wp)&(bkgae1loss_bb>bkgae1_wp)&(sigaeloss_bb<sigae_wp)&(sigae3ploss_bb<sigae3p_wp)&(dt[:,14]>0.85)&(dt[:,18]>0.9))[0] #index_bkg = np.where((bkgaeloss_bkg>bkgae_wp)&(sigaeloss_bkg<sigae_wp))[0] #index_bb = np.where((dt[:,18]>0.9))[0] #print(len(index_bb),len(index_bkg)) print(len(index_bb)) index_bb.shape #### Without Scalefactor #PLOT FOR BLACK BOX 1 with REALNVP plt.style.use('ggplot') bins = np.linspace(3000,6900,27) #print(bins) #bkg_hist = plt.hist(mass_bkg[index_bkg],bins=bins,alpha=0.3,color='r',label='background'); obs_hist = plt.hist(mass_bb[index_bb],bins=bins,alpha=0.3,color='b',label='Blackbox2') plt.xlabel(r'$m_{JJ}$ [GeV]') plt.ylabel('Number of events') plt.legend(loc='upper right') plt.title('$m_{JJ}$ without SF') plt.axvline(x=3823) plt.show() index_bb = np.where((bkgaeloss_bb>bkgae_wp)&(sigaeloss_bb<sigae_wp)&(sigae3ploss_bb<sigae3p_wp)&(dt[:,14]>0.85)&(dt[:,18]>0.9)&(dt[:,0]>3700)&(dt[:,0]<3900))[0] index_bb.shape #### Without Scalefactor #PLOT FOR BLACK BOX 1 with REALNVP plt.style.use('ggplot') bins = np.linspace(0,2000,27) #print(bins) #bkg_hist = plt.hist(mass_bkg[index_bkg],bins=bins,alpha=0.3,color='r',label='background'); obs_hist = 
plt.hist(dt[index_bb,3],bins=bins,alpha=0.3,color='b',label='Blackbox2') plt.xlabel(r'$m_{JJ}$ [GeV]') plt.ylabel('Number of events') plt.legend(loc='upper right') plt.title('$m_{JJ}$ without SF') plt.axvline(x=732) plt.show() obs_hist #datasets_nosig = bkg_hist[0] datasets_sig = obs_hist[0] datasets_sig = [102., 91., 94., 72., 66., 56., 51., 42., 36., 27., 30., 24., 19., 29., 11., 11., 14., 8., 6., 6., 4., 6., 9., 3., 3., 2., 2., 0., 0., 1., 1., 0.] bins = [3050, 3200, 3350, 3500, 3650, 3800, 3950, 4100, 4250, 4400, 4550, 4700, 4850, 5000, 5150, 5300, 5450, 5600, 5750, 5900, 6050, 6200, 6350, 6500, 6650, 6800, 6950, 7100, 7250, 7400, 7550, 7700, 7850] import ROOT as r xlow, xhigh = 2800,7000 BINS = (xhigh-xlow)/100 BINS = int(BINS) roothist_obs = r.TH1F('data_obs','data_obs',BINS,xlow,xhigh) for i in range(0,BINS): roothist_obs.SetBinContent(i+1,obs_hist[0][i]) f = r.TFile.Open("blackbox1.root",'recreate') roothist_obs.Write() f.Close() f = r.TFile.Open("blackbox1.root",'read') f.ls() f.Close() c11 = r.TCanvas("myCanvasName","The Canvas Title",800,600) roothist_obs.Draw() c11.Draw() #filenames_nosig = ["../data/finalscan_nosignal/0_005_" + str(i) + "_bincounts.dat" for i in range(3,12)] #filenames_sig = ["../data/finalscan_signal/sig_bin" + str(i) + "_bincounts.dat" for i in range(3,12)] #datasets_nosig = np.array([np.loadtxt(filename) for filename in filenames_nosig]) #datasets_sig = np.array([np.loadtxt(filename) for filename in filenames_sig]) def get_p_value(ydata,binvals,mask=[],verbose=0,plotfile=None,yerr=None,return_teststat = False,plotsys=True,myax=None): ydata = np.array(ydata) #Assume poisson is gaussian with N+1 variance if not yerr: yerr = np.sqrt(ydata+1) else: yerr=np.array(yerr) def fit_func(x,p1,p2,p3): #see the ATLAS diboson resonance search: https://arxiv.org/pdf/1708.04445.pdf. xi = 0. y = x/13000. return p1*(1.-y)**(p2-xi*p3)*y**-p3 xdata = np.array([0.5*(binvals[i]+binvals[i+1]) for i in range(0,len(binvals)-1)]) xwidths = np.array([-binvals[i]+binvals[i+1] for i in range(0,len(binvals)-1)]) #Assuming inputs are bin counts, this is needed to get densities. Important for variable-width bins ydata = np.array(ydata) * 100 / xwidths yerr = np.array(yerr)*100/ np.array(xwidths) #Least square fit, masking out the signal region popt, pcov = curve_fit(fit_func, np.delete(xdata,mask), np.delete(ydata,mask),sigma=np.delete(yerr,mask),maxfev=10000) if verbose: print('fit params: ', popt) ydata_fit = np.array([fit_func(x,popt[0],popt[1],popt[2]) for x in xdata]) #Check that the function is a good fit to the sideband residuals = np.delete((ydata - ydata_fit)/yerr,mask) if verbose > 0: print("Goodness: ",kstest(residuals, norm(loc=0,scale=1).cdf)) print(residuals) print(((ydata - ydata_fit)/yerr)[mask]) print('\n') #The following code is used to get the bin errors by propagating the errors on the fit params def fit_func_array(parr): #see the ATLAS diboson resonance search: https://arxiv.org/pdf/1708.04445.pdf. p1, p2, p3 = parr xi = 0. 
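        # 3-parameter dijet fit from the ATLAS diboson search:
        # f(x) = p1 * (1 - y)^(p2 - xi*p3) * y^(-p3), with y = x/13000 (sqrt(s) = 13 TeV); xi is fixed to 0 here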
return np.array([p1*(1.-(x/13000.))**(p2-xi*p3)*(x/13000.)**-p3 for x in xdata]) jac=numdifftools.core.Jacobian(fit_func_array) x_cov=np.dot(np.dot(jac(popt),pcov),jac(popt).T) #For plot, take systematic error band as the diagonal of the covariance matrix y_unc=np.sqrt([row[i] for i, row in enumerate(x_cov)]) if (plotfile != None) & (plotfile != 'ax'): if plotsys: plt.fill_between(xdata,ydata_fit+y_unc,ydata_fit-y_unc,facecolor='gray',edgecolor=None,alpha=0.4) yerr2 = np.array(yerr) yerr2[yerr>=ydata] = yerr2[yerr>=ydata]*0.8 plt.errorbar(xdata, ydata,[yerr2,yerr],None, 'bo', label='data',markersize=4) plt.plot(xdata, ydata_fit, 'r--', label='data') plt.yscale('log', nonposy='clip') if plotfile == 'ax': if plotsys: myax.fill_between(xdata,ydata_fit+y_unc,ydata_fit-y_unc,facecolor='gray',edgecolor=None,alpha=0.4) yerr2 = np.array(yerr) yerr2[yerr>=ydata] = yerr2[yerr>=ydata]*0.8 myax.errorbar(xdata, ydata,[yerr2,yerr],None, 'bo', label='data',markersize=4) myax.plot(xdata, ydata_fit, 'r--', label='data') myax.set_yscale('log', nonposy='clip') if plotfile == 'show': plt.show() elif plotfile: plt.savefig(plotfile) #Now, let's compute some statistics. # Will use asymptotic formulae for p0 from Cowan et al arXiv:1007.1727 # and systematics procedure from https://cds.cern.ch/record/2242860/files/NOTE2017_001.pdf #First get systematics in the signal region #This function returns array of signal predictions in the signal region def signal_fit_func_array(parr): #see the ATLAS diboson resonance search: https://arxiv.org/pdf/1708.04445.pdf. p1, p2, p3 = parr xi = 0. return np.array([np.sum([p1*(1.-(x/13000.))**(p2-xi*p3)*(x/13000.)**-p3*xwidths[mask[i]]/100 for i, x in enumerate(xdata[mask])])]) #Get covariance matrix of prediction uncertainties in the signal region jac=numdifftools.core.Jacobian(signal_fit_func_array) x_signal_cov=np.dot(np.dot(jac(popt),pcov),jac(popt).T) #Inverse signal region covariance matrix: inv_x_signal_cov = inv(x_signal_cov) #Get observed and predicted event counts in the signal region obs = np.array([np.sum(np.array(ydata)[mask]*np.array(xwidths)[mask]/100)]) expected = np.array([np.sum([fit_func(xdata[targetbin],popt[0],popt[1],popt[2])*xwidths[targetbin]/100 for targetbin in mask])]) #Negative numerator of log likelihood ratio, for signal rate mu = 0 def min_log_numerator(expected_nuis_arr): #expected_nuis_arr is the array of systematic background uncertainty nuisance parameters #These are event rate densities expected_nuis_arr = np.array(expected_nuis_arr) to_return = 0 #Poisson terms for i, expected_nuis in enumerate(expected_nuis_arr): #Poisson lambda. Have to rescale nuisance constribution by bin width my_lambda = expected[i]+expected_nuis_arr[i] #Prevent negative predicted rates if my_lambda < 10**-10: my_lambda = 10**-10 #Poisson term. Ignore the factorial piece which will cancel in likelihood ratio to_return = to_return + (obs[i]*np.log(my_lambda) - my_lambda) #Gaussian nuisance term nuisance_term = -0.5*np.dot(np.dot(expected_nuis_arr,inv_x_signal_cov),expected_nuis_arr) to_return = to_return + nuisance_term return -to_return def jac_min_log_numerator(expected_nuis_arr): #expected_nuis_arr is the array of systematic background uncertainty nuisance parameters #These are event rate densities expected_nuis_arr = np.array(expected_nuis_arr) to_return = np.array([0.]) #Poisson terms #Poisson lambda. 
Have to rescale nuisance constribution by bin width my_lambda = expected+expected_nuis_arr dmy_lambda = np.array([1.]) #Prevent negative predicted rates my_lambda[my_lambda < 10**-10] = np.ones(len(my_lambda[my_lambda < 10**-10])) * 10**-10 dmy_lambda[my_lambda < 10**-10] = 0 #Poisson term. Ignore the factorial piece which will cancel in likelihood ratio to_return = to_return + (obs*dmy_lambda/my_lambda - dmy_lambda) #Gaussian nuisance term nuisance_term = -np.dot(inv_x_signal_cov,expected_nuis_arr) to_return = to_return + nuisance_term return -to_return #Initialization of nuisance params expected_nuis_array_init = [0.02] #shift log likelihood to heklp minimization algo def rescaled_min_log_numerator(expected_nuis_arr): return min_log_numerator(expected_nuis_arr) - min_log_numerator(expected_nuis_array_init) #Perform minimization over nuisance parameters. Set bounds for bg nuisance at around 8 sigma. bnds=[[-8*y_unc[mask[0]],8*y_unc[mask[0]]]] minimize_log_numerator = minimize(rescaled_min_log_numerator, expected_nuis_array_init, jac=jac_min_log_numerator, bounds=bnds) if verbose: print("numerator: ", minimize_log_numerator.items(),'\n') #Now get likelihood ratio denominator def min_log_denom(nuis_arr): #nuis_arr contains the bg systematics and also the signal rate expected_nuis_arr = np.array(nuis_arr)[:1] #print(expected_nuis_arr) mu = nuis_arr[1] #Signal prediction pred = [mu] to_return = 0 #Poisson terms for i, expected_nuis in enumerate(expected_nuis_arr): #Poisson lambda my_lambda = expected[i]+expected_nuis_arr[i] + pred[i] #Prevent prediction from going negative if my_lambda < 10**-10: my_lambda = 10**-10 #Poisson term. Ignore the factorial piece which will cancel in likelihood ratio to_return = to_return + (obs[i]*np.log(my_lambda) - my_lambda) #Gaussian nuisance term nuisance_term = -0.5*np.dot(np.dot(expected_nuis_arr,inv_x_signal_cov),expected_nuis_arr) to_return = to_return + nuisance_term return -to_return def jac_min_log_denom(nuis_arr): #expected_nuis_arr is the array of systematic background uncertainty nuisance parameters #These are event rate densities expected_nuis_arr = np.array(nuis_arr)[:1] mu = nuis_arr[1] pred = [mu] to_return_first = np.array([0.]) #Poisson terms #Poisson lambda. Have to rescale nuisance constribution by bin width my_lambda = expected+expected_nuis_arr+pred dmy_lambda = np.array([1.]) #Prevent prediction from going negative my_lambda[my_lambda < 10**-10] = np.ones(len(my_lambda[my_lambda < 10**-10])) * 10**-10 dmy_lambda[my_lambda < 10**-10] = 0 #Poisson term. Ignore the factorial piece which will cancel in likelihood ratio to_return_first = to_return_first + (obs*dmy_lambda/my_lambda - dmy_lambda) #Gaussian nuisance term nuisance_term = -np.dot(inv_x_signal_cov,expected_nuis_arr) to_return_first = to_return_first + nuisance_term to_return_last = np.array([0.]) dpred = np.array([[1.]]) my_lambda = expected+expected_nuis_arr+pred dmy_lambda = dpred to_return_last = np.dot((obs/my_lambda),dmy_lambda.T) - np.sum(dmy_lambda,axis=1) return -np.append(to_return_first, to_return_last) #initizalization for minimization nuis_array_init = [0.01,1.] #Shift log likelihood for helping minimization algo. 
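    # (subtracting the log-likelihood at the initial point keeps the objective close to zero,
    # which makes the numerical minimization better conditioned)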
def rescaled_min_log_denom(nuis_arr): return min_log_denom(nuis_arr) - min_log_denom(nuis_array_init) bnds = ((None,None),(None,None)) minimize_log_denominator = minimize(rescaled_min_log_denom,nuis_array_init, jac=jac_min_log_denom, bounds=bnds) if verbose: print("Denominator: ", minimize_log_denominator.items(),'\n') if minimize_log_denominator.x[-1] < 0: Zval = 0 neglognum = 0 neglogden = 0 else: neglognum = min_log_numerator(minimize_log_numerator.x) neglogden = min_log_denom(minimize_log_denominator.x) Zval = np.sqrt(2*(neglognum - neglogden)) p0 = 1-norm.cdf(Zval) if verbose: print("z = ", Zval) print("p0 = ", p0) #plt.title(str(p0)) # if plotfile == 'show': # plt.show() # elif plotfile: # plt.savefig(plotfile) if return_teststat: return p0, 2*(neglognum - neglogden) else: return p0 def add_mjjplot(ydata,binvals,mask=[],verbose=0,plotfile=None,yerr=None,plotsys=True,myax=None,plotfit=True): ydata = np.array(ydata) #Assume poisson is gaussian with N+1 variance if not yerr: yerr = np.sqrt(ydata+1) else: yerr=np.array(yerr) def fit_func(x,p1,p2,p3): #see the ATLAS diboson resonance search: https://arxiv.org/pdf/1708.04445.pdf. xi = 0. y = x/13000. return p1*(1.-y)**(p2-xi*p3)*y**-p3 xdata = np.array([0.5*(binvals[i]+binvals[i+1]) for i in range(0,len(binvals)-1)]) xwidths = np.array([-binvals[i]+binvals[i+1] for i in range(0,len(binvals)-1)]) #Assuming inputs are bin counts, this is needed to get densities. Important for variable-width bins ydata = np.array(ydata) * 100 / xwidths yerr = np.array(yerr)*100/ np.array(xwidths) #Least square fit, masking out the signal region popt, pcov = curve_fit(fit_func, np.delete(xdata,mask), np.delete(ydata,mask), sigma=np.delete(yerr,mask),maxfev=10000) if verbose: print('fit params: ', popt) ydata_fit = np.array([fit_func(x,popt[0],popt[1],popt[2]) for x in xdata]) #Check that the function is a good fit to the sideband residuals = np.delete((ydata - ydata_fit)/yerr,mask) if verbose > 0: print("Goodness: ",kstest(residuals, norm(loc=0,scale=1).cdf)) print(residuals) print(((ydata - ydata_fit)/yerr)[mask]) print('\n') #The following code is used to get the bin errors by propagating the errors on the fit params def fit_func_array(parr): #see the ATLAS diboson resonance search: https://arxiv.org/pdf/1708.04445.pdf. p1, p2, p3 = parr xi = 0. 
return np.array([p1*(1.-(x/13000.))**(p2-xi*p3)*(x/13000.)**-p3 for x in xdata]) jac=numdifftools.core.Jacobian(fit_func_array) x_cov=np.dot(np.dot(jac(popt),pcov),jac(popt).T) #For plot, take systematic error band as the diagonal of the covariance matrix y_unc=np.sqrt([row[i] for i, row in enumerate(x_cov)]) if (plotfile != None) & (plotfile != 'ax'): if plotfit: if plotsys: plt.fill_between(xdata,ydata_fit+y_unc,ydata_fit-y_unc,color='gray',alpha=0.4) plt.plot(xdata, ydata_fit, 'r--', label='data') plt.errorbar(xdata, ydata,yerr,None, 'bo', label='data',markersize=4) plt.yscale('log', nonposy='clip') if plotfile == 'ax': if plotfit: if plotsys: myax.fill_between(xdata,ydata_fit+y_unc,ydata_fit-y_unc,color='gray',alpha=0.4) myax.plot(xdata, ydata_fit, 'r--', label='data') myax.errorbar(xdata, ydata,yerr,None, 'bo', label='data',markersize=4) myax.set_yscale('log', nonposy='clip') if plotfile == 'show': plt.show() elif plotfile: plt.savefig(plotfile) ###Output _____no_output_____ ###Markdown Define the binning ###Code binvals = bins #binvals = [#1900.,2001.,2107.,2219.,2337.,2461.,2592.,2730., # 2875.,3028.,3189.,3358.,3536.,3724.,3922.,4131.,4350.,4500,4700,4900,5100,5300,5500,5700,5900,6100] bincenters = [0.5*(binvals[i] + binvals[i+1]) for i in range(len(binvals)-1)] len(bincenters) masks = [[bin_i-1,bin_i, bin_i+1] for bin_i in range(1,len(binvals)-2)] print(masks) ###Output [[0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 6], [5, 6, 7], [6, 7, 8], [7, 8, 9], [8, 9, 10], [9, 10, 11], [10, 11, 12], [11, 12, 13], [12, 13, 14], [13, 14, 15], [14, 15, 16], [15, 16, 17], [16, 17, 18], [17, 18, 19], [18, 19, 20], [19, 20, 21], [20, 21, 22], [21, 22, 23], [22, 23, 24], [23, 24, 25], [24, 25, 26], [25, 26, 27], [26, 27, 28], [27, 28, 29], [28, 29, 30], [29, 30, 31]] ###Markdown Calculate and plot p-values for mass scanFigure 8, right ###Code pvalues_sig = [get_p_value(datasets_sig[0:],binvals,mask=mask,verbose=0,plotfile=None) for i, mask in enumerate(masks)] pvalues_nosig = [get_p_value(datasets_nosig[0:],binvals,mask=mask,verbose=0,plotfile=None) for i, mask in enumerate(masks)] plt.plot(bincenters[:-2],pvalues_nosig) plt.semilogy() f.text(0.5,-0.01,r'$m_{JJ} \, / \, \mathrm{GeV}$',ha='center',fontsize=fontsize) plt.ylabel(r'$p_0$',fontsize=fontsize) plt.title(r'No signal') #plt.title(r'With signal') # plt.xlabel(r'$m_{JJ}$') #plt.ylim([0.5*10**(-13),1]) #plt.tight_layout() #plt.savefig('pvalue_plots.pdf', bbox_inches='tight') plt.show() fontsize=22 smfontsize=16 plt.plot(bincenters[:-2],pvalues_sig) plt.semilogy() #plt.text(0.5,-0.01,r'$m_{JJ} \, / \, \mathrm{GeV}$',ha='center',fontsize=fontsize) plt.ylabel(r'$p_0$',fontsize=fontsize) #plt.title(r'No signal') plt.title(r'With signal') #plt.axvline(x=3823,color='b') # plt.xlabel(r'$m_{JJ}$') #plt.ylim([0.5*10**(-13),1]) #plt.tight_layout() #plt.savefig('pvalue_plots.pdf', bbox_inches='tight') dashes = [5,5] color='0.5' linewidth=1.2 for sigma in range(2,6): plt.axhline(1-norm.cdf(sigma),dashes=dashes,color=color,linewidth=linewidth) if sigma > 2: sigmastring = r'$' + str(sigma) + '\sigma$' plt.text(2500,(1-norm.cdf(sigma))*1.1,sigmastring,va='bottom',ha='center',fontsize=smfontsize) plt.show() fontsize=22 smfontsize=16 #binvals = [#1900., # 2001.,2107.,2219.,2337.,2461.,2592.,2730.,2875.,3028.,3189.,3358.,3536.,3724.,3922.,4131.,4350.] 
#bincenters = [0.5*(binvals[i] + binvals[i+1]) for i in range(len(binvals)-1)] bincenters = bincenters[3:18] masks = [[bin_i-1,bin_i, bin_i+1] for bin_i in range(3,18)] plt.close('all') f, axarr = plt.subplots(1,2, sharex=True, sharey=True,figsize=(5*1.4,5)) # plt.figure(figsize=(5,5)) linestyles = [{'dashes':[5,5]},{'linestyle':'-'},{'linestyle':'-'},{'linestyle':'-'},{'linestyle':'-'}] choices = [0,1,4,6] colors = ['black'] colors.extend([plt.cm.magma(i) for i in np.linspace(0.1,0.8,len(choices)-1)]) # colors = ['black'] # colors.extend([plt.cm.viridis(i) for i in np.linspace(0.1,0.95,len(choices)-1)]) effs = [r"100\%",r"10\%",r"1\%",r"0.2\%"] dashes = [5,5] color='0.5' linewidth=1.2 for ax in axarr: for sigma in range(1,8): ax.axhline(1-norm.cdf(sigma),dashes=dashes,color=color,linewidth=linewidth) if sigma > 2: sigmastring = r'$' + str(sigma) + '\sigma$' axarr[0].text(2500,(1-norm.cdf(sigma))*1.1,sigmastring,va='bottom',ha='center',fontsize=smfontsize) for eff_i, effchoice in enumerate(choices): pvalues_nosig = [get_p_value(ydata[1:],binvals,mask=masks[i],verbose=0,plotfile=None) for i, ydata in enumerate(datasets_nosig)] pvalues_sig = [get_p_value(ydata[1:],binvals,mask=masks[i],verbose=0,plotfile=None) for i, ydata in enumerate(datasets_sig)] # plt.subplot(1,2,1) axarr[0].plot(bincenters,pvalues_nosig, color=colors[eff_i], **linestyles[eff_i]) #plt.subplot(1,2,2) axarr[1].plot(bincenters,pvalues_sig, color=colors[eff_i], **linestyles[eff_i]) if eff_i > 0: axarr[1].annotate(effs[eff_i],fontsize=smfontsize, xy=(bincenters[4],pvalues_sig[4]), xycoords='data', xytext=(bincenters[4]+200, pvalues_sig[4]/10), textcoords='data', arrowprops=dict(arrowstyle="-|>", #linestyle="dashed", color="0.0", #patchB=el, shrinkB=5, connectionstyle="arc3,rad=0.3", ), ) # axarr[1].annotate("Test", # xy=(3000, 10**-7), xycoords='data', # xytext=(3200, 10**-8), textcoords='data', # arrowprops=dict(arrowstyle="->", #linestyle="dashed", # color="0.0", # #patchB=el, # shrinkB=5, # connectionstyle="arc3,rad=0.3", # ), # ) f.subplots_adjust(wspace=0) #plt.setp([a.get_yticklabels() for a in f.axes[1:]], visible=False) plt.semilogy() f.text(0.5,-0.01,r'$m_{JJ} \, / \, \mathrm{GeV}$',ha='center',fontsize=fontsize) axarr[0].set_ylabel(r'$p_0$',fontsize=fontsize) axarr[0].set_title(r'No signal') axarr[1].set_title(r'With signal') # plt.xlabel(r'$m_{JJ}$') plt.ylim([0.5*10**(-13),1]) #plt.tight_layout() #plt.savefig('pvalue_plots.pdf', bbox_inches='tight') plt.show() fontsize=22 smfontsize=16 #binvals = [#1900., # 2001.,2107.,2219.,2337.,2461.,2592.,2730.,2875.,3028.,3189.,3358.,3536.,3724.,3922.,4131.,4350.] 
#bincenters = [0.5*(binvals[i] + binvals[i+1]) for i in range(len(binvals)-1)] bincenters = bincenters[3:18] masks = [[bin_i-1,bin_i, bin_i+1] for bin_i in range(3,18)] plt.close('all') f, axarr = plt.subplots(1,2, sharex=True, sharey=True,figsize=(5*1.4,5)) # plt.figure(figsize=(5,5)) linestyles = [{'dashes':[5,5]},{'linestyle':'-'},{'linestyle':'-'},{'linestyle':'-'},{'linestyle':'-'}] choices = [0,1,4,6] colors = ['black'] colors.extend([plt.cm.magma(i) for i in np.linspace(0.1,0.8,len(choices)-1)]) # colors = ['black'] # colors.extend([plt.cm.viridis(i) for i in np.linspace(0.1,0.95,len(choices)-1)]) effs = [r"100\%",r"10\%",r"1\%",r"0.2\%"] dashes = [5,5] color='0.5' linewidth=1.2 for ax in axarr: for sigma in range(1,8): ax.axhline(1-norm.cdf(sigma),dashes=dashes,color=color,linewidth=linewidth) if sigma > 2: sigmastring = r'$' + str(sigma) + '\sigma$' axarr[0].text(2500,(1-norm.cdf(sigma))*1.1,sigmastring,va='bottom',ha='center',fontsize=smfontsize) for eff_i, effchoice in enumerate(choices): pvalues_nosig = [get_p_value(ydata[1:],binvals,mask=masks[i],verbose=0,plotfile=None) for i, ydata in enumerate(datasets_nosig[:,effchoice])] pvalues_sig = [get_p_value(ydata[1:],binvals,mask=masks[i],verbose=0,plotfile=None) for i, ydata in enumerate(datasets_sig[:,effchoice])] # plt.subplot(1,2,1) axarr[0].plot(bincenters,pvalues_nosig, color=colors[eff_i], **linestyles[eff_i]) #plt.subplot(1,2,2) axarr[1].plot(bincenters,pvalues_sig, color=colors[eff_i], **linestyles[eff_i]) if eff_i > 0: axarr[1].annotate(effs[eff_i],fontsize=smfontsize, xy=(bincenters[4],pvalues_sig[4]), xycoords='data', xytext=(bincenters[4]+200, pvalues_sig[4]/10), textcoords='data', arrowprops=dict(arrowstyle="-|>", #linestyle="dashed", color="0.0", #patchB=el, shrinkB=5, connectionstyle="arc3,rad=0.3", ), ) # axarr[1].annotate("Test", # xy=(3000, 10**-7), xycoords='data', # xytext=(3200, 10**-8), textcoords='data', # arrowprops=dict(arrowstyle="->", #linestyle="dashed", # color="0.0", # #patchB=el, # shrinkB=5, # connectionstyle="arc3,rad=0.3", # ), # ) f.subplots_adjust(wspace=0) #plt.setp([a.get_yticklabels() for a in f.axes[1:]], visible=False) plt.semilogy() f.text(0.5,-0.01,r'$m_{JJ} \, / \, \mathrm{GeV}$',ha='center',fontsize=fontsize) axarr[0].set_ylabel(r'$p_0$',fontsize=fontsize) axarr[0].set_title(r'No signal') axarr[1].set_title(r'With signal') # plt.xlabel(r'$m_{JJ}$') plt.ylim([0.5*10**(-13),1]) #plt.tight_layout() #plt.savefig('pvalue_plots.pdf', bbox_inches='tight') plt.show() ###Output _____no_output_____ ###Markdown Figure 8 ###Code import matplotlib.gridspec as gridspec fontsize=20 smfontsize=16 binvals = [#1900., 2001.,2107.,2219.,2337.,2461.,2592.,2730.,2875.,3028.,3189.,3358.,3536.,3724.,3922.,4131.,4350.] bincenters = [0.5*(binvals[i] + binvals[i+1]) for i in range(len(binvals)-1)] bincenters = bincenters[3:12] masks = [[bin_i-1,bin_i, bin_i+1] for bin_i in range(3,12)] plt.close('all') fig = plt.figure(figsize=(16,5)) outer = gridspec.GridSpec(1,2,wspace=0.3,width_ratios=[5,5]) plt_i = 0 inner = gridspec.GridSpecFromSubplotSpec(1, 1, subplot_spec=outer[0]) ax = plt.Subplot(fig, inner[0]) ax.fill_between(mybinboundaries[:-1],sighist*100/mybinwidths,step='post', color='palegoldenrod',alpha=1.) 
for line_i in [4,6,9,11]: ax.axvline(binvals[line_i],color='0.4',dashes=[5,3]) signallabel_y=0.15*10**6 ax.annotate("",xy=(binvals[6],signallabel_y), xytext = (binvals[9],signallabel_y), arrowprops=dict(arrowstyle='<->')) ax.text(0.5*(binvals[6] + binvals[9]),signallabel_y,r"Signal"'\n'r"region",va='center',ha='center',fontsize=smfontsize, bbox=dict(facecolor='white',edgecolor='none', alpha=1.0)) sidebandlabel_y=0.8*10**-1 ax.annotate("",xy=(binvals[4],sidebandlabel_y), xytext = (binvals[6],sidebandlabel_y), arrowprops=dict(arrowstyle='<->')) ax.annotate("",xy=(binvals[9],sidebandlabel_y), xytext = (binvals[11],sidebandlabel_y), arrowprops=dict(arrowstyle='<->')) ax.annotate(r"Sideband", xytext=(2040,sidebandlabel_y*4/3), xy = (0.5*(binvals[9] + binvals[11]),sidebandlabel_y), fontsize = smfontsize, color='white', arrowprops=dict(arrowstyle="-|>", #linestyle="dashed", color="0.0", #patchB=el, shrinkB=5, connectionstyle="arc3,rad=-0.2", ),) ax.annotate(r"Sideband", xytext=(2040,sidebandlabel_y*4/3), xy = (0.5*(binvals[4] + binvals[6]),sidebandlabel_y), fontsize = smfontsize, arrowprops=dict(arrowstyle="-|>", #linestyle="dashed", color="0.0", #patchB=el, shrinkB=5, connectionstyle="arc3,rad=-0.3", ),) chosen_set = [0,1,4,6,9] [get_p_value(datasets_sig[4,i,1:],binvals,mask=[6,7,8],verbose=0,plotfile='ax',myax=ax) for i in chosen_set] ax.set_ylabel(r'Events / 100 GeV',fontsize=fontsize) ax.set_xlabel(r'$m_{JJ} \, / \, \mathrm{GeV}$',fontsize=fontsize) ax.set_ylim([2*10**-2,10**6]) ax.set_xlim([2001,4350]) fig.add_subplot(ax) inner = gridspec.GridSpecFromSubplotSpec(1, 2, subplot_spec=outer[1], wspace=0) axarr = [plt.Subplot(fig, inner[0]),plt.Subplot(fig, inner[1])] dashes = [5,5] color='0.5' linewidth=1.2 linestyles = [{'dashes':[5,5]},{'linestyle':'-'},{'linestyle':'-'},{'linestyle':'-'},{'linestyle':'-'}] choices = [0,1,4,6] colors = ['black'] colors.extend([plt.cm.magma(i) for i in np.linspace(0.1,0.8,len(choices)-1)]) # colors = ['black'] # colors.extend([plt.cm.viridis(i) for i in np.linspace(0.1,0.95,len(choices)-1)]) effs = [r"100\%",r"10\%",r"1\%",r"0.2\%"] for ax in axarr: ax.set_yscale('log') ax.set_ylim([2.*10**-14,1]) for sigma in range(1,8): ax.axhline(1-norm.cdf(sigma),dashes=dashes,color=color,linewidth=linewidth) if sigma > 2: sigmastring = r'$' + str(sigma) + '\sigma$' axarr[0].text(2500,(1-norm.cdf(sigma))*1.1,sigmastring,va='bottom',ha='center',fontsize=smfontsize) for eff_i, effchoice in enumerate(choices): pvalues_nosig = [get_p_value(ydata[1:],binvals,mask=masks[i],verbose=0,plotfile=None) for i, ydata in enumerate(datasets_nosig[:,effchoice])] pvalues_sig = [get_p_value(ydata[1:],binvals,mask=masks[i],verbose=0,plotfile=None) for i, ydata in enumerate(datasets_sig[:,effchoice])] axarr[0].plot(bincenters,pvalues_nosig, color=colors[eff_i], **linestyles[eff_i]) axarr[1].plot(bincenters,pvalues_sig, color=colors[eff_i], **linestyles[eff_i]) if eff_i > 0: axarr[1].annotate(effs[eff_i],fontsize=smfontsize, xy=(bincenters[4],pvalues_sig[4]), xycoords='data', xytext=(bincenters[4]+200, pvalues_sig[4]/10), textcoords='data', arrowprops=dict(arrowstyle="-|>", #linestyle="dashed", color="0.0", #patchB=el, shrinkB=5, connectionstyle="arc3,rad=0.3", ), ) # axarr[1].annotate("Test", # xy=(3000, 10**-7), xycoords='data', # xytext=(3200, 10**-8), textcoords='data', # arrowprops=dict(arrowstyle="->", #linestyle="dashed", # color="0.0", # #patchB=el, # shrinkB=5, # connectionstyle="arc3,rad=0.3", # ), # ) # f.subplots_adjust(wspace=0) # plt.setp([a.get_yticklabels() for a in f.axes[1:]], 
visible=False) # plt.semilogy() #f.text(0.5,-0.01,r'$m_{JJ} \, / \, \mathrm{GeV}$',ha='center',fontsize=fontsize) #axarr.text(0.5,-0.01,r'$m_{JJ} \, / \, \mathrm{GeV}$',ha='center',fontsize=fontsize) axarr[0].set_ylabel(r'$p_0$',fontsize=fontsize) axarr[0].set_title(r'No signal',fontsize=fontsize) axarr[1].set_title(r'With signal',fontsize=fontsize) axarr[0].text(3700,2.75*10**-15,r'$m_{JJ} \, / \, \mathrm{GeV} $',va='top',ha='center',fontsize=fontsize) axarr[1].set_yticklabels([]) fig.add_subplot(axarr[0]) fig.add_subplot(axarr[1]) #plt.savefig('pvalplots.pdf', bbox_inches='tight') plt.show() ###Output _____no_output_____ ###Markdown Figure 14 ###Code spacing=0.05 f, axarr = plt.subplots(3,3,figsize=(5*1.4*2.5,5*2.5),sharex=True,sharey=True) for bin_i in range(3,12): # plt.close('all') row = int((bin_i-3)/3) col = (bin_i-3)%3 ax = axarr[row,col] ax.fill_between(mybinboundaries[:-1],sighist*100/mybinwidths,step='post', color='palegoldenrod',alpha=1.) if row == 2: ax.set_xlabel(r'$m_{JJ} \, / \, \mathrm{GeV}$',fontsize=fontsize) if col == 0: ax.set_ylabel(r'Events / 100 GeV',fontsize=fontsize) for line_i in [bin_i-3,bin_i-1,bin_i+2,bin_i+4]: ax.axvline(binvals[line_i],color='0.4',dashes=[5,3]) chosen_set = [0,1,4,6,9] def plotfit(i): if i == 9: return False else: return True [add_mjjplot(datasets_sig[bin_i-3,i,1:],binvals,mask=[bin_i-1,bin_i,bin_i+1],verbose=0,plotfile='ax',myax=ax, plotsys = plotfit(i)) for i in chosen_set] ax.set_ylim([2*10**-1,2*10**5]) f.subplots_adjust(hspace=spacing) f.subplots_adjust(wspace=spacing) for axrow in axarr: for ax in axrow: ax.label_outer() #plt.savefig('mJJarr_sig.pdf', bbox_inches='tight') plt.show() ###Output _____no_output_____ ###Markdown FIgure 13 ###Code spacing=0.05 f, axarr = plt.subplots(3,3,figsize=(5*1.4*2.5,5*2.5),sharex=True,sharey=True) for bin_i in range(3,12): # plt.close('all') row = int((bin_i-3)/3) col = (bin_i-3)%3 ax = axarr[row,col] if row == 2: ax.set_xlabel(r'$m_{JJ} \, / \, \mathrm{GeV}$',fontsize=fontsize) if col == 0: ax.set_ylabel(r'Events / 100 GeV',fontsize=fontsize) for line_i in [bin_i-3,bin_i-1,bin_i+2,bin_i+4]: ax.axvline(binvals[line_i],color='0.4',dashes=[5,3]) chosen_set = [0,1,4,6,9] def plotfit(i): if i == 9: return False else: return True [add_mjjplot(datasets_nosig[bin_i-3,i,1:],binvals,mask=[bin_i-1,bin_i,bin_i+1],verbose=0,plotfile='ax',myax=ax, plotsys = plotfit(i)) for i in chosen_set] ax.set_ylim([2*10**-1,2*10**5]) f.subplots_adjust(hspace=spacing) f.subplots_adjust(wspace=spacing) for axrow in axarr: for ax in axrow: ax.label_outer() #plt.savefig('mJJarr_nosig.pdf', bbox_inches='tight') plt.show() ###Output _____no_output_____ ###Markdown Calculation of p-valuesThis notebook is for generating figures 8, 13, 14 of arXiv:1805.02664 Import and initialize some functions ###Code from scipy.optimize import minimize import numpy as np import matplotlib.pyplot as plt from scipy.optimize import curve_fit from scipy.stats import poisson, norm, kstest import numdifftools from numpy.linalg import inv import matplotlib.gridspec as gridspec sigaeloss_bb = np.load('../data/sigaeloss_bb1.npy') sigaeloss_bkg = np.load('../data/sigaeloss_bkg.npy') mass_bb = np.load('../data/mass_bb1.npy') mass_bkg = np.load('../data/mass_bkg.npy') bkgaeloss_bb = np.load('../data/bkgaeloss_bb1.npy') bkgaeloss_bkg = np.load('../data/bkgaeloss_bkg.npy') ############### DEFAULT CUT ############## sigae_wp = .64 bkgae_wp = 2 index_bb = np.where((bkgaeloss_bb>bkgae_wp)&(sigaeloss_bb<sigae_wp))[0] index_bkg = 
np.where((bkgaeloss_bkg>bkgae_wp)&(sigaeloss_bkg<sigae_wp))[0] print(len(index_bb),len(index_bkg)) #### Without Scalefactor #PLOT FOR BLACK BOX 1 with REALNVP plt.style.use('ggplot') bins = np.linspace(2800,7000,30) bkg_hist = plt.hist(mass_bkg[index_bkg],bins=bins,alpha=0.3,color='r',label='background'); obs_hist = plt.hist(mass_bb[index_bb],bins=bins,alpha=0.3,color='b',label='Blackbox1'); plt.xlabel(r'$m_{JJ}$ [GeV]') plt.ylabel('Number of events') plt.legend(loc='upper right') plt.title('$m_{JJ}$ without SF') plt.show() datasets_nosig = bkg_hist[0] datasets_sig = obs_hist[0] #filenames_nosig = ["../data/finalscan_nosignal/0_005_" + str(i) + "_bincounts.dat" for i in range(3,12)] #filenames_sig = ["../data/finalscan_signal/sig_bin" + str(i) + "_bincounts.dat" for i in range(3,12)] #datasets_nosig = np.array([np.loadtxt(filename) for filename in filenames_nosig]) #datasets_sig = np.array([np.loadtxt(filename) for filename in filenames_sig]) def get_p_value(ydata,binvals,mask=[],verbose=0,plotfile=None,yerr=None,return_teststat = False,plotsys=True,myax=None): ydata = np.array(ydata) #Assume poisson is gaussian with N+1 variance if not yerr: yerr = np.sqrt(ydata+1) else: yerr=np.array(yerr) def fit_func(x,p1,p2,p3): #see the ATLAS diboson resonance search: https://arxiv.org/pdf/1708.04445.pdf. xi = 0. y = x/13000. return p1*(1.-y)**(p2-xi*p3)*y**-p3 xdata = np.array([0.5*(binvals[i]+binvals[i+1]) for i in range(0,len(binvals)-1)]) xwidths = np.array([-binvals[i]+binvals[i+1] for i in range(0,len(binvals)-1)]) #Assuming inputs are bin counts, this is needed to get densities. Important for variable-width bins ydata = np.array(ydata) * 100 / xwidths yerr = np.array(yerr)*100/ np.array(xwidths) #Least square fit, masking out the signal region popt, pcov = curve_fit(fit_func, np.delete(xdata,mask), np.delete(ydata,mask),sigma=np.delete(yerr,mask),maxfev=10000) if verbose: print('fit params: ', popt) ydata_fit = np.array([fit_func(x,popt[0],popt[1],popt[2]) for x in xdata]) #Check that the function is a good fit to the sideband residuals = np.delete((ydata - ydata_fit)/yerr,mask) if verbose > 0: print("Goodness: ",kstest(residuals, norm(loc=0,scale=1).cdf)) print(residuals) print(((ydata - ydata_fit)/yerr)[mask]) print('\n') #The following code is used to get the bin errors by propagating the errors on the fit params def fit_func_array(parr): #see the ATLAS diboson resonance search: https://arxiv.org/pdf/1708.04445.pdf. p1, p2, p3 = parr xi = 0. 
return np.array([p1*(1.-(x/13000.))**(p2-xi*p3)*(x/13000.)**-p3 for x in xdata]) jac=numdifftools.core.Jacobian(fit_func_array) x_cov=np.dot(np.dot(jac(popt),pcov),jac(popt).T) #For plot, take systematic error band as the diagonal of the covariance matrix y_unc=np.sqrt([row[i] for i, row in enumerate(x_cov)]) if (plotfile != None) & (plotfile != 'ax'): if plotsys: plt.fill_between(xdata,ydata_fit+y_unc,ydata_fit-y_unc,facecolor='gray',edgecolor=None,alpha=0.4) yerr2 = np.array(yerr) yerr2[yerr>=ydata] = yerr2[yerr>=ydata]*0.8 plt.errorbar(xdata, ydata,[yerr2,yerr],None, 'bo', label='data',markersize=4) plt.plot(xdata, ydata_fit, 'r--', label='data') plt.yscale('log', nonposy='clip') if plotfile == 'ax': if plotsys: myax.fill_between(xdata,ydata_fit+y_unc,ydata_fit-y_unc,facecolor='gray',edgecolor=None,alpha=0.4) yerr2 = np.array(yerr) yerr2[yerr>=ydata] = yerr2[yerr>=ydata]*0.8 myax.errorbar(xdata, ydata,[yerr2,yerr],None, 'bo', label='data',markersize=4) myax.plot(xdata, ydata_fit, 'r--', label='data') myax.set_yscale('log', nonposy='clip') if plotfile == 'show': plt.show() elif plotfile: plt.savefig(plotfile) #Now, let's compute some statistics. # Will use asymptotic formulae for p0 from Cowan et al arXiv:1007.1727 # and systematics procedure from https://cds.cern.ch/record/2242860/files/NOTE2017_001.pdf #First get systematics in the signal region #This function returns array of signal predictions in the signal region def signal_fit_func_array(parr): #see the ATLAS diboson resonance search: https://arxiv.org/pdf/1708.04445.pdf. p1, p2, p3 = parr xi = 0. return np.array([np.sum([p1*(1.-(x/13000.))**(p2-xi*p3)*(x/13000.)**-p3*xwidths[mask[i]]/100 for i, x in enumerate(xdata[mask])])]) #Get covariance matrix of prediction uncertainties in the signal region jac=numdifftools.core.Jacobian(signal_fit_func_array) x_signal_cov=np.dot(np.dot(jac(popt),pcov),jac(popt).T) #Inverse signal region covariance matrix: inv_x_signal_cov = inv(x_signal_cov) #Get observed and predicted event counts in the signal region obs = np.array([np.sum(np.array(ydata)[mask]*np.array(xwidths)[mask]/100)]) expected = np.array([np.sum([fit_func(xdata[targetbin],popt[0],popt[1],popt[2])*xwidths[targetbin]/100 for targetbin in mask])]) #Negative numerator of log likelihood ratio, for signal rate mu = 0 def min_log_numerator(expected_nuis_arr): #expected_nuis_arr is the array of systematic background uncertainty nuisance parameters #These are event rate densities expected_nuis_arr = np.array(expected_nuis_arr) to_return = 0 #Poisson terms for i, expected_nuis in enumerate(expected_nuis_arr): #Poisson lambda. Have to rescale nuisance constribution by bin width my_lambda = expected[i]+expected_nuis_arr[i] #Prevent negative predicted rates if my_lambda < 10**-10: my_lambda = 10**-10 #Poisson term. Ignore the factorial piece which will cancel in likelihood ratio to_return = to_return + (obs[i]*np.log(my_lambda) - my_lambda) #Gaussian nuisance term nuisance_term = -0.5*np.dot(np.dot(expected_nuis_arr,inv_x_signal_cov),expected_nuis_arr) to_return = to_return + nuisance_term return -to_return def jac_min_log_numerator(expected_nuis_arr): #expected_nuis_arr is the array of systematic background uncertainty nuisance parameters #These are event rate densities expected_nuis_arr = np.array(expected_nuis_arr) to_return = np.array([0.]) #Poisson terms #Poisson lambda. 
Have to rescale nuisance constribution by bin width my_lambda = expected+expected_nuis_arr dmy_lambda = np.array([1.]) #Prevent negative predicted rates my_lambda[my_lambda < 10**-10] = np.ones(len(my_lambda[my_lambda < 10**-10])) * 10**-10 dmy_lambda[my_lambda < 10**-10] = 0 #Poisson term. Ignore the factorial piece which will cancel in likelihood ratio to_return = to_return + (obs*dmy_lambda/my_lambda - dmy_lambda) #Gaussian nuisance term nuisance_term = -np.dot(inv_x_signal_cov,expected_nuis_arr) to_return = to_return + nuisance_term return -to_return #Initialization of nuisance params expected_nuis_array_init = [0.02] #shift log likelihood to heklp minimization algo def rescaled_min_log_numerator(expected_nuis_arr): return min_log_numerator(expected_nuis_arr) - min_log_numerator(expected_nuis_array_init) #Perform minimization over nuisance parameters. Set bounds for bg nuisance at around 8 sigma. bnds=[[-8*y_unc[mask[0]],8*y_unc[mask[0]]]] minimize_log_numerator = minimize(rescaled_min_log_numerator, expected_nuis_array_init, jac=jac_min_log_numerator, bounds=bnds) if verbose: print("numerator: ", minimize_log_numerator.items(),'\n') #Now get likelihood ratio denominator def min_log_denom(nuis_arr): #nuis_arr contains the bg systematics and also the signal rate expected_nuis_arr = np.array(nuis_arr)[:1] #print(expected_nuis_arr) mu = nuis_arr[1] #Signal prediction pred = [mu] to_return = 0 #Poisson terms for i, expected_nuis in enumerate(expected_nuis_arr): #Poisson lambda my_lambda = expected[i]+expected_nuis_arr[i] + pred[i] #Prevent prediction from going negative if my_lambda < 10**-10: my_lambda = 10**-10 #Poisson term. Ignore the factorial piece which will cancel in likelihood ratio to_return = to_return + (obs[i]*np.log(my_lambda) - my_lambda) #Gaussian nuisance term nuisance_term = -0.5*np.dot(np.dot(expected_nuis_arr,inv_x_signal_cov),expected_nuis_arr) to_return = to_return + nuisance_term return -to_return def jac_min_log_denom(nuis_arr): #expected_nuis_arr is the array of systematic background uncertainty nuisance parameters #These are event rate densities expected_nuis_arr = np.array(nuis_arr)[:1] mu = nuis_arr[1] pred = [mu] to_return_first = np.array([0.]) #Poisson terms #Poisson lambda. Have to rescale nuisance constribution by bin width my_lambda = expected+expected_nuis_arr+pred dmy_lambda = np.array([1.]) #Prevent prediction from going negative my_lambda[my_lambda < 10**-10] = np.ones(len(my_lambda[my_lambda < 10**-10])) * 10**-10 dmy_lambda[my_lambda < 10**-10] = 0 #Poisson term. Ignore the factorial piece which will cancel in likelihood ratio to_return_first = to_return_first + (obs*dmy_lambda/my_lambda - dmy_lambda) #Gaussian nuisance term nuisance_term = -np.dot(inv_x_signal_cov,expected_nuis_arr) to_return_first = to_return_first + nuisance_term to_return_last = np.array([0.]) dpred = np.array([[1.]]) my_lambda = expected+expected_nuis_arr+pred dmy_lambda = dpred to_return_last = np.dot((obs/my_lambda),dmy_lambda.T) - np.sum(dmy_lambda,axis=1) return -np.append(to_return_first, to_return_last) #initizalization for minimization nuis_array_init = [0.01,1.] #Shift log likelihood for helping minimization algo. 
def rescaled_min_log_denom(nuis_arr): return min_log_denom(nuis_arr) - min_log_denom(nuis_array_init) bnds = ((None,None),(None,None)) minimize_log_denominator = minimize(rescaled_min_log_denom,nuis_array_init, jac=jac_min_log_denom, bounds=bnds) if verbose: print("Denominator: ", minimize_log_denominator.items(),'\n') if minimize_log_denominator.x[-1] < 0: Zval = 0 neglognum = 0 neglogden = 0 else: neglognum = min_log_numerator(minimize_log_numerator.x) neglogden = min_log_denom(minimize_log_denominator.x) Zval = np.sqrt(2*(neglognum - neglogden)) p0 = 1-norm.cdf(Zval) if verbose: print("z = ", Zval) print("p0 = ", p0) #plt.title(str(p0)) # if plotfile == 'show': # plt.show() # elif plotfile: # plt.savefig(plotfile) if return_teststat: return p0, 2*(neglognum - neglogden) else: return p0 def add_mjjplot(ydata,binvals,mask=[],verbose=0,plotfile=None,yerr=None,plotsys=True,myax=None,plotfit=True): ydata = np.array(ydata) #Assume poisson is gaussian with N+1 variance if not yerr: yerr = np.sqrt(ydata+1) else: yerr=np.array(yerr) def fit_func(x,p1,p2,p3): #see the ATLAS diboson resonance search: https://arxiv.org/pdf/1708.04445.pdf. xi = 0. y = x/13000. return p1*(1.-y)**(p2-xi*p3)*y**-p3 xdata = np.array([0.5*(binvals[i]+binvals[i+1]) for i in range(0,len(binvals)-1)]) xwidths = np.array([-binvals[i]+binvals[i+1] for i in range(0,len(binvals)-1)]) #Assuming inputs are bin counts, this is needed to get densities. Important for variable-width bins ydata = np.array(ydata) * 100 / xwidths yerr = np.array(yerr)*100/ np.array(xwidths) #Least square fit, masking out the signal region popt, pcov = curve_fit(fit_func, np.delete(xdata,mask), np.delete(ydata,mask), sigma=np.delete(yerr,mask),maxfev=10000) if verbose: print('fit params: ', popt) ydata_fit = np.array([fit_func(x,popt[0],popt[1],popt[2]) for x in xdata]) #Check that the function is a good fit to the sideband residuals = np.delete((ydata - ydata_fit)/yerr,mask) if verbose > 0: print("Goodness: ",kstest(residuals, norm(loc=0,scale=1).cdf)) print(residuals) print(((ydata - ydata_fit)/yerr)[mask]) print('\n') #The following code is used to get the bin errors by propagating the errors on the fit params def fit_func_array(parr): #see the ATLAS diboson resonance search: https://arxiv.org/pdf/1708.04445.pdf. p1, p2, p3 = parr xi = 0. 
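# Smoothly falling three-parameter dijet shape p1*(1-x)^(p2-xi*p3) * x^(-p3),
# with x = mJJ/13000 (mJJ in GeV) and xi fixed to 0, evaluated at each bin center in xdata.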
return np.array([p1*(1.-(x/13000.))**(p2-xi*p3)*(x/13000.)**-p3 for x in xdata]) jac=numdifftools.core.Jacobian(fit_func_array) x_cov=np.dot(np.dot(jac(popt),pcov),jac(popt).T) #For plot, take systematic error band as the diagonal of the covariance matrix y_unc=np.sqrt([row[i] for i, row in enumerate(x_cov)]) if (plotfile != None) & (plotfile != 'ax'): if plotfit: if plotsys: plt.fill_between(xdata,ydata_fit+y_unc,ydata_fit-y_unc,color='gray',alpha=0.4) plt.plot(xdata, ydata_fit, 'r--', label='data') plt.errorbar(xdata, ydata,yerr,None, 'bo', label='data',markersize=4) plt.yscale('log', nonposy='clip') if plotfile == 'ax': if plotfit: if plotsys: myax.fill_between(xdata,ydata_fit+y_unc,ydata_fit-y_unc,color='gray',alpha=0.4) myax.plot(xdata, ydata_fit, 'r--', label='data') myax.errorbar(xdata, ydata,yerr,None, 'bo', label='data',markersize=4) myax.set_yscale('log', nonposy='clip') if plotfile == 'show': plt.show() elif plotfile: plt.savefig(plotfile) ###Output _____no_output_____ ###Markdown Define the binning ###Code bins binvals = bins #binvals = [#1900.,2001.,2107.,2219.,2337.,2461.,2592.,2730., # 2875.,3028.,3189.,3358.,3536.,3724.,3922.,4131.,4350.,4500,4700,4900,5100,5300,5500,5700,5900,6100] bincenters = [0.5*(binvals[i] + binvals[i+1]) for i in range(len(binvals)-1)] len(bincenters) masks = [[bin_i-1,bin_i, bin_i+1] for bin_i in range(1,len(binvals)-2)] print(masks) ###Output [[0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 6], [5, 6, 7], [6, 7, 8], [7, 8, 9], [8, 9, 10], [9, 10, 11], [10, 11, 12], [11, 12, 13], [12, 13, 14], [13, 14, 15], [14, 15, 16], [15, 16, 17], [16, 17, 18], [17, 18, 19], [18, 19, 20], [19, 20, 21], [20, 21, 22], [21, 22, 23], [22, 23, 24], [23, 24, 25], [24, 25, 26], [25, 26, 27], [26, 27, 28]] ###Markdown Calculate and plot p-values for mass scanFigure 8, right ###Code pvalues_sig = [get_p_value(datasets_sig[0:],binvals,mask=mask,verbose=0,plotfile=None) for i, mask in enumerate(masks)] pvalues_nosig = [get_p_value(datasets_nosig[0:],binvals,mask=mask,verbose=0,plotfile=None) for i, mask in enumerate(masks)] plt.plot(bincenters[:-2],pvalues_nosig) plt.semilogy() f.text(0.5,-0.01,r'$m_{JJ} \, / \, \mathrm{GeV}$',ha='center',fontsize=fontsize) plt.ylabel(r'$p_0$',fontsize=fontsize) plt.title(r'No signal') #plt.title(r'With signal') # plt.xlabel(r'$m_{JJ}$') #plt.ylim([0.5*10**(-13),1]) #plt.tight_layout() #plt.savefig('pvalue_plots.pdf', bbox_inches='tight') plt.show() plt.plot(bincenters[:-2],pvalues_sig) plt.semilogy() f.text(0.5,-0.01,r'$m_{JJ} \, / \, \mathrm{GeV}$',ha='center',fontsize=fontsize) plt.ylabel(r'$p_0$',fontsize=fontsize) #plt.title(r'No signal') plt.title(r'With signal') plt.axvline(x=3823,color='b') # plt.xlabel(r'$m_{JJ}$') #plt.ylim([0.5*10**(-13),1]) #plt.tight_layout() #plt.savefig('pvalue_plots.pdf', bbox_inches='tight') plt.show() fontsize=22 smfontsize=16 #binvals = [#1900., # 2001.,2107.,2219.,2337.,2461.,2592.,2730.,2875.,3028.,3189.,3358.,3536.,3724.,3922.,4131.,4350.] 
#bincenters = [0.5*(binvals[i] + binvals[i+1]) for i in range(len(binvals)-1)] bincenters = bincenters[3:18] masks = [[bin_i-1,bin_i, bin_i+1] for bin_i in range(3,18)] plt.close('all') f, axarr = plt.subplots(1,2, sharex=True, sharey=True,figsize=(5*1.4,5)) # plt.figure(figsize=(5,5)) linestyles = [{'dashes':[5,5]},{'linestyle':'-'},{'linestyle':'-'},{'linestyle':'-'},{'linestyle':'-'}] choices = [0,1,4,6] colors = ['black'] colors.extend([plt.cm.magma(i) for i in np.linspace(0.1,0.8,len(choices)-1)]) # colors = ['black'] # colors.extend([plt.cm.viridis(i) for i in np.linspace(0.1,0.95,len(choices)-1)]) effs = [r"100\%",r"10\%",r"1\%",r"0.2\%"] dashes = [5,5] color='0.5' linewidth=1.2 for ax in axarr: for sigma in range(1,8): ax.axhline(1-norm.cdf(sigma),dashes=dashes,color=color,linewidth=linewidth) if sigma > 2: sigmastring = r'$' + str(sigma) + '\sigma$' axarr[0].text(2500,(1-norm.cdf(sigma))*1.1,sigmastring,va='bottom',ha='center',fontsize=smfontsize) for eff_i, effchoice in enumerate(choices): pvalues_nosig = [get_p_value(ydata[1:],binvals,mask=masks[i],verbose=0,plotfile=None) for i, ydata in enumerate(datasets_nosig)] pvalues_sig = [get_p_value(ydata[1:],binvals,mask=masks[i],verbose=0,plotfile=None) for i, ydata in enumerate(datasets_sig)] # plt.subplot(1,2,1) axarr[0].plot(bincenters,pvalues_nosig, color=colors[eff_i], **linestyles[eff_i]) #plt.subplot(1,2,2) axarr[1].plot(bincenters,pvalues_sig, color=colors[eff_i], **linestyles[eff_i]) if eff_i > 0: axarr[1].annotate(effs[eff_i],fontsize=smfontsize, xy=(bincenters[4],pvalues_sig[4]), xycoords='data', xytext=(bincenters[4]+200, pvalues_sig[4]/10), textcoords='data', arrowprops=dict(arrowstyle="-|>", #linestyle="dashed", color="0.0", #patchB=el, shrinkB=5, connectionstyle="arc3,rad=0.3", ), ) # axarr[1].annotate("Test", # xy=(3000, 10**-7), xycoords='data', # xytext=(3200, 10**-8), textcoords='data', # arrowprops=dict(arrowstyle="->", #linestyle="dashed", # color="0.0", # #patchB=el, # shrinkB=5, # connectionstyle="arc3,rad=0.3", # ), # ) f.subplots_adjust(wspace=0) #plt.setp([a.get_yticklabels() for a in f.axes[1:]], visible=False) plt.semilogy() f.text(0.5,-0.01,r'$m_{JJ} \, / \, \mathrm{GeV}$',ha='center',fontsize=fontsize) axarr[0].set_ylabel(r'$p_0$',fontsize=fontsize) axarr[0].set_title(r'No signal') axarr[1].set_title(r'With signal') # plt.xlabel(r'$m_{JJ}$') plt.ylim([0.5*10**(-13),1]) #plt.tight_layout() #plt.savefig('pvalue_plots.pdf', bbox_inches='tight') plt.show() fontsize=22 smfontsize=16 #binvals = [#1900., # 2001.,2107.,2219.,2337.,2461.,2592.,2730.,2875.,3028.,3189.,3358.,3536.,3724.,3922.,4131.,4350.] 
#bincenters = [0.5*(binvals[i] + binvals[i+1]) for i in range(len(binvals)-1)] bincenters = bincenters[3:18] masks = [[bin_i-1,bin_i, bin_i+1] for bin_i in range(3,18)] plt.close('all') f, axarr = plt.subplots(1,2, sharex=True, sharey=True,figsize=(5*1.4,5)) # plt.figure(figsize=(5,5)) linestyles = [{'dashes':[5,5]},{'linestyle':'-'},{'linestyle':'-'},{'linestyle':'-'},{'linestyle':'-'}] choices = [0,1,4,6] colors = ['black'] colors.extend([plt.cm.magma(i) for i in np.linspace(0.1,0.8,len(choices)-1)]) # colors = ['black'] # colors.extend([plt.cm.viridis(i) for i in np.linspace(0.1,0.95,len(choices)-1)]) effs = [r"100\%",r"10\%",r"1\%",r"0.2\%"] dashes = [5,5] color='0.5' linewidth=1.2 for ax in axarr: for sigma in range(1,8): ax.axhline(1-norm.cdf(sigma),dashes=dashes,color=color,linewidth=linewidth) if sigma > 2: sigmastring = r'$' + str(sigma) + '\sigma$' axarr[0].text(2500,(1-norm.cdf(sigma))*1.1,sigmastring,va='bottom',ha='center',fontsize=smfontsize) for eff_i, effchoice in enumerate(choices): pvalues_nosig = [get_p_value(ydata[1:],binvals,mask=masks[i],verbose=0,plotfile=None) for i, ydata in enumerate(datasets_nosig[:,effchoice])] pvalues_sig = [get_p_value(ydata[1:],binvals,mask=masks[i],verbose=0,plotfile=None) for i, ydata in enumerate(datasets_sig[:,effchoice])] # plt.subplot(1,2,1) axarr[0].plot(bincenters,pvalues_nosig, color=colors[eff_i], **linestyles[eff_i]) #plt.subplot(1,2,2) axarr[1].plot(bincenters,pvalues_sig, color=colors[eff_i], **linestyles[eff_i]) if eff_i > 0: axarr[1].annotate(effs[eff_i],fontsize=smfontsize, xy=(bincenters[4],pvalues_sig[4]), xycoords='data', xytext=(bincenters[4]+200, pvalues_sig[4]/10), textcoords='data', arrowprops=dict(arrowstyle="-|>", #linestyle="dashed", color="0.0", #patchB=el, shrinkB=5, connectionstyle="arc3,rad=0.3", ), ) # axarr[1].annotate("Test", # xy=(3000, 10**-7), xycoords='data', # xytext=(3200, 10**-8), textcoords='data', # arrowprops=dict(arrowstyle="->", #linestyle="dashed", # color="0.0", # #patchB=el, # shrinkB=5, # connectionstyle="arc3,rad=0.3", # ), # ) f.subplots_adjust(wspace=0) #plt.setp([a.get_yticklabels() for a in f.axes[1:]], visible=False) plt.semilogy() f.text(0.5,-0.01,r'$m_{JJ} \, / \, \mathrm{GeV}$',ha='center',fontsize=fontsize) axarr[0].set_ylabel(r'$p_0$',fontsize=fontsize) axarr[0].set_title(r'No signal') axarr[1].set_title(r'With signal') # plt.xlabel(r'$m_{JJ}$') plt.ylim([0.5*10**(-13),1]) #plt.tight_layout() #plt.savefig('pvalue_plots.pdf', bbox_inches='tight') plt.show() ###Output _____no_output_____ ###Markdown Injected signal ###Code sighist=np.array([ 7, 22, 31, 54, 75, 106, 192, 217, 113, 40, 8, 5, 4, 1, 0]) mjjmin=2001 mjjmax=4350 mybinboundaries = np.round(np.logspace(np.log10(mjjmin), np.log10(mjjmax), num=16)) mybincenters = np.array([0.5*(mybinboundaries[i+1] + mybinboundaries[i]) for i in range(0,len(mybinboundaries)-1)]) mybinwidths = np.array([mybinboundaries[i+1] - mybinboundaries[i] for i in range(0,len(mybinboundaries)-1)]) plt.fill_between(mybinboundaries[:-1],sighist*100/mybinwidths,step='post',color='0.8') ###Output _____no_output_____ ###Markdown Figure 8, left ###Code plt.figure(figsize=(5*1.4,5)) for line_i in [4,6,9,11]: plt.axvline(binvals[line_i],color='0.4',dashes=[5,3]) plt.fill_between(mybinboundaries[:-1],sighist*100/mybinwidths,step='post', color='palegoldenrod',alpha=1.) 
signallabel_y=0.15*10**6 plt.annotate("",xy=(binvals[6],signallabel_y), xytext = (binvals[9],signallabel_y), arrowprops=dict(arrowstyle='<->')) plt.text(0.5*(binvals[6] + binvals[9]),signallabel_y,r"Signal"'\n'r"region",va='center',ha='center',fontsize=smfontsize, bbox=dict(facecolor='white',edgecolor='none', alpha=1.0)) sidebandlabel_y=1.0*10**-1 plt.annotate("",xy=(binvals[4],sidebandlabel_y), xytext = (binvals[6],sidebandlabel_y), arrowprops=dict(arrowstyle='<->')) plt.annotate("",xy=(binvals[9],sidebandlabel_y), xytext = (binvals[11],sidebandlabel_y), arrowprops=dict(arrowstyle='<->')) plt.annotate(r"Sideband", xytext=(2000,sidebandlabel_y*4/3), xy = (0.5*(binvals[9] + binvals[11]),sidebandlabel_y), fontsize = smfontsize, color='white', arrowprops=dict(arrowstyle="-|>", #linestyle="dashed", color="0.0", #patchB=el, shrinkB=5, connectionstyle="arc3,rad=-0.2", ),) plt.annotate(r"Sideband", xytext=(2000,sidebandlabel_y*4/3), xy = (0.5*(binvals[4] + binvals[6]),sidebandlabel_y), fontsize = smfontsize, arrowprops=dict(arrowstyle="-|>", #linestyle="dashed", color="0.0", #patchB=el, shrinkB=5, connectionstyle="arc3,rad=-0.3", ),) chosen_set = [0,1,4,6,9] plt.ylabel(r'Events / 100 GeV',fontsize=fontsize) plt.xlabel(r'$m_{JJ} \, / \, \mathrm{GeV}$',fontsize=fontsize) plt.ylim([2*10**-2,10**6]) for i in chosen_set: add_mjjplot(datasets_sig[4,i,1:],binvals,mask=[6,7,8],verbose=0,plotfile='tmp.png') plt.savefig('/mnt/c/Users/Jack/Physics/jj_for_NN/mJJplots_' + str(i) + '.png', bbox_inches='tight') #plt.xlim([2001,4350]) plt.show() ###Output _____no_output_____ ###Markdown Figure 8 ###Code import matplotlib.gridspec as gridspec fontsize=20 smfontsize=16 binvals = [#1900., 2001.,2107.,2219.,2337.,2461.,2592.,2730.,2875.,3028.,3189.,3358.,3536.,3724.,3922.,4131.,4350.] bincenters = [0.5*(binvals[i] + binvals[i+1]) for i in range(len(binvals)-1)] bincenters = bincenters[3:12] masks = [[bin_i-1,bin_i, bin_i+1] for bin_i in range(3,12)] plt.close('all') fig = plt.figure(figsize=(16,5)) outer = gridspec.GridSpec(1,2,wspace=0.3,width_ratios=[5,5]) plt_i = 0 inner = gridspec.GridSpecFromSubplotSpec(1, 1, subplot_spec=outer[0]) ax = plt.Subplot(fig, inner[0]) ax.fill_between(mybinboundaries[:-1],sighist*100/mybinwidths,step='post', color='palegoldenrod',alpha=1.) 
for line_i in [4,6,9,11]: ax.axvline(binvals[line_i],color='0.4',dashes=[5,3]) signallabel_y=0.15*10**6 ax.annotate("",xy=(binvals[6],signallabel_y), xytext = (binvals[9],signallabel_y), arrowprops=dict(arrowstyle='<->')) ax.text(0.5*(binvals[6] + binvals[9]),signallabel_y,r"Signal"'\n'r"region",va='center',ha='center',fontsize=smfontsize, bbox=dict(facecolor='white',edgecolor='none', alpha=1.0)) sidebandlabel_y=0.8*10**-1 ax.annotate("",xy=(binvals[4],sidebandlabel_y), xytext = (binvals[6],sidebandlabel_y), arrowprops=dict(arrowstyle='<->')) ax.annotate("",xy=(binvals[9],sidebandlabel_y), xytext = (binvals[11],sidebandlabel_y), arrowprops=dict(arrowstyle='<->')) ax.annotate(r"Sideband", xytext=(2040,sidebandlabel_y*4/3), xy = (0.5*(binvals[9] + binvals[11]),sidebandlabel_y), fontsize = smfontsize, color='white', arrowprops=dict(arrowstyle="-|>", #linestyle="dashed", color="0.0", #patchB=el, shrinkB=5, connectionstyle="arc3,rad=-0.2", ),) ax.annotate(r"Sideband", xytext=(2040,sidebandlabel_y*4/3), xy = (0.5*(binvals[4] + binvals[6]),sidebandlabel_y), fontsize = smfontsize, arrowprops=dict(arrowstyle="-|>", #linestyle="dashed", color="0.0", #patchB=el, shrinkB=5, connectionstyle="arc3,rad=-0.3", ),) chosen_set = [0,1,4,6,9] [get_p_value(datasets_sig[4,i,1:],binvals,mask=[6,7,8],verbose=0,plotfile='ax',myax=ax) for i in chosen_set] ax.set_ylabel(r'Events / 100 GeV',fontsize=fontsize) ax.set_xlabel(r'$m_{JJ} \, / \, \mathrm{GeV}$',fontsize=fontsize) ax.set_ylim([2*10**-2,10**6]) ax.set_xlim([2001,4350]) fig.add_subplot(ax) inner = gridspec.GridSpecFromSubplotSpec(1, 2, subplot_spec=outer[1], wspace=0) axarr = [plt.Subplot(fig, inner[0]),plt.Subplot(fig, inner[1])] dashes = [5,5] color='0.5' linewidth=1.2 linestyles = [{'dashes':[5,5]},{'linestyle':'-'},{'linestyle':'-'},{'linestyle':'-'},{'linestyle':'-'}] choices = [0,1,4,6] colors = ['black'] colors.extend([plt.cm.magma(i) for i in np.linspace(0.1,0.8,len(choices)-1)]) # colors = ['black'] # colors.extend([plt.cm.viridis(i) for i in np.linspace(0.1,0.95,len(choices)-1)]) effs = [r"100\%",r"10\%",r"1\%",r"0.2\%"] for ax in axarr: ax.set_yscale('log') ax.set_ylim([2.*10**-14,1]) for sigma in range(1,8): ax.axhline(1-norm.cdf(sigma),dashes=dashes,color=color,linewidth=linewidth) if sigma > 2: sigmastring = r'$' + str(sigma) + '\sigma$' axarr[0].text(2500,(1-norm.cdf(sigma))*1.1,sigmastring,va='bottom',ha='center',fontsize=smfontsize) for eff_i, effchoice in enumerate(choices): pvalues_nosig = [get_p_value(ydata[1:],binvals,mask=masks[i],verbose=0,plotfile=None) for i, ydata in enumerate(datasets_nosig[:,effchoice])] pvalues_sig = [get_p_value(ydata[1:],binvals,mask=masks[i],verbose=0,plotfile=None) for i, ydata in enumerate(datasets_sig[:,effchoice])] axarr[0].plot(bincenters,pvalues_nosig, color=colors[eff_i], **linestyles[eff_i]) axarr[1].plot(bincenters,pvalues_sig, color=colors[eff_i], **linestyles[eff_i]) if eff_i > 0: axarr[1].annotate(effs[eff_i],fontsize=smfontsize, xy=(bincenters[4],pvalues_sig[4]), xycoords='data', xytext=(bincenters[4]+200, pvalues_sig[4]/10), textcoords='data', arrowprops=dict(arrowstyle="-|>", #linestyle="dashed", color="0.0", #patchB=el, shrinkB=5, connectionstyle="arc3,rad=0.3", ), ) # axarr[1].annotate("Test", # xy=(3000, 10**-7), xycoords='data', # xytext=(3200, 10**-8), textcoords='data', # arrowprops=dict(arrowstyle="->", #linestyle="dashed", # color="0.0", # #patchB=el, # shrinkB=5, # connectionstyle="arc3,rad=0.3", # ), # ) # f.subplots_adjust(wspace=0) # plt.setp([a.get_yticklabels() for a in f.axes[1:]], 
visible=False) # plt.semilogy() #f.text(0.5,-0.01,r'$m_{JJ} \, / \, \mathrm{GeV}$',ha='center',fontsize=fontsize) #axarr.text(0.5,-0.01,r'$m_{JJ} \, / \, \mathrm{GeV}$',ha='center',fontsize=fontsize) axarr[0].set_ylabel(r'$p_0$',fontsize=fontsize) axarr[0].set_title(r'No signal',fontsize=fontsize) axarr[1].set_title(r'With signal',fontsize=fontsize) axarr[0].text(3700,2.75*10**-15,r'$m_{JJ} \, / \, \mathrm{GeV} $',va='top',ha='center',fontsize=fontsize) axarr[1].set_yticklabels([]) fig.add_subplot(axarr[0]) fig.add_subplot(axarr[1]) #plt.savefig('pvalplots.pdf', bbox_inches='tight') plt.show() ###Output _____no_output_____ ###Markdown Figure 14 ###Code spacing=0.05 f, axarr = plt.subplots(3,3,figsize=(5*1.4*2.5,5*2.5),sharex=True,sharey=True) for bin_i in range(3,12): # plt.close('all') row = int((bin_i-3)/3) col = (bin_i-3)%3 ax = axarr[row,col] ax.fill_between(mybinboundaries[:-1],sighist*100/mybinwidths,step='post', color='palegoldenrod',alpha=1.) if row == 2: ax.set_xlabel(r'$m_{JJ} \, / \, \mathrm{GeV}$',fontsize=fontsize) if col == 0: ax.set_ylabel(r'Events / 100 GeV',fontsize=fontsize) for line_i in [bin_i-3,bin_i-1,bin_i+2,bin_i+4]: ax.axvline(binvals[line_i],color='0.4',dashes=[5,3]) chosen_set = [0,1,4,6,9] def plotfit(i): if i == 9: return False else: return True [add_mjjplot(datasets_sig[bin_i-3,i,1:],binvals,mask=[bin_i-1,bin_i,bin_i+1],verbose=0,plotfile='ax',myax=ax, plotsys = plotfit(i)) for i in chosen_set] ax.set_ylim([2*10**-1,2*10**5]) f.subplots_adjust(hspace=spacing) f.subplots_adjust(wspace=spacing) for axrow in axarr: for ax in axrow: ax.label_outer() #plt.savefig('mJJarr_sig.pdf', bbox_inches='tight') plt.show() ###Output _____no_output_____ ###Markdown FIgure 13 ###Code spacing=0.05 f, axarr = plt.subplots(3,3,figsize=(5*1.4*2.5,5*2.5),sharex=True,sharey=True) for bin_i in range(3,12): # plt.close('all') row = int((bin_i-3)/3) col = (bin_i-3)%3 ax = axarr[row,col] if row == 2: ax.set_xlabel(r'$m_{JJ} \, / \, \mathrm{GeV}$',fontsize=fontsize) if col == 0: ax.set_ylabel(r'Events / 100 GeV',fontsize=fontsize) for line_i in [bin_i-3,bin_i-1,bin_i+2,bin_i+4]: ax.axvline(binvals[line_i],color='0.4',dashes=[5,3]) chosen_set = [0,1,4,6,9] def plotfit(i): if i == 9: return False else: return True [add_mjjplot(datasets_nosig[bin_i-3,i,1:],binvals,mask=[bin_i-1,bin_i,bin_i+1],verbose=0,plotfile='ax',myax=ax, plotsys = plotfit(i)) for i in chosen_set] ax.set_ylim([2*10**-1,2*10**5]) f.subplots_adjust(hspace=spacing) f.subplots_adjust(wspace=spacing) for axrow in axarr: for ax in axrow: ax.label_outer() #plt.savefig('mJJarr_nosig.pdf', bbox_inches='tight') plt.show() ###Output _____no_output_____
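###Markdown
For reference, the significance returned by `get_p_value` above follows the asymptotic recipe of Cowan et al. (arXiv:1007.1727) cited in the code comments. Writing $L(\mu,\theta)$ for the likelihood with signal strength $\mu$ and background nuisance parameters $\theta$, the quantities `neglognum` and `neglogden` are $-\ln L(0,\hat{\hat{\theta}})$ and $-\ln L(\hat{\mu},\hat{\theta})$, so that

$$q_0 = 2\left[\ln L(\hat{\mu},\hat{\theta}) - \ln L(0,\hat{\hat{\theta}})\right], \qquad Z = \sqrt{q_0}, \qquad p_0 = 1 - \Phi(Z),$$

with $Z$ set to zero whenever the fitted signal strength comes out negative, exactly as in the code above.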
experiments/paper_eval.ipynb
###Markdown
Evaluation

This notebook reproduces the evaluation results from the paper.

Note: As of version 2.1b6.dev234, the Essentia library has a [bug](https://github.com/MTG/essentia/issues/1054) that causes an infinite loop for some inputs. To avoid this, you have to build our patched version of Essentia: https://github.com/cifkao/essentia/tree/patched

Copyright 2020 InterDigital R&D and Télécom Paris. Author: Ondřej Cífka

Obtaining the outputs

Before running the evaluation, we need to obtain the outputs of all the systems on both of our test sets and place them in the `outputs/synth` and `outputs/real` directories (for the artificial and real inputs, respectively). The commands are different for each system:

VQ-VAE
```sh
python -m ss_vq_vae.models.vqvae_oneshot --logdir=model run ../data/lmd/audio_test/pairs \
    outputs/synth/vqvae_list outputs/synth/vqvae
python -m ss_vq_vae.models.vqvae_oneshot --logdir=model run ../data/mixing_secrets/test/pairs \
    outputs/real/vqvae_list outputs/real/vqvae
```
The first command runs the model on all audio file pairs listed in the `../data/lmd/audio_test/pairs` file, writes the output files to the `outputs/synth/vqvae` directory and their paths to the file `outputs/synth/vqvae_list`. The second command does the same for the other test set.

U+L (Ulyanov and Lebedev)
```sh
python -m ss_vq_vae.models.ulyanov --style-weight-log=-2.1 ../data/lmd/audio_test/pairs \
    outputs/synth/ulyanov_swopt_list outputs/synth/ulyanov
python -m ss_vq_vae.models.ulyanov --style-weight-log=-2.1 ../data/mixing_secrets/test/pairs \
    outputs/real/ulyanov_swopt_list outputs/real/ulyanov
```

Musaicing (Driedger et al.)
Clone Chris Tralie's [LetItBee repo](https://github.com/ctralie/LetItBee) and run the `Musaicing.py` script on each pair of audio files according to the instructions. Specify the content file using the `--target` option and the style file using the `--source` option, e.g.:
```sh
python LetItBee/Musaicing.py --sr 16000 \
    --source ../data/lmd/audio_test/wav_16kHz/voices1_pitch1/00484d071147e49551de9ffb141e8b9e.style.wav \
    --target ../data/lmd/audio_test/wav_16kHz/voices1_pitch1/00484d071147e49551de9ffb141e8b9e.content.wav \
    --result outputs/synth/driedger/00000.wav
```
You might want to run these commands in parallel as they are time-consuming. Remember to write the list of output files to the `outputs/{synth,real}/driedger_list` file in the correct order, so that the evaluation code can pick them up.
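For convenience, the sketch below is one way to drive `Musaicing.py` over an entire pairs file and write the corresponding output list in matching order. It is only a sketch: the `PAIRS`, `OUT_DIR` and `LIST_PATH` values are placeholders, and the two-column (content path, then style path) layout of the pairs file is an assumption borrowed from the evaluation code below, so adapt them to your setup.
```python
# Sketch only: batch-run the musaicing baseline over one pairs file and record the
# output list in matching order. PAIRS, OUT_DIR and LIST_PATH are placeholders.
import csv
import os
import subprocess

PAIRS = '../data/lmd/audio_test/pairs'    # assumed layout: content path <tab> style path
OUT_DIR = 'outputs/synth/driedger'
LIST_PATH = 'outputs/synth/driedger_list'
os.makedirs(OUT_DIR, exist_ok=True)

with open(PAIRS) as f_pairs, open(LIST_PATH, 'w') as f_list:
    for i, row in enumerate(csv.reader(f_pairs, delimiter='\t')):
        content_path, style_path = row[0], row[1]
        out_path = os.path.join(OUT_DIR, '{:05d}.wav'.format(i))
        subprocess.run(['python', 'LetItBee/Musaicing.py', '--sr', '16000',
                        '--source', style_path, '--target', content_path,
                        '--result', out_path], check=True)
        # The evaluation code below resolves list entries relative to the list file's directory.
        f_list.write(os.path.relpath(out_path, os.path.dirname(LIST_PATH)) + '\n')
```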
###Code import os import pickle import essentia.standard as estd import librosa import matplotlib.pyplot as plt import matplotlib as mpl import numpy as np import pandas as pd import pretty_midi import re import seaborn as sns from tqdm.auto import tqdm from ss_vq_vae.models import triplet_network SR = 16000 MFCC_KWARGS = dict( n_mfcc=13, hop_length=500 ) triplet_model, triplet_backbone = triplet_network.build_model(num_features=12) triplet_model.load_weights('timbre_metric/checkpoint.ckpt') def read_paths(tsv_path, column_names): parent_dir = os.path.dirname(tsv_path) df = pd.read_csv(tsv_path, sep='\t', names=column_names) df = df.applymap(lambda x: os.path.join(parent_dir, x)) return df def normalize_power(audio): return audio / (np.sqrt(np.mean(audio ** 2)) + np.finfo(audio.dtype).eps) def get_pitches(audio): input_sr, sr = SR, 8000 # Need to resample because of EqualLoudness audio = estd.Resample(inputSampleRate=input_sr, outputSampleRate=sr)(audio) audio = estd.EqualLoudness(sampleRate=sr)(audio) rng = np.random.default_rng(seed=(audio > 0).sum()) audio = rng.normal(loc=audio, scale=1e-4).astype(audio.dtype) # To prevent Melodia from crashing pitches = estd.MultiPitchMelodia(sampleRate=sr)(audio) pitches = [[pretty_midi.utilities.hz_to_note_number(p) for p in pl if not np.isclose(0, p)] for pl in pitches] pitches = [[int(p + 0.5) for p in pl] for pl in pitches] return pitches def eval_example_target(output, reference): def spec(audio): audio = normalize_power(audio) s = librosa.feature.melspectrogram(audio, sr=SR) return librosa.power_to_db(s) s_out, s_ref = spec(output), spec(reference) lsd = np.mean(np.sqrt(np.mean((s_out - s_ref) ** 2, axis=1))) return {'lsd': lsd} def eval_example_style(output, reference): mfcc_out = librosa.feature.mfcc(output, sr=SR, **MFCC_KWARGS)[1:] mfcc_ref = librosa.feature.mfcc(reference, sr=SR, **MFCC_KWARGS)[1:] mfcc_triplet_cos, _ = 1 - triplet_model.predict([ (mfcc_ref.T[None, :, :], mfcc_out.T[None, :, :], mfcc_out.T[None, :, :])]).reshape(2) return {'mfcc_triplet_cos': mfcc_triplet_cos} def eval_example_content(output, reference): pitches_output, pitches_reference = get_pitches(output), get_pitches(reference) assert len(pitches_output) == len(pitches_reference) jaccard = [] for pl_output, pl_reference in zip(pitches_output, pitches_reference): matches = len(set(pl_output) & set(pl_reference)) total = len(set(pl_output) | set(pl_reference)) if total == 0: jaccard.append(0) else: jaccard.append(1 - matches / total) jaccard = np.mean(jaccard) return {'pitch_jaccard': jaccard} def pad_or_truncate(audio, reference): if len(audio) < len(reference): return np.pad(audio, (0, max(0, len(reference) - len(audio)))) return audio[:len(reference)] def eval_row_synth(row): audio = row.apply(lambda path: librosa.load(path, sr=SR)[0]) audio = audio.apply(pad_or_truncate, reference=audio['target']) return pd.DataFrame({ key: { **eval_example_target(audio[key], audio['target']), **eval_example_style(audio[key], audio['target']), **eval_example_content(audio[key], audio['target']) } for key in row.keys() if key != 'target' }).stack() def eval_row_real(row): audio = row.apply(lambda path: librosa.load(path, sr=SR)[0]) audio_ref = audio[['content', 'style']] audio = audio.apply(pad_or_truncate, reference=audio_ref['content']) return pd.DataFrame({ key: { **eval_example_style(audio[key], audio_ref['style']), **eval_example_content(audio[key], audio_ref['content']) } for key in row.keys() }).stack() paths_synth_df = pd.concat([ read_paths('../data/lmd/audio_test/triplets', 
['content', 'style', 'target']), read_paths('outputs/synth/vq-vae_list', ['vq-vae']), read_paths('outputs/synth/driedger_list', ['driedger']), read_paths('outputs/synth/ulyanov_list', ['ulyanov']), ], axis=1) paths_real_df = pd.concat([ read_paths('../data/mixing_secrets/test/pairs', ['content', 'style']), read_paths('outputs/real/vq-vae_list', ['vq-vae']), read_paths('outputs/real/driedger_list', ['driedger']), read_paths('outputs/real/ulyanov_list', ['ulyanov']) ], axis=1) with tqdm(total=len(paths_synth_df)) as pbar: pbar.update(-1) def fn(x): y = eval_row_synth(x) pbar.update(1) return y results_synth = paths_synth_df.apply(fn, axis=1) with tqdm(total=len(paths_real_df)) as pbar: pbar.update(-1) def fn(x): y = eval_row_real(x) pbar.update(1) return y results_real = paths_real_df.apply(fn, axis=1) results_synth.to_pickle('results_synth.pickle') results_real.to_pickle('results_real.pickle') results_synth = pd.read_pickle('results_synth.pickle') results_real = pd.read_pickle('results_real.pickle') results_all = pd.concat([results_synth, results_real], axis=1, keys=['synth', 'real']) pd.DataFrame(results_all.mean()).unstack(level=0).unstack(level=0).droplevel(axis=1, level=0).drop(('real', 'lsd'), axis=1) latex = (pd.DataFrame(results_all.mean()) .unstack(level=0).unstack(level=0) .droplevel(axis=1, level=0) .drop(('real', 'lsd'), axis=1) .loc[['content', 'style', 'ulyanov', 'driedger', 'vq-vae']] .to_latex(formatters=[x.format for x in ['{:0.2f}', '{:0.4f}', '{:0.4f}', '{:0.4f}', '{:0.4f}']])) latex = re.sub(r' +', ' ', latex) print(latex) ###Output \begin{tabular}{lrrrrr} \toprule {} & \multicolumn{3}{l}{synth} & \multicolumn{2}{l}{real} \\ {} & lsd & mfcc\_triplet\_cos & pitch\_jaccard & mfcc\_triplet\_cos & pitch\_jaccard \\ \midrule content & 14.62 & 0.3713 & 0.5365 & 0.4957 & 0.0000 \\ style & 20.36 & 0.2681 & 0.8729 & 0.0000 & 0.9099 \\ ulyanov & 14.50 & 0.3483 & 0.5441 & 0.4792 & 0.1315 \\ driedger & 14.51 & 0.2933 & 0.6445 & 0.2319 & 0.6297 \\ vq-vae & 12.16 & 0.2063 & 0.5500 & 0.2278 & 0.6197 \\ \bottomrule \end{tabular}
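###Markdown
For reference, the `pitch_jaccard` metric reported above is the frame-wise Jaccard distance between the sets of pitches estimated by MultiPitchMelodia for the output and for the reference, averaged over frames, as computed in `eval_example_content`:

$$d_{\mathrm{pitch}} = \frac{1}{T}\sum_{t=1}^{T}\left(1 - \frac{\lvert P^{\mathrm{out}}_t \cap P^{\mathrm{ref}}_t\rvert}{\lvert P^{\mathrm{out}}_t \cup P^{\mathrm{ref}}_t\rvert}\right),$$

with a frame contributing zero when both pitch sets are empty.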
src/00_appToCategory.ipynb
###Markdown 数据准备部分 ###Code import numpy as np import pandas as pd from sklearn.impute import SimpleImputer import re import gc from tqdm import tqdm age_test_data = pd.read_csv(r'../data/age_test.csv',header=None,names=['uId']) age_train_data = pd.read_csv( r'../data/age_train.csv',header=None,names=['uId','age_group']) age = age_train_data['age_group'] age_train_data.drop(columns=['age_group'],inplace=True) print(age_train_data.shape) #(2010000, 1) print(age_test_data.shape) user_app_actived_data = pd.read_csv(r'../data/user_app_actived.csv',header=None,names=['uId','appId']) print(user_app_actived_data.head());print(user_app_actived_data.shape) user_numAppList = {} # {uId:[num,appList]} for user in tqdm(user_app_actived_data.values): appList = re.split(r'\#',user[1]) if appList[0] == '\\N': user_numAppList[user[0]] = [] l = 0 else: user_numAppList[user[0]] = appList l = len(appList) user_numAppList[user[0]].insert(0,l) # print(user_numAppList[1002181]) # print(user_numAppList[1002179]) del user_app_actived_data gc.collect() app_info_data = pd.read_csv(r'../data/app_info.csv',header=None,names=['appId','category']) data = app_info_data['category'].value_counts(dropna=False) print(data.index) print(data.values) app_category = {} #{appId:[category]} for aid in app_info_data.values: app_category.setdefault(aid[0],[]).append(aid[1]) print('总共有多少个app: ',len(app_category)) category_num = {} def initDict(categorylist): for cate in categorylist: category_num[cate] = 0 return category_num categorylist = data.index category_num = initDict(categorylist) print(category_num.values());print(categorylist) cate = list(np.zeros(40)) i = 1 batch_size = 10000 #每10000次存储一次数据 for numAppList in tqdm(user_numAppList.values()): # print(i) i += 1 categorylist = data.index category_num = initDict(categorylist) if len(numAppList)==1: cate = np.vstack((cate,list(category_num.values()))) else: appList = numAppList[1:-1] for appId in appList: if appId in app_category: cateList = app_category[appId] for category in cateList: category_num[category] += 1 cate = np.vstack((cate,list(category_num.values()))) if i%batch_size==0: if i==batch_size: cate = pd.DataFrame(cate, columns=['实用工具', '便捷生活', '教育', '金融理财', '购物比价', '社交通讯', '影音娱乐', '新闻阅读', '休闲益智', '商务', '运动健康', '出行导航', '经营策略', '动作射击', '儿童', '角色扮演', '拍摄美化', '棋牌桌游', '旅游住宿', '汽车', '主题个性', '美食', '体育竞速', '网络游戏', '休闲游戏', '休闲娱乐', '动作冒险', '学习办公', '益智棋牌', '表盘个性', '电子书籍', '模拟游戏', '策略游戏', '棋牌天地', '体育射击', '图书阅读','主题铃声', '角色游戏', '合作壁纸*', '医疗健康']) else: cate = pd.DataFrame(cate) cate.drop(0,inplace=True) if i==batch_size: cate.to_csv(r'../processed/category_num.csv', index=False, header=True, encoding='utf_8_sig',mode='a') else: cate.to_csv(r'../processed/category_num.csv', index=False, header=False, encoding='utf_8_sig', mode='a') cate = list(np.zeros(40)) cate = pd.DataFrame(cate) cate.drop(0,inplace=True) # print(cate);print(cate.head()) cate.to_csv(r'../processed/category_num.csv', index=False, header=False, encoding='utf_8_sig',mode='a') del user_numAppList gc.collect() ###Output _____no_output_____ ###Markdown 特征生成部分 ###Code import numpy as np import pandas as pd from sklearn import preprocessing category_num = pd.read_csv(r'../processed/category_num.csv') print(category_num.shape) # print(category_num.head()) col = pd.DataFrame(category_num.sum()) col.shape category_num = pd.DataFrame(category_num.values[:,col[0]>1000]) category_num.shape user_app_actived = pd.read_csv(r'../data/user_app_actived.csv',header=None) print(user_app_actived.shape) print(user_app_actived.head()) 
category_num = preprocessing.StandardScaler().fit_transform(category_num.values) uId = user_app_actived[0].values uId = uId.reshape(len(uId),1) category_num.shape category_num = np.column_stack((uId,category_num)) category_num = pd.DataFrame(category_num) print(category_num.shape) print(category_num.head()) age_train = pd.read_csv(r'../data/age_train.csv',header=None,usecols=[0]) print(age_train.shape) age_test = pd.read_csv(r'../data/age_test.csv',header=None) print(age_test.shape) age_train = pd.merge(age_train,category_num,how='inner',on=0) # print(age_train.head(10)) age_test = pd.merge(age_test,category_num,how='inner',on=0) # print(age_test.head(10)) age_train.drop(labels=[0],axis=1,inplace=True) age_test.drop(labels=[0],axis=1,inplace=True) from scipy import sparse age_train = sparse.csr_matrix(age_train,dtype=np.float32) age_test = sparse.csr_matrix(age_test,dtype=np.float32) print(age_train.shape) print(age_test.shape) sparse.save_npz(r'../trainTestData/trainData30.npz',age_train) sparse.save_npz(r'../trainTestData/testData30.npz',age_test) category_num.to_csv(r'../processed/category_num.csv',header=None,index=False) ###Output _____no_output_____
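Editor's aside (not part of the original notebook): for data small enough to fit in memory, the per-user category counts built by the batched loop above can be sketched far more compactly with a merge and a crosstab. This is only an illustrative sketch: it assumes the `user_app_actived_data` and `app_info_data` frames are still loaded (so it would have to run before the `del` statements), it silently drops users whose app list is '\\N', and it does not reproduce the batched CSV writing that the original code needs for the full 2,010,000+ users.

```python
import pandas as pd

# One row per (user, app): split the '#'-joined app string and explode it.
exploded = (user_app_actived_data
            .assign(appId=user_app_actived_data['appId'].str.split('#'))
            .explode('appId'))

# Attach each app's category (an app may have several), then count categories per user.
with_category = exploded.merge(app_info_data, on='appId', how='inner')
category_counts = pd.crosstab(with_category['uId'], with_category['category'])
print(category_counts.shape)
```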
bindings/python/tutorials/CNTK_202_Language_Understanding.ipynb
###Markdown Hands-On Lab: Language Understanding with Recurrent NetworksThis hands-on lab shows how to implement a recurrent network to process text,for the Air Travel Information Services (ATIS) tasks of slot tagging and intent classification.We will start with a straight-forward embedding followed by a recurrent LSTM.We will then extend it to include neighbor words and run bidirectionally.Lastly, we will turn this system into an intent classifier. The techniques you will practice include:* model description by composing layer blocks instead of writing formulas* creating your own layer block* variables with different sequence lengths in the same network* parallel trainingWe assume that you are familiar with basics of deep learning, and these specific concepts:* recurrent networks ([Wikipedia page](https://en.wikipedia.org/wiki/Recurrent_neural_network))* text embedding ([Wikipedia page](https://en.wikipedia.org/wiki/Word_embedding)) PrerequisitesWe assume that you have already [installed CNTK](https://www.cntk.ai/pythondocs/setup.html).This tutorial requires CNTK V2. We strongly recommend to run this tutorial on a machine with a capable CUDA-compatible GPU. Deep learning without GPUs is not fun.Finally you need to download the training and test set. The following piece of code does that for you. If you get an error, please follow the manual instructions below it.We also list the imports we will need for this tutorial ###Code import os import math from cntk.blocks import * # non-layer like building blocks such as LSTM() from cntk.layers import * # layer-like stuff such as Linear() from cntk.models import * # higher abstraction level, e.g. entire standard models and also operators like Sequential() from cntk.utils import * from cntk.io import MinibatchSource, CTFDeserializer, StreamDef, StreamDefs, INFINITELY_REPEAT, FULL_DATA_SWEEP from cntk import Trainer from cntk.ops import cross_entropy_with_softmax, classification_error, splice from cntk.learner import adam_sgd, learning_rate_schedule, momentum_schedule from cntk.persist import load_model, save_model from _cntk_py import set_fixed_random_seed set_fixed_random_seed(1) # to become invariant to initialization order try: from tqdm import tqdm except: tqdm = lambda x: x import requests def download(data): url = "https://github.com/Microsoft/CNTK/blob/master/Examples/Tutorials/SLUHandsOn/atis.%s.ctf?raw=true" response = requests.get(url%data, stream=True) with open("atis.%s.ctf"%data, "wb") as handle: for data in tqdm(response.iter_content()): handle.write(data) for t in "train","test": try: f=open("atis.%s.ctf"%t) f.close() except: download(t) ###Output _____no_output_____ ###Markdown Fallback manual instructionsPlease download the ATIS [training](https://github.com/Microsoft/CNTK/blob/master/Tutorials/SLUHandsOn/atis.train.ctf) and [test](https://github.com/Microsoft/CNTK/blob/master/Tutorials/SLUHandsOn/atis.test.ctf) files and put them at the same folder as this notebook. 
###Code # load dictionaries query_wl = [line.rstrip('\n') for line in open('query.wl')] slots_wl = [line.rstrip('\n') for line in open('slots.wl')] query_dict = {query_wl[i]:i for i in range(len(query_wl))} slots_dict = {slots_wl[i]:i for i in range(len(slots_wl))} ###Output _____no_output_____ ###Markdown Task and Model StructureThe task we want to approach in this tutorial is slot tagging.We use the [ATIS corpus](https://catalog.ldc.upenn.edu/LDC95S26).ATIS contains human-computer queries from the domain of Air Travel Information Services,and our task will be to annotate (tag) each word of a query whether it belongs to aspecific item of information (slot), and which one.The data in your working folder has already been converted into the "CNTK Text Format."Let's look at an example from the test-set file `atis.test.ctf`: 19 |S0 178:1 | BOS |S1 14:1 | flight |S2 128:1 | O 19 |S0 770:1 | show |S2 128:1 | O 19 |S0 429:1 | flights |S2 128:1 | O 19 |S0 444:1 | from |S2 128:1 | O 19 |S0 272:1 | burbank |S2 48:1 | B-fromloc.city_name 19 |S0 851:1 | to |S2 128:1 | O 19 |S0 789:1 | st. |S2 78:1 | B-toloc.city_name 19 |S0 564:1 | louis |S2 125:1 | I-toloc.city_name 19 |S0 654:1 | on |S2 128:1 | O 19 |S0 601:1 | monday |S2 26:1 | B-depart_date.day_name 19 |S0 179:1 | EOS |S2 128:1 | OThis file has 7 columns:* a sequence id (19). There are 11 entries with this sequence id. This means that sequence 19 consistsof 11 tokens;* column `S0`, which contains numeric word indices;* a comment column denoted by ``, to allow a human reader to know what the numeric word index stands for;Comment columns are ignored by the system. `BOS` and `EOS` are special wordsto denote beginning and end of sentence, respectively;* column `S1` is an intent label, which we will only use in the last part of the tutorial;* another comment column that shows the human-readable label of the numeric intent index;* column `S2` is the slot label, represented as a numeric index; and* another comment column that shows the human-readable label of the numeric label index.The task of the neural network is to look at the query (column `S0`) and predict theslot label (column `S2`).As you can see, each word in the input gets assigned either an empty label `O`or a slot label that begins with `B-` for the first word, and with `I-` for anyadditional consecutive word that belongs to the same slot.The model we will use is a recurrent model consisting of an embedding layer,a recurrent LSTM cell, and a dense layer to compute the posterior probabilities: slot label "O" "O" "O" "O" "B-fromloc.city_name" ^ ^ ^ ^ ^ | | | | | +-------+ +-------+ +-------+ +-------+ +-------+ | Dense | | Dense | | Dense | | Dense | | Dense | ... +-------+ +-------+ +-------+ +-------+ +-------+ ^ ^ ^ ^ ^ | | | | | +------+ +------+ +------+ +------+ +------+ 0 -->| LSTM |-->| LSTM |-->| LSTM |-->| LSTM |-->| LSTM |-->... +------+ +------+ +------+ +------+ +------+ ^ ^ ^ ^ ^ | | | | | +-------+ +-------+ +-------+ +-------+ +-------+ | Embed | | Embed | | Embed | | Embed | | Embed | ... +-------+ +-------+ +-------+ +-------+ +-------+ ^ ^ ^ ^ ^ | | | | | w ------>+--------->+--------->+--------->+--------->+------... BOS "show" "flights" "from" "burbank"Or, as a CNTK network description. 
Please have a quick look and match it with the description above:(descriptions of these functions can be found at: [the layers reference](http://cntk.ai/pythondocs/layerref.html) ###Code vocab_size = 943 ; num_labels = 129 ; num_intents = 26 # number of words in vocab, slot labels, and intent labels model_dir = "./Models" data_dir = "." # model dimensions input_dim = vocab_size label_dim = num_labels emb_dim = 150 hidden_dim = 300 def create_model(): with default_options(initial_state=0.1): return Sequential([ Embedding(emb_dim), Recurrence(LSTM(hidden_dim), go_backwards=False), Dense(num_labels) ]) # peek model = create_model() print(len(model.layers)) print(model.layers[0].E.shape) print(model.layers[2].b.value) ###Output _____no_output_____ ###Markdown CNTK ConfigurationTo train and test a model in CNTK, we need to create a model and specify how to read data and perform training and testing. In order to train we need to specify:* how to read the data * the model function and its inputs and outputs* hyper-parameters for the learner[comment]: (For testing ...) A Brief Look at Data and Data ReadingWe already looked at the data.But how do you generate this format?For reading text, this tutorial uses the `CNTKTextFormatReader`. It expects the input data to beof a specific format, which is described [here](https://github.com/Microsoft/CNTK/wiki/CNTKTextFormat-Reader).For this tutorial, we created the corpora by two steps:* convert the raw data into a plain text file that contains of TAB-separated columns of space-separated text. For example: ``` BOS show flights from burbank to st. louis on monday EOS (TAB) flight (TAB) O O O O B-fromloc.city_name O B-toloc.city_name I-toloc.city_name O B-depart_date.day_name O ``` This is meant to be compatible with the output of the `paste` command.* convert it to CNTK Text Format (CTF) with the following command: ``` python Scripts/txt2ctf.py --map query.wl intent.wl slots.wl --annotated True --input atis.test.txt --output atis.test.ctf ``` where the three `.wl` files give the vocabulary as plain text files, one line per word.In these CTF files, our columns are labeled `S0`, `S1`, and `S2`.These are connected to the actual network inputs by the corresponding lines in the reader definition: ###Code def create_reader(path, is_training): return MinibatchSource(CTFDeserializer(path, StreamDefs( query = StreamDef(field='S0', shape=vocab_size, is_sparse=True), intent_unused = StreamDef(field='S1', shape=num_intents, is_sparse=True), slot_labels = StreamDef(field='S2', shape=num_labels, is_sparse=True) )), randomize=is_training, epoch_size = INFINITELY_REPEAT if is_training else FULL_DATA_SWEEP) # peek reader = create_reader(data_dir + "/atis.train.ctf", is_training=True) reader.streams ###Output _____no_output_____ ###Markdown TrainerWe also must define the training criterion (loss function), and also an error metric to track. 
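Aside (not part of the original lab, inserted here before the trainer code): the CTF lines shown earlier are easy to pick apart by hand, which helps when debugging your own data conversion. The minimal plain-Python sketch below is only meant to build intuition for the "index:count" sparse notation and the comment columns; CNTK's `CTFDeserializer` already does all of this for you.

```python
# Split one CTF line from the sample above into its sparse stream indices.
# Columns without an S0/S1/S2 stream name are human-readable comments and are skipped.
line = "19 |S0 178:1 | BOS |S1 14:1 | flight |S2 128:1 | O"

def parse_ctf_line(line):
    seq_id, *columns = [c.strip() for c in line.split("|")]
    fields = {}
    for col in columns:
        name, _, value = col.partition(" ")
        if name in ("S0", "S1", "S2") and ":" in value:
            fields[name] = int(value.split(":")[0])  # "index:count" -> index
    return int(seq_id), fields

print(parse_ctf_line(line))  # (19, {'S0': 178, 'S1': 14, 'S2': 128})
```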
###Code def create_criterion_function(model): labels = Placeholder() ce = cross_entropy_with_softmax(model, labels) errs = classification_error (model, labels) return combine ([ce, errs]) # (features, labels) -> (loss, metric) def train(reader, model, max_epochs=16): # criterion: (model args, labels) -> (loss, metric) # here (query, slot_labels) -> (ce, errs) criterion = create_criterion_function(model) # declare argument types #criterion.set_signature(vocab_size, num_labels) criterion.replace_placeholders({criterion.placeholders[0]: Input(vocab_size), criterion.placeholders[1]: Input(num_labels)}) # training config epoch_size = 18000 minibatch_size = 70 # learner momentum_as_time_constant = minibatch_size / -math.log(0.9) # TODO: Change to round number. This is 664.39. 700? lr_per_sample = [0.003]*4+[0.0015]*24+[0.0003] # LR schedule over epochs (we don't run that mayn epochs, but if we did, these are good values) lr_schedule = learning_rate_schedule(lr_per_sample, units=epoch_size) learner = adam_sgd(criterion.parameters, lr_per_sample=lr_schedule, momentum_time_constant=momentum_as_time_constant, low_memory=True, gradient_clipping_threshold_per_sample=15, gradient_clipping_with_truncation=True) # trainer trainer = Trainer(model, criterion.outputs[0], criterion.outputs[1], learner) # process minibatches and perform model training log_number_of_parameters(model) #progress_printer = ProgressPrinter(freq=100, first=10, tag='Training') # more detailed logging progress_printer = ProgressPrinter(tag='Training') t = 0 for epoch in range(max_epochs): # loop over epochs epoch_end = (epoch+1) * epoch_size while t < epoch_end: # loop over minibatches on the epoch data = reader.next_minibatch(minibatch_size, input_map={ # fetch minibatch criterion.arguments[0]: reader.streams.query, criterion.arguments[1]: reader.streams.slot_labels }) trainer.train_minibatch(data) # update model with it t += data[criterion.arguments[1]].num_samples # count samples processed so far progress_printer.update_with_trainer(trainer, with_metric=True) # log progress loss, metric, actual_samples = progress_printer.epoch_summary(with_metric=True) return loss, metric ###Output _____no_output_____ ###Markdown Running itYou can find the complete recipe below. ###Code def do_train(): global model model = create_model() reader = create_reader(data_dir + "/atis.train.ctf", is_training=True) train(reader, model) do_train() ###Output _____no_output_____ ###Markdown This shows how learning proceeds over epochs (passes through the data).For example, after four epochs, the loss, which is the cross-entropy criterion, has reached 0.22 as measured on the ~18000 samples of this epoch,and that the error rate is 5.0% on those same 18000 training samples.The epoch size is the number of samples--counted as *word tokens*, not sentences--toprocess between model checkpoints.Once the training has completed (a little less than 2 minutes on a Titan-X or a Surface Book),you will see an output like this```(0.06193035719939996, 0.014038397514149373)```which is a tuple containing the loss (cross entropy) and the metric (classification error) averaged over the final epoch.On a CPU-only machine, it can be 4 or more times slower. Evaluating the modelLike the train() function, we also define a function to measure accuracy on a test set. 
###Code def evaluate(reader, model): criterion = create_criterion_function(model) #criterion.set_signature(None, Input(num_labels)) criterion.replace_placeholders({criterion.placeholders[0]: Input(num_labels)}) # process minibatches and perform evaluation dummy_learner = adam_sgd(criterion.parameters, lr_per_sample=1, momentum_time_constant=0, low_memory=True) evaluator = Trainer(model, criterion.outputs[0], criterion.outputs[1], dummy_learner) progress_printer = ProgressPrinter(tag='Evaluation') while True: minibatch_size = 1000 data = reader.next_minibatch(minibatch_size, input_map={ # fetch minibatch criterion.arguments[0]: reader.streams.query, criterion.arguments[1]: reader.streams.slot_labels }) #data = reader.next_minibatch(minibatch_size) # fetch minibatch if not data: # until we hit the end break metric = evaluator.test_minibatch(data) progress_printer.update(0, data[criterion.arguments[1]].num_samples, metric) # log progress loss, metric, actual_samples = progress_printer.epoch_summary(with_metric=True) return loss, metric ###Output _____no_output_____ ###Markdown Now we can measure the model accuracy. ###Code def do_test(): reader = create_reader(data_dir + "/atis.test.ctf", is_training=False) evaluate(reader, model) do_test() model.layers[2].b.value # let's run a sequence through w = [query_dict[w] for w in 'BOS flights from new york to seattle EOS'.split()] # convert to word indices print(w) onehot = np.zeros([len(w),len(query_dict)], np.float32) for t in range(len(w)): onehot[t,w[t]] = 1 pred = model.eval({model.arguments[0]:onehot}) print(pred.shape) best = np.argmax(pred,axis=2) print(best[0]) [slots_wl[s] for s in best[0]] ###Output _____no_output_____ ###Markdown Modifying the ModelIn the following, you will be given tasks to practice modifying CNTK configurations.The solutions are given at the end of this document... but please try without! 
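One small optional addition before the tasks, reusing the `best` array and the `slots_wl` list from the query example above: pairing each input word with the slot label the model predicted for it makes that output much easier to read. This is just a convenience sketch, not part of the original lab.

```python
# Align each query word with its predicted slot label.
words = 'BOS flights from new york to seattle EOS'.split()
for word, slot_index in zip(words, best[0]):
    print(f"{word:12s} {slots_wl[slot_index]}")
```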
A Word About [`Sequential()`](https://www.cntk.ai/pythondocs/layerref.htmlsequential)Before jumping to the tasks, let's have a look again at the model we just ran.The model is described in what we call *function-composition style*.```python Sequential([ Embedding(emb_dim), Recurrence(LSTM(hidden_dim), go_backwards=False), Dense(num_labels) ])```You may be familiar with the "sequential" notation from other neural-network toolkits.If not, [`Sequential()`](https://www.cntk.ai/pythondocs/layerref.htmlsequential) is a powerful operation that,in a nutshell, allows to compactly express a very common situation in neural networkswhere an input is processed by propagating it through a progression of layers.`Sequential()` takes an list of functions as its argument,and returns a *new* function that invokes these functions in order,each time passing the output of one to the next.For example,```python FGH = Sequential ([F,G,H]) y = FGH (x)```means the same as``` y = H(G(F(x))) ```This is known as ["function composition"](https://en.wikipedia.org/wiki/Function_composition),and is especially convenient for expressing neural networks, which often have this form: +-------+ +-------+ +-------+ x -->| F |-->| G |-->| H |--> y +-------+ +-------+ +-------+Coming back to our model at hand, the `Sequential` expression simplysays that our model has this form: +-----------+ +----------------+ +------------+ x -->| Embedding |-->| Recurrent LSTM |-->| DenseLayer |--> y +-----------+ +----------------+ +------------+ Task 1: Add Batch NormalizationWe now want to add new layers to the model, specifically batch normalization.Batch normalization is a popular technique for speeding up convergence.It is often used for image-processing setups, for example our other [hands-on lab on imagerecognition](./Hands-On-Labs-Image-Recognition).But could it work for recurrent models, too? So your task will be to insert batch-normalization layers before and after the recurrent LSTM layer.If you have completed the [hands-on labs on image processing](https://github.com/Microsoft/CNTK/blob/master/bindings/python/tutorials/CNTK_201B_CIFAR-10_ImageHandsOn.ipynb),you may remember that the [batch-normalization layer](https://www.cntk.ai/pythondocs/layerref.htmlbatchnormalization-layernormalization-stabilizer) has this form:``` BatchNormalization()```So please go ahead and modify the configuration and see what happens.If everything went right, you will notice improved convergence speed (`loss` and `metric`)compared to the previous configuration. ###Code # TODO: Add batch normalization def create_model(): with default_options(initial_state=0.1): return Sequential([ Embedding(emb_dim), Recurrence(LSTM(hidden_dim), go_backwards=False), Dense(num_labels) ]) do_train() do_test() ###Output _____no_output_____ ###Markdown Task 2: Add a Lookahead Our recurrent model suffers from a structural deficit:Since the recurrence runs from left to right, the decision for a slot labelhas no information about upcoming words. 
The model is a bit lopsided.Your task will be to modify the model such thatthe input to the recurrence consists not only of the current word, but also of the next one(lookahead).Your solution should be in function-composition style.Hence, you will need to write a Python function that does the following:* takes no input arguments* creates a placeholder sequence variable* computes the "next value" in this sequence using the `Delay()` layer (use this specific form: `Delay(T=-1)`); and* concatenate the current and the next value into a vector of twice the embedding dimension using `splice()`and then insert this function into `Sequential()`'s list between the embedding and the recurrent layer. ###Code # TODO: Add lookahead def create_model(): with default_options(initial_state=0.1): return Sequential([ Embedding(emb_dim), BatchNormalization(), Recurrence(LSTM(hidden_dim), go_backwards=False), BatchNormalization(), Dense(num_labels) ]) do_train() do_test() ###Output _____no_output_____ ###Markdown Task 3: Bidirectional Recurrent ModelAha, knowledge of future words help. So instead of a one-word lookahead,why not look ahead until all the way to the end of the sentence, through a backward recurrence?Let us create a bidirectional model!Your task is to implement a new layer thatperforms both a forward and a backward recursion over the data, andconcatenates the output vectors.Note, however, that this differs from the previous task in thatthe bidirectional layer contains learnable model parameters.In function-composition style,the pattern to implement a layer with model parameters is to write a *factory function*that creates a *function object*.A function object, also known as *functor*, is an object that is both a function and an object.Which means nothing else that it contains data yet still can be invoked as if it was a function.For example, `Dense(outDim)` is a factory function that returns a function object that containsa weight matrix `W`, a bias `b`, and another function to compute `input @ W + b`.E.g. saying `Dense(1024)` will create this function object, which can then be usedlike any other function, also immediately: `Dense(1024)(x)`. Confused? Let's take an example: Let us implement a new layer that combinesa linear layer with a subsequent batch normalization. To allow function composition, the layer needs to be realized as a factory function,which could look like this:```pythondef DenseLayerWithBN(dim): F = Dense(dim) G = BatchNormalization() x = Placeholder() apply_x = G(F(x)) return apply_x```Invoking this factory function will create `F`, `G`, `x`, and `apply_x`. In this example, `F` and `G` are function objects themselves, and `apply_x` is the function to be applied to the data.Thus, e.g. calling `DenseLayerWithBN(1024)` willcreate an object containing a linear-layer function object called `F`, a batch-normalization function object `G`,and `apply_x` which is the function that implements the actual operation of this layerusing `F` and `G`. It will then return `apply_x`. To the outside, `apply_x` looks and behaveslike a function. Under the hood, however, `apply_x` retains access to its specific instances of `F` and `G`.Now back to our task at hand. You will now need to create a factory function,very much like the example above.You shall create a factory functionthat creates two recurrent layer instances (one forward, one backward), and then defines an `apply_x` functionwhich applies both layer instances to the same `x` and concatenate the two results.Allright, give it a try! 
To know how to realize a backward recursion in CNTK,please take a hint from how the forward recursion is done.Please also do the following:* remove the one-word lookahead you added in the previous task, which we aim to replace; and* change the `hidden_dim` parameter from 300 to 150, to keep the total number of model parameters limited. ###Code # TODO: Add bidirectional recurrence def create_model(): with default_options(initial_state=0.1): # inject an option to mimic the BrainScript version identically; remove some day return Sequential([ Embedding(emb_dim), BatchNormalization(), Recurrence(LSTM(hidden_dim), go_backwards=False), BatchNormalization(), Dense(num_labels) ]) do_train() do_test() ###Output _____no_output_____ ###Markdown Works like a charm! This model achieves 1.83%, a tiny bit better than the lookahead model above.The bidirectional model has 40% less parameters than the lookahead one. However, if you go back and look closelyat the complete log output (not shown on this web page), you may find that the lookahead one trainedabout 30% faster.This is because the lookahead model has both less horizontal dependencies (one instead of tworecurrences) and larger matrix products, and can thus achieve higher parallelism. Solution 1: Adding Batch Normalization ###Code def create_model(): with default_options(initial_state=0.1): # inject an option to mimic the BrainScript version identically; remove some day return Sequential([ Embedding(emb_dim), BatchNormalization(), Recurrence(LSTM(hidden_dim), go_backwards=False), BatchNormalization(), Dense(num_labels) ]) reader = create_reader(data_dir + "/atis.train.ctf", is_training=True) model = create_model() train(reader, model, max_epochs=8) ###Output _____no_output_____ ###Markdown Solution 2: Add a Lookahead ###Code def OneWordLookahead(): x = Placeholder() apply_x = splice ([x, future_value(x)]) return apply_x def create_model(): with default_options(initial_state=0.1): # inject an option to mimic the BrainScript version identically; remove some day return Sequential([ Embedding(emb_dim), OneWordLookahead(), BatchNormalization(), Recurrence(LSTM(hidden_dim), go_backwards=False), BatchNormalization(), Dense(num_labels) ]) reader = create_reader(data_dir + "/atis.train.ctf", is_training=True) model = create_model() train(reader, model, max_epochs=1) ###Output _____no_output_____ ###Markdown Solution 3: Bidirectional Recurrent Model ###Code def BiRecurrence(fwd, bwd): F = Recurrence(fwd) G = Recurrence(bwd, go_backwards=True) x = Placeholder() apply_x = splice ([F(x), G(x)]) return apply_x def create_model(): with default_options(initial_state=0.1): # inject an option to mimic the BrainScript version identically; remove some day return Sequential([ Embedding(emb_dim), BatchNormalization(), BiRecurrence(LSTM(hidden_dim), LSTM(hidden_dim)), BatchNormalization(), Dense(num_labels) ]) reader = create_reader(data_dir + "/atis.train.ctf", is_training=True) model = create_model() train(reader, model, max_epochs=8) ###Output _____no_output_____
analysis/.ipynb_checkpoints/Milestone2Task3-5-checkpoint.ipynb
###Markdown Task3P1 Step 1: ###Code import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns """ If blue team wins(1), then red team win will be 0. For such paired columns, we can combine them into one column; the value can be "blue"/"red"/"noteam" If there is a tie (game duration will for sure be smaller than 300s), the row is an outlier and will be dropped. KDA=(kill+assist)/death; It is usually for a single player, but in this project it will be calculated and used from a team's aspect. KDA is useful to combine and represent the information from kill, assist and death. But when death=0, KDA should not be calculated (division by 0). To handle such a situation, we can simply drop such rows because death=0 happens when one team is overpowering or someone disconnected during the game. Such situations can be considered outliers and should be dropped. The same thing can happen if the game is too short (<16min (960s)) """ """ This chain will first read the dataset, rename the game duration column, and drop null values. After that, it will create two columns to save KDA for both teams using blue's/red's kill, assist, and death columns. Next, it will drop outliers and combine the info for paired columns. Finally, it will drop useless columns (including columns of no interest). """ lol1 = (pd.read_csv('../data/raw/Master_Ranked_Games.csv') .rename(columns={'gameDuraton':'duration'}) .dropna() ) lol2 = (lol1.assign(blueKDA=lambda x: (x['blueKills']+x['blueAssist'])/x['blueDeath']) .assign(redKDA=lambda x: (x['redKills']+x['redAssist'])/x['redDeath']) .drop(lol1[(lol1['blueDeath']==0)|(lol1['redDeath']==0)|(lol1['blueKills']==0)|(lol1['redKills']==0)|(lol1['duration']<960)].index) #drop outliers .assign(teamFirstBlood = lambda x: np.where(x.blueFirstBlood==1,'blue','red')) .assign(teamFirstTower = lambda x: np.where(x.blueFirstTower==1,'blue',(np.where(x.redFirstTower==1,'red','noTeam')))) .assign(teamFirstBaron = lambda x: np.where(x.blueFirstBaron==1,'blue',(np.where(x.redFirstBaron==1,'red','noTeam')))) .assign(teamFirstDragon = lambda x: np.where(x.blueFirstDragon==1,'blue',(np.where(x.redFirstDragon==1,'red','noTeam')))) .assign(teamFirstInhibitor = lambda x: np.where(x.blueFirstInhibitor==1,'blue',(np.where(x.redFirstInhibitor==1,'red','noTeam')))) .assign(teamWins = lambda x: np.where(x.blueWins==1,'blue','red')) .drop(columns={'gameId','blueWins', 'blueFirstBlood', 'blueFirstTower', 'blueFirstBaron', 'blueFirstDragon', 'blueFirstInhibitor', 'redWins', 'redFirstBlood', 'redFirstTower', 'redFirstBaron', 'redFirstDragon', 'redFirstInhibitor', 'blueJungleMinionKills', 'redJungleMinionKills','blueKills', 'blueAssist', 'blueDeath', 'redKills', 'redAssist', 'redDeath', 'blueTotalLevel', 'redTotalLevel'}) #drop now useless or no interesting columns .reset_index() .drop(columns='index') ) lol2 ###Output _____no_output_____ ###Markdown Step 2: ###Code #If blue team wins(1), then red team win will be 0. For such paired columns, we can combine them into one column(eg. teamWins). #0 for red team, 1 for blue team """ If blue team wins(1), then red team win will be 0. For such paired columns, we can combine them into one column; the value can be "blue"/"red"/"noteam" If there is a tie (game duration will for sure be smaller than 300s), the row is an outlier and will be dropped. KDA=(kill+assist)/death; It is usually for a single player, but in this project it will be calculated and used from a team's aspect.
KDA is useful to combine and represent the information from kill, assist and death. But when death=0, KDA should not be calculated (division by 0). To handle such a situation, we can simply drop such rows because death=0 happens when one team is overpowering or someone disconnected during the game. Such situations can be considered outliers and should be dropped. The same thing can happen if the game is too short (<16min (960s)) """ def load_and_process(url_or_path_to_csv_file): # Method Chain 1 (Load data and deal with missing data) lol1 = (pd.read_csv('../data/raw/Master_Ranked_Games.csv') .rename(columns={'gameDuraton':'duration'}) .dropna() ) # Method Chain 2 (Create new columns, drop others, and do processing) lol2 = (lol1.assign(blueKDA=lambda x: (x['blueKills']+x['blueAssist'])/x['blueDeath']) .assign(redKDA=lambda x: (x['redKills']+x['redAssist'])/x['redDeath']) .drop(lol1[(lol1['blueDeath']==0)|(lol1['redDeath']==0)|(lol1['blueKills']==0)|(lol1['redKills']==0)|(lol1['duration']<960)].index) #drop outliers .assign(teamFirstBlood = lambda x: np.where(x.blueFirstBlood==1,'blue','red')) .assign(teamFirstTower = lambda x: np.where(x.blueFirstTower==1,'blue',(np.where(x.redFirstTower==1,'red','noTeam')))) .assign(teamFirstBaron = lambda x: np.where(x.blueFirstBaron==1,'blue',(np.where(x.redFirstBaron==1,'red','noTeam')))) .assign(teamFirstDragon = lambda x: np.where(x.blueFirstDragon==1,'blue',(np.where(x.redFirstDragon==1,'red','noTeam')))) .assign(teamFirstInhibitor = lambda x: np.where(x.blueFirstInhibitor==1,'blue',(np.where(x.redFirstInhibitor==1,'red','noTeam')))) .assign(teamWins = lambda x: np.where(x.blueWins==1,'blue','red')) .drop(columns={'gameId','blueWins', 'blueFirstBlood', 'blueFirstTower', 'blueFirstBaron', 'blueFirstDragon', 'blueFirstInhibitor', 'redWins', 'redFirstBlood', 'redFirstTower', 'redFirstBaron', 'redFirstDragon', 'redFirstInhibitor', 'blueJungleMinionKills', 'redJungleMinionKills','blueKills', 'blueAssist', 'blueDeath', 'redKills', 'redAssist', 'redDeath', 'blueTotalLevel', 'redTotalLevel'}) #drop now useless or no interesting columns .reset_index() .drop(columns='index') ) return lol2 load_and_process('../data/raw/Master_Ranked_Games.csv') ###Output _____no_output_____ ###Markdown Task3P2_Hexuan ###Code from scripts import project_functions df = project_functions.load_and_process('../data/raw/Master_Ranked_Games.csv') df ###Output _____no_output_____ ###Markdown M2 Task4(EDA): ###Code # import pandas as pd # import numpy as np # import matplotlib.pyplot as plt # import seaborn as sns # from scripts import project_functions # df = project_functions.load_and_process('../data/raw/Master_Ranked_Games.csv') df.info() ###Output <class 'pandas.core.frame.DataFrame'> RangeIndex: 90498 entries, 0 to 90497 Data columns (total 35 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 duration 90498 non-null int64 1 blueDragonKills 90498 non-null int64 2 blueBaronKills 90498 non-null int64 3 blueTowerKills 90498 non-null int64 4 blueInhibitorKills 90498 non-null int64 5 blueWardPlaced 90498 non-null int64 6 blueWardkills 90498 non-null int64 7 blueChampionDamageDealt 90498 non-null int64 8 blueTotalGold 90498 non-null int64 9 blueTotalMinionKills 90498 non-null int64 10 blueAvgLevel 90498 non-null float64 11 blueKillingSpree 90498 non-null int64 12 blueTotalHeal 90498 non-null int64 13 blueObjectDamageDealt 90498 non-null int64 14 redDragonKills 90498 non-null int64 15 redBaronKills 90498 non-null int64 16 redTowerKills 90498 non-null int64 17 
redInhibitorKills 90498 non-null int64 18 redWardPlaced 90498 non-null int64 19 redWardkills 90498 non-null int64 20 redChampionDamageDealt 90498 non-null int64 21 redTotalGold 90498 non-null int64 22 redTotalMinionKills 90498 non-null int64 23 redAvgLevel 90498 non-null float64 24 redKillingSpree 90498 non-null int64 25 redTotalHeal 90498 non-null int64 26 redObjectDamageDealt 90498 non-null int64 27 blueKDA 90498 non-null float64 28 redKDA 90498 non-null float64 29 teamFirstBlood 90498 non-null object 30 teamFirstTower 90498 non-null object 31 teamFirstBaron 90498 non-null object 32 teamFirstDragon 90498 non-null object 33 teamFirstInhibitor 90498 non-null object 34 teamWins 90498 non-null object dtypes: float64(4), int64(25), object(6) memory usage: 24.2+ MB ###Markdown From the info, we can tell that most columns are numeric; only the six combined team columns (teamFirstBlood through teamWins) are stored as text (object). The code "df = project_functions.load_and_process('../data/raw/Master_Ranked_Games.csv')" has already dropped nulls, dropped useless columns (including columns of no interest), and created two new columns for KDA, which is a more compact representation (one column per team) of kills, assists, and deaths. ###Code df.columns ###Output _____no_output_____ ###Markdown Column explanations: duration: length of the game in seconds; blueDragonKills: number of dragons killed by blue team; blueBaronKills: number of Barons killed by blue team; blueTowerKills: number of towers killed by blue team; blueInhibitorKills: number of inhibitors killed by blue team; blueWardPlaced: number of wards placed by blue team; blueWardkills: number of wards killed by blue team; blueChampionDamageDealt: amount of damage dealt by blue team to the opposing team's champions; blueTotalGold: amount of gold gained by blue team; blueTotalMinionKills: number of minions killed by blue team; blueAvgLevel: average level of the blue team when the game ends; blueKillingSpree: number of times blue team got a killing spree; blueTotalHeal: amount of healing done by blue team; blueObjectDamageDealt: amount of damage dealt by blue team to objectives; blueKDA: blue team's (kill+assist)/death ratio (the red-prefixed columns are defined the same way); teamFirstBlood: team that kills a champion first in the game; teamFirstTower: team that kills a tower first in the game; teamFirstBaron: team that kills a Baron first in the game; teamFirstDragon: team that kills a dragon first in the game; teamFirstInhibitor: team that kills an inhibitor first in the game; teamWins: team that wins the game ###Code df.head() df.describe().T ###Output _____no_output_____ ###Markdown We double-check that there are no outliers now, and get a better view of the dataset. ###Code plt.figure(figsize=(10,6)) sns.countplot(y=df['teamWins']).set_title('Count of team wins for blue and red side') plt.ylabel('team wins') ###Output _____no_output_____ ###Markdown Without other information, there is not much difference between the number of wins for the two teams. We can tell that the game is fair for both sides. ###Code hisPlot1 = sns.displot(df, x='duration', aspect=2, palette='pastel') hisPlot1 ###Output _____no_output_____ ###Markdown If a game's duration is more than about 1200s, the distribution looks similar to a normal distribution, but this would need a formal test. Many games also end early (some of them were dropped as outliers, but we can still see this in the plot). ###Code sns.displot(df, x="duration", hue="teamWins", multiple="dodge", aspect=3) ###Output _____no_output_____ ###Markdown For any game duration, both teams seem to have about the same number of wins. 
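To back the last two observations with numbers instead of reading them off the plots, here is a short hedged check that reuses `df` and the pandas import from the cells above: the overall win share per side, and the win share within game-length quartiles.

```python
# Overall blue vs red win share, then win share within each duration quartile.
print(df['teamWins'].value_counts(normalize=True))
duration_bin = pd.qcut(df['duration'], 4)
print(pd.crosstab(duration_bin, df['teamWins'], normalize='index'))
```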
###Code sns.countplot(y=df['teamFirstBaron']).set_title('Count of team that takes the first Baron') plt.ylabel('team kills first Baron') ###Output _____no_output_____ ###Markdown Many games end without a Baron being killed. We need to keep this in mind when we are doing further analysis. ###Code ## M2 Task5: ###Output _____no_output_____ ###Markdown From EDA, we can tell that the game is fair; both sides have about the same chance to win the game at the start. RQ1: Does the team that takes first blood have a higher chance to win? RQ2: Does the team that takes the first Baron have a higher chance to win? (Keep in mind that many games end without a first Baron) RQ3: Does the team that takes the first dragon have a higher chance to win? RQ4: What is the relationship between team KDA and the chance to win? RQ5: What is the relationship between the two teams' total champion damage difference and the chance to win? RQ6: What is the relationship between the two teams' total heal difference and the chance to win? ###Code import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns from scripts import project_functions df = project_functions.load_and_process('../data/raw/Master_Ranked_Games.csv') df.head() df1 = df.assign(temp1= lambda x: np.where(x.teamFirstBlood==x.teamWins,True,False)) sns.countplot(data=df1, y='temp1').set_title('Team with first blood wins') plt.ylabel('') ###Output _____no_output_____ ###Markdown The team with first blood does win noticeably more often, but the chance doesn't differ **very** much. ###Code df2 = df.assign(temp2= lambda x: np.where((x.teamFirstBaron!='noTeam')&(x.teamFirstBaron==x.teamWins), 'True',np.where((x.teamFirstBaron!='noTeam'),'False','noTeam'))) sns.countplot(data=df2, y='temp2').set_title('Team with first Baron wins') plt.ylabel('') ###Output _____no_output_____ ###Markdown According to the plot, if a team takes the first Baron, it is very likely to win the game. This may be either because the 'better' team is more likely to get the first Baron, or because the first Baron gives that team a very strong buff. But the relationship exists either way. ###Code df3 = df.assign(temp3= lambda x: np.where((x.teamFirstDragon!='noTeam')&(x.teamFirstDragon==x.teamWins), 'True',np.where((x.teamFirstDragon!='noTeam'),'False','noTeam'))) sns.countplot(data=df3, y='temp3').set_title('Team with first dragon wins') plt.ylabel('') ###Output _____no_output_____ ###Markdown According to the plot, if a team takes the first dragon, it is also more likely to win the game, but the gap is smaller than for the first Baron: the ratio here is about 5.2/3, whereas for the first Baron it is about 4.4/0.9 and for first blood about 5.2/3.5. ###Code sns.displot(df, x="blueKDA", y="teamWins",aspect=3) sns.displot(df, x="redKDA", y="teamWins",aspect=3) ###Output _____no_output_____ ###Markdown According to the plot, as a team's KDA goes higher, that team is more likely to win. But there are times when a team loses with a high KDA (in the plots, there are lines for the opposite team even at high KDA). Generally speaking, looking at the two darkest areas, teams that win the game usually have a KDA larger than 2; otherwise, they are more likely to lose. ###Code #We are using blue's-red's df5 = df.assign(dmgD= df['blueChampionDamageDealt']-df['redChampionDamageDealt']) sns.displot(df5, x="dmgD", y="teamWins",aspect=3) ###Output _____no_output_____ ###Markdown According to the plot, the team that wins usually deals more champion damage than the other team (dark area). But there are also many cases where a team wins without higher damage. 
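To put a number on "usually", here is a small hedged check of the damage comparison, reusing the `df5` frame and the numpy import from the cells above:

```python
# Share of games won by the team that dealt more champion damage.
higher_damage_team = np.where(df5['dmgD'] > 0, 'blue', 'red')
print((higher_damage_team == df5['teamWins']).mean())
```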
###Code #We are using blue's-red's df6 = df.assign(healD= df['blueTotalHeal']-df['redTotalHeal']) sns.displot(df6, x="healD", y="teamWins",aspect=4) ###Output _____no_output_____
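The healing plot above is left without a written interpretation, so here is the same kind of hedged numeric check as for damage, reusing `df6` and the numpy import from the cells above:

```python
# Share of games won by the team with more total healing.
higher_heal_team = np.where(df6['healD'] > 0, 'blue', 'red')
print((higher_heal_team == df6['teamWins']).mean())
```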
notebooks/part-i-data-exploration.ipynb
###Markdown Welcome to introduction to dashboards with Plotly and Dash------------------------------------------------------------------------------------------------------------------------------- Workshop facilitators: Laura Gutierrez Funderburk, Hanh Tong About this workshopIn this workshop we will explore some characteristics of the housing market in Canada. It is important to note that this workshop assumes:1. Data cleaning and exploration was completed prior to developing the dashboard2. Some comfort with `pandas` and visualization is assumed3. Comfort navigating the Jupyter environment is needed Workshop schedule:------------------------------------------------------------------------------------------------------------------------------- 1. Part I: Data explorationIn this section, we will first spend time getting familiar with the data. We will use the `pandas` and `plotly` libraries, we will also explore the `DEX` feature within Noteable to ease getting a good sense for what the data contains.In this section, we will also explore the notion of factoring code into functions, and the notion of writing a Python script that we can use to easily recreate our results. 2. Part II: Dashboard componentsIn this section, we will take what we built together in part I and explore the main components in a Dash dashboard. Part I: Data exploration ###Code import pandas as pd import plotly.express as px # Read data url = 'https://raw.githubusercontent.com/Vancouver-Datajam/dashboard-workshop-dash/main/data/delinquency_mortgage_population_2021_2020.csv' data_pop_del_mort_df = pd.read_csv(url, index_col=0) data_pop_del_mort_df.head(10) ###Output _____no_output_____ ###Markdown Exercise: Get familiar with the table-------------------------------------------------------------------------------------------------------------------------------Run the cell below. Questionsa) What are relevant variables in the data?b)What is the extent (range), mean and median of columns `DelinquencyRate`, `AverageMortgageAmount` and `PopulationSize`?c) What is the time range and frequency of the data? ###Code data_pop_del_mort_df.info() data_pop_del_mort_df.describe() ###Output _____no_output_____ ###Markdown Using Python and Plotly to generate interactive plots-------------------------------------------------------------------------------------------------------------------------------In this section we are going to write a few commands to get started with visualizations. ###Code # First attempt px.line(data_pop_del_mort_df, x = "Time", y="DelinquencyRate") ###Output _____no_output_____ ###Markdown The plot above is quite difficult to read. Let's colour the values by Geography, and add a title. ###Code # Second attempt px.line(data_pop_del_mort_df, x = "Time", y="DelinquencyRate", color="Geography", title = "Chart: line plot of Time and DelinquencyRate by Geography") ###Output _____no_output_____ ###Markdown Exercise: Let's take a look at the average mortgage amount and population sizeComplete the code below to visualize the average mortgage amount. Change the code to visualize changes in population size. ###Code variable = px.line(data_pop_del_mort_df, x = "Time", y=variable, color="Geography", title = f"Chart: line plot of Time and {variable} by Geography") ###Output _____no_output_____ ###Markdown Let's take a look at their distribution by using a box plot. 
###Code px.box(data_pop_del_mort_df, x = 'Geography', y = 'DelinquencyRate', color = 'Geography', title = 'Chart: box plot of Delinquency rate by Geoography.') ###Output _____no_output_____ ###Markdown Exercise: Let's take a look at distribution of average mortgage amount and population sizeComplete the code below to visualize the average mortgage amount and population size. ###Code variable = px.box(data_pop_del_mort_df, x = 'Geography', y = variable, color = 'Geography', title = f'Chart: box plot of {variable} by Geoography.') ###Output _____no_output_____ ###Markdown Let's work on a scatter plot to see if there is a relationship between average mortgage amount and delinquency. ###Code px.scatter(data_frame=data_pop_del_mort_df, y = "AverageMortgageAmount", x = "DelinquencyRate", title="Average mortgage rate to delinquency rate") ###Output _____no_output_____ ###Markdown Exercise: modify the code above to colour the dots by Geography, add hover name with Time ###Code px.scatter(data_frame=data_pop_del_mort_df, y = "AverageMortgageAmount", x = "DelinquencyRate", title="Average mortgage rate to delinquency rate", color=, hover_name=) ###Output _____no_output_____ ###Markdown Using dictionaries to access different kind of functions-------------------------------------------------------------------------------------------------------------------------------We need to do quite a bit of work refactoring our code in preparation for our dashboard.We will use dictionaries to access different plotting functions.Recall, a dictionary is a data structure with `keys` and `values`. The syntax of a dictionary is as follows: dictionary = { key1 : value1, key2 : value2, key3 : value3} Where keys are typically a string, and values can be a data structure such as a string, list, set, tuple, or a function. ###Code sample_dictionary = {"list_numbers" : [1, 2, 3, 4, 5], "set_numbers": set([1, 2, 3, 4, 5]), "tuple_numbers": tuple([1, 2, 3, 4, 5]), "function_sum": sum} ###Output _____no_output_____ ###Markdown To access the values within a dictionary, we use the following notation dictionary[key] For example ###Code sample_dictionary['list_numbers'] sample_dictionary['set_numbers'] sample_dictionary['tuple_numbers'] sample_dictionary['function_sum'] ###Output _____no_output_____ ###Markdown To use the function `sum`, simply pass a list of numbers you want to add. ###Code sum([1,2,3]) ###Output _____no_output_____ ###Markdown We can obtain the same result with our dictionary as follows: ###Code sample_dictionary['function_sum']([1,2,3]) ###Output _____no_output_____ ###Markdown We can use the following dictionary to generate different kinds of plots. ###Code # Dictionary plot_dict = {'box': px.box,'violin': px.violin, 'scatter': px.scatter, 'line':px.line} ###Output _____no_output_____ ###Markdown We can then use the dictionary to try different kinds of plots. 
###Code plot_dict['scatter'](data_pop_del_mort_df, x = "Time", y="DelinquencyRate", color="Geography", title = "Chart: line plot of Time and DelinquencyRate by Geography") ###Output _____no_output_____ ###Markdown Exercise 1: change the key `scatter` for `line` , `box` and `violin` and run the cell Exercise 2: change the `x` variable to be one of `Geography` or `Time` Exercise 3: Change the `y` variable to be one of `PopulationSize`, `DelinquencyRate` or `AverageMortgageAmount` ###Code plot_dict['scatter'](data_pop_del_mort_df, x = "Time", y="DelinquencyRate", color="Geography", title = "Playing with several kinds of charts") ###Output _____no_output_____ ###Markdown Refactoring code into functions-------------------------------------------------------------------------------------------------------------------------------In the next section we will refactor our code to ease reproducibility and also to ensure our Dash app is cleaner. We can then put our function dictionary into a Python function. ###Code def graph_region(region_df, graph_type: str, dimension1: str, dimension2: str): """ Parameters ---------- region_df: (dataframe object) reshaped data frame object with mortage, delinquency and population data graph_type: (string) "box", "violin", "scatter", "line" dimension1: (str) one of 'Time' or 'Geography' dimension2: (str) one of 'AverageMortgageAmount', 'AverageMortgageAmount' or 'PopulationSize' Returns: -------- Plotly figure """ plot_dict = {'box': px.box,'violin': px.violin, 'scatter': px.scatter, 'line':px.line} try: # Initialize function fig = plot_dict[graph_type](region_df, x=dimension1, y=dimension2, color = "Geography", hover_name = "Time") # Format figure title_string = f'Chart: {graph_type} plot of {dimension1} and {dimension2} by Geography' fig.update_layout(title = title_string) fig.update_xaxes(tickangle=-45) return fig except KeyError: print("Key not found. Make sure that 'graph_type' is in ['box','violin', 'scatter', 'line']") except ValueError: print("Dimension is not valid. dimension1 is one of 'Time' or 'Geography'") print("dimension2 is one of 'AverageMortgageAmount', 'DelinquencyRate', 'PopulationSize'") graph_region(data_pop_del_mort_df, 'line', "Time", "AverageMortgageAmount") graph_region(data_pop_del_mort_df, 'box', "Geography", "PopulationSize") graph_region(data_pop_del_mort_df, 'scatter', "AverageMortgageAmount", "DelinquencyRate") ###Output _____no_output_____ ###Markdown Bonus, incorporating time series plots ###Code # Optional to have regions fig = px.scatter(data_frame=data_pop_del_mort_df, y = "AverageMortgageAmount", x = "DelinquencyRate", size= "PopulationSize", color= "Geography", animation_frame="Time", animation_group="Geography", title = "Delinquency rate vs average mortgage over time" ) fig.update_layout(yaxis_range=[100000,500000]) fig.update_layout(xaxis_range=[0,1]) fig.show() ###Output _____no_output_____ ###Markdown Welcome to introduction to dashboards with Plotly and Dash------------------------------------------------------------------------------------------------------------------------------- Workshop facilitators: Laura Gutierrez Funderburk, Hanh Tong About this workshopIn this workshop we will explore some characteristics of the housing market in Canada. It is important to note that this workshop assumes:1. Data cleaning and exploration was completed prior to developing the dashboard2. Some comfort with `pandas` and visualization is assumed3. 
Comfort navigating the Jupyter environment is needed Workshop schedule:------------------------------------------------------------------------------------------------------------------------------- 1. Part I: Data explorationIn this section, we will first spend time getting familiar with the data. We will use the `pandas` and `plotly` libraries, we will also explore the `DEX` feature within Noteable to ease getting a good sense for what the data contains.In this section, we will also explore the notion of factoring code into functions, and the notion of writing a Python script that we can use to easily recreate our results. 2. Part II: Dashboard componentsIn this section, we will take what we built together in part I and explore the main components in a Dash dashboard. Part I: Data exploration ###Code import pandas as pd import plotly.express as px # Read data url = 'https://raw.githubusercontent.com/Vancouver-Datajam/dashboard-workshop-dash/main/data/delinquency_mortgage_population_2021_2020.csv' data_pop_del_mort_df = pd.read_csv(url, index_col=0) data_pop_del_mort_df.head(10) ###Output _____no_output_____ ###Markdown Exercise: Get familiar with the table-------------------------------------------------------------------------------------------------------------------------------Run the cell below. Questionsa) What are relevant variables in the data?b)What is the extent (range), mean and median of columns `DelinquencyRate`, `AverageMortgageAmount` and `PopulationSize`?c) What is the time range and frequency of the data? ###Code data_pop_del_mort_df.info() data_pop_del_mort_df.describe() ###Output _____no_output_____ ###Markdown Using Python and Plotly to generate interactive plots-------------------------------------------------------------------------------------------------------------------------------In this section we are going to write a few commands to get started with visualizations. ###Code # First attempt px.line(data_pop_del_mort_df, x = "Time", y="DelinquencyRate") ###Output _____no_output_____ ###Markdown The plot above is quite difficult to read. Let's colour the values by Geography, and add a title. ###Code # Second attempt px.line(data_pop_del_mort_df, x = "Time", y="DelinquencyRate", color="Geography", title = "Chart: line plot of Time and DelinquencyRate by Geography") ###Output _____no_output_____ ###Markdown Exercise: Let's take a look at the average mortgage amount and population sizeComplete the code below to visualize the average mortgage amount. Change the code to visualize changes in population size. ###Code variable = px.line(data_pop_del_mort_df, x = "Time", y=variable, color="Geography", title = f"Chart: line plot of Time and {variable} by Geography") ###Output _____no_output_____ ###Markdown Let's take a look at their distribution by using a box plot. ###Code px.box(data_pop_del_mort_df, x = 'Geography', y = 'DelinquencyRate', color = 'Geography', title = 'Chart: box plot of Delinquency rate by Geoography.') ###Output _____no_output_____ ###Markdown Exercise: Let's take a look at distribution of average mortgage amount and population sizeComplete the code below to visualize the average mortgage amount and population size. ###Code variable = px.box(data_pop_del_mort_df, x = 'Geography', y = variable, color = 'Geography', title = f'Chart: box plot of {variable} by Geoography.') ###Output _____no_output_____ ###Markdown Let's work on a scatter plot to see if there is a relationship between average mortgage amount and delinquency. 
###Code px.scatter(data_frame=data_pop_del_mort_df, y = "AverageMortgageAmount", x = "DelinquencyRate", title="Average mortgage rate to delinquency rate") ###Output _____no_output_____ ###Markdown Exercise: modify the code above to colour the dots by Geography, add hover name with Time ###Code px.scatter(data_frame=data_pop_del_mort_df, y = "AverageMortgageAmount", x = "DelinquencyRate", title="Average mortgage rate to delinquency rate", color=, hover_name=) ###Output _____no_output_____ ###Markdown Using dictionaries to access different kind of functions-------------------------------------------------------------------------------------------------------------------------------We need to do quite a bit of work refactoring our code in preparation for our dashboard.We will use dictionaries to access different plotting functions.Recall, a dictionary is a data structure with `keys` and `values`. The syntax of a dictionary is as follows: dictionary = { key1 : value1, key2 : value2, key3 : value3} Where keys are typically a string, and values can be a data structure such as a string, list, set, tuple, or a function. ###Code sample_dictionary = {"list_numbers" : [1, 2, 3, 4, 5], "set_numbers": set([1, 2, 3, 4, 5]), "tuple_numbers": tuple([1, 2, 3, 4, 5]), "function_sum": sum} ###Output _____no_output_____ ###Markdown To access the values within a dictionary, we use the following notation dictionary[key] For example ###Code sample_dictionary['list_numbers'] sample_dictionary['set_numbers'] sample_dictionary['tuple_numbers'] sample_dictionary['function_sum'] ###Output _____no_output_____ ###Markdown To use the function `sum`, simply pass a list of numbers you want to add. ###Code sum([1,2,3]) ###Output _____no_output_____ ###Markdown We can obtain the same result with our dictionary as follows: ###Code sample_dictionary['function_sum']([1,2,3]) ###Output _____no_output_____ ###Markdown We can use the following dictionary to generate different kinds of plots. ###Code # Dictionary plot_dict = {'box': px.box,'violin': px.violin, 'scatter': px.scatter, 'line':px.line} ###Output _____no_output_____ ###Markdown We can then use the dictionary to try different kinds of plots. ###Code plot_dict['scatter'](data_pop_del_mort_df, x = "Time", y="DelinquencyRate", color="Geography", title = "Chart: line plot of Time and DelinquencyRate by Geography") ###Output _____no_output_____ ###Markdown Exercise 1: change the key `scatter` for `line` , `box` and `violin` and run the cell Exercise 2: change the `x` variable to be one of `Geography` or `Time` Exercise 3: Change the `y` variable to be one of `PopulationSize`, `DelinquencyRate` or `AverageMortgageAmount` ###Code plot_dict['scatter'](data_pop_del_mort_df, x = "Time", y="DelinquencyRate", color="Geography", title = "Playing with several kinds of charts") ###Output _____no_output_____ ###Markdown Refactoring code into functions-------------------------------------------------------------------------------------------------------------------------------In the next section we will refactor our code to ease reproducibility and also to ensure our Dash app is cleaner. We can then put our function dictionary into a Python function. 
###Code def graph_region(region_df, graph_type: str, dimension1: str, dimension2: str): """ Parameters ---------- region_df: (dataframe object) reshaped data frame object with mortage, delinquency and population data graph_type: (string) "box", "violin", "scatter", "line" dimension1: (str) one of 'Time' or 'Geography' dimension2: (str) one of 'AverageMortgageAmount', 'AverageMortgageAmount' or 'PopulationSize' Returns: -------- Plotly figure """ plot_dict = {'box': px.box,'violin': px.violin, 'scatter': px.scatter, 'line':px.line} try: # Initialize function fig = plot_dict[graph_type](region_df, x=dimension1, y=dimension2, color = "Geography", hover_name = "Time") # Format figure title_string = f'Chart: {graph_type} plot of {dimension1} and {dimension2} by Geography' fig.update_layout(title = title_string) fig.update_xaxes(tickangle=-45) return fig except KeyError: print("Key not found. Make sure that 'graph_type' is in ['box','violin', 'scatter', 'line']") except ValueError: print("Dimension is not valid. dimension1 is one of 'Time' or 'Geography'") print("dimension2 is one of 'AverageMortgageAmount', 'DelinquencyRate', 'PopulationSize'") graph_region(data_pop_del_mort_df, 'line', "Time", "AverageMortgageAmount") graph_region(data_pop_del_mort_df, 'box', "Geography", "PopulationSize") graph_region(data_pop_del_mort_df, 'scatter', "AverageMortgageAmount", "DelinquencyRate") ###Output _____no_output_____ ###Markdown Bonus, incorporating time series plots ###Code # Optional to have regions fig = px.scatter(data_frame=data_pop_del_mort_df, y = "AverageMortgageAmount", x = "DelinquencyRate", size= "PopulationSize", color= "Geography", animation_frame="Time", animation_group="Geography", title = "Delinquency rate vs average mortgage over time" ) fig.update_layout(yaxis_range=[100000,500000]) fig.update_layout(xaxis_range=[0,1]) fig.show() ###Output _____no_output_____
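Since Part II of the workshop turns these pieces into a Dash app, here is a minimal hedged sketch of how `graph_region` could be wired to a dropdown callback. The layout and component ids are invented for illustration (a Dash 2.x install is assumed); this is not the workshop's actual Part II code.

```python
from dash import Dash, dcc, html, Input, Output

app = Dash(__name__)
app.layout = html.Div([
    dcc.Dropdown(id='graph-type',
                 options=[{'label': g, 'value': g} for g in ['box', 'violin', 'scatter', 'line']],
                 value='line'),
    dcc.Graph(id='region-graph'),
])

@app.callback(Output('region-graph', 'figure'), Input('graph-type', 'value'))
def update_figure(graph_type):
    # Reuses the dataframe and the graph_region function defined above.
    return graph_region(data_pop_del_mort_df, graph_type, 'Time', 'AverageMortgageAmount')

if __name__ == '__main__':
    app.run_server(debug=True)
```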
notebooks/01_ensemble.ipynb
###Markdown Dev comments Project definitions:- Set a method called dfit or overwrite fit method in order to have sklearn pipeline support?- Delegates for an instance of estimator inside class or increment classes through factories (better support in sklearn pipelines)? - [X] TODO: Solve Tree Predict mistery- [ ] TODO: make meta bagged randomized prior class- [X] TODO: make a multivariate joint dist estimator for AdaBoostingRegressor- [X] TODO: make boosting and baging tree estimator classes- [X] TODO: implement bagging and boosting meta estimator (include sampling from sub models)- [X] TODO: include target scaling in pipeline- [ ] TODO: Create DensityEstimator Base Class containing sample and density methods (density calls sample and returns RV CLASS)- [ ] TODO: Mean, Variance and Entropy explainer tool (LIME, SHAP, foressts...)- [X] TODO: Decide wether to allow multi output (doesnt model well multivariate bimodal joint probabilities, cheeky to sample (two different random samples))- [X] Entropy based regression with random forest embeddings + entropy of targets in each node- [X] enhance multioutput estimator class- [X] make possible for user defined sample weights for y_ in self.sample(allow time exponential decay sampling, for example) WE MAY GET THE DISTRIBUTION FOR FREE WITH self.proba_preds- [X] Make EntropyEstimator class to handle any estimator turning regression problem into a classification one- [X] fix resolution to any number of bins- [X] Make EntropyEstimator Ensemble (Bagging and Boosting) - Came for free with BaggingClassifier (predict_proba)- [ ] Make _ChainedJointEstimator, define joint estimation strategy, maybe using mu- [X] Update EntropyEstimator sampling method encompassing KDE sampling- [ ] Incorporate _ChainedJointEstimator on MultiOutputEntropyEstimator- [ ] Reorganize 01_ensemble in 01_entropy_estimator, 02_joint_estimator and 03_ensemble_estimator- [X] Change name of MultiOutputEtimator to JointOutputEstimtor- [ ] Make cov_add_noise in estimators (not needed for kde since data is whitened with PCA)- [ ] Make chained/stacked estimator (good for time series) (creates a new feature-as the predicted bin for each estim) make parallel (with kernel tree) and sequential estimators (timeseries vs joint)- [ ] Make quantile calibration of probability densisties (to make it uniform)- [X] Make Probability calibrations for entropy estimation Imports - ###Code #export from warnings import warn from functools import partial import copy from tqdm.notebook import tqdm import numpy as np import pandas as pd from sklearn import ensemble from sklearn.base import BaseEstimator, ClassifierMixin from sklearn.preprocessing import OneHotEncoder, normalize, QuantileTransformer, FunctionTransformer, MinMaxScaler from sklearn.calibration import CalibratedClassifierCV from sklearn.multioutput import MultiOutputRegressor, MultiOutputClassifier from sklearn.utils.fixes import _joblib_parallel_args from sklearn.metrics import pairwise from sklearn.decomposition import TruncatedSVD from sklearn.pipeline import Pipeline from sklearn.utils.class_weight import compute_sample_weight from sklearn.neighbors import NearestNeighbors from numpy.linalg import LinAlgError from scipy.spatial.distance import cdist import scipy from joblib import Parallel, delayed from skdensity.utils import (cos_sim_query, sample_multi_dim, ctqdm, add_noise,sample_from_dist_array, DelegateEstimatorMixIn, _fix_X_1d, _fix_one_dist_1d, _fix_one_dist_2d, _add_n_dists_axis,_add_n_samples_axis,_add_n_dims_axis,sample_idxs, 
make_batches ) from skdensity.metrics import kde_entropy, quantile, marginal_variance, bimodal_variance, kde_likelihood, kde_quantile, agg_smallest_distance, cdf from skdensity.core.random_variable import KDE, RandomVariable, RVArray ###Output _____no_output_____ ###Markdown Ensemble density estimators -Density estimators based on ensemble methods, such as baging, boosting and some decision tree algorithms. All 'classes' are actually factories that dinamically extends the funcitonality of the original sklearn class with methods such as sample. Example data ###Code import seaborn as sns from sklearn.datasets import make_regression from matplotlib import cm def sigmoid(x): return 1/(1+np.exp(x)) X,y = make_regression( n_samples=100000, n_features=15, n_informative=6, n_targets=2, bias=500, effective_rank=None, tail_strength=10, noise=150, shuffle=True, coef=False, random_state=None ) #make one of X[1] feature mode weightening bimodal_factor_wieght = 2 bimodal_factors = (sigmoid(bimodal_factor_wieght*X[:,-1]) > np.random.random(size = X.shape[0])).astype(int) bimodal_factors[bimodal_factors == 0] = -1 bimodal_factors = bimodal_factors.reshape(-1,1) y = bimodal_factors*y colors = cm.get_cmap('binary')(256*(sigmoid(bimodal_factor_wieght*X[:,-1]) > np.random.random(size = X.shape[0])).astype(int)) sns.jointplot(y[:,0],y[:,1], joint_kws = {'color': colors}, alpha = 0.01) sns.jointplot(X[:,-1], y[:,1], alpha = 0.1) X_train, X_test = X[:int(0.8*len(X))], X[int(0.8*len(X)):] y_train, y_test = y[:int(0.8*len(X))], y[int(0.8*len(X)):] ###Output _____no_output_____ ###Markdown QuantileCalibrator> A class to calibrates samples in order to bring the quantile distribution closer to a uniform distribution ###Code #export class QuantileCalibrator(BaseEstimator): def __init__(self, estimator, bins = 100): self.estimator = estimator self.bins = bins return def fit(self, X, y, **sampling_kws): ''' X are the samples from the trainning set y is the true value, alligned with its respective distribution (X) ''' samples = self.estimator.sample(X, **sampling_kws) q_x = quantile(y, samples) q_dist, _ = np.histogram(q_x, bins=self.bins, range=[0,1], weights=None, density=False) #fill zeros with 1 to avoid problems in division q_dist = np.where(q_dist == 0, 1, q_dist) self.q_dist = q_dist return self def _make_resampling_weights(self, X): ''' define new sampling wieghts for each set of samples ''' cdfs = cdf(X) cdfs = cdfs[:,:,0] #works only for 1d dists weights = [] for dist in cdfs: dg = np.digitize(dist, bins=np.linspace(0,1,self.bins-1, endpoint = False)) weights.append(self.q_dist[dg]) #create weights as the bin count and then normalize weights = normalize(np.array(weights), norm = 'l1') return weights def sample(self, X, sample_size = 1000, weight_func = None, alpha = None, replace = True, noise_factor = 0, **sampling_kws): ''' resamples values in each dist taking into account quantile calibration factor learned from training set ''' X = self.estimator.sample(X, sample_size = 1000, weight_func = None, alpha = None, replace = True, noise_factor = 0, **sampling_kws) p = self._make_resampling_weights(X) samples = sample_from_dist_array(X, sample_size, weights = p) noise = agg_smallest_distance(samples, agg_func = np.std) noise = _add_n_dims_axis(noise) return add_noise(samples, noise_factor*noise) ###Output _____no_output_____ ###Markdown HistogramEstimator> An estimator that performas a classification on a discretized transformation of a continuous space using QuantileTransformer, predicts a probability 
distribution using proba_preds and maps back to continuous domain.Base estimator can be any estimator that performs predict_proba method. ###Code #export #TESTE def identity_func(x): return x IDENTITY_TRANSFORMER = FunctionTransformer( func = identity_func, inverse_func = identity_func, validate=False, accept_sparse=True, check_inverse=True, kw_args=None, inv_kw_args=None, ) class HistogramEstimator(BaseEstimator, ClassifierMixin, DelegateEstimatorMixIn): ''' Meanwhile only performs marginal density estiamtion, not joint. Thus, only 1dimensional y. For joint, should try something using RegressionChain (to pass dimension information to the prediction of other dims) ''' def __init__(self,estimator, resolution = 'auto' ,alpha = 1, calibrated_classifier = None, calibration_cv = 4,rv_bins_kws = {}): ''' resolution can be int (number of bins of uniform quantile transformation) or hist array ''' self.cumulative_target = False #used only in ClassificationKernelEstimator thorugh inheritance assert hasattr(estimator, 'predict_proba') or ('predict_proba' in dir(estimator)), 'estimator should implement `predict_proba` method' self.estimator = estimator self.alpha = alpha assert isinstance(resolution, (np.ndarray, int, str)), f'resolution should be Array of bin edges, str or int, got {resolution.__class__}' self.resolution = resolution self.rv_bins_kws = rv_bins_kws self.calibration_cv = calibration_cv if calibrated_classifier == 'default': self.calibrated_classifier = CalibratedClassifierCV(base_estimator=self.estimator, method='isotonic', cv = calibration_cv, ensemble = False) elif calibrated_classifier is None: self.calibrated_classifier = None else: assert hasattr(calibrated_classifier, 'predict_proba') or ('predict_proba' in dir(calibrated_classifier)), f'calibrated_classifier should implement `predict_proba method`' assert not isinstance(calibrated_classifier, type), f'calibrated_classifier should be an instance, not type' self.calibrated_classifier = calibrated_classifier return def _q_transformer_fit(self, y): ''' fits self.q_transformer ''' y = _fix_X_1d(y) if type(self.resolution) == str: self.bin_edges = np.histogram_bin_edges(y, bins = self.resolution) print(f'base classifier will be trained with {len(self.bin_edges)} classes') return self.bin_edges elif type(self.resolution) == np.ndarray: self.bin_edges = self.resolution elif type(self.resolution) == int: self.q_transformer = QuantileTransformer(n_quantiles = self.resolution) self._q_minmax_scaler = MinMaxScaler() y = self.q_transformer.fit_transform(y) #for case when output_distribution != uniform self._q_minmax_scaler.fit(y) return self.q_transformer elif isinstance(self.resolution, list): return self.resolution else: raise TypeError(f'self.resolution should be np.array of bin edges, str or int, got {self.resolution.__class__}') def _q_transformer_transform(self, y, cumulative = False): ''' maps floats to int (bin_id in histogram) ''' y = _fix_X_1d(y) if self.cumulative_target: if type(self.resolution) in (str, np.ndarray): hist_bins = np.digitize(y, self.bin_edges) max_bin = len(self.bin_edges) elif type(self.resolution) == int: hist_bins = self.q_transformer.transform(y) #scale between 0 and 1 hist_bins = self._q_minmax_scaler.transform(hist_bins) hist_bins = np.around(hist_bins*(self.resolution - 1), decimals = 0).astype(int) max_bin = self.resolution elif isinstance(self.resolution,np.ndarray): hist_bins = np.digitize(y, self.resolution) max_bin = self.resolution y_transformed = np.zeros((y.shape[0],max_bin), dtype = 'int8') for i 
in range(len(y_transformed)): bin_idx = int(hist_bins[i]) y_transformed[i, :bin_idx] = 1 y_transformed = y_transformed[:,:-1] else: if type(self.resolution) in (str, np.ndarray): y_transformed = np.digitize(y, self.bin_edges) elif type(self.resolution) == int: y_transformed = self.q_transformer.transform(y) #scale between 0 and 1 y_transformed = self._q_minmax_scaler.transform(y_transformed) y_transformed = np.around(y_transformed*(self.resolution - 1), decimals = 0).astype(int) elif isinstance(self.resolution,np.ndarray): y_transformed = np.digitize(y, self.resolution) y_transformed = y_transformed.flatten() return y_transformed def _q_transformer_inverse_transform(self,y): ''' maps from bin_id in histogram (int) to float. beware that during transform, information is lost due to downsampling, so inverse_transform will not be an exact inverse_transform. ''' y = _fix_X_1d(y) if type(self.resolution) == int: y_transformed = (y/(self.resolution - 1)).astype(float) y_transformed = self._q_minmax_scaler.inverse_transform(y_transformed) return self.q_transformer.inverse_transform(y_transformed).flatten() #1d asserted already else: raise NotImplementedError('inverse transform only implemented for case when self.resolution == int') def _preprocess_y_fit(self, y): #set y_dim if len(y.shape) == 1: self.y_dim = 1 elif len(y.shape) == 2: # assert 1d assert y.shape[-1] == 1, 'y should be 1d. For joint estimation use KernelTreeHistogramEstimator or joint estimators' self.y_dim = y.shape[-1] else: raise AssertionError('y should be 1d vector or 2d column array (n_samples,1)') #reshape when y.dim == 1 and array dim equals 2 if self.y_dim == 1: y = y.reshape(y.shape[0]) self._q_transformer_fit(y) return self def _preprocess_y_transform(self, y): #set y_dim if len(y.shape) == 1: self.y_dim = 1 elif len(y.shape) == 2: # assert 1d assert y.shape[-1] == 1, 'y should be 1d. 
For joint estimation use KernelTreeHistogramEstimator or joint estimators' self.y_dim = y.shape[-1] else: raise AssertionError('y should be 1d vector or 2d column array (n_samples,1)') #reshape when y.dim == 1 and array dim equals 2 if self.y_dim == 1: y = y.reshape(y.shape[0]) # Fit one instance of RandomVariable or KDE for each bin: y_transformed = self._q_transformer_transform(y) return y_transformed def _preprocess_y_fit_transform(self, y): self._preprocess_y_fit(y) return self._preprocess_y_transform(y) def fit(self, X, y = None, **estimator_fit_kws): #fit y transformer self._preprocess_y_fit(y) #transform y y_transformed = self._preprocess_y_transform(y) # fit kdes bin_ids = list(set(y_transformed)) bins_data_mapper = [y[y_transformed == i] for i in bin_ids] print('fitting RandomVariable for each bin') self._bin_dist_rvs = [RandomVariable(**self.rv_bins_kws).fit(d) for d in bins_data_mapper] #fit calibrated classifier if not self.calibrated_classifier is None: self.calibrated_classifier.fit(X = X, y = y_transformed, **estimator_fit_kws) self.estimator = self.calibrated_classifier.calibrated_classifiers_[0].base_estimator else: #fit classifier print('fitting estimator') self.estimator.fit(X = X, y = y_transformed, **estimator_fit_kws) return self def _get_bin_pdf(self,X): ''' returns pdf array of shape (n_dists, n_bins, n_dims) the values are the probability "density" for that bin ''' if not self.calibrated_classifier is None: probas = self.calibrated_classifier.predict_proba(X) probas = np.array(probas) return np.array(probas) else: probas = self.estimator.predict_proba(X) return np.array(probas) def custom_predict(self, X, agg_func = np.mean, sample_size = 1000, weight_func = None, alpha = None, replace = True, noise_factor = 0): ''' performs aggregation in a samples drawn for a specific X and returns the custom predicted value as the result of the aggregation. Could be mean, mode, median, std, entropy, likelihood... note that agg_func recieves an array of shape (n_samples, n_dims). If you want to perform aggregation along dimensions, dont forget to tell agg_func to perform operations along axis = 0 ''' samples = self.sample(X, sample_size, weight_func, alpha, replace, noise_factor) return np.array([agg_func(sample) for sample in samples]) def _rv_bin_sample(self, bin_probas, sample_size): ''' Generate RV samples from bins of 1 observation ''' assert len(bin_probas.shape) == 2, f'Passed weights array should be 2d not {bin_probas.shape}' #SAMPLE ALL KDES AND THE SAMPLE FROM SAMPLED ARRAY samples_dist = np.array([bin_dist.sample(sample_size) for bin_dist in self._bin_dist_rvs]) samples_dist = _add_n_dims_axis(samples_dist) samples_dist = samples_dist[:,:,0] idxs = sample_idxs(bin_probas, sample_size = sample_size) samples = [] print('Sampling data from bins...') for i in tqdm(np.arange(bin_probas.shape[0])): idx = idxs[i] idx, counts = np.unique(idx, return_counts = True) s = [np.random.choice(samples_dist[i],c, replace = True) for i,c in zip(idx,counts)] samples.append(np.concatenate(s)) return np.array(samples) def sample(self, X, sample_size = 1000, weight_func = None, alpha = None, replace = True, noise_factor = 0): ''' weight func is a function that takes weight array (n_dists, n_bins) and returned an array of the same shape but with desired processing of the weights. 
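e.g. weight_func = lambda w: w**2 is equivalent to leaving weight_func as None and setting alpha = 2, since the resulting weights are re-normalized to sum to 1 in both cases.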
if weight_func is not None, alpha is ignored ''' #set alpha if not None, else use self.alpha alpha = alpha if not alpha is None else self.alpha #apply weight_func if not None, else, power to alpha bins_probas = self._get_bin_pdf(X) if self.y_dim == 1: bins_probas = _add_n_dists_axis(bins_probas) # for 1d case bins_probas = bins_probas[0,:,:] if not weight_func is None: bins_probas = normalize(weight_func(bins_probas), norm = 'l1') else: bins_probas = normalize(bins_probas**alpha, norm = 'l1') samples = self._rv_bin_sample(bins_probas, sample_size) samples = _add_n_dims_axis(samples) # make a 3d sample array with dim axis = 1 noise = agg_smallest_distance(samples, agg_func = np.std) noise = _add_n_dims_axis(noise) return add_noise(samples, noise_factor*noise) def density(self, X, dist = 'empirical', sample_size = 1000, weight_func = None, alpha = None, replace = True, noise_factor = 1e-7, **dist_kws): ''' returns a RVArray instance of RandomVariable objects fitted on sampled data based on X and other sample params ''' samples = self.sample(X, sample_size, weight_func, alpha, replace, noise_factor) print('Fitting random variable objects for each dsitribution...') rv_objects = [RandomVariable(keep_samples = False).fit(sample, dist, **dist_kws) for sample in tqdm(samples)] return RVArray(rv_objects) def score(self, X, y = None, **score_kws): return self.estimator.score(X, self._q_transformer_transform(y), **score_kws) def predict_proba(self, X): ''' predict proba handling multilabel outputs ''' probas = self.estimator.predict_proba(X) if self.cumulative_target: probas = np.hstack([i for i in probas]) return probas ###Output _____no_output_____ ###Markdown Usage Example`HistogramEstimator` turns the regression problem into a classification one, predicts the expected bins and the turns it back to continuous domain.It accepts any estimator with the `predict_proba` method as a base estimator.It works well for any kind of distribution, but only supports marginal distribution estimation ###Code from sklearn.linear_model import LogisticRegression import matplotlib.pyplot as plt estim1 = HistogramEstimator(LogisticRegression(tol = 0.001, solver = 'sag'), 'auto', rv_bins_kws = {'default_dist':['empirical']}) #estim1 = JointEntropyEstimator(LogisticRegression(tol = 0.001, solver = 'sag'), resolution = 'auto', ) estim2 = HistogramEstimator(LogisticRegression(tol = 0.001, solver = 'sag'), 30, rv_bins_kws = {'default_dist':['empirical']}) estim1.fit(X_train, y_train[:,1]) estim2.fit(X_train, y_train[:,1]) i = np.random.choice(np.arange(y_test.shape[0])) alpha = 1 noise_factor = 0.2 samples2 = estim2.sample(X_test[i:i+1], sample_size = 200, alpha = alpha, noise_factor = noise_factor) #prediction = density_estimator1.custom_predict( # X_test[i:i+1], agg_func = lambda x: np.mean(x,axis = 0), alpha = alpha, beta = beta, gamma = gamma) #naive_prediction = density_estimator1.predict(X_test[i:i+1]) if (len(samples2.shape) > 1) and (samples2.shape[-1] == 2): jntplot = sns.jointplot(samples2[0,:,0], samples2[0,:,1], joint_kws = {'label':'Model Samples', 'alpha':0.1}) jntplot.ax_joint.scatter(y[:,0], y[:,1], color = 'orange', alpha = 0.01, label = 'Target Distribution') jntplot.ax_joint.scatter(y_test[i,0], y_test[i,1], color = 'red', label = 'Target Value') jntplot.ax_joint.scatter(prediction[0,0], prediction[0,1], color = 'yellow', label = 'Predicted Value') jntplot.ax_joint.scatter(naive_prediction[0,0], naive_prediction[0,1], color = 'cyan', label = 'Naive Predicted Value') jntplot.ax_joint.legend() else: dst = 
sns.distplot(samples2, kde = True, bins = 20, hist_kws = {'label':'Infered Conditional Distribution'}) dst = sns.distplot(y_test[:,1], kde = True, bins = 20, hist_kws = {'label':'Target Total Distribution'}) dst._axes.axvline(y_test[i,1], color = 'r', label = 'True Value') dst._axes.legend() alpha = 1 noise_factor = 0.0 samples1 = estim1.sample(X_test, 1000, alpha = alpha, noise_factor = noise_factor) samples2 = estim2.sample(X_test, 1000, alpha = alpha, noise_factor = noise_factor) ll1 = np.log2(kde_likelihood(y_test[:,1:2],samples1)) ll2 = np.log2(kde_likelihood(y_test[:,1:2],samples2)) sns.distplot(ll1[ll1 > -30], label = 'model1') sns.distplot(ll2[ll2 > -30], label = 'model2') plt.legend() print(np.median(ll1[ll1 > -1e10]), np.median(ll2[ll2 > -1e10])) entr1 = kde_entropy(samples1, frac = 0.2,sample_size = 1000) entr2 = kde_entropy(samples2, frac = 0.2,sample_size = 1000) sns.distplot(entr1, label = 'model1') sns.distplot(entr2, label = 'model2') plt.legend() print(entr1.mean(),entr2.mean()) q2 = quantile(y_test[:,1:],samples2) q1 = quantile(y_test[:,1:],samples1) sns.distplot(q1) sns.distplot(q2) ###Output _____no_output_____ ###Markdown `ClassificationKernelEstimator`> Performs a classification task and durying density estimation, queries the n_neighbors closest to predict_proba output (distribution vector) ###Code #export def minkowski_similarity(X): X = normalize(X, norm = 'l2') return 2/(1+np.exp(X)) class ClassificationKernelEstimator(HistogramEstimator): ''' Estimator that uses the predicted proba vector of the estimator as a kernel and then performs knn search in order to estimate the distribution ''' def __init__( self, estimator, resolution = 'auto', cumulative_target = True,alpha = 1, calibrated_classifier = None, calibration_cv = None, prefit_estimator=False, n_neighbors=30, scale_query_space=True, knn_indexer=None, knn_metric='euclidean', similarity_function=None, noise_factor = 0, n_jobs = None, ): self.n_jobs = n_jobs if cumulative_target: estimator = MultiOutputClassifier(estimator, n_jobs = n_jobs) super().__init__(estimator, resolution, alpha, calibrated_classifier, calibration_cv) self.n_neighbors = n_neighbors self.prefit_estimator = prefit_estimator self.scale_query_space = scale_query_space if knn_indexer is None: self.knn_indexer = NearestNeighbors( n_neighbors=n_neighbors, metric=knn_metric, algorithm='kd_tree') else: self.knn_indexer = knn_indexer self.knn_metric = knn_metric self.similarity_function = similarity_function self.alpha = alpha self.noise_factor = noise_factor self.cumulative_target = cumulative_target return def fit(self, X, y=None, **estimator_fit_kws): #fit y transformer self._preprocess_y_fit(y) #transform y y_transformed = self._q_transformer_transform(y) if not self.prefit_estimator: print('fitting estimator') if not self.calibrated_classifier is None: #fit calibrated classifier self.calibrated_classifier.fit(X = X, y = y_transformed, **estimator_fit_kws) self.estimator = self.calibrated_classifier.calibrated_classifiers_[0].base_estimator else: #fit classifier self.estimator.fit(X, y_transformed, **estimator_fit_kws) #get probas for query space if self.cumulative_target: probas = self.estimator.predict_proba(X) probas = np.hstack([i for i in probas]) else: probas = self.estimator.predict_proba(X) #set space transformer for probability space if self.scale_query_space and not self.cumulative_target: self._query_space_scaler = QuantileTransformer().fit(probas) probas = self._query_space_scaler.transform(probas) probas = normalize(probas) else: 
# make a identity transformer in case of no scalling self._query_space_scaler = FunctionTransformer() self.knn_indexer.fit(probas) self.y_ = y return self def _query_idx_and_sim(self, query_vector, n_neighbors): # apply scaler query_vector = self._query_space_scaler.transform(query_vector) # query distances and indexes dist, idx = [], [] batches = make_batches(query_vector, batch_size = np.ceil(query_vector.shape[0]/100).astype(int)) print('Querying neighbors...') for batch in tqdm(batches): dist_i, idx_i = self.knn_indexer.kneighbors(batch, n_neighbors) dist.append(dist_i) idx.append(idx_i) dist = np.vstack(dist) idx = np.vstack(idx) if (self.knn_metric in ('minkowski', 'euclidean')) and (self.similarity_function is None): sim = minkowski_similarity(dist) else: sim = self.similarity_function(dist) # take l1 norm of similarity vectors to make a valid probability distribution sim = normalize(sim, norm='l1') return idx, sim + 1e-9 #ensure vector is not null def _sample_from_idx_sim(self, idx, sim, sample_size, noise_factor): samples = [] for i in np.arange(len(idx)): ys = self.y_[sample_multi_dim(idx[i], sample_size = sample_size, weights = sim[i], axis = 0)] if len(ys.shape) == 1: ys = ys.reshape(-1,1) if abs(noise_factor) > 0: noise = agg_smallest_distance(ys.reshape(1,*ys.shape), agg_func = np.median) ys = add_noise(ys, noise_factor*noise) samples.append(ys) return np.array(samples) def sample(self, X, sample_size=1000, n_neighbors=None, alpha=None, noise_factor=None): #handle args: n_neighbors, alpha, noise_factor = self._handle_similarity_sample_parameters( n_neighbors = n_neighbors, alpha = alpha, noise_factor = noise_factor) #get probas probas = self.estimator.predict_proba(X) if isinstance(probas, list): #handle multilabel probas probas = np.hstack([i for i in probas]) # get idx and sim using proba vector as query vector idx, sim = self._query_idx_and_sim(probas, n_neighbors) # sample indexes and data, add noise if not alpha is None: sim = normalize(sim**alpha, norm = 'l1') return self._sample_from_idx_sim(idx, sim, sample_size, noise_factor) def density(self, X, dist = 'empirical', sample_size=1000, n_neighbors=None, alpha=None, noise_factor=None, **dist_kws): samples = self.sample(X, sample_size, n_neighbors, alpha, noise_factor) print('fitting distribution objects...') rv_objects = [RandomVariable(keep_samples = False).fit(sample, dist, **dist_kws) for sample in tqdm(samples)] return RVArray(rv_objects) def _handle_similarity_sample_parameters(self, **kwargs): args = [] for key in kwargs: if kwargs[key] is None: if hasattr(self, key): args.append(getattr(self, key)) else: args.append(kwargs[key]) else: args.append(kwargs[key]) return args ###Output _____no_output_____ ###Markdown which distance to use? 
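###Markdown Before comparing candidate distances empirically, note what the `2/(1+exp(L2))` transform does (it is the mapping `minkowski_similarity` applies after l2-normalizing the distance vectors): a distance of 0 becomes a similarity of 1, and large distances decay towards 0. A quick check of a few values (rounded): ###Code
# quick illustration of the distance-to-similarity mapping used above
import numpy as np

d = np.array([0.0, 0.5, 1.0, 2.0])
print(2 / (1 + np.exp(d)))  # approximately [1.0, 0.755, 0.538, 0.238]
###Output _____no_output_____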
###Code from scipy.spatial.distance import jensenshannon from sklearn.preprocessing import normalize from sklearn.metrics import pairwise_distances import numpy as np import matplotlib.pyplot as plt random_X = normalize(np.abs(np.random.random((10000,10))), norm = 'l2') js = 1- np.array([jensenshannon(random_X[0], i) for i in random_X]) cossim = (normalize(random_X)[0:1]@(normalize(random_X).T)).flatten() euclidean = pairwise_distances(random_X[0:1], random_X).flatten() msk = js > 0.0 js, cossim, euclidean = js[msk], cossim[msk], euclidean[msk] plt.scatter(js, 2/(1+np.exp(euclidean)), alpha = 0.02, label = r'$\frac{2}{1+exp(L2)}$') plt.scatter(js, 1/(1+euclidean), alpha = 0.02, label = r'$\frac{1}{1+L2}$') plt.scatter(js, cossim, alpha = 0.02, label = 'cossim similarity') plt.plot([0,1], [0,1]) plt.legend() ###Output _____no_output_____ ###Markdown Usage Example ###Code estim1 = ClassificationKernelEstimator(estim2.estimator, 20, prefit_estimator = False, n_neighbors = 100, scale_query_space = False) estim1.fit(X_train, y_train[:,1]) i = np.random.choice(np.arange(y_test.shape[0])) alpha = 1 noise_factor = 0.2 n_neighbors = 100 samples2 = estim1.sample(X_test[i:i+1], sample_size = 300, alpha = alpha, noise_factor = noise_factor, n_neighbors = n_neighbors) #prediction = density_estimator1.custom_predict( # X_test[i:i+1], agg_func = lambda x: np.mean(x,axis = 0), alpha = alpha, beta = beta, gamma = gamma) #naive_prediction = density_estimator1.predict(X_test[i:i+1]) if (len(samples2.shape) > 1) and (samples2.shape[-1] == 2): jntplot = sns.jointplot(samples2[0,:,0], samples2[0,:,1], joint_kws = {'label':'Model Samples', 'alpha':0.1}) jntplot.ax_joint.scatter(y[:,0], y[:,1], color = 'orange', alpha = 0.01, label = 'Target Distribution') jntplot.ax_joint.scatter(y_test[i,0], y_test[i,1], color = 'red', label = 'Target Value') jntplot.ax_joint.scatter(prediction[0,0], prediction[0,1], color = 'yellow', label = 'Predicted Value') jntplot.ax_joint.scatter(naive_prediction[0,0], naive_prediction[0,1], color = 'cyan', label = 'Naive Predicted Value') jntplot.ax_joint.legend() else: dst = sns.distplot(samples2, kde = True, bins = 20, hist_kws = {'label':'Infered Conditional Distribution'}) dst = sns.distplot(y_test[:,1], kde = True, bins = 20, hist_kws = {'label':'Target Total Distribution'}) dst._axes.axvline(y_test[i,1], color = 'r', label = 'True Value') dst._axes.legend() alpha = 1 noise_factor = 0.0 n_neighbors = 300 samples1 = estim1.sample(X_test, 1000, alpha = alpha, noise_factor = noise_factor, n_neighbors = n_neighbors) #samples2 = estim2.sample(X_test, 1000, alpha = alpha, noise_factor = noise_factor) ll1 = np.log2(kde_likelihood(y_test[:,1:2],samples1, frac = 0.1)) ll2 = np.log2(kde_likelihood(y_test[:,1:2],samples2, frac = 0.1)) sns.distplot(ll1[ll1 > -30], label = 'model1') sns.distplot(ll2[ll2 > -30], label = 'model2') plt.legend() print(np.median(ll1[ll1 > -1e10]), np.median(ll2[ll2 > -1e10])) entr1 = kde_entropy(samples1, frac = 0.1,sample_size = 1000) entr2 = kde_entropy(samples2, frac = 0.1,sample_size = 1000) sns.distplot(entr1, label = 'model1') sns.distplot(entr2, label = 'model2') plt.legend() print(entr1.mean(),entr2.mean()) q2 = quantile(y_test[:,1:],samples2) q1 = quantile(y_test[:,1:],samples1) sns.distplot(q1) sns.distplot(q2) ###Output _____no_output_____ ###Markdown Ensemble Tree methods TreeEstimatorMixin class>creates some functionalities for similarity sampling based random forests and naive sampling random forests ###Code #export #node quality functions def 
expected_likelihood(node_data, sample_size = 100): kde = KDE().fit(node_data) return np.mean(kde.evaluate(kde.rvs(size = sample_size))) def inverese_log_node_var(node_data): #makes no sense for multivariate distribtuions centroid = node_data.mean(axis = 0).reshape(1,-1) distances = cdist(node_data, centroid, 'seuclidean').flatten() return 1/np.log1p(np.mean(distances)) # datapoint-node functions def datapoint_pdf(node_data): return KDE().fit(node_data).pdf(node_data) def datapoint_gaussian_likelihood(node_data): centroid = node_data.mean(axis = 0).reshape(1,-1) distances = cdist(node_data, centroid, 'seuclidean').flatten() distance_std = distances.std() #if distance_std == 0: # return 1 z = (distances - distances.mean())/distance_std return 1/(distance_std*np.pi**(1/2))*np.exp(-1/2*z**2) def _bimodal_variance_fix_dim(x): if len(x.shape) == 1: return 1/np.log1p(bimodal_variance(_fix_one_dist_1d(x))) else: return 1/np.log1p(bimodal_variance(_fix_one_dist_2d(x))) AVALIBLE_NODE_AGG_FUNC = { 'expected_likelihood':expected_likelihood, 'inverse_log_variance':inverese_log_node_var, 'inverse_log_bimodal_variance': _bimodal_variance_fix_dim } AVALIBLE_DATAPOINT_WEIGHT_FUNC = { 'kde_likelihood': datapoint_pdf, 'gaussian_likelihood': datapoint_gaussian_likelihood } #export class TreeEstimatorMixin(): '''Base Class containing important methods for building Naive and Similarity Density Tree estimators''' @property def _node_data_generator(self): return self._make_node_data_generator(self.y_, self._raw_leaf_node_matrix) def _make_node_data_generator(self, y, node_matrix): ''' creates a generator from sparse matrix where each iter retrns a row ''' s1 = node_matrix.sum(axis = 0).cumsum().A.astype(int).flatten() s2 = np.concatenate([[0],s1[:-1]]) slices = [slice(i[0],i[1]) for i in zip(s2,s1)] idxs = node_matrix.tocsc().indices idxs = [idxs[s] for s in slices] return (y[idx] for idx in idxs) def _make_node_kde_array(self): #<- since kde esitmation is the best approach, save kde fitted instances for each node #to make use of it during node and node_data wieght inference #maybe its better to get data from multiple nodes before fitting kde raise NotImplementedError def _make_node_cdist_array(self): #<- gaussian likelihood works fine as well, so save cdist matrix for each node raise NotImplementedError def _apply(self, X): ''' A substitute for estimator.apply in case it returns 3d arrays (such as sklearns gradient boosting classifier) instead of 2d. 
In case returned array from estimator.apply returns a 2dim array, the returned value of the function is the same as the returned array of self.estimator.apply ''' applied_arr = self.estimator.apply(X) dim1_shape = applied_arr.shape[0] dim2_shape = np.prod(applied_arr.shape[1:]) return applied_arr.reshape(dim1_shape, dim2_shape) def _fit_leaf_node_matrix(self, X, y, node_rank_func, node_data_rank_func, max_nodes = None, max_data = None, sample_weight = None): nodes_array = self._apply(X) self._leaf_node_transformer = OneHotEncoder(handle_unknown = 'ignore') leaf_node_matrix = self._leaf_node_transformer.fit_transform(nodes_array) if max_nodes is None: self._keep_nodes_in_query = slice(None) else: if 0 < max_nodes <= 1: #case max_nodes is fraction max_nodes = max(1,int(max_nodes*leaf_node_matrix.shape[1])) self._keep_nodes_in_query = np.random.choice(np.arange(leaf_node_matrix.shape[1]), size = max_nodes, replace = False) if max_data is None: self._keep_data_in_query = slice(None) else: if 0 < max_data <= 1: #case max_data is fraction max_data = max(1,int(max_data*leaf_node_matrix.shape[0])) self._keep_data_in_query = np.random.choice(np.arange(leaf_node_matrix.shape[0]), size = max_nodes, replace = False, p = sample_weight) leaf_node_matrix = leaf_node_matrix[self._keep_data_in_query, :] leaf_node_matrix = leaf_node_matrix[:, self._keep_nodes_in_query] self._raw_leaf_node_matrix = leaf_node_matrix #self._node_data_generator = self.#self._make_node_data_generator(y, leaf_node_matrix) self._leaf_node_weights = self._calculate_node_weights(y, leaf_node_matrix, node_rank_func) self._leaf_node_matrix = self._make_weighted_query_space(y, leaf_node_matrix, node_data_rank_func)# <- try making this a property return self def _transform_query_matrix(self, X): node_matrix = self._leaf_node_transformer.transform(self._apply(X)) node_matrix = node_matrix[:, self._keep_nodes_in_query] return self._make_weighted_query_vector( agg_node_weights = self._leaf_node_weights, node_matrix = node_matrix) def _query_idx_and_sim(self, X, n_neighbors, lower_bound, beta, gamma): idx, sim = cos_sim_query( self._transform_query_matrix(X), self._leaf_node_matrix, n_neighbors=n_neighbors, lower_bound=lower_bound, beta = beta, gamma = gamma) return idx, sim + 1e-9 #ensure sim vector is not null def _entropy_estimator_sample(self, X, sample_size, weight_func,alpha, noise_factor): ''' samples from a forest embedding fitted linear entropy estimator. 
Works only for marginal distributions ''' nodes_array = self._apply(X) forest_embeddings = self._leaf_node_transformer.transform(nodes_array) samples = self.entropy_estimator_sampler.sample(forest_embeddings, sample_size, weight_func, alpha, noise_factor) return samples def _fit_entropy_estimator_sampler(self, X, y = None, **fit_kws): ''' fit a linear entropy estimator Works only for marginal distributions ''' nodes_array = self._apply(X) self._leaf_node_transformer = OneHotEncoder() forest_embeddings = self._leaf_node_transformer.fit_transform(nodes_array) self.entropy_estimator_sampler.fit(forest_embeddings, y, **fit_kws) return self def _kde_similarity_sample(self, X, sample_size, weight_func, n_neighbors, lower_bound, alpha, beta, gamma, noise_factor, **rv_kwargs): idx, sim = self._query_idx_and_sim(X ,n_neighbors=n_neighbors, lower_bound=lower_bound,beta = beta, gamma = gamma) idx, sim = np.array(idx), np.array(sim) p = self._handle_sample_weights(weight_func = weight_func, sim = sim, alpha = alpha) samples = [] for i in np.arange(len(idx)): ys = self.y_[sample_multi_dim(idx[i], sample_size = sample_size, weights = p[i], axis = 0)] if len(ys.shape) == 1: ys = ys.reshape(-1,1) noise = agg_smallest_distance(ys.reshape(1,*ys.shape), agg_func = np.std) ys = add_noise(ys, noise_factor*noise) samples.append(RandomVariable(**rv_kwargs).fit(ys, sample_weight = None).sample(sample_size = sample_size)) return np.array(samples) def _similarity_sample(self, X, sample_size, weights, n_neighbors, lower_bound, alpha, beta, gamma, noise_factor): idx, sim = self._query_idx_and_sim( X ,n_neighbors=n_neighbors, lower_bound=lower_bound,beta = beta, gamma = gamma) idx, sim = np.array(idx), np.array(sim) p = self._handle_sample_weights(weight_func = weights, sim = sim, alpha = alpha) samples = [] for i in np.arange(len(idx)): ys = self.y_[sample_multi_dim(idx[i], sample_size = sample_size, weights = p[i], axis = 0)] if len(ys.shape) == 1: ys = ys.reshape(-1,1) noise = agg_smallest_distance(ys.reshape(1,*ys.shape), agg_func = np.std) ys = add_noise(ys, noise_factor*noise) samples.append(ys) return np.array(samples) def _density(self, X, dist, sample_size, weights, n_neighbors, lower_bound, alpha, beta, gamma, noise_factor, **dist_kws): ''' returns a RVArray instance of RandomVariable objects fitted on sampled data based on X and other sample params ''' samples = self._similarity_sample(X, sample_size, weights, n_neighbors, lower_bound, alpha, beta, gamma, noise_factor) rv_objects = [RandomVariable(keep_samples = False).fit(sample, dist, **dist_kws) for sample in tqdm(samples)] return RVArray(rv_objects) def _similarity_sample_idx(self, X, sample_size, weight_func, n_neighbors, lower_bound, alpha, beta, gamma): idxs, sim = self._query_idx_and_sim(X ,n_neighbors=n_neighbors, lower_bound=lower_bound,beta = beta, gamma = gamma) idxs, sim = np.array(idxs), np.array(sim) p = self._handle_sample_weights(weight_func = weight_func, sim = sim, alpha = alpha) samples_idxs = sample_from_dist_array(idxs.reshape(*idxs.shape,1), sample_size, p) samples_idxs = samples_idxs.reshape(samples_idxs.shape[:-1]) return samples_idxs def _similarity_empirical_pdf(self, X, weights, n_neighbors, lower_bound, alpha, beta, gamma): idx, sim = cos_sim_query( self._transform_query_matrix(X), self._leaf_node_matrix, n_neighbors=n_neighbors, lower_bound=lower_bound, beta = beta, gamma = gamma) p = self._handle_sample_weights(weight_func = weights, sim = sim, alpha = alpha) return np.array([self.y_[i] for i in idx]), p def 
_custom_predict(self, X, agg_func, sample_size, weights, n_neighbors, lower_bound, alpha, beta, gamma, noise_factor): ''' performs aggregation in a samples drawn for a specific X and returns the custom predicted value as the result of the aggregation. Could be mean, mode, median, std, entropy, likelihood... note that agg_func recieves an array of shape (n_samples, n_dims). If you want to perform aggregation along dimensions, dont forget to tell agg_func to perform operations along axis = 0 ''' samples = self._similarity_sample(X, sample_size, weights, n_neighbors, lower_bound, alpha, beta, gamma, noise_factor) return np.array([agg_func(sample) for sample in samples]) def _calculate_node_weights(self, y, node_matrix, node_rank_func): ''' calculates node weights that maultiplies the query space matrix, in order to make some nodes more relevant according to some target data node agg metric. input should be a list containing array of node samples as each one of its elements ''' if not node_rank_func is None: # cannot call in a vectorized fashion because data from nodes may have different sizes #node_weights = Parallel(n_jobs=-1, verbose=0, # **_joblib_parallel_args(prefer="threads"))( # delayed(node_rank_func)(X) # for X in self._node_data_generator) node_weights = [node_rank_func(X) for X in self._node_data_generator] else: node_weights = np.ones(node_matrix.shape[1]) return np.array(node_weights) def _calculate_node_datapoint_weights(self, y, node_matrix, node_data_rank_func): ''' Calculates node-datapoint(y values) weights. higher values meansa datapoint "belongs tighter" to that point and is more loleky to be sampled when that node is reached. some cases of node-datapount wieghts could be the likelihood of that point given the node pdf, or some sort of median/mean deviance from point to node samples ''' #datapoint_node_weights = Parallel(n_jobs=1, verbose=0, # **_joblib_parallel_args(prefer="threads"))( # delayed(node_data_rank_func)(X) # for X in node_data_generator) datapoint_node_weights = [node_data_rank_func(node_data) for node_data in self._node_data_generator] return datapoint_node_weights def _handle_sample_weights(self, weight_func, sim, alpha): ''' sampling wights should sum to 1, since its a sampling probability ''' if weight_func is None: return np.array([normalize((i**alpha).reshape(1,-1), norm = 'l1').flatten() for i in sim]) else: return np.array([normalize((weight_func(i)).reshape(1,-1), norm = 'l1').flatten() for i in sim]) def _make_weighted_query_vector(self, agg_node_weights, node_matrix): ''' multiplies elements of query vector by their respective weights the greater the weights, the better the "quality" of the nodes ''' if not isinstance(node_matrix, scipy.sparse.csr_matrix): node_matrix = scipy.sparse.csr_matrix(node_matrix) node_matrix.data = node_matrix.data*np.take(agg_node_weights, node_matrix.indices) return node_matrix def _make_weighted_query_space(self, y, node_matrix, node_data_rank_func = None): ''' query space is the leaf_node_matrix multiplied by node_data_weights the greater the value in the matrix, the better the "quality" of that data point ''' if not isinstance(node_matrix, scipy.sparse.csr_matrix): node_matrix = scipy.sparse.csr_matrix(node_matrix) if not node_data_rank_func is None: # datapoint_node_weights multiplication (columns) #make copy node_matrix = copy.deepcopy(node_matrix) #cast to csc to make .data order columnwise node_matrix = node_matrix.tocsc() datapoint_node_weights = self._calculate_node_datapoint_weights(y, node_matrix, 
node_data_rank_func) node_matrix.data = node_matrix.data*np.concatenate(datapoint_node_weights) #convert back to csr node_matrix = node_matrix.tocsr() else: pass return node_matrix ###Output _____no_output_____ ###Markdown KernelTreeEstimator - Estimates the conditional distribution based on samples from dataset taking into account the `leaf_node_matrix` ###Code #export #MAKE WARNING REGARDING NUMBER OF NODES IN TREE TAKING KNEIGHBORS QUERY INTO ACCOUNT, mayvbe set max_leaf_nodes automatically class KernelTreeEstimator(BaseEstimator, ClassifierMixin, DelegateEstimatorMixIn ,TreeEstimatorMixin): def __init__(self, estimator, entropy_estimator_sampler = None, alpha = 1, beta = 1, gamma = 1, node_rank_func = None, node_data_rank_func = None,n_neighbors = 30, lower_bound = 0.0): #assert estimator.min_samples_leaf >= 3, 'min_samples_leaf should be greater than 2' assert hasattr(estimator, 'apply'), 'estimator should have `apply` method' self.estimator = estimator self.n_neighbors = n_neighbors self.lower_bound = lower_bound self.alpha = alpha self.beta = beta self.gamma = gamma if node_rank_func is None: self.node_rank_func = node_rank_func else: try: self.node_rank_func = node_rank_func if callable(node_rank_func) else AVALIBLE_NODE_AGG_FUNC[node_rank_func] except KeyError: raise KeyError(f'if not callable, node_rank_func should be one of {list(AVALIBLE_NODE_AGG_FUNC)}, not {node_rank_func}') if node_data_rank_func is None: self.node_data_rank_func = node_data_rank_func else: try: self.node_data_rank_func = node_data_rank_func if callable(node_data_rank_func) else AVALIBLE_DATAPOINT_WEIGHT_FUNC[node_data_rank_func] except KeyError: raise KeyError(f'if not callable, node_rank_func should be one of {list(AVALIBLE_DATAPOINT_WEIGHT_FUNC)}, not {node_data_rank_func}') if not entropy_estimator_sampler is None: assert hasattr(entropy_estimator_sampler, 'sample'), f'entropy_estimator_sampler should implement `sample` method' self.entropy_estimator_sampler = entropy_estimator_sampler else: self.entropy_estimator_sampler = entropy_estimator_sampler return def __repr__(self): return self.__class__.__name__ def fit(self, X, y = None, sample_weight = None, **fit_kws): #fix y shape if len(y.shape) == 1: y = y.reshape(-1,1) try: self.estimator.fit(X, y, sample_weight = sample_weight, **fit_kws) except TypeError: self.estimator.fit(X, y, **fit_kws) if self.entropy_estimator_sampler is None: self._fit_leaf_node_matrix( X, y, node_rank_func = self.node_rank_func, node_data_rank_func = self.node_data_rank_func)# <- MAKE NODE WIEGHTED VERSION else: self._fit_entropy_estimator_sampler(X, y) self.y_ = y return self def density(self, X, dist = 'kde', sample_size = 1000, weight_func = None, n_neighbors = None, lower_bound = None, alpha = None, beta = None, gamma = None, noise_factor = 1e-7, **dist_kwargs): n_neighbors, lower_bound, alpha, beta, gamma = self._handle_similarity_sample_parameters( n_neighbors, lower_bound, alpha, beta, gamma) return super()._density(X, dist, sample_size, weight_func, n_neighbors, lower_bound, alpha, beta, gamma, noise_factor, **dist_kwargs) def sample(self, X, sample_size = 1000, weight_func = None, n_neighbors = None, lower_bound = None, alpha = None, beta = None, gamma = None, noise_factor = 0): '''wieghts should be callable (recieves array returns array of same shape) or None''' n_neighbors, lower_bound, alpha, beta, gamma = self._handle_similarity_sample_parameters( n_neighbors, lower_bound, alpha, beta, gamma) if self.entropy_estimator_sampler is None: samples = 
super()._similarity_sample( X = X, sample_size = sample_size, weights = weight_func, n_neighbors = n_neighbors, lower_bound = lower_bound, alpha = alpha, beta = beta, gamma = gamma, noise_factor = noise_factor ) else: samples = super()._entropy_estimator_sample(X, sample_size, weight_func,alpha, noise_factor) return samples def custom_predict( self, X, agg_func, sample_size = 1000, weights = None, n_neighbors = None, lower_bound = None, alpha = None, beta = None, gamma = None, noise_factor = 0 ): n_neighbors, lower_bound, alpha, beta, gamma = self._handle_similarity_sample_parameters(n_neighbors, lower_bound, alpha, beta, gamma) return self._custom_predict(X, agg_func, sample_size, weights, n_neighbors, lower_bound, alpha, beta, gamma, noise_factor) def sample_histogram(self, X, weights, n_neighbors, lower_bound, alpha, beta, gamma): n_neighbors, lower_bound, alpha, beta, gamma = self._handle_similarity_sample_parameters(n_neighbors, lower_bound, alpha, beta, gamma) return self._similarity_empirical_pdf(X, weights, n_neighbors, lower_bound, alpha, beta, gamma) def _handle_similarity_sample_parameters(self, n_neighbors, lower_bound, alpha, beta, gamma): if n_neighbors is None: n_neighbors = self.n_neighbors if lower_bound is None: lower_bound = self.lower_bound if alpha is None: alpha = self.alpha if beta is None: beta = self.beta if gamma is None: gamma = self.gamma return n_neighbors, lower_bound, alpha, beta, gamma ###Output _____no_output_____ ###Markdown Usage ExampleWe can see that forest estimators are better in dealing with bimodal data ###Code estimator1 = ensemble.RandomForestRegressor(n_estimators = 10,min_samples_leaf = 5, warm_start = False) #estimator = ensemble.ExtraTreesRegressor(n_estimators = 10,min_samples_leaf = 10, warm_start = False) density_estimator1 = KernelTreeEstimator( estimator1,node_rank_func = None, node_data_rank_func = None).fit(X_train,y_train[:,:]) i = np.random.choice(np.arange(y_test.shape[0])) alpha, beta, gamma = 1,1,0 noise_factor = 1 samples = density_estimator1.sample(X_test[i:i+1], sample_size = 700, alpha = alpha, beta = beta,gamma = gamma, noise_factor = noise_factor) prediction = density_estimator1.custom_predict( X_test[i:i+1], agg_func = lambda x: np.mean(x,axis = 0), alpha = alpha, beta = beta, gamma = gamma, noise_factor = 1) naive_prediction = density_estimator1.predict(X_test[i:i+1]) if (len(samples.shape) > 1) and (samples.shape[-1] == 2): jntplot = sns.jointplot(samples[0,:,0], samples[0,:,1], joint_kws = {'label':'Model Samples', 'alpha':0.1}) jntplot.ax_joint.scatter(y[:,0], y[:,1], color = 'orange', alpha = 0.01, label = 'Target Distribution') jntplot.ax_joint.scatter(y_test[i,0], y_test[i,1], color = 'red', label = 'Target Value') jntplot.ax_joint.scatter(prediction[0,0], prediction[0,1], color = 'yellow', label = 'Predicted Value') jntplot.ax_joint.scatter(naive_prediction[0,0], naive_prediction[0,1], color = 'cyan', label = 'Naive Predicted Value') jntplot.ax_joint.legend() else: sns.distplot(samples, kde = True, bins = 20, hist_kws = {'label':'Model Samples'}) dst = sns.distplot(y_test, kde = True, bins = 20, hist_kws = {'label':'Target Distribution'}) dst._axes.axvline(y_test[i,1], color = 'r') dst._axes.legend() alpha, beta, gamma = 1,1,1 samples1 = density_estimator1.sample(X_test, sample_size = 70, alpha = alpha, beta = beta, gamma = gamma) kde_entropy(quantile(y_test,samples1)[:,0,:]), bimodal_variance(samples1).mean() alpha, beta, gamma = 1,1,1 samples1 = density_estimator1.sample(X_test, sample_size = 70, alpha = alpha, 
beta = beta, gamma = gamma) sns.jointplot(*quantile(y_test, samples1)[:,0,:].T) ###Output Querying 30 nearest neighbors, this can take a while... ###Markdown KernelTreeHistogramEstimator- appart from similarity factor $\alpha$, include node relevance factor $\beta$ in order to have $(\frac{NodeAggMetric}{NodeAggMetric_{max}})^\beta$ as node multipliers in the node - data adjacency matrix. NodeAggMetric could be variance, entropy, or user dfined metric, such as likelihood to some distribution.then sample according to $CosSim(A,B)^\alpha$May include KDE sampling for nodes, depending on ammount of nodesmake this framework default for every DensityTree (NaiveTree is a special case for alpha = 0 and beta = 0) ###Code # export class KernelTreeHistogramEstimator(KernelTreeEstimator): ''' An ensemble that learn representitons of data turning target into bins ''' def __init__(self, estimator, entropy_estimator_sampler=None, resolution='auto', cumulative_target = False, class_weight=None, alpha=1, beta=1, gamma=1, node_rank_func=None, node_data_rank_func=None, n_neighbors=30, lower_bound=0.0): assert hasattr(estimator, 'predict_proba') or 'predict_proba' in dir( estimator), 'estimator should implement `predict_proba` method' super().__init__(estimator, entropy_estimator_sampler, alpha, beta, gamma, node_rank_func, node_data_rank_func, n_neighbors, lower_bound) self.cumulative_target = cumulative_target self.class_weight = class_weight self.resolution = resolution def _q_transformer_fit(self, y): ''' fits self.q_transformer ''' if len(y.shape) == 1: y = _fix_X_1d(y) if type(self.resolution) == str: self.bin_edges = [np.histogram_bin_edges( col, bins=self.resolution) for col in y.T] print( f'base classifier will be trained with {[len(i) for i in self.bin_edges]} classes') return self.bin_edges elif type(self.resolution) == np.ndarray: self.bin_edges = [self.resolution for col in y.T] elif type(self.resolution) == int: self.q_transformer = QuantileTransformer( n_quantiles=self.resolution) self._q_minmax_scaler = MinMaxScaler() y = self.q_transformer.fit_transform(y) # for case when output_distribution != uniform self._q_minmax_scaler.fit(y) return self.q_transformer elif isinstance(self.resolution, list): assert len(self.resolution) == y.shape[-1], f'len of resolution list should be equal n_dims of y. 
got {len(self.resolution)} and {y.shape[-1]}' return self.resolution else: raise TypeError( f'self.resolution should be np.array of bin edges, str or int, got {self.resolution.__class__}') def _q_transformer_transform(self, y): ''' maps floats to int (bin_id in histogram) ''' if len(y.shape) == 1: y = _fix_X_1d(y) if type(self.resolution) in (str, np.ndarray): y_transformed = [np.digitize( y[:, i:i+1], self.bin_edges[i]) for i in range(y.shape[-1])] y_transformed = np.hstack(y_transformed) max_bin = [len(edges) for edges in self.bin_edges] elif type(self.resolution) == int: y_transformed = self.q_transformer.transform(y) # scale between 0 and 1 y_transformed = self._q_minmax_scaler.transform(y_transformed) y_transformed = np.around( y_transformed*(self.resolution - 1), decimals=0).astype(int) max_bin = [self.resolution for _ in range(y.shape[-1])] elif isinstance(self.resolution, list): y_transformed = [np.digitize( y[:, i:i+1], self.resolution[i]) for i in range(y.shape[-1])] y_transformed = np.hstack(y_transformed) max_bin = [len(resolution) if isinstance(resolution, (list, np.ndarray)) else resolution for resolution in self.resolution] else: raise TypeError( f'self.resolution should be np.array of bin edges, str or int, got {self.resolution.__class__}') if self.cumulative_target: #make cumulative vector y_transformed_list = [] for i in range(y_transformed.shape[-1]): y_transformed_i = np.zeros((y_transformed.shape[0],max_bin[i]), dtype = 'int8') for idx in range(len(y_transformed_i)): bin_idx = int(y_transformed[idx, i]) y_transformed_i[i, :bin_idx] = 1 y_transformed_list.append(y_transformed[:,:-1]) #dropa last percentile to avoid all zeros y_transformed = np.hstack(y_transformed_list) return y_transformed def _q_transformer_inverse_transform(self, y): ''' maps from bin_id in histogram (int) to float. beware that during transform, information is lost due to downsampling, so inverse_transform will not be an exact inverse_transform. ''' if len(y.shape) == 1: y = _fix_X_1d(y) if type(self.resolution) == int: y_transformed = (y/(self.resolution - 1)).astype(float) y_transformed = self._q_minmax_scaler.inverse_transform( y_transformed) # 1d asserted already return self.q_transformer.inverse_transform(y_transformed).flatten() else: raise NotImplementedError( 'inverse transform only implemented for case when self.resolution == int') def _preprocess_y(self, y): if len(y.shape) == 1: y = y.reshape(-1, 1) # make uniform quantile bins self._q_transformer_fit(y) y = self._q_transformer_transform(y) return y def _handle_sample_weight(self, sample_weight, y, sample_alpha): if self.class_weight == 'balanced': class_weight = compute_sample_weight(class_weight='balanced', y=y) if not sample_weight is None: sample_weight = sample_weight*class_weight**sample_alpha else: sample_weight = class_weight**sample_alpha return sample_weight def fit(self, X, y=None, y_prep=None, sample_weight=None, sample_alpha=1, **fit_kws): # digitize y if y_prep is None: y_prep = self._preprocess_y(y) else: assert y_prep.shape[0] == y.shape[0], f'y_prep and y should have same shape. 
got {y_prep.shape[0]} and {y.shape[0]}' sample_weight = self._handle_sample_weight( sample_weight, y_prep, sample_alpha) # fit base estimator try: self.estimator.fit( X, y_prep, sample_weight=sample_weight, **fit_kws) except TypeError: self.estimator.fit(X, y_prep, **fit_kws) # save y continuous values self.y_ = y # fit leaf node matrix with tree nodes and its respective continuous values (y) if self.entropy_estimator_sampler is None: self._fit_leaf_node_matrix( X, y, node_rank_func=self.node_rank_func, node_data_rank_func=self.node_data_rank_func) # <- MAKE NODE WIEGHTED VERSION else: self._fit_entropy_estimator_sampler(X, y) return self def predict_proba(self, X): ''' handling multilabel output ''' probas = self.estimator.predict_proba(X) if self.cumulative_target: probas = np.hstack([i for i in probas]) return probas ###Output _____no_output_____ ###Markdown Usage Example ###Code #estimator2 = ensemble.GradientBoostingClassifier(n_estimators = 4,learning_rate = 0.05,min_samples_leaf = 5, subsample = 0.4, max_features = 0.4, verbose = 2,) estimator2 = ensemble.RandomForestClassifier(n_jobs = -1,n_estimators = 10,min_samples_leaf = 20, warm_start = True, criterion = 'entropy') density_estimator2 = KernelTreeHistogramEstimator( estimator2,resolution = 'auto', cumulative_target = True, class_weight = 'balanced') density_estimator2.fit(X_train,y_train[:,:]) i = np.random.choice(np.arange(y_test.shape[0])) alpha, beta, gamma = 1,0,0 noise_factor = 1 sample_size = 700 n_neighbors = 100 samples = density_estimator2.sample(X_test[i:i+1], sample_size = 700, alpha = alpha, beta = beta, gamma = gamma,n_neighbors = n_neighbors, noise_factor = noise_factor) #samples = density_estimator2.sample(X_test[i:i+1], sample_size = 700, alpha = alpha, beta = beta, gamma = gamma) prediction = density_estimator2.custom_predict(X_test[i:i+1],agg_func = lambda x: np.mean(x, axis = 0) ,alpha = alpha, beta = beta, gamma = gamma) #naive_prediction = density_estimator.estimator.predict(X_test[i:i+1]) if (len(samples.shape) > 1) and (samples.shape[-1] == 2): jntplot = sns.jointplot(samples[0,:,0], samples[0,:,1], joint_kws = {'label':'Model Samples', 'alpha':0.01}) jntplot.ax_joint.scatter(y[:,0], y[:,1], color = 'orange', alpha = 0.01, label = 'Target Distribution') jntplot.ax_joint.scatter(y_test[i,0], y_test[i,1], color = 'red', label = 'Target Value') jntplot.ax_joint.scatter(prediction[0,0], prediction[0,1], color = 'yellow', label = 'Predicted Value') #jntplot.ax_joint.scatter(naive_prediction[0,0], naive_prediction[0,1], color = 'cyan', label = 'Naive Predicted Value') jntplot.ax_joint.legend() else: sns.distplot(samples, kde = True, bins = 20, hist_kws = {'label':'Model Samples'}) dst = sns.distplot(y_test, kde = True, bins = 20, hist_kws = {'label':'Target Distribution'}) dst._axes.axvline(y_test[i,1], color = 'r') dst._axes.legend() alpha, beta, gamma = 1,2,0 noise_factor = 0.2 #KernelTreeEstimator samples1 = density_estimator1.sample(X_test, sample_size = 700, alpha = alpha, beta = beta, gamma = gamma, noise_factor = noise_factor) #KernelTreeHistogramEstimator samples2 = density_estimator2.sample(X_test, sample_size = 700, alpha = alpha, beta = beta, gamma = gamma, noise_factor = noise_factor) ###Output Querying 30 nearest neighbors, this can take a while... 
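###Markdown Both samplers return an array of shape `(n_rows, sample_size, n_dims)`, i.e. one empirical conditional distribution per test row. The same machinery can also be reduced to row-wise summaries through `custom_predict`; a short sketch of 10%/90% predictive bounds for a few rows (the parameter values here are only illustrative): ###Code
# illustrative only: per-row 10%/90% bounds from the fitted estimator;
# agg_func receives one array of shape (sample_size, n_dims) per test row
lower = density_estimator2.custom_predict(
    X_test[:5], agg_func=lambda s: np.percentile(s, 10, axis=0), sample_size=300)
upper = density_estimator2.custom_predict(
    X_test[:5], agg_func=lambda s: np.percentile(s, 90, axis=0), sample_size=300)
print(lower.shape, upper.shape)  # expected: (5, 2) and (5, 2)
###Output _____no_output_____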
###Markdown We can plot the entropies of the distributions and the negative log likelihood of the generated KDE against the actual y_test value ###Code ll1 = np.log2(kde_likelihood(y_test,samples1, frac = 0.2)) ll2 = np.log2(kde_likelihood(y_test,samples2, frac = 0.2)) print(np.median(ll1[ll1 > -10]), np.median(ll2[ll2 > -10])) sns.distplot(ll1[ll1 > -10]) sns.distplot(ll2[ll2 > -10]) entr1 = kde_entropy(samples1, sample_size = 200 , frac = 0.2) entr2 = kde_entropy(samples2, sample_size = 200 , frac = 0.2) print(entr1.mean(),entr2.mean()) sns.distplot(entr1) sns.distplot(entr2) # we want the likelihood of our datapoints to be higher than the average likelihood of the distribution # so we convert entropy and negative log likelihood to likelihoods and divide the point likelihood by the average dist likelihood t1 = 2**ll1/2**-entr1 t2 = 2**ll2/2**-entr2 sns.distplot(t1[t1 < 10], hist_kws = dict(cumulative = True)) sns.distplot(t2[t2 < 10], hist_kws = dict(cumulative = True)) np.mean(t1[t1 < 1000]),np.mean(t2[t2 < 1000]) q2 = quantile(y_test,samples2) q1 = quantile(y_test,samples1) sns.jointplot(q1[:,0,0],q1[:,0,1]) f'{round(((q1 == 0).mean()+(q1 == 1).mean())*100,2)}% of data points out of suggested boundaries' sns.jointplot(q2[:,0,0],q2[:,0,1]) f'{round(((q2 == 0).mean()+(q2 == 1).mean())*100,2)}% of data points out of suggested boundaries' ###Output _____no_output_____ ###Markdown We can check the estimated KDE of the samples of each model against the y_test (target) distribution and the actual y_test value ###Code i+=1 kde = KDE().fit(samples1[i]) kde_cloud = kde.sample(100) jnt = sns.jointplot(kde_cloud[:,0],kde_cloud[:,1], kind = 'kde') jnt.ax_joint.scatter(density_estimator2.y_[:,0], density_estimator2.y_[:,1], color = 'r', alpha = 0.1) jnt.ax_joint.scatter(samples1[i,:,0], samples1[i,:,1], alpha = 0.05) jnt.ax_joint.scatter(y_test[i,0], y_test[i,1], color = 'yellow') kde = KDE().fit(samples2[i]) kde_cloud = kde.sample(100) jnt = sns.jointplot(kde_cloud[:,0],kde_cloud[:,1], kind = 'kde') jnt.ax_joint.scatter(density_estimator2.y_[:,0], density_estimator2.y_[:,1], color = 'r', alpha = 0.1) jnt.ax_joint.scatter(samples2[i,:,0], samples2[i,:,1], alpha = 0.05) jnt.ax_joint.scatter(y_test[i,0], y_test[i,1], color = 'yellow') ###Output _____no_output_____ ###Markdown CDFEstimator> tries to classify values as lower than some bin (maybe percentile bins?) and higher than some bin as a multilabel setting. 
The distribution is estimated by querying the probability vectors. This might address the problem of disjoint bins in a naive histogram setting ###Code class CDFEstimator(HistogramEstimator): def _q_transformer_transform(self, y): ''' maps floats to vector of binary variables (bin in cdf) ''' y = _fix_X_1d(y) if type(self.resolution) in (str, np.ndarray): hist_bins = np.digitize(y, self.bin_edges) max_bin = len(self.bin_edges) elif type(self.resolution) == int: hist_bins = self.q_transformer.transform(y) # scale between 0 and 1 hist_bins = self._q_minmax_scaler.transform(hist_bins) hist_bins = np.around(hist_bins*(self.resolution - 1), decimals = 0).astype(int) max_bin = self.resolution elif isinstance(self.resolution,np.ndarray): hist_bins = np.digitize(y, self.resolution) max_bin = len(self.resolution) y_transformed = np.zeros((y.shape[0],max_bin), dtype = 'int8') for i in range(len(y_transformed)): bin_idx = int(hist_bins[i] + 1) y_transformed[i, :bin_idx] = 1 print(y_transformed.mean(axis = 0)) return y_transformed def fit(self, X, y = None, **estimator_fit_kws): # fit y transformer self._preprocess_y_fit(y) # transform y y_transformed = self._preprocess_y_transform(y) # fit calibrated classifier if self.calibrated_classifier is not None: self.calibrated_classifier.fit(X = X, y = y_transformed, **estimator_fit_kws) self.estimator = self.calibrated_classifier.calibrated_classifiers_[0].base_estimator else: # fit classifier print('fitting estimator') self.estimator.fit(X = X, y = y_transformed, **estimator_fit_kws) return self ###Output _____no_output_____ ###Markdown Joint and Chained Estimators ###Code # export class JointHistogramEstimator(MultiOutputClassifier): ''' Performs a joint entropy estimation based on a stacked model of marginal distribution estimators. 
All the marginal distributions are merged using a KernelTreeEntropyEstimator, that nativelly supports joint estimation ''' def __init__(self, estimator, resolution = 'auto', joint_tree_estimator=None, stacking_method='auto', prefit = False, n_jobs=None, **joint_tree_kwargs): # make estimator iterable if not estimator.__class__ in (list,tuple,set): estimator = [estimator] else: estimator = list(estimator) #check if estimator is valid for estim in estimator: assert hasattr( estim, 'predict_proba') or ('predict_proba' in dir(estim)), f'Estimator {estim} should have `predict_proba` method' #instantiate MultiOutputClassifier super().__init__(estimator[0], n_jobs) #save fitted estimators if prefit if prefit: self.estimators_ = estimator #set joint_tree_estimator as default if joint_tree_estimator is None: rf = ensemble.RandomForestClassifier( n_estimators=100, max_leaf_nodes = 10000, n_jobs = -1) self.joint_tree_estimator = KernelTreeHistogramEstimator( rf, resolution=resolution, **joint_tree_kwargs) else: self.joint_tree_estimator = KernelTreeHistogramEstimator( joint_tree_estimator, resolution=resolution, **joint_tree_kwargs) self.prefit = prefit self.stacking_method = stacking_method self.resolution = resolution return def _make_stacked_predictors(self, X, stacking_method): if stacking_method == 'auto': attr_hierarchy = ( 'predict_proba', 'decision_function', 'predict', 'transform') predictors = [] for estim in self.estimators_: passed = False for attr in attr_hierarchy: if hasattr(estim, attr): predictors.append(getattr(estim, attr)(X)) passed = True break if passed == False: raise AttributeError( f'{estim} does not have any of these methods: {attr_hierarchy}') else: predictors = [getattr(estim, stacking_method)(X) for estim in self.estimators_] return np.hstack(predictors) def fit(self, X, y=None, sample_weight=None): y_prep = self.joint_tree_estimator._preprocess_y(y) if not self.prefit: super().fit(X, y_prep, sample_weight) marginal_results = self._make_stacked_predictors( X, self.stacking_method) self.joint_tree_estimator.fit(marginal_results, y, y_prep = y_prep) return self def sample(self, X, sample_size=10, weight_func=None, n_neighbors=None, lower_bound=None, alpha=None, beta=None, gamma=None, noise_factor=0,): marginal_results = self._make_stacked_predictors( X, self.stacking_method) return self.joint_tree_estimator.sample(marginal_results, sample_size, weight_func, n_neighbors, lower_bound, alpha, beta, gamma, noise_factor,) def density(self, X, dist='kde', sample_size=10, weight_func=None, n_neighbors=None, lower_bound=None, alpha=None, beta=None, gamma=None, noise_factor=1e-07, **dist_kwargs,): marginal_results = self._make_stacked_predictors( X, self.stacking_method) return self.joint_tree_estimator.density(marginal_results, dist, sample_size, weight_func, n_neighbors, lower_bound, alpha, beta, gamma, noise_factor, **dist_kwargs,) def custom_predict(self, X, agg_func, sample_size=100, weights=None, n_neighbors=None, lower_bound=None, alpha=None, beta=None, gamma=None, noise_factor=0,): marginal_results = self._make_stacked_predictors( X, self.stacking_method) return self.joint_tree_estimator.custom_predict(marginal_results, agg_func, sample_size, weights, n_neighbors, lower_bound, alpha, beta, gamma, noise_factor,) class ChainedHistogramEstimator(MultiOutputClassifier): ''' make chained joint estimator based on previous estimations there are three ways to pass predictors to next estimator in the chain: pass only predictions from previous estimator based on stacking_method pass 
predictions from all previous estimators based on stacking_method pass predictions from all previous estimators based on stacking_method and also the features used in all estimators ''' def __init__(self): raise NotImplementedError('ChainedHistogramEstimator is not implemented yet') class JointKernelTreeEstimator(MultiOutputClassifier): '''Custom multioutput for multioutput estimator for `KernelTreeEstimator`s''' @property def y_(self,): '''stacked y_ attributes of each estimator (one for each dim)''' return np.hstack([_fix_X_1d(estim.y_) for estim in self.estimators_]) def _similarity_sample_idx(self, X, sample_size=100, weights=None, n_neighbors=10, lower_bound=0.0, alpha=1, beta=0, gamma=0): sampled_idxs = np.hstack([ _fix_X_1d(estim._similarity_sample_idx(X, sample_size, weights, n_neighbors, lower_bound, alpha, beta, gamma) ) for estim in self.estimators_ ]) return sampled_idxs def sample(self, X, sample_size=100, weights=None, n_neighbors=10, lower_bound=0.0, alpha=1, beta=0, gamma=0, noise_factor=0): idxs = self._similarity_sample_idx( X, sample_size, weights, n_neighbors, lower_bound, alpha, beta, gamma) samples = self.y_[[idx for idx in idxs]] # fix ndim if sampling for a single value (1, n_samples, n_dims) instead of (n_samples, n_dims) samples = samples if len( samples.shape) != 2 else _add_n_dists_axis(samples) # samples will have n_dims*sample_size samples, resample with no replacement to match sample_size samples = sample_from_dist_array( samples, sample_size=sample_size, weights=None, replace=False) # define noise to be added noise = agg_smallest_distance(samples, agg_func=np.std) noise = _add_n_samples_axis(noise) print(noise.shape, samples.shape) return add_noise(samples, noise_factor*noise) def custom_predict(self, X, agg_func, sample_size=100, weights=None, n_neighbors=10, lower_bound=0.0, alpha=1, beta=0, gamma=0, noise_factor=0): samples = self.sample(X, sample_size, weights, n_neighbors, lower_bound, alpha, beta, gamma, noise_factor) return np.array([agg_func(sample) for sample in samples]) ###Output _____no_output_____ ###Markdown Usage Example ###Code #base_estim = LogisticRegression(tol = 0.001, solver = 'sag') joint_estim = JointHistogramEstimator(estim1, resolution = 20, class_weight = 'balanced', prefit = True, cumulative_target = True, n_jobs = -1) joint_estim.fit(X_train, y_train[:,:]) i = np.random.choice(np.arange(y_test.shape[0])) alpha, beta, gamma = 1,0,0 noise_factor = 0 sample_size = 300 n_neighbors = 100 density = joint_estim.density(X_test[i:i+1], dist = 'empirical',sample_size = sample_size, n_neighbors= n_neighbors, alpha = alpha, beta = beta, gamma = gamma, noise_factor = noise_factor) samples = density.sample(sample_size) #samples = density_estimator2.sample(X_test[i:i+1], sample_size = 700, alpha = alpha, beta = beta, gamma = gamma) prediction = np.median(samples, axis = 1) #naive_prediction = density_estimator.estimator.predict(X_test[i:i+1]) if (len(samples.shape) > 1) and (samples.shape[-1] == 2): jntplot = sns.jointplot(samples[0,:,0], samples[0,:,1], joint_kws = {'label':'Model Samples', 'alpha':0.05}) jntplot.ax_joint.scatter(y[:,0], y[:,1], color = 'orange', alpha = 0.01, label = 'Target Distribution') jntplot.ax_joint.scatter(y_test[i,0], y_test[i,1], color = 'red', label = 'Target Value') jntplot.ax_joint.scatter(prediction[0,0], prediction[0,1], color = 'yellow', label = 'Dist Median') #jntplot.ax_joint.scatter(naive_prediction[0,0], naive_prediction[0,1], color = 'cyan', label = 'Naive Predicted Value') jntplot.ax_joint.legend() else: 
sns.distplot(samples, kde = True, bins = 20, hist_kws = {'label':'Model Samples'}) dst = sns.distplot(y_test, kde = True, bins = 20, hist_kws = {'label':'Target Distribution'}) dst._axes.axvline(y_test[i,1], color = 'r') dst._axes.legend() ###Output Querying 100 nearest neighbors, this can take a while... ###Markdown Export - ###Code #hide from nbdev.export import notebook2script notebook2script() ###Output Converted 01_ensemble.ipynb. Converted 02_core.random_variable.ipynb. Converted 03_utils.ipynb. Converted 04_metrics.ipynb. Converted 05_neighbors.ipynb. Converted 06_kde_baesyan_nets.ipynb. Converted index.ipynb.
docs/user_guide/extending/extending_elementwise_expr.ipynb
###Markdown Adding an Elementwise Operation This notebook will show you how to add a new elementwise operation to an existing backend.We are going to add `julianday`, a function supported by the SQLite database, to the SQLite Ibis backend.The Julian day of a date, is the number of days since January 1st, 4713 BC. For more information check the [Julian day](https://en.wikipedia.org/wiki/Julian_day) wikipedia page. Step 1: Define the Operation Let's define the `julianday` operation as a function that takes one string input argument and returns a float.```pythondef julianday(date: str) -> float: """Julian date"""``` ###Code import ibis.expr.datatypes as dt import ibis.expr.rules as rlz from ibis.expr.operations import ValueOp class JulianDay(ValueOp): arg = rlz.string output_type = rlz.shape_like('arg', 'float') ###Output _____no_output_____ ###Markdown We just defined a `JulianDay` class that takes one argument of type string or binary, and returns a float. Step 2: Define the API Because we know the output type of the operation, to make an expression out of ``JulianDay`` we simply need to construct it and call its `ibis.expr.types.Node.to_expr` method.We still need to add a method to `StringValue` and `BinaryValue` (this needs to work on both scalars and columns).When you add a method to any of the expression classes whose name matches `*Value` both the scalar and column child classes will pick it up, making it easy to define operations for both scalars and columns in one place.We can do this by defining a function and assigning it to the appropriate classof expressions. ###Code from ibis.expr.types import StringValue, BinaryValue def julianday(string_value): return JulianDay(string_value).to_expr() StringValue.julianday = julianday ###Output _____no_output_____ ###Markdown Interlude: Create some expressions with `sha1` ###Code import ibis t = ibis.table([('string_col', 'string')], name='t') t.string_col.julianday() ###Output _____no_output_____ ###Markdown Step 3: Turn the Expression into SQL ###Code import sqlalchemy as sa @ibis.sqlite.add_operation(JulianDay) def _julianday(translator, expr): # pull out the arguments to the expression arg, = expr.op().args # compile the argument compiled_arg = translator.translate(arg) # return a SQLAlchemy expression that calls into the SQLite julianday function return sa.func.julianday(compiled_arg) ###Output _____no_output_____ ###Markdown Step 4: Putting it all Together ###Code !curl -LsS -o $TEMPDIR/geography.db 'https://storage.googleapis.com/ibis-tutorial-data/geography.db' import os import tempfile import ibis db_fname = os.path.join(tempfile.gettempdir(), 'geography.db') con = ibis.sqlite.connect(db_fname) ###Output _____no_output_____ ###Markdown Create and execute a `julianday` expression ###Code independence = con.table('independence') independence day = independence.independence_date.cast('string') day julianday_expr = day.julianday() julianday_expr sql_expr = julianday_expr.compile() print(sql_expr) result = julianday_expr.execute() result.head() ###Output _____no_output_____ ###Markdown Because we've defined our operation on `StringValue`, and not just on `StringColumn` we get operations on both string scalars *and* string columns for free ###Code scalar = ibis.literal('2010-03-14') scalar julianday_scalar = scalar.julianday() con.execute(julianday_scalar) ###Output _____no_output_____
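###Markdown Since `julianday` returns a float expression, it composes with ordinary arithmetic like any other Ibis expression. As a small illustration (reusing only the pieces defined above), the difference of two Julian days gives the number of days between two dates: ###Code
# Days elapsed between two literal dates, built from the julianday expression defined above
date_a = ibis.literal('2010-03-14')
date_b = ibis.literal('2010-01-01')
days_between = date_a.julianday() - date_b.julianday()
con.execute(days_between)
###Output _____no_output_____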
_notebooks/2020-04-07-Tutorial_MNIST_Data_Aug.ipynb
###Markdown "MNIST - Data Augmentation Gone Wrong"> What happens to MNIST accuracy when input data is horizontally flipped.- toc: false- branch: master- badges: true- comments: true- categories: [fastpages, jupyter]- image: images/some_folder/your_image.png 1) Import libraries, and setup file paths ###Code #collapse-hide from fastai2.vision.all import * from utils import * path = untar_data(URLs.MNIST) train_dir = path/'training' #val_dir = path/'testing' fns_train = get_image_files(train_dir) #fns_val = get_image_files(val_dir) print('train files: ', len(fns_train)) #print('val files: ', len(fns_val)) ###Output train files: 60000 ###Markdown 2) Setup two dataloaders: baseline, horizontal flip ###Code batch_tfms = [Flip(p=1)] # horizontal flip db = DataBlock( blocks = (ImageBlock, CategoryBlock), get_items = get_image_files, splitter = RandomSplitter(valid_pct=0.2, seed=42), get_y = parent_label, batch_tfms = None ) db_flip = DataBlock( blocks = (ImageBlock, CategoryBlock), get_items = get_image_files, splitter = RandomSplitter(valid_pct=0.2, seed=42), get_y = parent_label, batch_tfms = batch_tfms ) dls = db.dataloaders(train_dir, bs=256) dls_flip = db_flip.dataloaders(train_dir, bs=256) ###Output _____no_output_____ ###Markdown 3) Check each dataloader is working ###Code dls.show_batch(ncols=5,nrows=1) dls_flip.show_batch(ncols=5,nrows=1) ###Output _____no_output_____ ###Markdown 4) Train resnet18 on baseline, and check accuracy ###Code learn = cnn_learner(dls, resnet18, pretrained=False, metrics=accuracy) lr_min = learn.lr_find()[0] f'lr_min: {lr_min:0.05f}' # no horizontal flip learn.fit_one_cycle(5, lr_min) ###Output _____no_output_____ ###Markdown - With baseline MNIST, resnet18 is getting 99% accuracy- Note: train_loss and valid_loss are both low 5) Train new resnet18 on horizontally flipped dataset ###Code learn = cnn_learner(dls_flip, resnet18, pretrained=False, metrics=accuracy) lr_min = learn.lr_find()[0] f'lr_min: {lr_min:0.05f}' # yes horizontal flip learn.fit_one_cycle(5, lr_min) ###Output _____no_output_____ ###Markdown - With horizontally flipped numbers, accuracy dropped to ~41%- Note, train_loss is a lot lower than valid_loss -> overfitting 6) What happened? ###Code interp = ClassificationInterpretation.from_learner(learn) interp.plot_top_losses(4, nrows=1) ###Output _____no_output_____ ###Markdown - Model is predicting a 5, when seeing a 2 ###Code interp.plot_confusion_matrix() ###Output _____no_output_____ ###Markdown - Model is predicting 0, 1, 4, and 8 correctly -> 40% accuracy- Model confuses 5 for 2 | 6 for 2 | 3 for 8 | 9 for 8- Does this make sense? ###Code interp.most_confused()[:5] # top number is actual, bottom number is prediction learn.show_results(max_n=12) ###Output _____no_output_____
ai-platform-unified/notebooks/unofficial/pipelines/google-cloud-pipeline-components_automl_tabular.ipynb
###Markdown Run in Colab View on GitHub Open in Google Cloud Notebooks Vertex Pipelines: AutoML Tabular pipelines using google-cloud-pipeline-components OverviewThis notebook shows how to use the components defined in [`google_cloud_pipeline_components`](https://github.com/kubeflow/pipelines/tree/master/components/google-cloud) to build an AutoML Tabular workflow on [Vertex Pipelines](https://cloud.google.com/vertex-ai/docs/pipelines). ObjectiveIn this example, you'll learn how to use components from `google_cloud_pipeline_components` to:- create a _Dataset_- train an AutoML Tabular model- deploy the trained model to an _endpoint_ for servingThe components are [documented here](https://google-cloud-pipeline-components.readthedocs.io/en/latest/google_cloud_pipeline_components.aiplatform.htmlmodule-google_cloud_pipeline_components.aiplatform). Costs This tutorial uses billable components of Google Cloud:* Vertex AI Training and Serving* Cloud StorageLearn about [Vertex AI pricing](https://cloud.google.com/ai-platform-unified/pricing) and [Cloud Storagepricing](https://cloud.google.com/storage/pricing), and use the [PricingCalculator](https://cloud.google.com/products/calculator/)to generate a cost estimate based on your projected usage. Set up your local development environment**If you are using Colab or Google Cloud Notebooks**, your environment already meetsall the requirements to run this notebook. You can skip this step. **Otherwise**, make sure your environment meets this notebook's requirements.You need the following:* The Google Cloud SDK* Git* Python 3* virtualenv* Jupyter notebook running in a virtual environment with Python 3The Google Cloud guide to [Setting up a Python developmentenvironment](https://cloud.google.com/python/setup) and the [Jupyterinstallation guide](https://jupyter.org/install) provide detailed instructionsfor meeting these requirements. The following steps provide a condensed set ofinstructions:1. [Install and initialize the Cloud SDK.](https://cloud.google.com/sdk/docs/)1. [Install Python 3.](https://cloud.google.com/python/setupinstalling_python)1. [Install virtualenv](https://cloud.google.com/python/setupinstalling_and_using_virtualenv) and create a virtual environment that uses Python 3. Activate the virtual environment.1. To install Jupyter, run `pip install jupyter` on thecommand-line in a terminal shell.1. To launch Jupyter, run `jupyter notebook` on the command-line in a terminal shell.1. Open this notebook in the Jupyter Notebook Dashboard. Install additional packages ###Code import sys if "google.colab" in sys.modules: USER_FLAG = "" else: USER_FLAG = "--user" !pip3 install {USER_FLAG} kfp google-cloud-pipeline-components --upgrade ###Output _____no_output_____ ###Markdown Restart the kernelAfter you install the additional packages, you need to restart the notebook kernel so it can find the packages. ###Code # Automatically restart kernel after installs import os if not os.getenv("IS_TESTING"): # Automatically restart kernel after installs import IPython app = IPython.Application.instance() app.kernel.do_shutdown(True) ###Output _____no_output_____ ###Markdown Check the versions of the packages you installed. The KFP SDK version should be >=1.6. ###Code !python3 -c "import kfp; print('KFP SDK version: {}'.format(kfp.__version__))" ###Output _____no_output_____ ###Markdown Before you beginThis notebook does not require a GPU runtime. Set up your Google Cloud project**The following steps are required, regardless of your notebook environment.**1. 
[Select or create a Google Cloud project](https://console.cloud.google.com/cloud-resource-manager). When you first create an account, you get a $300 free credit towards your compute/storage costs.1. [Make sure that billing is enabled for your project](https://cloud.google.com/billing/docs/how-to/modify-project).1. [Enable the AI Platform (Unified), Cloud Storage, and Compute Engine APIs](https://console.cloud.google.com/flows/enableapi?apiid=aiplatform.googleapis.com,compute_component,storage-component.googleapis.com). 1. Follow the "**Configuring your project**" instructions from the AI Platform Pipelines documentation.1. If you are running this notebook locally, you will need to install the [Cloud SDK](https://cloud.google.com/sdk).1. Enter your project ID in the cell below. Then run the cell to make sure theCloud SDK uses the right project for all the commands in this notebook.**Note**: Jupyter runs lines prefixed with `!` as shell commands, and it interpolates Python variables prefixed with `$` into these commands. Set your project ID**If you don't know your project ID**, you may be able to get your project ID using `gcloud`. ###Code import os PROJECT_ID = "" # Get your Google Cloud project ID from gcloud if not os.getenv("IS_TESTING"): shell_output=!gcloud config list --format 'value(core.project)' 2>/dev/null PROJECT_ID = shell_output[0] print("Project ID: ", PROJECT_ID) ###Output _____no_output_____ ###Markdown Otherwise, set your project ID here. ###Code if PROJECT_ID == "" or PROJECT_ID is None: PROJECT_ID = "python-docs-samples-tests" # @param {type:"string"} ###Output _____no_output_____ ###Markdown TimestampIf you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append it onto the name of resources you create in this tutorial. ###Code from datetime import datetime TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S") ###Output _____no_output_____ ###Markdown Authenticate your Google Cloud account**If you are using AI Platform Notebooks**, your environment is alreadyauthenticated. Skip this step. **If you are using Colab**, run the cell below and follow the instructionswhen prompted to authenticate your account via oAuth.**Otherwise**, follow these steps:1. In the Cloud Console, go to the [**Create service account key** page](https://console.cloud.google.com/apis/credentials/serviceaccountkey).2. Click **Create service account**.3. In the **Service account name** field, enter a name, and click **Create**.4. In the **Grant this service account access to project** section, click the **Role** drop-down list. Type "AI Platform"into the filter box, and select **AI Platform Administrator**. Type "Storage Object Admin" into the filter box, and select **Storage Object Admin**.5. Click *Create*. A JSON file that contains your key downloads to yourlocal environment.6. Enter the path to your service account key as the`GOOGLE_APPLICATION_CREDENTIALS` variable in the cell below and run the cell. ###Code import os import sys # If you are running this notebook in Colab, run this cell and follow the # instructions to authenticate your GCP account. This provides access to your # Cloud Storage bucket and lets you submit training jobs and prediction # requests. 
# If on AI Platform, then don't execute this code if not os.path.exists("/opt/deeplearning/metadata/env_version"): if "google.colab" in sys.modules: from google.colab import auth as google_auth google_auth.authenticate_user() # If you are running this notebook locally, replace the string below with the # path to your service account key and run this cell to authenticate your GCP # account. elif not os.getenv("IS_TESTING"): %env GOOGLE_APPLICATION_CREDENTIALS '' ###Output _____no_output_____ ###Markdown Create a Cloud Storage bucket as necessaryYou will need a Cloud Storage bucket for this example. If you don't have one that you want to use, you can make one now.Set the name of your Cloud Storage bucket below. It must be unique across allCloud Storage buckets.You may also change the `REGION` variable, which is used for operationsthroughout the rest of this notebook. Make sure to [choose a region where AI Platform (Unified) services areavailable](https://cloud.google.com/ai-platform-unified/docs/general/locationsavailable_regions). You maynot use a Multi-Regional Storage bucket for training with AI Platform. ###Code BUCKET_NAME = "gs://[your-bucket-name]" # @param {type:"string"} REGION = "us-central1" # @param {type:"string"} if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]": BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP ###Output _____no_output_____ ###Markdown **Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket. ###Code ! gsutil mb -l $REGION $BUCKET_NAME ###Output _____no_output_____ ###Markdown Finally, validate access to your Cloud Storage bucket by examining its contents: ###Code ! gsutil ls -al $BUCKET_NAME ###Output _____no_output_____ ###Markdown Import libraries and define constants Define some constants. ###Code PATH=%env PATH %env PATH={PATH}:/home/jupyter/.local/bin USER = "your-user-name" # <---CHANGE THIS PIPELINE_ROOT = "{}/pipeline_root/{}".format(BUCKET_NAME, USER) PIPELINE_ROOT ###Output _____no_output_____ ###Markdown Do some imports: ###Code import kfp from google_cloud_pipeline_components import aiplatform as gcc_aip from kfp.v2 import compiler from kfp.v2.google.client import AIPlatformClient ###Output _____no_output_____ ###Markdown Define an AutoML Tabular regression pipeline that uses components from `google_cloud_pipeline_components` Create a managed image dataset from a CSV file and train it using AutoML Tabular Training. 
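###Markdown If you want to confirm the column names referenced below in the pipeline's `column_transformations`, you can optionally peek at the header of the public source CSV first (an optional sanity check, assuming `gsutil` can read the public bucket): ###Code
# Optional: print the header and first rows of the public training CSV
! gsutil cat gs://aju-dev-demos-codelabs/sample_data/california_housing_train.csv | head -5
###Output _____no_output_____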
###Code TRAIN_FILE_NAME = "california_housing_train.csv" !gsutil cp gs://aju-dev-demos-codelabs/sample_data/california_housing_train.csv {PIPELINE_ROOT}/data/ gcs_csv_path = f"{PIPELINE_ROOT}/data/{TRAIN_FILE_NAME}" ###Output _____no_output_____ ###Markdown Define the pipeline: ###Code @kfp.dsl.pipeline(name="automl-tab-training-v2") def pipeline(): dataset_create_op = gcc_aip.TabularDatasetCreateOp( project=PROJECT_ID, display_name="housing", gcs_source=gcs_csv_path ) training_op = gcc_aip.AutoMLTabularTrainingJobRunOp( project=PROJECT_ID, display_name="train-housing-automl_1", optimization_prediction_type="regression", optimization_objective="minimize-rmse", column_transformations=[ {"numeric": {"column_name": "longitude"}}, {"numeric": {"column_name": "latitude"}}, {"numeric": {"column_name": "housing_median_age"}}, {"numeric": {"column_name": "total_rooms"}}, {"numeric": {"column_name": "total_bedrooms"}}, {"numeric": {"column_name": "population"}}, {"numeric": {"column_name": "households"}}, {"numeric": {"column_name": "median_income"}}, ], dataset=dataset_create_op.outputs["dataset"], target_column="longitude", ) deploy_op = gcc_aip.ModelDeployOp( # noqa: F841 model=training_op.outputs["model"], project=PROJECT_ID, machine_type="n1-standard-4", ) ###Output _____no_output_____ ###Markdown Compile and run the pipelineNow, you're ready to compile the pipeline: ###Code from kfp.v2 import compiler # noqa: F811 compiler.Compiler().compile( pipeline_func=pipeline, package_path="tab_regression_pipeline.json" ) ###Output _____no_output_____ ###Markdown The pipeline compilation generates the `tab_regression_pipeline.json` job spec file.Next, instantiate an API client object: ###Code from kfp.v2.google.client import AIPlatformClient # noqa: F811 api_client = AIPlatformClient(project_id=PROJECT_ID, region=REGION) ###Output _____no_output_____ ###Markdown Then, you run the defined pipeline like this: ###Code response = api_client.create_run_from_job_spec( "tab_regression_pipeline.json", pipeline_root=PIPELINE_ROOT ) ###Output _____no_output_____ ###Markdown Click on the generated link to see your run in the Cloud Console. It should look something like this as it is running: Cleaning upTo clean up all Google Cloud resources used in this project, you can [delete the Google Cloudproject](https://cloud.google.com/resource-manager/docs/creating-managing-projectsshutting_down_projects) you used for the tutorial.Otherwise, you can delete the individual resources you created in this tutorial:- Delete Cloud Storage objects that were created. Uncomment and run the command in the cell below **only if you are not using the `PIPELINE_ROOT` path for any other purpose**.- Delete your deployed model: first, undeploy it from its *endpoint*, then delete the model and endpoint. ###Code # Warning: this command will delete ALL Cloud Storage objects under the PIPELINE_ROOT path. # ! gsutil -m rm -r $PIPELINE_ROOT ###Output _____no_output_____ ###Markdown Run in Colab View on GitHub Open in Google Cloud Notebooks Vertex Pipelines: AutoML Tabular pipelines using google-cloud-pipeline-components OverviewThis notebook shows how to use the components defined in [`google_cloud_pipeline_components`](https://github.com/kubeflow/pipelines/tree/master/components/google-cloud) to build an AutoML Tabular workflow on [Vertex Pipelines](https://cloud.google.com/vertex-ai/docs/pipelines). 
ObjectiveIn this example, you'll learn how to use components from `google_cloud_pipeline_components` to:- create a _Dataset_- train an AutoML Tabular model- deploy the trained model to an _endpoint_ for servingThe components are [documented here](https://google-cloud-pipeline-components.readthedocs.io/en/latest/google_cloud_pipeline_components.aiplatform.htmlmodule-google_cloud_pipeline_components.aiplatform). Costs This tutorial uses billable components of Google Cloud:* Vertex AI Training and Serving* Cloud StorageLearn about [Vertex AI pricing](https://cloud.google.com/ai-platform-unified/pricing) and [Cloud Storagepricing](https://cloud.google.com/storage/pricing), and use the [PricingCalculator](https://cloud.google.com/products/calculator/)to generate a cost estimate based on your projected usage. Set up your local development environment**If you are using Colab or Google Cloud Notebooks**, your environment already meetsall the requirements to run this notebook. You can skip this step. **Otherwise**, make sure your environment meets this notebook's requirements.You need the following:* The Google Cloud SDK* Git* Python 3* virtualenv* Jupyter notebook running in a virtual environment with Python 3The Google Cloud guide to [Setting up a Python developmentenvironment](https://cloud.google.com/python/setup) and the [Jupyterinstallation guide](https://jupyter.org/install) provide detailed instructionsfor meeting these requirements. The following steps provide a condensed set ofinstructions:1. [Install and initialize the Cloud SDK.](https://cloud.google.com/sdk/docs/)1. [Install Python 3.](https://cloud.google.com/python/setupinstalling_python)1. [Install virtualenv](https://cloud.google.com/python/setupinstalling_and_using_virtualenv) and create a virtual environment that uses Python 3. Activate the virtual environment.1. To install Jupyter, run `pip install jupyter` on thecommand-line in a terminal shell.1. To launch Jupyter, run `jupyter notebook` on the command-line in a terminal shell.1. Open this notebook in the Jupyter Notebook Dashboard. Install additional packages ###Code import sys if "google.colab" in sys.modules: USER_FLAG = "" else: USER_FLAG = "--user" !python3 -m pip install {USER_FLAG} kfp google-cloud-pipeline-components --upgrade ###Output _____no_output_____ ###Markdown Restart the kernelAfter you install the additional packages, you need to restart the notebook kernel so it can find the packages. ###Code # Automatically restart kernel after installs import os if not os.getenv("IS_TESTING"): # Automatically restart kernel after installs import IPython app = IPython.Application.instance() app.kernel.do_shutdown(True) ###Output _____no_output_____ ###Markdown Check the versions of the packages you installed. The KFP SDK version should be >=1.6. ###Code !python3 -c "import kfp; print('KFP SDK version: {}'.format(kfp.__version__))" ###Output _____no_output_____ ###Markdown Before you beginThis notebook does not require a GPU runtime. Set up your Google Cloud project**The following steps are required, regardless of your notebook environment.**1. [Select or create a Google Cloud project](https://console.cloud.google.com/cloud-resource-manager). When you first create an account, you get a $300 free credit towards your compute/storage costs.1. [Make sure that billing is enabled for your project](https://cloud.google.com/billing/docs/how-to/modify-project).1. 
[Enable the AI Platform (Unified), Cloud Storage, and Compute Engine APIs](https://console.cloud.google.com/flows/enableapi?apiid=aiplatform.googleapis.com,compute_component,storage-component.googleapis.com). 1. Follow the "**Configuring your project**" instructions from the AI Platform Pipelines documentation.1. If you are running this notebook locally, you will need to install the [Cloud SDK](https://cloud.google.com/sdk).1. Enter your project ID in the cell below. Then run the cell to make sure theCloud SDK uses the right project for all the commands in this notebook.**Note**: Jupyter runs lines prefixed with `!` as shell commands, and it interpolates Python variables prefixed with `$` into these commands. Set your project ID**If you don't know your project ID**, you may be able to get your project ID using `gcloud`. ###Code import os PROJECT_ID = "" # Get your Google Cloud project ID from gcloud if not os.getenv("IS_TESTING"): shell_output=!gcloud config list --format 'value(core.project)' 2>/dev/null PROJECT_ID = shell_output[0] print("Project ID: ", PROJECT_ID) ###Output _____no_output_____ ###Markdown Otherwise, set your project ID here. ###Code if PROJECT_ID == "" or PROJECT_ID is None: PROJECT_ID = "python-docs-samples-tests" # @param {type:"string"} ###Output _____no_output_____ ###Markdown TimestampIf you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append it onto the name of resources you create in this tutorial. ###Code from datetime import datetime TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S") ###Output _____no_output_____ ###Markdown Authenticate your Google Cloud account**If you are using AI Platform Notebooks**, your environment is alreadyauthenticated. Skip this step. **If you are using Colab**, run the cell below and follow the instructionswhen prompted to authenticate your account via oAuth.**Otherwise**, follow these steps:1. In the Cloud Console, go to the [**Create service account key** page](https://console.cloud.google.com/apis/credentials/serviceaccountkey).2. Click **Create service account**.3. In the **Service account name** field, enter a name, and click **Create**.4. In the **Grant this service account access to project** section, click the **Role** drop-down list. Type "AI Platform"into the filter box, and select **AI Platform Administrator**. Type "Storage Object Admin" into the filter box, and select **Storage Object Admin**.5. Click *Create*. A JSON file that contains your key downloads to yourlocal environment.6. Enter the path to your service account key as the`GOOGLE_APPLICATION_CREDENTIALS` variable in the cell below and run the cell. ###Code import os import sys # If you are running this notebook in Colab, run this cell and follow the # instructions to authenticate your GCP account. This provides access to your # Cloud Storage bucket and lets you submit training jobs and prediction # requests. # If on AI Platform, then don't execute this code if not os.path.exists("/opt/deeplearning/metadata/env_version"): if "google.colab" in sys.modules: from google.colab import auth as google_auth google_auth.authenticate_user() # If you are running this notebook locally, replace the string below with the # path to your service account key and run this cell to authenticate your GCP # account. 
elif not os.getenv("IS_TESTING"): %env GOOGLE_APPLICATION_CREDENTIALS '' ###Output _____no_output_____ ###Markdown Create a Cloud Storage bucket as necessaryYou will need a Cloud Storage bucket for this example. If you don't have one that you want to use, you can make one now.Set the name of your Cloud Storage bucket below. It must be unique across allCloud Storage buckets.You may also change the `REGION` variable, which is used for operationsthroughout the rest of this notebook. Make sure to [choose a region where AI Platform (Unified) services areavailable](https://cloud.google.com/ai-platform-unified/docs/general/locationsavailable_regions). You maynot use a Multi-Regional Storage bucket for training with AI Platform. ###Code BUCKET_NAME = "gs://[your-bucket-name]" # @param {type:"string"} REGION = "us-central1" # @param {type:"string"} if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]": BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP ###Output _____no_output_____ ###Markdown **Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket. ###Code ! gsutil mb -l $REGION $BUCKET_NAME ###Output _____no_output_____ ###Markdown Finally, validate access to your Cloud Storage bucket by examining its contents: ###Code ! gsutil ls -al $BUCKET_NAME ###Output _____no_output_____ ###Markdown Import libraries and define constants Define some constants. ###Code PATH=%env PATH %env PATH={PATH}:/home/jupyter/.local/bin USER = "your-user-name" # <---CHANGE THIS PIPELINE_ROOT = "{}/pipeline_root/{}".format(BUCKET_NAME, USER) PIPELINE_ROOT ###Output _____no_output_____ ###Markdown Do some imports: ###Code import kfp from google_cloud_pipeline_components import aiplatform as gcc_aip from kfp.v2 import compiler from kfp.v2.google.client import AIPlatformClient ###Output _____no_output_____ ###Markdown Define an AutoML Tabular regression pipeline that uses components from `google_cloud_pipeline_components` Create a managed image dataset from a CSV file and train it using AutoML Tabular Training. 
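###Markdown Before defining the pipeline, you can optionally confirm which version of `google-cloud-pipeline-components` is installed, since the component names used below come from that package: ###Code
# Optional: show the installed google-cloud-pipeline-components package details
! pip3 show google-cloud-pipeline-components
###Output _____no_output_____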
###Code TRAIN_FILE_NAME = "california_housing_train.csv" !gsutil cp gs://aju-dev-demos-codelabs/sample_data/california_housing_train.csv {PIPELINE_ROOT}/data/ gcs_csv_path = f"{PIPELINE_ROOT}/data/{TRAIN_FILE_NAME}" ###Output _____no_output_____ ###Markdown Define the pipeline: ###Code @kfp.dsl.pipeline(name="automl-tab-training-v2") def pipeline(): dataset_create_op = gcc_aip.TabularDatasetCreateOp( project=PROJECT_ID, display_name="housing", gcs_source=gcs_csv_path ) training_op = gcc_aip.AutoMLTabularTrainingJobRunOp( project=PROJECT_ID, display_name="train-housing-automl_1", optimization_prediction_type="regression", optimization_objective="minimize-rmse", column_transformations=[ {"numeric": {"column_name": "longitude"}}, {"numeric": {"column_name": "latitude"}}, {"numeric": {"column_name": "housing_median_age"}}, {"numeric": {"column_name": "total_rooms"}}, {"numeric": {"column_name": "total_bedrooms"}}, {"numeric": {"column_name": "population"}}, {"numeric": {"column_name": "households"}}, {"numeric": {"column_name": "median_income"}}, ], dataset=dataset_create_op.outputs["dataset"], target_column="longitude", ) deploy_op = gcc_aip.ModelDeployOp( # noqa: F841 model=training_op.outputs["model"], project=PROJECT_ID, machine_type="n1-standard-4", ) ###Output _____no_output_____ ###Markdown Compile and run the pipelineNow, you're ready to compile the pipeline: ###Code from kfp.v2 import compiler # noqa: F811 compiler.Compiler().compile( pipeline_func=pipeline, package_path="tab_regression_pipeline.json" ) ###Output _____no_output_____ ###Markdown The pipeline compilation generates the `tab_regression_pipeline.json` job spec file.Next, instantiate an API client object: ###Code from kfp.v2.google.client import AIPlatformClient # noqa: F811 api_client = AIPlatformClient(project_id=PROJECT_ID, region=REGION) ###Output _____no_output_____ ###Markdown Then, you run the defined pipeline like this: ###Code response = api_client.create_run_from_job_spec( "tab_regression_pipeline.json", pipeline_root=PIPELINE_ROOT ) ###Output _____no_output_____ ###Markdown Click on the generated link to see your run in the Cloud Console. It should look something like this as it is running: Cleaning upTo clean up all Google Cloud resources used in this project, you can [delete the Google Cloudproject](https://cloud.google.com/resource-manager/docs/creating-managing-projectsshutting_down_projects) you used for the tutorial.Otherwise, you can delete the individual resources you created in this tutorial:- Delete Cloud Storage objects that were created. Uncomment and run the command in the cell below **only if you are not using the `PIPELINE_ROOT` path for any other purpose**.- Delete your deployed model: first, undeploy it from its *endpoint*, then delete the model and endpoint. ###Code # Warning: this command will delete ALL Cloud Storage objects under the PIPELINE_ROOT path. # ! gsutil -m rm -r $PIPELINE_ROOT ###Output _____no_output_____ ###Markdown Run in Colab View on GitHub Open in Google Cloud Notebooks Vertex Pipelines: AutoML Tabular pipelines using google-cloud-pipeline-components OverviewThis notebook shows how to use the components defined in [`google_cloud_pipeline_components`](https://github.com/kubeflow/pipelines/tree/master/components/google-cloud) to build an AutoML Tabular workflow on [Vertex Pipelines](https://cloud.google.com/vertex-ai/docs/pipelines). 
ObjectiveIn this example, you'll learn how to use components from `google_cloud_pipeline_components` to:- create a _Dataset_- train an AutoML Tabular model- deploy the trained model to an _endpoint_ for servingThe components are [documented here](https://google-cloud-pipeline-components.readthedocs.io/en/latest/google_cloud_pipeline_components.aiplatform.htmlmodule-google_cloud_pipeline_components.aiplatform). Costs This tutorial uses billable components of Google Cloud:* Vertex AI Training and Serving* Cloud StorageLearn about [Vertex AI pricing](https://cloud.google.com/vertex-ai/pricing) and [Cloud Storagepricing](https://cloud.google.com/storage/pricing), and use the [PricingCalculator](https://cloud.google.com/products/calculator/)to generate a cost estimate based on your projected usage. Set up your local development environment**If you are using Colab or Google Cloud Notebooks**, your environment already meetsall the requirements to run this notebook. You can skip this step. **Otherwise**, make sure your environment meets this notebook's requirements.You need the following:* The Google Cloud SDK* Git* Python 3* virtualenv* Jupyter notebook running in a virtual environment with Python 3The Google Cloud guide to [Setting up a Python developmentenvironment](https://cloud.google.com/python/setup) and the [Jupyterinstallation guide](https://jupyter.org/install) provide detailed instructionsfor meeting these requirements. The following steps provide a condensed set ofinstructions:1. [Install and initialize the Cloud SDK.](https://cloud.google.com/sdk/docs/)1. [Install Python 3.](https://cloud.google.com/python/setupinstalling_python)1. [Install virtualenv](https://cloud.google.com/python/setupinstalling_and_using_virtualenv) and create a virtual environment that uses Python 3. Activate the virtual environment.1. To install Jupyter, run `pip install jupyter` on thecommand-line in a terminal shell.1. To launch Jupyter, run `jupyter notebook` on the command-line in a terminal shell.1. Open this notebook in the Jupyter Notebook Dashboard. Install additional packages ###Code import os # The Google Cloud Notebook product has specific requirements IS_GOOGLE_CLOUD_NOTEBOOK = os.path.exists("/opt/deeplearning/metadata/env_version") # Google Cloud Notebook requires dependencies to be installed with '--user' USER_FLAG = "" if IS_GOOGLE_CLOUD_NOTEBOOK: USER_FLAG = "--user" !pip3 install {USER_FLAG} kfp google-cloud-pipeline-components --upgrade ###Output _____no_output_____ ###Markdown Restart the kernelAfter you install the additional packages, you need to restart the notebook kernel so it can find the packages. ###Code # Automatically restart kernel after installs import os if not os.getenv("IS_TESTING"): # Automatically restart kernel after installs import IPython app = IPython.Application.instance() app.kernel.do_shutdown(True) ###Output _____no_output_____ ###Markdown Check the versions of the packages you installed. The KFP SDK version should be >=1.6. ###Code !python3 -c "import kfp; print('KFP SDK version: {}'.format(kfp.__version__))" ###Output _____no_output_____ ###Markdown Before you beginThis notebook does not require a GPU runtime. Set up your Google Cloud project**The following steps are required, regardless of your notebook environment.**1. [Select or create a Google Cloud project](https://console.cloud.google.com/cloud-resource-manager). When you first create an account, you get a $300 free credit towards your compute/storage costs.1. 
[Make sure that billing is enabled for your project](https://cloud.google.com/billing/docs/how-to/modify-project).1. [Enable the Vertex AI, Cloud Storage, and Compute Engine APIs](https://console.cloud.google.com/flows/enableapi?apiid=aiplatform.googleapis.com,compute_component,storage-component.googleapis.com). 1. Follow the "**Configuring your project**" instructions from the Vertex Pipelines documentation.1. If you are running this notebook locally, you will need to install the [Cloud SDK](https://cloud.google.com/sdk).1. Enter your project ID in the cell below. Then run the cell to make sure theCloud SDK uses the right project for all the commands in this notebook.**Note**: Jupyter runs lines prefixed with `!` as shell commands, and it interpolates Python variables prefixed with `$` into these commands. Set your project ID**If you don't know your project ID**, you may be able to get your project ID using `gcloud`. ###Code import os PROJECT_ID = "" # Get your Google Cloud project ID from gcloud if not os.getenv("IS_TESTING"): shell_output=!gcloud config list --format 'value(core.project)' 2>/dev/null PROJECT_ID = shell_output[0] print("Project ID: ", PROJECT_ID) ###Output _____no_output_____ ###Markdown Otherwise, set your project ID here. ###Code if PROJECT_ID == "" or PROJECT_ID is None: PROJECT_ID = "python-docs-samples-tests" # @param {type:"string"} ###Output _____no_output_____ ###Markdown TimestampIf you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append it onto the name of resources you create in this tutorial. ###Code from datetime import datetime TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S") ###Output _____no_output_____ ###Markdown Authenticate your Google Cloud account**If you are using Google Cloud Notebooks**, your environment is alreadyauthenticated. Skip this step. **If you are using Colab**, run the cell below and follow the instructionswhen prompted to authenticate your account via oAuth.**Otherwise**, follow these steps:1. In the Cloud Console, go to the [**Create service account key** page](https://console.cloud.google.com/apis/credentials/serviceaccountkey).2. Click **Create service account**.3. In the **Service account name** field, enter a name, and click **Create**.4. In the **Grant this service account access to project** section, click the **Role** drop-down list. Type "Vertex AI"into the filter box, and select **Vertex AI Administrator**. Type "Storage Object Admin" into the filter box, and select **Storage Object Admin**.5. Click *Create*. A JSON file that contains your key downloads to yourlocal environment.6. Enter the path to your service account key as the`GOOGLE_APPLICATION_CREDENTIALS` variable in the cell below and run the cell. ###Code import os import sys # If you are running this notebook in Colab, run this cell and follow the # instructions to authenticate your GCP account. This provides access to your # Cloud Storage bucket and lets you submit training jobs and prediction # requests. 
# The Google Cloud Notebook product has specific requirements IS_GOOGLE_CLOUD_NOTEBOOK = os.path.exists("/opt/deeplearning/metadata/env_version") # If on Google Cloud Notebooks, then don't execute this code if not IS_GOOGLE_CLOUD_NOTEBOOK: if "google.colab" in sys.modules: from google.colab import auth as google_auth google_auth.authenticate_user() # If you are running this notebook locally, replace the string below with the # path to your service account key and run this cell to authenticate your GCP # account. elif not os.getenv("IS_TESTING"): %env GOOGLE_APPLICATION_CREDENTIALS '' ###Output _____no_output_____ ###Markdown Create a Cloud Storage bucket as necessaryYou will need a Cloud Storage bucket for this example. If you don't have one that you want to use, you can make one now.Set the name of your Cloud Storage bucket below. It must be unique across allCloud Storage buckets.You may also change the `REGION` variable, which is used for operationsthroughout the rest of this notebook. Make sure to [choose a region where Vertex AI services areavailable](https://cloud.google.com/vertex-ai/docs/general/locationsavailable_regions). You maynot use a Multi-Regional Storage bucket for training with Vertex AI. ###Code BUCKET_NAME = "gs://[your-bucket-name]" # @param {type:"string"} REGION = "us-central1" # @param {type:"string"} if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]": BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP ###Output _____no_output_____ ###Markdown **Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket. ###Code ! gsutil mb -l $REGION $BUCKET_NAME ###Output _____no_output_____ ###Markdown Finally, validate access to your Cloud Storage bucket by examining its contents: ###Code ! gsutil ls -al $BUCKET_NAME ###Output _____no_output_____ ###Markdown Import libraries and define constants Define some constants. ###Code PATH=%env PATH %env PATH={PATH}:/home/jupyter/.local/bin USER = "your-user-name" # <---CHANGE THIS PIPELINE_ROOT = "{}/pipeline_root/{}".format(BUCKET_NAME, USER) PIPELINE_ROOT ###Output _____no_output_____ ###Markdown Do some imports: ###Code import kfp from google_cloud_pipeline_components import aiplatform as gcc_aip from kfp.v2 import compiler from kfp.v2.google.client import AIPlatformClient ###Output _____no_output_____ ###Markdown Define an AutoML Tabular regression pipeline that uses components from `google_cloud_pipeline_components` Create a managed image dataset from a CSV file and train it using AutoML Tabular Training. 
###Code TRAIN_FILE_NAME = "california_housing_train.csv" !gsutil cp gs://aju-dev-demos-codelabs/sample_data/california_housing_train.csv {PIPELINE_ROOT}/data/ gcs_csv_path = f"{PIPELINE_ROOT}/data/{TRAIN_FILE_NAME}" ###Output _____no_output_____ ###Markdown Define the pipeline: ###Code @kfp.dsl.pipeline(name="automl-tab-training-v2") def pipeline(project: str = PROJECT_ID): dataset_create_op = gcc_aip.TabularDatasetCreateOp( project=project, display_name="housing", gcs_source=gcs_csv_path ) training_op = gcc_aip.AutoMLTabularTrainingJobRunOp( project=project, display_name="train-housing-automl_1", optimization_prediction_type="regression", optimization_objective="minimize-rmse", column_transformations=[ {"numeric": {"column_name": "longitude"}}, {"numeric": {"column_name": "latitude"}}, {"numeric": {"column_name": "housing_median_age"}}, {"numeric": {"column_name": "total_rooms"}}, {"numeric": {"column_name": "total_bedrooms"}}, {"numeric": {"column_name": "population"}}, {"numeric": {"column_name": "households"}}, {"numeric": {"column_name": "median_income"}}, {"numeric": {"column_name": "median_house_value"}}, ], dataset=dataset_create_op.outputs["dataset"], target_column="median_house_value", ) deploy_op = gcc_aip.ModelDeployOp( # noqa: F841 model=training_op.outputs["model"], project=project, machine_type="n1-standard-4", ) ###Output _____no_output_____ ###Markdown Compile and run the pipelineNow, you're ready to compile the pipeline: ###Code from kfp.v2 import compiler # noqa: F811 compiler.Compiler().compile( pipeline_func=pipeline, package_path="tab_regression_pipeline.json" ) ###Output _____no_output_____ ###Markdown The pipeline compilation generates the `tab_regression_pipeline.json` job spec file.Next, instantiate an API client object: ###Code from kfp.v2.google.client import AIPlatformClient # noqa: F811 api_client = AIPlatformClient(project_id=PROJECT_ID, region=REGION) ###Output _____no_output_____ ###Markdown Then, you run the defined pipeline like this: ###Code response = api_client.create_run_from_job_spec( "tab_regression_pipeline.json", pipeline_root=PIPELINE_ROOT, parameter_values={"project": PROJECT_ID}, ) ###Output _____no_output_____ ###Markdown Click on the generated link to see your run in the Cloud Console. It should look something like this as it is running: Cleaning upTo clean up all Google Cloud resources used in this project, you can [delete the Google Cloudproject](https://cloud.google.com/resource-manager/docs/creating-managing-projectsshutting_down_projects) you used for the tutorial.Otherwise, you can delete the individual resources you created in this tutorial:- Delete Cloud Storage objects that were created. Uncomment and run the command in the cell below **only if you are not using the `PIPELINE_ROOT` path for any other purpose**.- Delete your deployed model: first, undeploy it from its *endpoint*, then delete the model and endpoint. ###Code # Warning: this command will delete ALL Cloud Storage objects under the PIPELINE_ROOT path. # ! gsutil -m rm -r $PIPELINE_ROOT ###Output _____no_output_____ ###Markdown Run in Colab View on GitHub Open in Google Cloud Notebooks Vertex Pipelines: AutoML Tabular pipelines using google-cloud-pipeline-components OverviewThis notebook shows how to use the components defined in [`google_cloud_pipeline_components`](https://github.com/kubeflow/pipelines/tree/master/components/google-cloud) to build an AutoML Tabular workflow on [Vertex Pipelines](https://cloud.google.com/vertex-ai/docs/pipelines). 
ObjectiveIn this example, you'll learn how to use components from `google_cloud_pipeline_components` to:- create a _Dataset_- train an AutoML Tabular model- deploy the trained model to an _endpoint_ for servingThe components are [documented here](https://google-cloud-pipeline-components.readthedocs.io/en/latest/google_cloud_pipeline_components.aiplatform.htmlmodule-google_cloud_pipeline_components.aiplatform). Costs This tutorial uses billable components of Google Cloud:* Vertex AI Training and Serving* Cloud StorageLearn about [Vertex AI pricing](https://cloud.google.com/ai-platform-unified/pricing) and [Cloud Storagepricing](https://cloud.google.com/storage/pricing), and use the [PricingCalculator](https://cloud.google.com/products/calculator/)to generate a cost estimate based on your projected usage. Set up your local development environment**If you are using Colab or Google Cloud Notebooks**, your environment already meetsall the requirements to run this notebook. You can skip this step. **Otherwise**, make sure your environment meets this notebook's requirements.You need the following:* The Google Cloud SDK* Git* Python 3* virtualenv* Jupyter notebook running in a virtual environment with Python 3The Google Cloud guide to [Setting up a Python developmentenvironment](https://cloud.google.com/python/setup) and the [Jupyterinstallation guide](https://jupyter.org/install) provide detailed instructionsfor meeting these requirements. The following steps provide a condensed set ofinstructions:1. [Install and initialize the Cloud SDK.](https://cloud.google.com/sdk/docs/)1. [Install Python 3.](https://cloud.google.com/python/setupinstalling_python)1. [Install virtualenv](https://cloud.google.com/python/setupinstalling_and_using_virtualenv) and create a virtual environment that uses Python 3. Activate the virtual environment.1. To install Jupyter, run `pip install jupyter` on thecommand-line in a terminal shell.1. To launch Jupyter, run `jupyter notebook` on the command-line in a terminal shell.1. Open this notebook in the Jupyter Notebook Dashboard. Install additional packages ###Code import sys if "google.colab" in sys.modules: USER_FLAG = "" else: USER_FLAG = "--user" !pip3 install {USER_FLAG} kfp google-cloud-pipeline-components --upgrade ###Output _____no_output_____ ###Markdown Restart the kernelAfter you install the additional packages, you need to restart the notebook kernel so it can find the packages. ###Code # Automatically restart kernel after installs import os if not os.getenv("IS_TESTING"): # Automatically restart kernel after installs import IPython app = IPython.Application.instance() app.kernel.do_shutdown(True) ###Output _____no_output_____ ###Markdown Check the versions of the packages you installed. The KFP SDK version should be >=1.6. ###Code !python3 -c "import kfp; print('KFP SDK version: {}'.format(kfp.__version__))" ###Output _____no_output_____ ###Markdown Before you beginThis notebook does not require a GPU runtime. Set up your Google Cloud project**The following steps are required, regardless of your notebook environment.**1. [Select or create a Google Cloud project](https://console.cloud.google.com/cloud-resource-manager). When you first create an account, you get a $300 free credit towards your compute/storage costs.1. [Make sure that billing is enabled for your project](https://cloud.google.com/billing/docs/how-to/modify-project).1. 
[Enable the AI Platform (Unified), Cloud Storage, and Compute Engine APIs](https://console.cloud.google.com/flows/enableapi?apiid=aiplatform.googleapis.com,compute_component,storage-component.googleapis.com). 1. Follow the "**Configuring your project**" instructions from the AI Platform Pipelines documentation.1. If you are running this notebook locally, you will need to install the [Cloud SDK](https://cloud.google.com/sdk).1. Enter your project ID in the cell below. Then run the cell to make sure theCloud SDK uses the right project for all the commands in this notebook.**Note**: Jupyter runs lines prefixed with `!` as shell commands, and it interpolates Python variables prefixed with `$` into these commands. Set your project ID**If you don't know your project ID**, you may be able to get your project ID using `gcloud`. ###Code import os PROJECT_ID = "" # Get your Google Cloud project ID from gcloud if not os.getenv("IS_TESTING"): shell_output=!gcloud config list --format 'value(core.project)' 2>/dev/null PROJECT_ID = shell_output[0] print("Project ID: ", PROJECT_ID) ###Output _____no_output_____ ###Markdown Otherwise, set your project ID here. ###Code if PROJECT_ID == "" or PROJECT_ID is None: PROJECT_ID = "python-docs-samples-tests" # @param {type:"string"} ###Output _____no_output_____ ###Markdown TimestampIf you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append it onto the name of resources you create in this tutorial. ###Code from datetime import datetime TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S") ###Output _____no_output_____ ###Markdown Authenticate your Google Cloud account**If you are using AI Platform Notebooks**, your environment is alreadyauthenticated. Skip this step. **If you are using Colab**, run the cell below and follow the instructionswhen prompted to authenticate your account via oAuth.**Otherwise**, follow these steps:1. In the Cloud Console, go to the [**Create service account key** page](https://console.cloud.google.com/apis/credentials/serviceaccountkey).2. Click **Create service account**.3. In the **Service account name** field, enter a name, and click **Create**.4. In the **Grant this service account access to project** section, click the **Role** drop-down list. Type "AI Platform"into the filter box, and select **AI Platform Administrator**. Type "Storage Object Admin" into the filter box, and select **Storage Object Admin**.5. Click *Create*. A JSON file that contains your key downloads to yourlocal environment.6. Enter the path to your service account key as the`GOOGLE_APPLICATION_CREDENTIALS` variable in the cell below and run the cell. ###Code import os import sys # If you are running this notebook in Colab, run this cell and follow the # instructions to authenticate your GCP account. This provides access to your # Cloud Storage bucket and lets you submit training jobs and prediction # requests. # If on AI Platform, then don't execute this code if not os.path.exists("/opt/deeplearning/metadata/env_version"): if "google.colab" in sys.modules: from google.colab import auth as google_auth google_auth.authenticate_user() # If you are running this notebook locally, replace the string below with the # path to your service account key and run this cell to authenticate your GCP # account. 
elif not os.getenv("IS_TESTING"): %env GOOGLE_APPLICATION_CREDENTIALS '' ###Output _____no_output_____ ###Markdown Create a Cloud Storage bucket as necessaryYou will need a Cloud Storage bucket for this example. If you don't have one that you want to use, you can make one now.Set the name of your Cloud Storage bucket below. It must be unique across allCloud Storage buckets.You may also change the `REGION` variable, which is used for operationsthroughout the rest of this notebook. Make sure to [choose a region where AI Platform (Unified) services areavailable](https://cloud.google.com/ai-platform-unified/docs/general/locationsavailable_regions). You maynot use a Multi-Regional Storage bucket for training with AI Platform. ###Code BUCKET_NAME = "gs://[your-bucket-name]" # @param {type:"string"} REGION = "us-central1" # @param {type:"string"} if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]": BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP ###Output _____no_output_____ ###Markdown **Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket. ###Code ! gsutil mb -l $REGION $BUCKET_NAME ###Output _____no_output_____ ###Markdown Finally, validate access to your Cloud Storage bucket by examining its contents: ###Code ! gsutil ls -al $BUCKET_NAME ###Output _____no_output_____ ###Markdown Import libraries and define constants Define some constants. ###Code PATH=%env PATH %env PATH={PATH}:/home/jupyter/.local/bin USER = "your-user-name" # <---CHANGE THIS PIPELINE_ROOT = "{}/pipeline_root/{}".format(BUCKET_NAME, USER) PIPELINE_ROOT ###Output _____no_output_____ ###Markdown Do some imports: ###Code import kfp from google_cloud_pipeline_components import aiplatform as gcc_aip from kfp.v2 import compiler from kfp.v2.google.client import AIPlatformClient ###Output _____no_output_____ ###Markdown Define an AutoML Tabular regression pipeline that uses components from `google_cloud_pipeline_components` Create a managed image dataset from a CSV file and train it using AutoML Tabular Training. 
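###Markdown The `column_transformations` list in the pipeline below names each feature column explicitly, so it is worth confirming the CSV header before wiring it in. The next cell is a small optional sketch that previews the public copy of the file with pandas; reading `gs://` URLs directly requires the `gcsfs` package, otherwise copy the file locally with `gsutil cp` first.
###Code
import pandas as pd

# Preview the first few rows of the public California housing CSV.
# Reading gs:// paths directly needs gcsfs installed in the notebook environment.
preview = pd.read_csv(
    "gs://aju-dev-demos-codelabs/sample_data/california_housing_train.csv", nrows=5
)
print(list(preview.columns))
preview
###Output _____no_output_____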
###Code TRAIN_FILE_NAME = "california_housing_train.csv" !gsutil cp gs://aju-dev-demos-codelabs/sample_data/california_housing_train.csv {PIPELINE_ROOT}/data/ gcs_csv_path = f"{PIPELINE_ROOT}/data/{TRAIN_FILE_NAME}" ###Output _____no_output_____ ###Markdown Define the pipeline: ###Code @kfp.dsl.pipeline(name="automl-tab-training-v2") def pipeline(project: str = PROJECT_ID): dataset_create_op = gcc_aip.TabularDatasetCreateOp( project=project, display_name="housing", gcs_source=gcs_csv_path ) training_op = gcc_aip.AutoMLTabularTrainingJobRunOp( project=project, display_name="train-housing-automl_1", optimization_prediction_type="regression", optimization_objective="minimize-rmse", column_transformations=[ {"numeric": {"column_name": "longitude"}}, {"numeric": {"column_name": "latitude"}}, {"numeric": {"column_name": "housing_median_age"}}, {"numeric": {"column_name": "total_rooms"}}, {"numeric": {"column_name": "total_bedrooms"}}, {"numeric": {"column_name": "population"}}, {"numeric": {"column_name": "households"}}, {"numeric": {"column_name": "median_income"}}, ], dataset=dataset_create_op.outputs["dataset"], target_column="longitude", ) deploy_op = gcc_aip.ModelDeployOp( # noqa: F841 model=training_op.outputs["model"], project=project, machine_type="n1-standard-4", ) ###Output _____no_output_____ ###Markdown Compile and run the pipelineNow, you're ready to compile the pipeline: ###Code from kfp.v2 import compiler # noqa: F811 compiler.Compiler().compile( pipeline_func=pipeline, package_path="tab_regression_pipeline.json" ) ###Output _____no_output_____ ###Markdown The pipeline compilation generates the `tab_regression_pipeline.json` job spec file.Next, instantiate an API client object: ###Code from kfp.v2.google.client import AIPlatformClient # noqa: F811 api_client = AIPlatformClient(project_id=PROJECT_ID, region=REGION) ###Output _____no_output_____ ###Markdown Then, you run the defined pipeline like this: ###Code response = api_client.create_run_from_job_spec( "tab_regression_pipeline.json", pipeline_root=PIPELINE_ROOT, parameter_values={"project": PROJECT_ID}, ) ###Output _____no_output_____ ###Markdown Click on the generated link to see your run in the Cloud Console. It should look something like this as it is running: Cleaning upTo clean up all Google Cloud resources used in this project, you can [delete the Google Cloudproject](https://cloud.google.com/resource-manager/docs/creating-managing-projectsshutting_down_projects) you used for the tutorial.Otherwise, you can delete the individual resources you created in this tutorial:- Delete Cloud Storage objects that were created. Uncomment and run the command in the cell below **only if you are not using the `PIPELINE_ROOT` path for any other purpose**.- Delete your deployed model: first, undeploy it from its *endpoint*, then delete the model and endpoint. ###Code # Warning: this command will delete ALL Cloud Storage objects under the PIPELINE_ROOT path. # ! gsutil -m rm -r $PIPELINE_ROOT ###Output _____no_output_____ ###Markdown Run in Colab View on GitHub Open in Google Cloud Notebooks Vertex Pipelines: AutoML Tabular pipelines using google-cloud-pipeline-components OverviewThis notebook shows how to use the components defined in [`google_cloud_pipeline_components`](https://github.com/kubeflow/pipelines/tree/master/components/google-cloud) to build an AutoML Tabular workflow on [Vertex Pipelines](https://cloud.google.com/vertex-ai/docs/pipelines). 
ObjectiveIn this example, you'll learn how to use components from `google_cloud_pipeline_components` to:- create a _Dataset_- train an AutoML Tabular model- deploy the trained model to an _endpoint_ for servingThe components are [documented here](https://google-cloud-pipeline-components.readthedocs.io/en/latest/google_cloud_pipeline_components.aiplatform.htmlmodule-google_cloud_pipeline_components.aiplatform). Costs This tutorial uses billable components of Google Cloud:* Vertex AI Training and Serving* Cloud StorageLearn about [Vertex AI pricing](https://cloud.google.com/vertex-ai/pricing) and [Cloud Storagepricing](https://cloud.google.com/storage/pricing), and use the [PricingCalculator](https://cloud.google.com/products/calculator/)to generate a cost estimate based on your projected usage. Set up your local development environment**If you are using Colab or Google Cloud Notebooks**, your environment already meetsall the requirements to run this notebook. You can skip this step. **Otherwise**, make sure your environment meets this notebook's requirements.You need the following:* The Google Cloud SDK* Git* Python 3* virtualenv* Jupyter notebook running in a virtual environment with Python 3The Google Cloud guide to [Setting up a Python developmentenvironment](https://cloud.google.com/python/setup) and the [Jupyterinstallation guide](https://jupyter.org/install) provide detailed instructionsfor meeting these requirements. The following steps provide a condensed set ofinstructions:1. [Install and initialize the Cloud SDK.](https://cloud.google.com/sdk/docs/)1. [Install Python 3.](https://cloud.google.com/python/setupinstalling_python)1. [Install virtualenv](https://cloud.google.com/python/setupinstalling_and_using_virtualenv) and create a virtual environment that uses Python 3. Activate the virtual environment.1. To install Jupyter, run `pip install jupyter` on thecommand-line in a terminal shell.1. To launch Jupyter, run `jupyter notebook` on the command-line in a terminal shell.1. Open this notebook in the Jupyter Notebook Dashboard. Install additional packages ###Code import sys if "google.colab" in sys.modules: USER_FLAG = "" else: USER_FLAG = "--user" !pip3 install {USER_FLAG} kfp google-cloud-pipeline-components --upgrade ###Output _____no_output_____ ###Markdown Restart the kernelAfter you install the additional packages, you need to restart the notebook kernel so it can find the packages. ###Code # Automatically restart kernel after installs import os if not os.getenv("IS_TESTING"): # Automatically restart kernel after installs import IPython app = IPython.Application.instance() app.kernel.do_shutdown(True) ###Output _____no_output_____ ###Markdown Check the versions of the packages you installed. The KFP SDK version should be >=1.6. ###Code !python3 -c "import kfp; print('KFP SDK version: {}'.format(kfp.__version__))" ###Output _____no_output_____ ###Markdown Before you beginThis notebook does not require a GPU runtime. Set up your Google Cloud project**The following steps are required, regardless of your notebook environment.**1. [Select or create a Google Cloud project](https://console.cloud.google.com/cloud-resource-manager). When you first create an account, you get a $300 free credit towards your compute/storage costs.1. [Make sure that billing is enabled for your project](https://cloud.google.com/billing/docs/how-to/modify-project).1. 
[Enable the Vertex AI, Cloud Storage, and Compute Engine APIs](https://console.cloud.google.com/flows/enableapi?apiid=aiplatform.googleapis.com,compute_component,storage-component.googleapis.com). 1. Follow the "**Configuring your project**" instructions from the Vertex Pipelines documentation.1. If you are running this notebook locally, you will need to install the [Cloud SDK](https://cloud.google.com/sdk).1. Enter your project ID in the cell below. Then run the cell to make sure theCloud SDK uses the right project for all the commands in this notebook.**Note**: Jupyter runs lines prefixed with `!` as shell commands, and it interpolates Python variables prefixed with `$` into these commands. Set your project ID**If you don't know your project ID**, you may be able to get your project ID using `gcloud`. ###Code import os PROJECT_ID = "" # Get your Google Cloud project ID from gcloud if not os.getenv("IS_TESTING"): shell_output=!gcloud config list --format 'value(core.project)' 2>/dev/null PROJECT_ID = shell_output[0] print("Project ID: ", PROJECT_ID) ###Output _____no_output_____ ###Markdown Otherwise, set your project ID here. ###Code if PROJECT_ID == "" or PROJECT_ID is None: PROJECT_ID = "python-docs-samples-tests" # @param {type:"string"} ###Output _____no_output_____ ###Markdown TimestampIf you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append it onto the name of resources you create in this tutorial. ###Code from datetime import datetime TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S") ###Output _____no_output_____ ###Markdown Authenticate your Google Cloud account**If you are using Google Cloud Notebooks**, your environment is alreadyauthenticated. Skip this step. **If you are using Colab**, run the cell below and follow the instructionswhen prompted to authenticate your account via oAuth.**Otherwise**, follow these steps:1. In the Cloud Console, go to the [**Create service account key** page](https://console.cloud.google.com/apis/credentials/serviceaccountkey).2. Click **Create service account**.3. In the **Service account name** field, enter a name, and click **Create**.4. In the **Grant this service account access to project** section, click the **Role** drop-down list. Type "Vertex AI"into the filter box, and select **Vertex AI Administrator**. Type "Storage Object Admin" into the filter box, and select **Storage Object Admin**.5. Click *Create*. A JSON file that contains your key downloads to yourlocal environment.6. Enter the path to your service account key as the`GOOGLE_APPLICATION_CREDENTIALS` variable in the cell below and run the cell. ###Code import os import sys # If you are running this notebook in Colab, run this cell and follow the # instructions to authenticate your GCP account. This provides access to your # Cloud Storage bucket and lets you submit training jobs and prediction # requests. # If on Google Cloud Notebooks, then don't execute this code if not os.path.exists("/opt/deeplearning/metadata/env_version"): if "google.colab" in sys.modules: from google.colab import auth as google_auth google_auth.authenticate_user() # If you are running this notebook locally, replace the string below with the # path to your service account key and run this cell to authenticate your GCP # account. 
elif not os.getenv("IS_TESTING"): %env GOOGLE_APPLICATION_CREDENTIALS '' ###Output _____no_output_____ ###Markdown Create a Cloud Storage bucket as necessaryYou will need a Cloud Storage bucket for this example. If you don't have one that you want to use, you can make one now.Set the name of your Cloud Storage bucket below. It must be unique across allCloud Storage buckets.You may also change the `REGION` variable, which is used for operationsthroughout the rest of this notebook. Make sure to [choose a region where Vertex AI services areavailable](https://cloud.google.com/vertex-ai/docs/general/locationsavailable_regions). You maynot use a Multi-Regional Storage bucket for training with Vertex AI. ###Code BUCKET_NAME = "gs://[your-bucket-name]" # @param {type:"string"} REGION = "us-central1" # @param {type:"string"} if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]": BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP ###Output _____no_output_____ ###Markdown **Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket. ###Code ! gsutil mb -l $REGION $BUCKET_NAME ###Output _____no_output_____ ###Markdown Finally, validate access to your Cloud Storage bucket by examining its contents: ###Code ! gsutil ls -al $BUCKET_NAME ###Output _____no_output_____ ###Markdown Import libraries and define constants Define some constants. ###Code PATH=%env PATH %env PATH={PATH}:/home/jupyter/.local/bin USER = "your-user-name" # <---CHANGE THIS PIPELINE_ROOT = "{}/pipeline_root/{}".format(BUCKET_NAME, USER) PIPELINE_ROOT ###Output _____no_output_____ ###Markdown Do some imports: ###Code import kfp from google_cloud_pipeline_components import aiplatform as gcc_aip from kfp.v2 import compiler from kfp.v2.google.client import AIPlatformClient ###Output _____no_output_____ ###Markdown Define an AutoML Tabular regression pipeline that uses components from `google_cloud_pipeline_components` Create a managed image dataset from a CSV file and train it using AutoML Tabular Training. 
###Code TRAIN_FILE_NAME = "california_housing_train.csv" !gsutil cp gs://aju-dev-demos-codelabs/sample_data/california_housing_train.csv {PIPELINE_ROOT}/data/ gcs_csv_path = f"{PIPELINE_ROOT}/data/{TRAIN_FILE_NAME}" ###Output _____no_output_____ ###Markdown Define the pipeline: ###Code @kfp.dsl.pipeline(name="automl-tab-training-v2") def pipeline(project: str = PROJECT_ID): dataset_create_op = gcc_aip.TabularDatasetCreateOp( project=project, display_name="housing", gcs_source=gcs_csv_path ) training_op = gcc_aip.AutoMLTabularTrainingJobRunOp( project=project, display_name="train-housing-automl_1", optimization_prediction_type="regression", optimization_objective="minimize-rmse", column_transformations=[ {"numeric": {"column_name": "longitude"}}, {"numeric": {"column_name": "latitude"}}, {"numeric": {"column_name": "housing_median_age"}}, {"numeric": {"column_name": "total_rooms"}}, {"numeric": {"column_name": "total_bedrooms"}}, {"numeric": {"column_name": "population"}}, {"numeric": {"column_name": "households"}}, {"numeric": {"column_name": "median_income"}}, {"numeric": {"column_name": "median_house_value"}}, ], dataset=dataset_create_op.outputs["dataset"], target_column="median_house_value", ) deploy_op = gcc_aip.ModelDeployOp( # noqa: F841 model=training_op.outputs["model"], project=project, machine_type="n1-standard-4", ) ###Output _____no_output_____ ###Markdown Compile and run the pipelineNow, you're ready to compile the pipeline: ###Code from kfp.v2 import compiler # noqa: F811 compiler.Compiler().compile( pipeline_func=pipeline, package_path="tab_regression_pipeline.json" ) ###Output _____no_output_____ ###Markdown The pipeline compilation generates the `tab_regression_pipeline.json` job spec file.Next, instantiate an API client object: ###Code from kfp.v2.google.client import AIPlatformClient # noqa: F811 api_client = AIPlatformClient(project_id=PROJECT_ID, region=REGION) ###Output _____no_output_____ ###Markdown Then, you run the defined pipeline like this: ###Code response = api_client.create_run_from_job_spec( "tab_regression_pipeline.json", pipeline_root=PIPELINE_ROOT, parameter_values={"project": PROJECT_ID}, ) ###Output _____no_output_____ ###Markdown Click on the generated link to see your run in the Cloud Console. It should look something like this as it is running: Cleaning upTo clean up all Google Cloud resources used in this project, you can [delete the Google Cloudproject](https://cloud.google.com/resource-manager/docs/creating-managing-projectsshutting_down_projects) you used for the tutorial.Otherwise, you can delete the individual resources you created in this tutorial:- Delete Cloud Storage objects that were created. Uncomment and run the command in the cell below **only if you are not using the `PIPELINE_ROOT` path for any other purpose**.- Delete your deployed model: first, undeploy it from its *endpoint*, then delete the model and endpoint. ###Code # Warning: this command will delete ALL Cloud Storage objects under the PIPELINE_ROOT path. # ! gsutil -m rm -r $PIPELINE_ROOT ###Output _____no_output_____
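###Markdown The cleanup list above says to undeploy the model from its endpoint and then delete the model and endpoint, but it does not show the calls. The cell below is a minimal sketch of that step using the `google-cloud-aiplatform` SDK (assumed to be installed); the display-name filter is a hypothetical placeholder, so match it to the names the pipeline actually created (visible in the Cloud Console) before running it.
###Code
from google.cloud import aiplatform

aiplatform.init(project=PROJECT_ID, location=REGION)

# Hypothetical display-name filter; adjust to the endpoint/model created by the pipeline.
TARGET_NAME = "train-housing-automl"

for endpoint in aiplatform.Endpoint.list():
    if TARGET_NAME in endpoint.display_name:
        endpoint.undeploy_all()  # remove every deployed model from the endpoint
        endpoint.delete()        # then delete the endpoint itself

for model in aiplatform.Model.list():
    if TARGET_NAME in model.display_name:
        model.delete()
###Output _____no_output_____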
Homework 1/.ipynb_checkpoints/hw1-checkpoint.ipynb
###Markdown ENVECON 147*Assignment 1: There are two parts to this assignment. The first asks you to work within this Jupyter notebook. The second part can be submitted separately via bCourses*Credit to Eric Van Dusen for Jupyter notebook support and Q1 ###Code from datascience import * import matplotlib.pyplot as plt %matplotlib inline import numpy as np import pandas as pd plt.style.use('seaborn-muted') ###Output _____no_output_____ ###Markdown Question 1 `costs` is a table showing the Output, Average Fixed Cost, and Total Cost. Use this information to calculate the following and add them to the table `costs`.1. Total Fixed Cost *(Hint: check out [np.ones](https://docs.scipy.org/doc/numpy/reference/generated/numpy.ones.html))* 2. Total Variable Cost3. Marginal Cost4. Average Variable Cost5. Average Cost ###Code costs = Table.read_table('hw02.csv') costs #You can use as many cells as you want. Just add them below. #Do not change or delete this cell costs ###Output _____no_output_____
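###Markdown The cost identities behind Question 1 are TFC = AFC * Q, TVC = TC - TFC, MC = ΔTC/ΔQ, AVC = TVC/Q, and AC = TC/Q. The cell below is a minimal sketch of one way to fill in the table; the column labels `Output`, `Average Fixed Cost`, and `Total Cost` are assumptions, since `hw02.csv` is not shown here, so check `costs.labels` and adjust as needed.
###Code
import numpy as np

# Column labels are assumed; check costs.labels if they differ.
q = costs.column("Output")
afc = costs.column("Average Fixed Cost")
tc = costs.column("Total Cost")

tfc = afc * q                                     # Total Fixed Cost = AFC * Q (constant across rows)
tvc = tc - tfc                                    # Total Variable Cost = TC - TFC
mc = np.append(np.nan, np.diff(tc) / np.diff(q))  # Marginal Cost = change in TC / change in Q
avc = tvc / q                                     # Average Variable Cost (rows with Output == 0 give inf/nan)
ac = tc / q                                       # Average Cost

costs = costs.with_columns(
    "Total Fixed Cost", tfc,
    "Total Variable Cost", tvc,
    "Marginal Cost", mc,
    "Average Variable Cost", avc,
    "Average Cost", ac,
)
costs
###Output _____no_output_____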
docs/samples/transformer/image_transformer/kfserving_sdk_transformer.ipynb
###Markdown Sample for using transformer with KFServing SDK The notebook shows how to use KFServing SDK to create InferenceService with transformer, predictor. ###Code from kubernetes import client from kfserving import KFServingClient from kfserving import constants from kfserving import V1alpha2EndpointSpec from kfserving import V1alpha2PredictorSpec from kfserving import V1alpha2TransformerSpec from kfserving import V1alpha2PyTorchSpec from kfserving import V1alpha2CustomSpec from kfserving import V1alpha2InferenceServiceSpec from kfserving import V1alpha2InferenceService from kubernetes.client import V1Container from kubernetes.client import V1ResourceRequirements import kubernetes.client import os import requests import json import numpy as np ###Output _____no_output_____ ###Markdown Define InferenceService with Transformer Add predictor and transformer on the endpoint spec ###Code api_version = constants.KFSERVING_GROUP + '/' + constants.KFSERVING_VERSION default_endpoint_spec = V1alpha2EndpointSpec( predictor=V1alpha2PredictorSpec( min_replicas=1, pytorch=V1alpha2PyTorchSpec( storage_uri='gs://kfserving-samples/models/pytorch/cifar10', model_class_name= "Net", resources=V1ResourceRequirements( requests={'cpu':'100m','memory':'1Gi'}, limits={'cpu':'100m', 'memory':'1Gi'}))), transformer=V1alpha2TransformerSpec( min_replicas=1, custom=V1alpha2CustomSpec( container=V1Container( image='yuzisun/image-transformer:latest', name='user-container', resources=V1ResourceRequirements( requests={'cpu':'100m','memory':'1Gi'}, limits={'cpu':'100m', 'memory':'1Gi'}))))) isvc = V1alpha2InferenceService(api_version=api_version, kind=constants.KFSERVING_KIND, metadata=client.V1ObjectMeta( name='cifar10', namespace='kubeflow'), spec=V1alpha2InferenceServiceSpec(default=default_endpoint_spec)) ###Output _____no_output_____ ###Markdown Create InferenceService with Transformer Call KFServingClient to create InferenceService. ###Code KFServing = KFServingClient() KFServing.create(isvc) ###Output _____no_output_____ ###Markdown Check the InferenceService ###Code KFServing.get('cifar10', namespace='kubeflow', watch=True, timeout_seconds=120) ###Output NAME READY DEFAULT_TRAFFIC CANARY_TRAFFIC URL cifar10 Unknown http://cifar10-predict.kubeflow.example.com cifar10 Unknown http://cifar10-predict.kubeflow.example.com cifar10 True http://cifar10-predict.kubeflow.example.com ###Markdown Predict the image ###Code api_instance = kubernetes.client.CoreV1Api(kubernetes.client.ApiClient()) service = api_instance.read_namespaced_service("istio-ingressgateway", "istio-system", exact='true') cluster_ip = service.status.load_balancer.ingress[0].ip url = "http://" + cluster_ip + "/v1/models/cifar10:predict" headers = { 'Host': 'cifar10-predict.kubeflow.example.com' } with open('./input.json') as json_file: data = json.load(json_file) response = requests.post(url, json.dumps(data), headers=headers) probs = json.loads(response.content.decode('utf-8'))["predictions"] print(probs) print(np.argmax(probs)) ###Output [[-1.6099601984024048, -2.6461076736450195, 0.32844462990760803, 2.4825074672698975, 0.43524616956710815, 2.3108043670654297, 1.00056791305542, -0.4232763648033142, -0.5100948214530945, -1.7978394031524658]] 3 ###Markdown Delete the InferenceService ###Code KFServing.delete('cifar10', namespace='kubeflow') ###Output _____no_output_____ ###Markdown Sample for using transformer with KFServing SDK The notebook shows how to use KFServing SDK to create KFService with transformer, predictor. 
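###Markdown The transformer referenced in the custom container spec below wraps user-defined pre- and post-processing around the PyTorch predictor. The next cell is a minimal sketch of what such a transformer image typically contains, assuming the `kfserving.KFModel` interface from the same SDK; the predictor host shown is a placeholder (the real container receives it as a command-line flag), and the image decoding logic is left as a stub rather than the sample's actual implementation.
###Code
from typing import Dict

import kfserving


class ImageTransformer(kfserving.KFModel):
    """Adds pre/post-processing around the cifar10 predictor."""

    def __init__(self, name: str, predictor_host: str):
        super().__init__(name)
        self.predictor_host = predictor_host  # predict() calls are routed to this host

    def preprocess(self, inputs: Dict) -> Dict:
        # Convert each raw instance (e.g. an encoded image) into the tensor layout
        # the predictor expects before the request is forwarded.
        return {"instances": [self._to_tensor(i) for i in inputs["instances"]]}

    def postprocess(self, inputs: Dict) -> Dict:
        # Pass the predictor's output through unchanged; label mapping could happen here.
        return inputs

    @staticmethod
    def _to_tensor(instance):
        return instance  # stub: replace with real image decoding/normalization


if __name__ == "__main__":
    # Placeholder predictor host; in the real image it arrives via a --predictor_host flag.
    transformer = ImageTransformer("cifar10", predictor_host="cifar10-predictor-default.kubeflow")
    kfserving.KFServer().start(models=[transformer])
###Output _____no_output_____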
###Code from kubernetes import client from kfserving import KFServingClient from kfserving import constants from kfserving import V1alpha2EndpointSpec from kfserving import V1alpha2PredictorSpec from kfserving import V1alpha2TransformerSpec from kfserving import V1alpha2PyTorchSpec from kfserving import V1alpha2CustomSpec from kfserving import V1alpha2KFServiceSpec from kfserving import V1alpha2KFService from kubernetes.client import V1Container from kubernetes.client import V1ResourceRequirements import kubernetes.client import os import requests import json import numpy as np ###Output _____no_output_____ ###Markdown Define KFService with Transformer Add predictor and transformer on the endpoint spec ###Code api_version = constants.KFSERVING_GROUP + '/' + constants.KFSERVING_VERSION default_endpoint_spec = V1alpha2EndpointSpec( predictor=V1alpha2PredictorSpec( min_replicas=1, pytorch=V1alpha2PyTorchSpec( storage_uri='gs://kfserving-samples/models/pytorch/cifar10', model_class_name= "Net", resources=V1ResourceRequirements( requests={'cpu':'100m','memory':'1Gi'}, limits={'cpu':'100m', 'memory':'1Gi'}))), transformer=V1alpha2TransformerSpec( min_replicas=1, custom=V1alpha2CustomSpec( container=V1Container( image='yuzisun/image-transformer:latest', name='user-container', resources=V1ResourceRequirements( requests={'cpu':'100m','memory':'1Gi'}, limits={'cpu':'100m', 'memory':'1Gi'}))))) kfsvc = V1alpha2KFService(api_version=api_version, kind=constants.KFSERVING_KIND, metadata=client.V1ObjectMeta( name='cifar10', namespace='kubeflow'), spec=V1alpha2KFServiceSpec(default=default_endpoint_spec)) ###Output _____no_output_____ ###Markdown Create KFService with Transformer Call KFServingClient to create KFService. ###Code KFServing = KFServingClient() KFServing.create(kfsvc) ###Output _____no_output_____ ###Markdown Check the KFService ###Code KFServing.get('cifar10', namespace='kubeflow', watch=True, timeout_seconds=120) ###Output NAME READY DEFAULT_TRAFFIC CANARY_TRAFFIC URL cifar10 Unknown http://cifar10-predict.kubeflow.example.com cifar10 Unknown http://cifar10-predict.kubeflow.example.com cifar10 True http://cifar10-predict.kubeflow.example.com ###Markdown Predict the image ###Code api_instance = kubernetes.client.CoreV1Api(kubernetes.client.ApiClient()) service = api_instance.read_namespaced_service("istio-ingressgateway", "istio-system", exact='true') cluster_ip = service.status.load_balancer.ingress[0].ip url = "http://" + cluster_ip + "/v1/models/cifar10:predict" headers = { 'Host': 'cifar10-predict.kubeflow.example.com' } with open('./input.json') as json_file: data = json.load(json_file) response = requests.post(url, json.dumps(data), headers=headers) probs = json.loads(response.content.decode('utf-8'))["predictions"] print(probs) print(np.argmax(probs)) ###Output [[-1.6099601984024048, -2.6461076736450195, 0.32844462990760803, 2.4825074672698975, 0.43524616956710815, 2.3108043670654297, 1.00056791305542, -0.4232763648033142, -0.5100948214530945, -1.7978394031524658]] 3 ###Markdown Delete the KFService ###Code KFServing.delete('cifar10', namespace='kubeflow') ###Output _____no_output_____ ###Markdown Sample for using transformer with KFServing SDK The notebook shows how to use KFServing SDK to create InferenceService with transformer, predictor. 
###Code from kubernetes import client from kfserving import KFServingClient from kfserving import constants from kfserving import V1alpha2EndpointSpec from kfserving import V1alpha2PredictorSpec from kfserving import V1alpha2TransformerSpec from kfserving import V1alpha2PyTorchSpec from kfserving import V1alpha2CustomSpec from kfserving import V1alpha2InferenceServiceSpec from kfserving import V1alpha2InferenceService from kubernetes.client import V1Container from kubernetes.client import V1ResourceRequirements import kubernetes.client import os import requests import json import numpy as np ###Output _____no_output_____ ###Markdown Define InferenceService with Transformer Add predictor and transformer on the endpoint spec ###Code api_version = constants.KFSERVING_GROUP + '/' + constants.KFSERVING_VERSION default_endpoint_spec = V1alpha2EndpointSpec( predictor=V1alpha2PredictorSpec( min_replicas=1, pytorch=V1alpha2PyTorchSpec( storage_uri='gs://kfserving-samples/models/pytorch/cifar10', model_class_name= "Net", resources=V1ResourceRequirements( requests={'cpu':'100m','memory':'1Gi'}, limits={'cpu':'100m', 'memory':'1Gi'}))), transformer=V1alpha2TransformerSpec( min_replicas=1, custom=V1alpha2CustomSpec( container=V1Container( image='gcr.io/kubeflow-ci/kfserving/image-transformer:latest', name='user-container', resources=V1ResourceRequirements( requests={'cpu':'100m','memory':'1Gi'}, limits={'cpu':'100m', 'memory':'1Gi'}))))) isvc = V1alpha2InferenceService(api_version=api_version, kind=constants.KFSERVING_KIND, metadata=client.V1ObjectMeta( name='cifar10', namespace='default'), spec=V1alpha2InferenceServiceSpec(default=default_endpoint_spec)) ###Output _____no_output_____ ###Markdown Create InferenceService with Transformer Call KFServingClient to create InferenceService. ###Code KFServing = KFServingClient() KFServing.create(isvc) ###Output _____no_output_____ ###Markdown Check the InferenceService ###Code KFServing.get('cifar10', namespace='default', watch=True, timeout_seconds=120) ###Output NAME READY DEFAULT_TRAFFIC CANARY_TRAFFIC URL cifar10 False cifar10 False cifar10 False cifar10 False cifar10 True 100 http://cifar10.default.example.com/v1/models/ci... ###Markdown Predict the image ###Code api_instance = kubernetes.client.CoreV1Api(kubernetes.client.ApiClient()) service = api_instance.read_namespaced_service("istio-ingressgateway", "istio-system", exact='true') cluster_ip = service.status.load_balancer.ingress[0].ip url = "http://" + cluster_ip + "/v1/models/cifar10:predict" headers = { 'Host': 'cifar10.default.example.com' } with open('./input.json') as json_file: data = json.load(json_file) print(url, headers) response = requests.post(url, json.dumps(data), headers=headers) probs = json.loads(response.content.decode('utf-8'))["predictions"] print(probs) print(np.argmax(probs)) ###Output http://9.21.53.162/v1/models/cifar10:predict {'Host': 'cifar10.default.example.com'} [[-1.6099603176116943, -2.6461076736450195, 0.3284446597099304, 2.4825077056884766, 0.43524616956710815, 2.3108043670654297, 1.00056791305542, -0.4232763946056366, -0.5100947022438049, -1.797839641571045]] 3 ###Markdown Delete the InferenceService ###Code KFServing.delete('cifar10', namespace='default') ###Output _____no_output_____
sagemaker-lineage/sagemaker-lineage-multihop-queries.ipynb
###Markdown Amazon SageMaker Multi-hop Lineage QueriesAmazon SageMaker Lineage tracks events that happen within SageMaker, allowing the relationships between them to be traced via a graph structure. SageMaker Lineage introduces a new API called `LineageQuery` that allows customers to query the lineage graph structure to discover relationships across their Machine Learning entities. Your machine learning workflows can generate deeply nested relationships; the lineage APIs allow you to answer questions about these relationships. For example, find all Data Sets that trained the model deployed to a given Endpoint, or find all Models trained by a Data Set.The lineage graph is created automatically by SageMaker and you can directly create or modify your own lineage.In addition to the `LineageQuery` API, the SageMaker SDK provides wrapper functions that make it easy to run queries that span across multiple hops of the entity relationship graph. These APIs and helper functions are described in this notebook. Key Concepts* **Lineage Graph** - A connected graph tracing your machine learning workflow end to end. * **Artifacts** - Represents a URI addressable object or data. Artifacts are typically inputs or outputs to Actions. * **Actions** - Represents an action taken such as a computation, transformation, or job. * **Contexts** - Provides a method to logically group other entities.* **Associations** - A directed edge in the lineage graph that links two entities.* **Lineage Traversal** - Starting from an arbitrary point, trace the lineage graph to discover and analyze relationships between steps in your workflow.* **Experiments** - Experiment entities (Experiments, Trials, and Trial Components) are also part of the lineage graph and can be associated with Artifacts, Actions, or Contexts. Prerequisites[`sagemaker-experiments`](https://github.com/aws/sagemaker-experiments) and [`pyvis`](https://pyvis.readthedocs.io/en/latest/) are two Python libraries that need to be installed as part of this notebook execution. `pyvis` is a library designed for interactive network visualization and `sagemaker-experiments` gives users the ability to use SageMaker's Experiment Tracking capabilities. This notebook should be run with `Python 3.9` using the SageMaker Studio `Python3 (Data Science)` kernel. The `sagemaker` SDK version required for this notebook is `>2.70.0`.If running in SageMaker Classic Notebooks, use the `conda_python3` kernel. The AWS account running this notebook should have access to provision two instances of type `ml.m5.xlarge`. These instances are used for training and deploying a model. Let's start by installing preview wheels of the Python SDK, boto, and the AWS CLI. ###Code # Fallback in case wheels are unavailable ! 
pip install sagemaker botocore boto3 awscli --upgrade import subprocess def execute_cmd(cmd): print(cmd) output = subprocess.getstatusoutput(cmd) return output def _download_from_s3(_file_path): _path = f"s3://reinvent21-sm-rc-wheels/{_file_path}" print(f"Path is {_path}") ls_cmd = f"aws s3 ls {_path}" print(execute_cmd(ls_cmd)) cmd = f"aws s3 cp {_path} /tmp/" print("Downloading: ", cmd) return execute_cmd(cmd) def _install_wheel(wheel_name): cmd = f"pip install --no-deps --log /tmp/output3.log /tmp/{wheel_name} --force-reinstall" ret = execute_cmd(cmd) _name = wheel_name.split(".")[0] _, _version = execute_cmd(f"python -c 'import {_name}; print({_name}.__version__)'") for package in ["botocore", "sagemaker", "boto3", "awscli"]: print(execute_cmd(f"python -c 'import {package}; print({package}.__version__)'")) print(f"Installed {_name}:{_version}") return ret def install_sm_py_sdk(): pySDK_name = "sagemaker.tar.gz" exit_code, _ = _download_from_s3("dist/sagemaker.tar.gz") if not exit_code: _install_wheel(pySDK_name) else: print(f"'{pySDK_name}' is not present in S3 Bucket. Installing from public PyPi...") execute_cmd("pip install sagemaker") def install_boto_wheels(): WHEELS = ["botocore.tar.gz", "boto3.tar.gz", "awscli.tar.gz"] for wheel_name in WHEELS: _path = f"boto3/{wheel_name}" exit_code, _ = _download_from_s3(_path) if not exit_code: _install_wheel(wheel_name) else: print(f"'{wheel_name}' is not present in S3 Bucket. Ignoring...") install_boto_wheels() install_sm_py_sdk() !pip install sagemaker-experiments pyvis ###Output _____no_output_____ ###Markdown Notebook OverviewThis notebook demonstrates how to use SageMaker Lineage APIs to query multi-hop relationships across the lineage graph. Multi-hop relationships are those that span beyond single entity relationships, e.g. Model -> Endpoint, Training Job -> Model. Multi-hop queries allow users to search for distant relationships across the Lineage Graph such as Endpoint -> Data Set.To demonstrate these capabilities, in this notebook we create a training job, register a model to the Model Registry, and deploy the model to an Endpoint. 
###Code import os import boto3 import sagemaker import pprint from botocore.config import Config boto_session = boto3.Session() config = Config(retries={"max_attempts": 50, "mode": "adaptive"}) sm_client = boto3.client("sagemaker", config=config) region = boto_session.region_name sagemaker_session = sagemaker.Session(sagemaker_client=sm_client, boto_session=boto_session) default_bucket = sagemaker_session.default_bucket() role = sagemaker.get_execution_role() # Helper function to print query outputs pp = pprint.PrettyPrinter() from datetime import datetime training_instance_type = "ml.m5.xlarge" inference_instance_type = "ml.m5.xlarge" s3_prefix = "multihop-example" unique_id = str(datetime.now().timestamp()).split(".")[0] ###Output _____no_output_____ ###Markdown Create an Experiment and Trial for a training job ###Code from smexperiments.experiment import Experiment from smexperiments.trial import Trial from smexperiments.trial_component import TrialComponent experiment_name = f"MultihopQueryExperiment-{unique_id}" exp = Experiment.create(experiment_name=experiment_name, sagemaker_boto_client=sm_client) trial = Trial.create( experiment_name=exp.experiment_name, trial_name=f"MultihopQueryTrial-{unique_id}", sagemaker_boto_client=sm_client, ) print(exp.experiment_name) print(trial.trial_name) ###Output _____no_output_____ ###Markdown Training DataCreating a `data/` directory to store the preprocessed [UCI Abalone](https://archive.ics.uci.edu/ml/datasets/abalone) dataset. The preprocessing is done using the preprocessing script defined in [this](https://github.com/aws/amazon-sagemaker-examples/blob/master/sagemaker-pipelines/tabular/abalone_build_train_deploy/sagemaker-pipelines-preprocess-train-evaluate-batch-transform.ipynb) notebook. Then training and validation data is uploaded to S3 so that it can be used in the training and inference job. ###Code if not os.path.exists("./data/"): os.makedirs("./data/") print("Directory Created ") else: print("Directory already exists") # Download the processed abalone dataset files !curl https://sagemaker-sample-files.s3.amazonaws.com/datasets/tabular/uci_abalone/preprocessed/test.csv > ./data/test.csv !curl https://sagemaker-sample-files.s3.amazonaws.com/datasets/tabular/uci_abalone/preprocessed/train.csv > ./data/train.csv !curl https://sagemaker-sample-files.s3.amazonaws.com/datasets/tabular/uci_abalone/preprocessed/validation.csv > ./data/validation.csv # Upload the datasets to the SageMaker session default bucket !aws s3 cp data/train.csv s3://{default_bucket}/experiments-demo/train.csv !aws s3 cp data/validation.csv s3://{default_bucket}/experiments-demo/validation.csv training_data = f"s3://{default_bucket}/experiments-demo/train.csv" validation_data = f"s3://{default_bucket}/experiments-demo/validation.csv" ###Output _____no_output_____ ###Markdown Create a training jobWe train a simple XGBoost model on the [Abalone dataset](https://www.google.com/search?client=firefox-b-1-d&q=abalone+dataset). `sagemaker.image_uris.retrieve()` is used to get the sagemaker container for XGBoost so that it can be used in the Estimator. In the `.fit()` function, we pass in a training and validation dataset along with an `experiment_config`. The `experiment_config` ensures that the metrics, parameters, and artifats associated with this training job are logged to the experiment and trial created above. 
###Code from sagemaker.estimator import Estimator model_path = f"s3://{default_bucket}/{s3_prefix}/xgb_model" training_instance_type = "ml.m5.large" image_uri = sagemaker.image_uris.retrieve( framework="xgboost", region=region, version="1.0-1", py_version="py3", instance_type=training_instance_type, ) xgb_train = Estimator( image_uri=image_uri, instance_type=training_instance_type, instance_count=1, output_path=model_path, sagemaker_session=sagemaker_session, role=role, ) xgb_train.set_hyperparameters( objective="reg:linear", num_round=50, max_depth=5, eta=0.2, gamma=4, min_child_weight=6, subsample=0.7, silent=0, ) from sagemaker.inputs import TrainingInput xgb_train.fit( inputs={ "train": TrainingInput( s3_data=training_data, content_type="text/csv", ), "validation": TrainingInput( s3_data=validation_data, content_type="text/csv", ), }, experiment_config={ "ExperimentName": experiment_name, "TrialName": trial.trial_name, "TrialComponentDisplayName": "MultiHopQueryTrialComponent", }, ) ###Output _____no_output_____ ###Markdown Create a Model Package Group for the trained model to be registeredCreate a new Model Package Group or use an existing one to register the model ###Code model_package_group_name = "lineage-test-" + unique_id mpg = sm_client.create_model_package_group(ModelPackageGroupName=model_package_group_name) mpg_arn = mpg["ModelPackageGroupArn"] ###Output _____no_output_____ ###Markdown Register the model in the Model RegistryOnce the model is registered, you will see it in the Model Registry tab of the SageMaker Studio UI. The model is registered with the `approval_status` set to "Approved". By default, the model is registered with the `approval_status` set to "PendingManualApproval". Users can then navigate to the Model Registry to manually approve the model based on any criteria set for model evaluation or this can be done via API. ###Code inference_instance_type = "ml.m5.xlarge" model_package = xgb_train.register( model_package_group_name=mpg_arn, inference_instances=[inference_instance_type], transform_instances=[inference_instance_type], content_types=["text/csv"], response_types=["text/csv"], approval_status="Approved", ) model_package_arn = model_package.model_package_arn print("Model Package ARN : ", model_package_arn) ###Output _____no_output_____ ###Markdown Deploy the model to a SageMaker EndpointA SageMaker Endpoint is used to host a model that can be used for inference. The type of endpoint deployed in this notebook is a real time inference endpoint. This is ideal for inference workloads where you have real-time, interactive, low latency requirements. ###Code endpoint_name = "lineage-test-endpoint-" + unique_id model_package.deploy( endpoint_name=endpoint_name, initial_instance_count=1, instance_type=inference_instance_type, ) # Get the endpoint ARN endpoint_arn = sm_client.describe_endpoint(EndpointName=endpoint_name)["EndpointArn"] print(endpoint_arn) ###Output _____no_output_____ ###Markdown SageMaker Lineage QueriesWe explore SageMaker's lineage capabilities to traverse the relationships between the entities created in this notebook - datasets, model, endpoint, and training job. 
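###Markdown Each of the entities created above (the uploaded datasets, the registered model package, and the endpoint) is captured in the lineage graph automatically. As a quick sanity check before running the multi-hop queries, the cell below is a small sketch that looks up the lineage artifact recorded for the training dataset by its S3 URI; it assumes the earlier cells have run so `training_data` and `sagemaker_session` exist, and it may return nothing until the training job has completed.
###Code
from sagemaker.lineage.artifact import Artifact

# List lineage artifacts whose source URI matches the training CSV uploaded earlier.
for summary in Artifact.list(source_uri=training_data, sagemaker_session=sagemaker_session):
    print(summary.artifact_arn)
###Output _____no_output_____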
###Code from sagemaker.lineage.context import Context, EndpointContext from sagemaker.lineage.action import Action from sagemaker.lineage.association import Association from sagemaker.lineage.artifact import Artifact, ModelArtifact, DatasetArtifact from sagemaker.lineage.query import ( LineageQuery, LineageFilter, LineageSourceEnum, LineageEntityEnum, LineageQueryDirectionEnum, ) ###Output _____no_output_____ ###Markdown Using the LineageQuery API to find entity associationsIn this section we use two APIs, `LineageQuery` and `LineageFilter` to construct queries to answer questions about the Lineage Graph and extract entity relationships. LineageQuery parameters:* `start_arns`: A list of ARNs that will be used as the starting point for the query.* `direction`: The direction of the query.* `include_edges`: If true, return edges in addition to vertices.* `query_filter`: The query filter.LineageFilter paramters:* `entities`: A list of entity types (Artifact, Association, Action) to filter for when returning the results on LineageQuery* `sources`: A list of source types (Endpoint, Model, Dataset) to filter for when returning the results of LineageQueryA `Context` is automatically created when a SageMaker Endpoint is created, an `Artifact` is automatically created when a Model is created in SageMaker. ###Code # Find the endpoint context and model artifact that should be used for the lineage queries. contexts = Context.list(source_uri=endpoint_arn) context_name = list(contexts)[0].context_name endpoint_context = EndpointContext.load(context_name=context_name) ###Output _____no_output_____ ###Markdown Find all datasets associated with an Endpoint ###Code # Define the LineageFilter to look for entities of type `ARTIFACT` and the source of type `DATASET`. query_filter = LineageFilter( entities=[LineageEntityEnum.ARTIFACT], sources=[LineageSourceEnum.DATASET] ) # Providing this `LineageFilter` to the `LineageQuery` will construct a query that traverses through the given context `endpoint_context` # and find all datasets. query_result = LineageQuery(sagemaker_session).query( start_arns=[endpoint_context.context_arn], query_filter=query_filter, direction=LineageQueryDirectionEnum.ASCENDANTS, include_edges=False, ) # Parse through the query results to get the lineage objects corresponding to the datasets dataset_artifacts = [] for vertex in query_result.vertices: dataset_artifacts.append(vertex.to_lineage_object().source.source_uri) pp.pprint(dataset_artifacts) ###Output _____no_output_____ ###Markdown Find the models associated with an Endpoint ###Code # Define the LineageFilter to look for entities of type `ARTIFACT` and the source of type `MODEL`. query_filter = LineageFilter( entities=[LineageEntityEnum.ARTIFACT], sources=[LineageSourceEnum.MODEL] ) # Providing this `LineageFilter` to the `LineageQuery` will construct a query that traverses through the given context `endpoint_context` # and find all datasets. 
query_result = LineageQuery(sagemaker_session).query( start_arns=[endpoint_context.context_arn], query_filter=query_filter, direction=LineageQueryDirectionEnum.ASCENDANTS, include_edges=False, ) # Parse through the query results to get the lineage objects corresponding to the model model_artifacts = [] for vertex in query_result.vertices: model_artifacts.append(vertex.to_lineage_object().source.source_uri) # The results of the `LineageQuery` API call return the ARN of the model deployed to the endpoint along with # the S3 URI to the model.tar.gz file associated with the model pp.pprint(model_artifacts) ###Output _____no_output_____ ###Markdown Find the trial components associated with the endpoint ###Code # Define the LineageFilter to look for entities of type `TRIAL_COMPONENT` and the source of type `TRAINING_JOB`. query_filter = LineageFilter( entities=[LineageEntityEnum.TRIAL_COMPONENT], sources=[LineageSourceEnum.TRAINING_JOB], ) # Providing this `LineageFilter` to the `LineageQuery` will construct a query that traverses through the given context `endpoint_context` # and find all datasets. query_result = LineageQuery(sagemaker_session).query( start_arns=[endpoint_context.context_arn], query_filter=query_filter, direction=LineageQueryDirectionEnum.ASCENDANTS, include_edges=False, ) # Parse through the query results to get the ARNs of the training jobs associated with this Endpoint trial_components = [] for vertex in query_result.vertices: trial_components.append(vertex.arn) pp.pprint(trial_components) ###Output _____no_output_____ ###Markdown Amazon SageMaker Multi-hop Lineage QueriesAmazon SageMaker Lineage tracks events that happen within SageMaker allowing the relationships between them to be traced via a graph structure. SageMaker Lineage introduces a new API called `LineageQuery` that allows customers to query the lineage graph structure to discover relationship across their Machine Learning entities. Your machine learning workflows can generate deeply nested relationships, the lineage APIs allow you to answer questions about these relationships. For example find all Data Sets that trained the model deployed to a given Endpoint or find all Models trained by a Data Set.The lineage graph is created automatically by SageMaker and you can directly create or modify your own lineage.In addition to the `LineageQuery` API, the SageMaker SDK provides wrapper functions that make it easy to run queries that span across multiple hops of the entity relationship graph. These APIs and helper functions are described in this notebook. RuntimeThis notebook takes approximately 15 minutes to run. Contents1. [Key Concepts](Key-Concepts)1. [Prerequisites](Prerequisites)1. [Notebook Overview](Notebook-Overview)1. [Create an Experiment and Trial for a training job](Create-an-Experiment-and-Trial-for-a-training-job)1. [Training Data](Training-Data)1. [Create a training job](Create-a-training-job)1. [Create a Model Package Group for the trained model to be registered](Create-a-Model-Package-Group-for-the-trained-model-to-be-registered)1. [Register the model in the Model Registry](Register-the-model-in-the-Model-Registry)1. [Deploy the model to a SageMaker Endpoint](Deploy-the-model-to-a-SageMaker-Endpoint)1. [SageMaker Lineage Queries](SageMaker-Lineage-Queries) 1. [Using the LineageQuery API to find entity associations](Using-the-LineageQuery-API-to-find-entity-associations) 1. [Find all datasets associated with an Endpoint](Find-all-datasets-associated-with-an-Endpoint) 1. 
[Find the models associated with an Endpoint](Find-the-models-associated-with-an-Endpoint) 1. [Find the trial components associated with an Endpoint](Find-the-trial-components-associated-with-an-Endpoint) 1. [Change the focal point of lineage](Change-the-focal-point-of-lineage) 1. [Use LineageQueryDirectionEnum.BOTH](Use-LineageQueryDirectionEnum.BOTH) 1. [Directions in LineageQuery: Ascendants vs. Descendants](Directions-in-LineageQuery:-Ascendants-vs.-Descendants) 1. [SDK helper functions](SDK-helper-functions) 1. [Lineage Graph Visualization](Lineage-Graph-Visualization)1. [Conclusion](Conclusion)1. [Cleanup](Cleanup) Key Concepts* **Lineage Graph** - A connected graph tracing your machine learning workflow end to end. * **Artifacts** - Represents a URI addressable object or data. Artifacts are typically inputs or outputs to Actions. * **Actions** - Represents an action taken such as a computation, transformation, or job. * **Contexts** - Provides a method to logically group other entities.* **Associations** - A directed edge in the lineage graph that links two entities.* **Lineage Traversal** - Starting from an arbitrary point trace the lineage graph to discover and analyze relationships between steps in your workflow.* **Experiments** - Experiment entites (Experiments, Trials, and Trial Components) are also part of the lineage graph and can be associated wtih Artifacts, Actions, or Contexts. Prerequisites[`sagemaker-experiments`](https://github.com/aws/sagemaker-experiments) and [`pyvis`]((https://pyvis.readthedocs.io/en/latest/)) are two Python libraries that need to be installed as part of this notebook execution. `pyvis` is a library designed for interactive network visualization and `sagemaker-experiments` gives users the ability to use SageMaker's Experiment Tracking capabilities. This notebook should be run with `Python 3.9` using the SageMaker Studio `Python3 (Data Science)` kernel. The `sagemaker` sdk version required for this notebook is `>2.70.0`.If running in SageMaker Classic Notebooks, use the `conda_python3` kernel. The AWS account running this notebook should have access to provision two instances of type `ml.m5.xlarge`. These instances are used for training and deploying a model. Let's start by installing the Python SDK, boto and AWS CLI. ###Code !pip install sagemaker botocore boto3 awscli --upgrade !pip install sagemaker-experiments pyvis ###Output _____no_output_____ ###Markdown Notebook OverviewThis notebook demonstrates how to use SageMaker Lineage APIs to query multi-hop relationships across the lineage graph. Multi-hop relationships are those that span beyond single entity relationships, e.g. Model -> Endpoint, Training Job -> Model. Multi-hop queries allow users to search for distant relationships across the Lineage Graph such as Endpoint -> Data Set.To demonstrate these capabilities, in this notebook we create a training job, register a model to the Model Registry, and deploy the model to an Endpoint. 
###Code import os import boto3 import sagemaker import pprint from botocore.config import Config config = Config(retries={"max_attempts": 50, "mode": "adaptive"}) sagemaker_session = sagemaker.Session() sm_client = sagemaker_session.sagemaker_client region = sagemaker_session.boto_region_name default_bucket = sagemaker_session.default_bucket() role = sagemaker.get_execution_role() # Helper function to print query outputs pp = pprint.PrettyPrinter() from datetime import datetime training_instance_type = "ml.m5.xlarge" inference_instance_type = "ml.m5.xlarge" s3_prefix = "multihop-example" unique_id = str(datetime.now().timestamp()).split(".")[0] ###Output _____no_output_____ ###Markdown Create an Experiment and Trial for a training job ###Code from smexperiments.experiment import Experiment from smexperiments.trial import Trial from smexperiments.trial_component import TrialComponent experiment_name = f"MultihopQueryExperiment-{unique_id}" exp = Experiment.create(experiment_name=experiment_name, sagemaker_boto_client=sm_client) trial = Trial.create( experiment_name=exp.experiment_name, trial_name=f"MultihopQueryTrial-{unique_id}", sagemaker_boto_client=sm_client, ) print(exp.experiment_name) print(trial.trial_name) ###Output _____no_output_____ ###Markdown Training DataCreating a `data/` directory to store the preprocessed [UCI Abalone](https://archive.ics.uci.edu/ml/datasets/abalone) dataset. The preprocessing is done using the preprocessing script defined in the notebook [Orchestrating Jobs with Amazon SageMaker Model Building Pipelines](https://github.com/aws/amazon-sagemaker-examples/blob/master/sagemaker-pipelines/tabular/abalone_build_train_deploy/sagemaker-pipelines-preprocess-train-evaluate-batch-transform.ipynb) notebook. Then training and validation data is uploaded to S3 so that it can be used in the training and inference job. ###Code default_bucket if not os.path.exists("./data/"): os.makedirs("./data/") print("Directory Created ") else: print("Directory already exists") # Download the processed abalone dataset files s3 = boto3.client("s3") s3.download_file( f"sagemaker-sample-files", "datasets/tabular/uci_abalone/preprocessed/test.csv", "./data/test.csv", ) s3.download_file( f"sagemaker-sample-files", "datasets/tabular/uci_abalone/preprocessed/train.csv", "./data/train.csv", ) s3.download_file( f"sagemaker-sample-files", "datasets/tabular/uci_abalone/preprocessed/validation.csv", "./data/validation.csv", ) # Upload the datasets to the SageMaker session default bucket boto3.Session().resource("s3").Bucket(default_bucket).Object( "experiments-demo/train.csv" ).upload_file("data/train.csv") boto3.Session().resource("s3").Bucket(default_bucket).Object( "experiments-demo/validation.csv" ).upload_file("data/validation.csv") training_data = f"s3://{default_bucket}/experiments-demo/train.csv" validation_data = f"s3://{default_bucket}/experiments-demo/validation.csv" ###Output _____no_output_____ ###Markdown Create a training jobWe train a simple XGBoost model on the Abalone dataset. `sagemaker.image_uris.retrieve()` is used to get the sagemaker container for XGBoost so that it can be used in the Estimator. In the `.fit()` function, we pass in a training and validation dataset along with an `experiment_config`. The `experiment_config` ensures that the metrics, parameters, and artifats associated with this training job are logged to the experiment and trial created above. 
###Code from sagemaker.estimator import Estimator model_path = f"s3://{default_bucket}/{s3_prefix}/xgb_model" training_instance_type = "ml.m5.large" image_uri = sagemaker.image_uris.retrieve( framework="xgboost", region=region, version="1.0-1", py_version="py3", instance_type=training_instance_type, ) xgb_train = Estimator( image_uri=image_uri, instance_type=training_instance_type, instance_count=1, output_path=model_path, sagemaker_session=sagemaker_session, role=role, ) xgb_train.set_hyperparameters( objective="reg:linear", num_round=50, max_depth=5, eta=0.2, gamma=4, min_child_weight=6, subsample=0.7, silent=0, ) from sagemaker.inputs import TrainingInput xgb_train.fit( inputs={ "train": TrainingInput( s3_data=training_data, content_type="text/csv", ), "validation": TrainingInput( s3_data=validation_data, content_type="text/csv", ), }, experiment_config={ "ExperimentName": experiment_name, "TrialName": trial.trial_name, "TrialComponentDisplayName": "MultiHopQueryTrialComponent", }, ) ###Output _____no_output_____ ###Markdown Create a Model Package Group for the trained model to be registeredCreate a new Model Package Group or use an existing one to register the model. ###Code model_package_group_name = "lineage-test-" + unique_id mpg = sm_client.create_model_package_group(ModelPackageGroupName=model_package_group_name) mpg_arn = mpg["ModelPackageGroupArn"] ###Output _____no_output_____ ###Markdown Register the model in the Model RegistryOnce the model is registered, it appears in the Model Registry tab of the SageMaker Studio UI. The model is registered with the `approval_status` set to "Approved". By default, the model is registered with the `approval_status` set to "PendingManualApproval". Users can then navigate to the Model Registry to manually approve the model based on any criteria set for model evaluation or this can be done via API. ###Code inference_instance_type = "ml.m5.xlarge" model_package = xgb_train.register( model_package_group_name=mpg_arn, inference_instances=[inference_instance_type], transform_instances=[inference_instance_type], content_types=["text/csv"], response_types=["text/csv"], approval_status="Approved", ) model_package_arn = model_package.model_package_arn print("Model Package ARN : ", model_package_arn) ###Output _____no_output_____ ###Markdown Deploy the model to a SageMaker EndpointA SageMaker Endpoint is used to host a model that can be used for inference. The type of endpoint deployed in this notebook is a real time inference endpoint. This is ideal for inference workloads where you have real-time, interactive, low latency requirements. ###Code endpoint_name = "lineage-test-endpoint-" + unique_id model_package.deploy( endpoint_name=endpoint_name, initial_instance_count=1, instance_type=inference_instance_type, ) # Get the endpoint ARN endpoint_arn = sm_client.describe_endpoint(EndpointName=endpoint_name)["EndpointArn"] print(endpoint_arn) ###Output _____no_output_____ ###Markdown SageMaker Lineage QueriesWe explore SageMaker's lineage capabilities to traverse the relationships between the entities created in this notebook - datasets, model, endpoint, and training job. 
###Code
from sagemaker.lineage.context import Context, EndpointContext
from sagemaker.lineage.action import Action
from sagemaker.lineage.association import Association
from sagemaker.lineage.artifact import Artifact, ModelArtifact, DatasetArtifact
from sagemaker.lineage.query import (
    LineageQuery,
    LineageFilter,
    LineageSourceEnum,
    LineageEntityEnum,
    LineageQueryDirectionEnum,
)

###Output
_____no_output_____

###Markdown
Using the LineageQuery API to find entity associationsIn this section we use two APIs, `LineageQuery` and `LineageFilter`, to construct queries to answer questions about the Lineage Graph and extract entity relationships. LineageQuery parameters:* `start_arns`: A list of ARNs that is used as the starting point for the query.* `direction`: The direction of the query.* `include_edges`: If true, return edges in addition to vertices.* `query_filter`: The query filter.LineageFilter parameters:* `entities`: A list of entity types (Artifact, Association, Action) to filter for when returning the results of LineageQuery* `sources`: A list of source types (Endpoint, Model, Dataset) to filter for when returning the results of LineageQueryA `Context` is automatically created when a SageMaker Endpoint is created, and an `Artifact` is automatically created when a Model is created in SageMaker.

###Code
# Find the endpoint context and model artifact that should be used for the lineage queries.
contexts = Context.list(source_uri=endpoint_arn)
context_name = list(contexts)[0].context_name
endpoint_context = EndpointContext.load(context_name=context_name)

###Output
_____no_output_____

###Markdown
Find all datasets associated with an Endpoint

###Code
# Define the LineageFilter to look for entities of type `ARTIFACT` and the source of type `DATASET`.
query_filter = LineageFilter(
    entities=[LineageEntityEnum.ARTIFACT], sources=[LineageSourceEnum.DATASET]
)

# Providing this `LineageFilter` to the `LineageQuery` constructs a query that traverses through the given context `endpoint_context`
# and finds all datasets.
query_result = LineageQuery(sagemaker_session).query(
    start_arns=[endpoint_context.context_arn],
    query_filter=query_filter,
    direction=LineageQueryDirectionEnum.ASCENDANTS,
    include_edges=False,
)

# Parse through the query results to get the lineage objects corresponding to the datasets
dataset_artifacts = []
for vertex in query_result.vertices:
    dataset_artifacts.append(vertex.to_lineage_object().source.source_uri)

pp.pprint(dataset_artifacts)

###Output
_____no_output_____

###Markdown
Find the models associated with an Endpoint

###Code
# Define the LineageFilter to look for entities of type `ARTIFACT` and the source of type `MODEL`.
query_filter = LineageFilter(
    entities=[LineageEntityEnum.ARTIFACT], sources=[LineageSourceEnum.MODEL]
)

# Providing this `LineageFilter` to the `LineageQuery` constructs a query that traverses through the given context `endpoint_context`
# and finds all models.
query_result = LineageQuery(sagemaker_session).query(
    start_arns=[endpoint_context.context_arn],
    query_filter=query_filter,
    direction=LineageQueryDirectionEnum.ASCENDANTS,
    include_edges=False,
)

# Parse through the query results to get the lineage objects corresponding to the model
model_artifacts = []
for vertex in query_result.vertices:
    model_artifacts.append(vertex.to_lineage_object().source.source_uri)

# The results of the `LineageQuery` API call return the ARN of the model deployed to the endpoint along with
# the S3 URI to the model.tar.gz file associated with the model
pp.pprint(model_artifacts)

###Output
_____no_output_____

###Markdown
Find the trial components associated with an Endpoint

###Code
# Define the LineageFilter to look for entities of type `TRIAL_COMPONENT` and the source of type `TRAINING_JOB`.
query_filter = LineageFilter(
    entities=[LineageEntityEnum.TRIAL_COMPONENT],
    sources=[LineageSourceEnum.TRAINING_JOB],
)

# Providing this `LineageFilter` to the `LineageQuery` constructs a query that traverses through the given context `endpoint_context`
# and finds the training job trial components.
query_result = LineageQuery(sagemaker_session).query(
    start_arns=[endpoint_context.context_arn],
    query_filter=query_filter,
    direction=LineageQueryDirectionEnum.ASCENDANTS,
    include_edges=False,
)

# Parse through the query results to get the ARNs of the training jobs associated with this Endpoint
trial_components = []
for vertex in query_result.vertices:
    trial_components.append(vertex.arn)

pp.pprint(trial_components)

###Output
_____no_output_____

###Markdown
Change the focal point of lineageThe `LineageQuery` can be modified to have different `start_arns`, which changes the focal point of lineage. In addition, the `LineageFilter` can take multiple sources and entities to expand the scope of the query. **Here we use the model as the lineage focal point and find the Endpoints and Datasets associated with it.**

###Code
# Get the ModelArtifact
model_artifact_summary = list(Artifact.list(source_uri=model_package_arn))[0]
model_artifact = ModelArtifact.load(artifact_arn=model_artifact_summary.artifact_arn)

query_filter = LineageFilter(
    entities=[LineageEntityEnum.ARTIFACT],
    sources=[LineageSourceEnum.ENDPOINT, LineageSourceEnum.DATASET],
)

query_result = LineageQuery(sagemaker_session).query(
    start_arns=[model_artifact.artifact_arn],  # Model is the starting artifact
    query_filter=query_filter,
    # Find all the entities that descend from the model, i.e. the endpoint
    direction=LineageQueryDirectionEnum.DESCENDANTS,
    include_edges=False,
)

associations = []
for vertex in query_result.vertices:
    associations.append(vertex.to_lineage_object().source.source_uri)

query_result = LineageQuery(sagemaker_session).query(
    start_arns=[model_artifact.artifact_arn],  # Model is the starting artifact
    query_filter=query_filter,
    # Find all the entities that ascend from the model, i.e. the datasets
    direction=LineageQueryDirectionEnum.ASCENDANTS,
    include_edges=False,
)

for vertex in query_result.vertices:
    associations.append(vertex.to_lineage_object().source.source_uri)

pp.pprint(associations)

###Output
_____no_output_____

###Markdown
Use LineageQueryDirectionEnum.BOTHWhen the direction is set to `BOTH`, the query traverses the graph to find ascendant and descendant relationships, and the traversal takes place not only from the starting node, but from each node that is visited. For example:
If the training job is run twice and both models generated by the training job are deployed to endpoints, the result of the query with direction set to `BOTH` shows both endpoints. This is because the same image is used for training and deploying the model. Since the image is common to the model (`start_arn`) and both the endpoints, it appears in the query result.

###Code
query_filter = LineageFilter(
    entities=[LineageEntityEnum.ARTIFACT],
    sources=[LineageSourceEnum.ENDPOINT, LineageSourceEnum.DATASET],
)

query_result = LineageQuery(sagemaker_session).query(
    start_arns=[model_artifact.artifact_arn],  # Model is the starting artifact
    query_filter=query_filter,
    # This specifies that the query should look for associations both ascending and descending for the start
    direction=LineageQueryDirectionEnum.BOTH,
    include_edges=False,
)

associations = []
for vertex in query_result.vertices:
    associations.append(vertex.to_lineage_object().source.source_uri)

pp.pprint(associations)

###Output
_____no_output_____

###Markdown
Directions in LineageQuery: Ascendants vs. DescendantsTo understand the direction in the Lineage Graph, take the following entity relationship graph - Dataset -> Training Job -> Model -> Endpoint. The endpoint is a **descendant** of the model, and the model is a **descendant** of the dataset. Similarly, the model is an **ascendant** of the endpoint. The `direction` parameter can be used to specify whether the query should return entities that are descendants or ascendants of the entity in start_arns. If `start_arns` contains a model and the direction is `DESCENDANTS`, the query returns the endpoint. If the direction is `ASCENDANTS`, the query returns the dataset.

###Code
# In this example, we'll look at the impact of specifying the direction as ASCENDANT or DESCENDANT in a `LineageQuery`.

query_filter = LineageFilter(
    entities=[LineageEntityEnum.ARTIFACT],
    sources=[
        LineageSourceEnum.ENDPOINT,
        LineageSourceEnum.MODEL,
        LineageSourceEnum.DATASET,
        LineageSourceEnum.TRAINING_JOB,
    ],
)

query_result = LineageQuery(sagemaker_session).query(
    start_arns=[model_artifact.artifact_arn],
    query_filter=query_filter,
    direction=LineageQueryDirectionEnum.ASCENDANTS,
    include_edges=False,
)

ascendant_artifacts = []

# The lineage entity returned for the Training Job is a TrialComponent which can't be converted to a
# lineage object using the method `to_lineage_object()` so we extract the TrialComponent ARN.
for vertex in query_result.vertices:
    try:
        ascendant_artifacts.append(vertex.to_lineage_object().source.source_uri)
    except:
        ascendant_artifacts.append(vertex.arn)

print("Ascendant artifacts:")
pp.pprint(ascendant_artifacts)

query_result = LineageQuery(sagemaker_session).query(
    start_arns=[model_artifact.artifact_arn],
    query_filter=query_filter,
    direction=LineageQueryDirectionEnum.DESCENDANTS,
    include_edges=False,
)

descendant_artifacts = []
for vertex in query_result.vertices:
    try:
        descendant_artifacts.append(vertex.to_lineage_object().source.source_uri)
    except:
        # Handling TrialComponents.
        descendant_artifacts.append(vertex.arn)

print("Descendant artifacts:")
pp.pprint(descendant_artifacts)

###Output
_____no_output_____

###Markdown
SDK helper functionsThe classes `EndpointContext`, `ModelArtifact`, and `DatasetArtifact` have helper functions that are wrappers over the `LineageQuery` API to make certain lineage queries easier to leverage.
###Code
# Find all the datasets associated with the endpoint
datasets = []
dataset_artifacts = endpoint_context.dataset_artifacts()
for dataset in dataset_artifacts:
    datasets.append(dataset.source.source_uri)
print("Datasets : ", datasets)

# Find the training jobs associated with the endpoint
training_job_artifacts = endpoint_context.training_job_arns()
training_jobs = []
for training_job in training_job_artifacts:
    training_jobs.append(training_job)
print("Training Jobs : ", training_jobs)

# Get the ARN for the pipeline execution associated with this endpoint (if any)
pipeline_execution_arn = endpoint_context.pipeline_execution_arn()
if pipeline_execution_arn:
    print(pipeline_execution_arn)

# Here we use the `ModelArtifact` class to find all the datasets and endpoints associated with the model

dataset_artifacts = model_artifact.dataset_artifacts()
endpoint_contexts = model_artifact.endpoint_contexts()

datasets = [dataset.source.source_uri for dataset in dataset_artifacts]
endpoints = [endpoint.source.source_uri for endpoint in endpoint_contexts]

print("Datasets associated with this model : ")
pp.pprint(datasets)

print("Endpoints associated with this model : ")
pp.pprint(endpoints)

# Here we use the `DatasetArtifact` class to find all the endpoints hosting models that were trained with a particular dataset

# Find the artifact associated with the dataset
dataset_artifact_arn = list(Artifact.list(source_uri=training_data))[0].artifact_arn
dataset_artifact = DatasetArtifact.load(artifact_arn=dataset_artifact_arn)

# Find the endpoints that used this training dataset
endpoint_contexts = dataset_artifact.endpoint_contexts()
endpoints = [endpoint.source.source_uri for endpoint in endpoint_contexts]

print("Endpoints associated with the training dataset {}".format(training_data))
pp.pprint(endpoints)

###Output
_____no_output_____

###Markdown
Lineage Graph VisualizationA helper class `Visualizer()` is provided in `visualizer.py` to help plot the lineage graph. When the query response is rendered, a graph with the lineage relationships from the `StartArns` is displayed. From the `StartArns` the visualization shows the relationships with the other lineage entities returned in the `query_lineage` API call.

###Code
# Graph APIs
# Here we use the boto3 `query_lineage` API to generate the query response to plot.

from visualizer import Visualizer

query_response = sm_client.query_lineage(
    StartArns=[endpoint_context.context_arn], Direction="Ascendants", IncludeEdges=True
)

viz = Visualizer()
viz.render(query_response, "Endpoint")

query_response = sm_client.query_lineage(
    StartArns=[model_artifact.artifact_arn], Direction="Ascendants", IncludeEdges=True
)

viz.render(query_response, "Model")

###Output
_____no_output_____

###Markdown
ConclusionThis notebook demonstrated the capabilities of SageMaker Lineage that make it easy for users to keep track of their complex ML workflows. Users can construct their own lineage queries using the `LineageQuery` API and `LineageFilter`, or they can use the functions provided on the `EndpointContext`, `ModelArtifact`, and `DatasetArtifact` classes. In addition, the responses from lineage queries can be plotted using the helper class `Visualizer()` to better understand the relationships between the lineage entities. When using SageMaker Pipelines as part of their ML workflows, users can find Pipeline execution ARNs using the lineage APIs described in this notebook. CleanupIn this section we clean up the resources created in this notebook.
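Before tearing the endpoint down, it can be useful to run one quick smoke-test invocation against it. The cell below is an optional addition, not part of the original walkthrough; it assumes the first column of the preprocessed CSV is the target (so only the remaining columns are sent as features) and uses the CSV content type declared when the model package was registered.

###Code
import boto3

# Optional smoke test of the live endpoint before it is deleted.
smrt = boto3.client("sagemaker-runtime")

with open("data/validation.csv") as f:
    first_row = f.readline().strip()

# Assumption: the first column is the label, so drop it and send only the features.
payload = ",".join(first_row.split(",")[1:])

response = smrt.invoke_endpoint(
    EndpointName=endpoint_name,
    ContentType="text/csv",
    Body=payload,
)
print("Prediction:", response["Body"].read().decode("utf-8"))

###Output
_____no_output_____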
###Code
# Delete endpoint
sm_client.delete_endpoint(EndpointName=endpoint_name)

# Delete the model package
sm_client.delete_model_package(ModelPackageName=model_package.model_package_arn)

# Delete the model package group
sm_client.delete_model_package_group(ModelPackageGroupName=model_package_group_name)

# Delete the experiment and trial within it
import time


def delete_experiment(experiment):
    for trial_summary in experiment.list_trials():
        trial = Trial.load(trial_name=trial_summary.trial_name)
        for trial_component_summary in trial.list_trial_components():
            tc = TrialComponent.load(
                trial_component_name=trial_component_summary.trial_component_name
            )
            trial.remove_trial_component(tc)
            try:
                # comment out to keep trial components
                tc.delete()
            except:
                # tc is associated with another trial
                continue
            # to prevent throttling
            time.sleep(0.5)
        trial.delete()
    experiment_name = experiment.experiment_name
    experiment.delete()
    print(f"\nExperiment {experiment_name} deleted")


# Delete the Experiment and Trials within it
experiment = Experiment.load(experiment_name=exp.experiment_name)
delete_experiment(experiment)

###Output
_____no_output_____

###Markdown
Amazon SageMaker Multi-hop Lineage QueriesAmazon SageMaker Lineage tracks events that happen within SageMaker, allowing the relationships between them to be traced via a graph structure. SageMaker Lineage introduces a new API called `LineageQuery` that allows customers to query the lineage graph structure to discover relationships across their Machine Learning entities. Your machine learning workflows can generate deeply nested relationships; the lineage APIs allow you to answer questions about these relationships. For example, find all Data Sets that trained the model deployed to a given Endpoint, or find all Models trained by a Data Set.The lineage graph is created automatically by SageMaker and you can directly create or modify your own lineage.In addition to the `LineageQuery` API, the SageMaker SDK provides wrapper functions that make it easy to run queries that span across multiple hops of the entity relationship graph. These APIs and helper functions are described in this notebook. Key Concepts* **Lineage Graph** - A connected graph tracing your machine learning workflow end to end. * **Artifacts** - Represents a URI addressable object or data. Artifacts are typically inputs or outputs to Actions. * **Actions** - Represents an action taken such as a computation, transformation, or job. * **Contexts** - Provides a method to logically group other entities.* **Associations** - A directed edge in the lineage graph that links two entities.* **Lineage Traversal** - Starting from an arbitrary point, trace the lineage graph to discover and analyze relationships between steps in your workflow.* **Experiments** - Experiment entities (Experiments, Trials, and Trial Components) are also part of the lineage graph and can be associated with Artifacts, Actions, or Contexts. Prerequisites[`sagemaker-experiments`](https://github.com/aws/sagemaker-experiments) and [`pyvis`](https://pyvis.readthedocs.io/en/latest/) are two Python libraries that need to be installed as part of this notebook execution. `pyvis` is a library designed for interactive network visualization, and `sagemaker-experiments` gives users the ability to use SageMaker's Experiment Tracking capabilities. This notebook should be run with `Python 3.9` using the SageMaker Studio `Python3 (Data Science)` kernel.
The `sagemaker` sdk version required for this notebook is `>2.70.0`.If running in SageMaker Classic Notebooks, use the `conda_python3` kernel. The AWS account running this notebook should have access to provision 2 instances of type `ml.m5.xlarge`. These instances are used for training and deploying a model. Let's start by installing preview wheels of the Python SDK, boto and aws cli ###Code # Fallback in case wheels are unavailable ! pip install sagemaker botocore boto3 awscli --upgrade import subprocess def execute_cmd(cmd): print(cmd) output = subprocess.getstatusoutput(cmd) return output def _download_from_s3(_file_path): _path = f"s3://reinvent21-sm-rc-wheels/{_file_path}" print(f"Path is {_path}") ls_cmd = f"aws s3 ls {_path}" print(execute_cmd(ls_cmd)) cmd = f"aws s3 cp {_path} /tmp/" print("Downloading: ", cmd) return execute_cmd(cmd) def _install_wheel(wheel_name): cmd = f"pip install --no-deps --log /tmp/output3.log /tmp/{wheel_name} --force-reinstall" ret = execute_cmd(cmd) _name = wheel_name.split(".")[0] _, _version = execute_cmd(f"python -c 'import {_name}; print({_name}.__version__)'") for package in ["botocore", "sagemaker", "boto3", "awscli"]: print(execute_cmd(f"python -c 'import {package}; print({package}.__version__)'")) print(f"Installed {_name}:{_version}") return ret def install_sm_py_sdk(): pySDK_name = "sagemaker.tar.gz" exit_code, _ = _download_from_s3("dist/sagemaker.tar.gz") if not exit_code: _install_wheel(pySDK_name) else: print(f"'{pySDK_name}' is not present in S3 Bucket. Installing from public PyPi...") execute_cmd("pip install sagemaker") def install_boto_wheels(): WHEELS = ["botocore.tar.gz", "boto3.tar.gz", "awscli.tar.gz"] for wheel_name in WHEELS: _path = f"boto3/{wheel_name}" exit_code, _ = _download_from_s3(_path) if not exit_code: _install_wheel(wheel_name) else: print(f"'{wheel_name}' is not present in S3 Bucket. Ignoring...") install_boto_wheels() install_sm_py_sdk() !pip install sagemaker-experiments pyvis ###Output _____no_output_____ ###Markdown Notebook OverviewThis notebook demonstrates how to use SageMaker Lineage APIs to query multi-hop relationships across the lineage graph. Multi-hop relationships are those that span beyond single entity relationships, e.g. Model -> Endpoint, Training Job -> Model. Multi-hop queries allow users to search for distant relationships across the Lineage Graph such as Endpoint -> Data Set.To demonstrate these capabilities, in this notebook we create a training job, register a model to the Model Registry, and deploy the model to an Endpoint. 
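The lineage query APIs used later in this notebook require a sufficiently recent SDK. The cell below is a small optional addition (not part of the original notebook) that fails fast if an older `sagemaker` version is still on the path; it assumes the `packaging` module is available, which is normally the case in SageMaker kernels.

###Code
import sagemaker
from packaging import version

# Fail fast if the installed SDK predates the lineage query APIs used below.
assert version.parse(sagemaker.__version__) > version.parse("2.70.0"), (
    f"Found sagemaker {sagemaker.__version__}; this notebook expects a version > 2.70.0"
)
print("sagemaker version:", sagemaker.__version__)

###Output
_____no_output_____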
###Code import os import boto3 import sagemaker import pprint from botocore.config import Config config = Config(retries={"max_attempts": 50, "mode": "adaptive"}) sagemaker_session = sagemaker.Session() sm_client = sagemaker_session.sagemaker_client region = sagemaker_session.boto_region_name default_bucket = sagemaker_session.default_bucket() role = sagemaker.get_execution_role() # Helper function to print query outputs pp = pprint.PrettyPrinter() from datetime import datetime training_instance_type = "ml.m5.xlarge" inference_instance_type = "ml.m5.xlarge" s3_prefix = "multihop-example" unique_id = str(datetime.now().timestamp()).split(".")[0] ###Output _____no_output_____ ###Markdown Create an Experiment and Trial for a training job ###Code from smexperiments.experiment import Experiment from smexperiments.trial import Trial from smexperiments.trial_component import TrialComponent experiment_name = f"MultihopQueryExperiment-{unique_id}" exp = Experiment.create(experiment_name=experiment_name, sagemaker_boto_client=sm_client) trial = Trial.create( experiment_name=exp.experiment_name, trial_name=f"MultihopQueryTrial-{unique_id}", sagemaker_boto_client=sm_client, ) print(exp.experiment_name) print(trial.trial_name) ###Output _____no_output_____ ###Markdown Training DataCreating a `data/` directory to store the preprocessed [UCI Abalone](https://archive.ics.uci.edu/ml/datasets/abalone) dataset. The preprocessing is done using the preprocessing script defined in [this](https://github.com/aws/amazon-sagemaker-examples/blob/master/sagemaker-pipelines/tabular/abalone_build_train_deploy/sagemaker-pipelines-preprocess-train-evaluate-batch-transform.ipynb) notebook. Then training and validation data is uploaded to S3 so that it can be used in the training and inference job. ###Code default_bucket if not os.path.exists("./data/"): os.makedirs("./data/") print("Directory Created ") else: print("Directory already exists") # Download the processed abalone dataset files s3 = boto3.client("s3") s3.download_file( f"sagemaker-sample-files", "datasets/tabular/uci_abalone/preprocessed/test.csv", "./data/test.csv", ) s3.download_file( f"sagemaker-sample-files", "datasets/tabular/uci_abalone/preprocessed/train.csv", "./data/train.csv", ) s3.download_file( f"sagemaker-sample-files", "datasets/tabular/uci_abalone/preprocessed/validation.csv", "./data/validation.csv", ) # Upload the datasets to the SageMaker session default bucket boto3.Session().resource("s3").Bucket(default_bucket).Object( "experiments-demo/train.csv" ).upload_file("data/train.csv") boto3.Session().resource("s3").Bucket(default_bucket).Object( "experiments-demo/validation.csv" ).upload_file("data/validation.csv") training_data = f"s3://{default_bucket}/experiments-demo/train.csv" validation_data = f"s3://{default_bucket}/experiments-demo/validation.csv" ###Output _____no_output_____ ###Markdown Create a training jobWe train a simple XGBoost model on the [Abalone dataset](https://www.google.com/search?client=firefox-b-1-d&q=abalone+dataset). `sagemaker.image_uris.retrieve()` is used to get the sagemaker container for XGBoost so that it can be used in the Estimator. In the `.fit()` function, we pass in a training and validation dataset along with an `experiment_config`. The `experiment_config` ensures that the metrics, parameters, and artifats associated with this training job are logged to the experiment and trial created above. 
###Code from sagemaker.estimator import Estimator model_path = f"s3://{default_bucket}/{s3_prefix}/xgb_model" training_instance_type = "ml.m5.large" image_uri = sagemaker.image_uris.retrieve( framework="xgboost", region=region, version="1.0-1", py_version="py3", instance_type=training_instance_type, ) xgb_train = Estimator( image_uri=image_uri, instance_type=training_instance_type, instance_count=1, output_path=model_path, sagemaker_session=sagemaker_session, role=role, ) xgb_train.set_hyperparameters( objective="reg:linear", num_round=50, max_depth=5, eta=0.2, gamma=4, min_child_weight=6, subsample=0.7, silent=0, ) from sagemaker.inputs import TrainingInput xgb_train.fit( inputs={ "train": TrainingInput( s3_data=training_data, content_type="text/csv", ), "validation": TrainingInput( s3_data=validation_data, content_type="text/csv", ), }, experiment_config={ "ExperimentName": experiment_name, "TrialName": trial.trial_name, "TrialComponentDisplayName": "MultiHopQueryTrialComponent", }, ) ###Output _____no_output_____ ###Markdown Create a Model Package Group for the trained model to be registeredCreate a new Model Package Group or use an existing one to register the model ###Code model_package_group_name = "lineage-test-" + unique_id mpg = sm_client.create_model_package_group(ModelPackageGroupName=model_package_group_name) mpg_arn = mpg["ModelPackageGroupArn"] ###Output _____no_output_____ ###Markdown Register the model in the Model RegistryOnce the model is registered, you will see it in the Model Registry tab of the SageMaker Studio UI. The model is registered with the `approval_status` set to "Approved". By default, the model is registered with the `approval_status` set to "PendingManualApproval". Users can then navigate to the Model Registry to manually approve the model based on any criteria set for model evaluation or this can be done via API. ###Code inference_instance_type = "ml.m5.xlarge" model_package = xgb_train.register( model_package_group_name=mpg_arn, inference_instances=[inference_instance_type], transform_instances=[inference_instance_type], content_types=["text/csv"], response_types=["text/csv"], approval_status="Approved", ) model_package_arn = model_package.model_package_arn print("Model Package ARN : ", model_package_arn) ###Output _____no_output_____ ###Markdown Deploy the model to a SageMaker EndpointA SageMaker Endpoint is used to host a model that can be used for inference. The type of endpoint deployed in this notebook is a real time inference endpoint. This is ideal for inference workloads where you have real-time, interactive, low latency requirements. ###Code endpoint_name = "lineage-test-endpoint-" + unique_id model_package.deploy( endpoint_name=endpoint_name, initial_instance_count=1, instance_type=inference_instance_type, ) # Get the endpoint ARN endpoint_arn = sm_client.describe_endpoint(EndpointName=endpoint_name)["EndpointArn"] print(endpoint_arn) ###Output _____no_output_____ ###Markdown SageMaker Lineage QueriesWe explore SageMaker's lineage capabilities to traverse the relationships between the entities created in this notebook - datasets, model, endpoint, and training job. 
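Before moving on to the lineage queries, it can be reassuring to confirm that the `experiment_config` passed to `.fit()` above was recorded. The short cell below is an optional addition that lists the trial components now attached to the trial, using the `smexperiments` classes imported earlier.

###Code
# List the trial components attached to the trial used for training.
for tc_summary in trial.list_trial_components():
    print(tc_summary.trial_component_name)

###Output
_____no_output_____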
###Code from sagemaker.lineage.context import Context, EndpointContext from sagemaker.lineage.action import Action from sagemaker.lineage.association import Association from sagemaker.lineage.artifact import Artifact, ModelArtifact, DatasetArtifact from sagemaker.lineage.query import ( LineageQuery, LineageFilter, LineageSourceEnum, LineageEntityEnum, LineageQueryDirectionEnum, ) ###Output _____no_output_____ ###Markdown Using the LineageQuery API to find entity associationsIn this section we use two APIs, `LineageQuery` and `LineageFilter` to construct queries to answer questions about the Lineage Graph and extract entity relationships. LineageQuery parameters:* `start_arns`: A list of ARNs that will be used as the starting point for the query.* `direction`: The direction of the query.* `include_edges`: If true, return edges in addition to vertices.* `query_filter`: The query filter.LineageFilter paramters:* `entities`: A list of entity types (Artifact, Association, Action) to filter for when returning the results on LineageQuery* `sources`: A list of source types (Endpoint, Model, Dataset) to filter for when returning the results of LineageQueryA `Context` is automatically created when a SageMaker Endpoint is created, an `Artifact` is automatically created when a Model is created in SageMaker. ###Code # Find the endpoint context and model artifact that should be used for the lineage queries. contexts = Context.list(source_uri=endpoint_arn) context_name = list(contexts)[0].context_name endpoint_context = EndpointContext.load(context_name=context_name) ###Output _____no_output_____ ###Markdown Find all datasets associated with an Endpoint ###Code # Define the LineageFilter to look for entities of type `ARTIFACT` and the source of type `DATASET`. query_filter = LineageFilter( entities=[LineageEntityEnum.ARTIFACT], sources=[LineageSourceEnum.DATASET] ) # Providing this `LineageFilter` to the `LineageQuery` will construct a query that traverses through the given context `endpoint_context` # and find all datasets. query_result = LineageQuery(sagemaker_session).query( start_arns=[endpoint_context.context_arn], query_filter=query_filter, direction=LineageQueryDirectionEnum.ASCENDANTS, include_edges=False, ) # Parse through the query results to get the lineage objects corresponding to the datasets dataset_artifacts = [] for vertex in query_result.vertices: dataset_artifacts.append(vertex.to_lineage_object().source.source_uri) pp.pprint(dataset_artifacts) ###Output _____no_output_____ ###Markdown Find the models associated with an Endpoint ###Code # Define the LineageFilter to look for entities of type `ARTIFACT` and the source of type `MODEL`. query_filter = LineageFilter( entities=[LineageEntityEnum.ARTIFACT], sources=[LineageSourceEnum.MODEL] ) # Providing this `LineageFilter` to the `LineageQuery` will construct a query that traverses through the given context `endpoint_context` # and find all datasets. 
query_result = LineageQuery(sagemaker_session).query( start_arns=[endpoint_context.context_arn], query_filter=query_filter, direction=LineageQueryDirectionEnum.ASCENDANTS, include_edges=False, ) # Parse through the query results to get the lineage objects corresponding to the model model_artifacts = [] for vertex in query_result.vertices: model_artifacts.append(vertex.to_lineage_object().source.source_uri) # The results of the `LineageQuery` API call return the ARN of the model deployed to the endpoint along with # the S3 URI to the model.tar.gz file associated with the model pp.pprint(model_artifacts) ###Output _____no_output_____ ###Markdown Find the trial components associated with the endpoint ###Code # Define the LineageFilter to look for entities of type `TRIAL_COMPONENT` and the source of type `TRAINING_JOB`. query_filter = LineageFilter( entities=[LineageEntityEnum.TRIAL_COMPONENT], sources=[LineageSourceEnum.TRAINING_JOB], ) # Providing this `LineageFilter` to the `LineageQuery` will construct a query that traverses through the given context `endpoint_context` # and find all datasets. query_result = LineageQuery(sagemaker_session).query( start_arns=[endpoint_context.context_arn], query_filter=query_filter, direction=LineageQueryDirectionEnum.ASCENDANTS, include_edges=False, ) # Parse through the query results to get the ARNs of the training jobs associated with this Endpoint trial_components = [] for vertex in query_result.vertices: trial_components.append(vertex.arn) pp.pprint(trial_components) ###Output _____no_output_____ ###Markdown Changing the focal point of lineageThe `LineageQuery` can be modified to have different `start_arns` which will change the focal point of lineage. In addition, the `LineageFilter` can take multiple sources and entities to expand the scope of the query. **Here we use the model as the lineage focal point and find the Endpoints and Datasets associated with it.** ###Code # Get the ModelArtifact model_artifact_summary = list(Artifact.list(source_uri=model_package_arn))[0] model_artifact = ModelArtifact.load(artifact_arn=model_artifact_summary.artifact_arn) query_filter = LineageFilter( entities=[LineageEntityEnum.ARTIFACT], sources=[LineageSourceEnum.ENDPOINT, LineageSourceEnum.DATASET], ) query_result = LineageQuery(sagemaker_session).query( start_arns=[model_artifact.artifact_arn], # Model is the starting artifact query_filter=query_filter, # Find all the entities that descend from the model, i.e. the endpoint direction=LineageQueryDirectionEnum.DESCENDANTS, include_edges=False, ) associations = [] for vertex in query_result.vertices: associations.append(vertex.to_lineage_object().source.source_uri) query_result = LineageQuery(sagemaker_session).query( start_arns=[model_artifact.artifact_arn], # Model is the starting artifact query_filter=query_filter, # Find all the entities that ascend from the model, i.e. the datasets direction=LineageQueryDirectionEnum.ASCENDANTS, include_edges=False, ) for vertex in query_result.vertices: associations.append(vertex.to_lineage_object().source.source_uri) pp.pprint(associations) ###Output _____no_output_____ ###Markdown Using LineageQueryDirectionEnum.BOTHWhen the direction is set to `BOTH`, when the query traverses the graph to find ascendant and descendant relationships, the traversal will take place not only from the starting node, but from each node that is visited. e.g. 
If the training job is run twice and both models generated by the training job are deployed to endpoints, this result of the query with direction set to `BOTH` will show both endpoints. This is because the same image is used for training and deploying the model. Since the image is common to the model (`start_arn`) and both the endpoints, it will appear in the query result. ###Code query_filter = LineageFilter( entities=[LineageEntityEnum.ARTIFACT], sources=[LineageSourceEnum.ENDPOINT, LineageSourceEnum.DATASET], ) query_result = LineageQuery(sagemaker_session).query( start_arns=[model_artifact.artifact_arn], # Model is the starting artifact query_filter=query_filter, # This specifies that the query should look for associations both ascending and descending for the start direction=LineageQueryDirectionEnum.BOTH, include_edges=False, ) associations = [] for vertex in query_result.vertices: associations.append(vertex.to_lineage_object().source.source_uri) pp.pprint(associations) ###Output _____no_output_____ ###Markdown Directions in `LineageQuery` - `ASCENDANTS` vs. `DESCENDANTS`To understand the direction in the Lineage Graph, take the following entity relationship graph - Dataset -> Training Job -> Model -> EndpointThe endpoint is a **descendant** of the model, and the model is a **descendant** of the dataset. Similarly, the model is an **ascendant** of the endpoint The `direction` parameter can be used to specify whether the query should return entities that are descendants or ascendants of the entity in start_arns. If `start_arns` contains a model and the direction is `DESCENDANTS`, the query will return the endpoint. If the direction is `ASCENDANTS`, the query will return the dataset." ###Code # In this example, we'll look at the impact of specifying the direction as ASCENDANT or DESCENDANT in a `LineageQuery`. query_filter = LineageFilter( entities=[LineageEntityEnum.ARTIFACT], sources=[ LineageSourceEnum.ENDPOINT, LineageSourceEnum.MODEL, LineageSourceEnum.DATASET, LineageSourceEnum.TRAINING_JOB, ], ) query_result = LineageQuery(sagemaker_session).query( start_arns=[model_artifact.artifact_arn], query_filter=query_filter, direction=LineageQueryDirectionEnum.ASCENDANTS, include_edges=False, ) ascendant_artifacts = [] # The lineage entity returned for the Training Job is a TrialComponent which can't be converted to a # lineage object using the method `to_lineage_object()` so we extract the TrialComponent ARN. for vertex in query_result.vertices: try: ascendant_artifacts.append(vertex.to_lineage_object().source.source_uri) except: ascendant_artifacts.append(vertex.arn) print("Ascendant artifacts : ") pp.pprint(ascendant_artifacts) query_result = LineageQuery(sagemaker_session).query( start_arns=[model_artifact.artifact_arn], query_filter=query_filter, direction=LineageQueryDirectionEnum.DESCENDANTS, include_edges=False, ) descendant_artifacts = [] for vertex in query_result.vertices: try: descendant_artifacts.append(vertex.to_lineage_object().source.source_uri) except: # Handling TrialComponents. descendant_artifacts.append(vertex.arn) print("Descendant artifacts : ") pp.pprint(descendant_artifacts) ###Output _____no_output_____ ###Markdown SDK helper FunctionsThe classes `EndpointContext`, `ModelArtifact`, and `DatasetArtifact`have helper functions that are wrappers over the `LineageQuery` API to make certain lineage queries easier to leverage. 
###Code
# Find all the datasets associated with this endpoint
datasets = []
dataset_artifacts = endpoint_context.dataset_artifacts()
for dataset in dataset_artifacts:
    datasets.append(dataset.source.source_uri)
print("Datasets : ", datasets)

# Find the training jobs associated with the endpoint
training_job_artifacts = endpoint_context.training_job_arns()
training_jobs = []
for training_job in training_job_artifacts:
    training_jobs.append(training_job)
print("Training Jobs : ", training_jobs)

# Get the ARN for the pipeline execution associated with this endpoint (if any)
pipeline_execution_arn = endpoint_context.pipeline_execution_arn()
if pipeline_execution_arn:
    print(pipeline_execution_arn)

# Here we use the `ModelArtifact` class to find all the datasets and endpoints associated with the model

dataset_artifacts = model_artifact.dataset_artifacts()
endpoint_contexts = model_artifact.endpoint_contexts()

datasets = [dataset.source.source_uri for dataset in dataset_artifacts]
endpoints = [endpoint.source.source_uri for endpoint in endpoint_contexts]

print("Datasets associated with this model : ")
pp.pprint(datasets)

print("Endpoints associated with this model : ")
pp.pprint(endpoints)

# Here we use the `DatasetArtifact` class to find all the endpoints hosting models that were trained with a particular dataset

# Find the artifact associated with the dataset
dataset_artifact_arn = list(Artifact.list(source_uri=training_data))[0].artifact_arn
dataset_artifact = DatasetArtifact.load(artifact_arn=dataset_artifact_arn)

# Find the endpoints that used this training dataset
endpoint_contexts = dataset_artifact.endpoint_contexts()
endpoints = [endpoint.source.source_uri for endpoint in endpoint_contexts]

print("Endpoints associated with the training dataset {}".format(training_data))
pp.pprint(endpoints)

###Output
_____no_output_____

###Markdown
Lineage Graph VisualizationA helper class `Visualizer()` is provided in `visualizer.py` to help plot the lineage graph. When the query response is rendered, a graph with the lineage relationships from the `StartArns` will be displayed. From the `StartArns` the visualization will show the relationships with the other lineage entities returned in the `query_lineage` API call.

###Code
# Graph APIs
# Here we use the boto3 `query_lineage` API to generate the query response to plot.

from visualizer import Visualizer

query_response = sm_client.query_lineage(
    StartArns=[endpoint_context.context_arn], Direction="Ascendants", IncludeEdges=True
)

viz = Visualizer()
viz.render(query_response, "Endpoint")

query_response = sm_client.query_lineage(
    StartArns=[model_artifact.artifact_arn], Direction="Ascendants", IncludeEdges=True
)

viz.render(query_response, "Model")

###Output
_____no_output_____

###Markdown
ConclusionThis notebook demonstrated the capabilities of SageMaker Lineage that make it easy for users to keep track of their complex ML workflows. Users can construct their own lineage queries using the `LineageQuery` API and `LineageFilter`, or they can use the functions provided on the `EndpointContext`, `ModelArtifact`, and `DatasetArtifact` classes. In addition, the responses from lineage queries can be plotted using the helper class `Visualizer()` to better understand the relationships between the lineage entities. When using SageMaker Pipelines as part of their ML workflows, users can find Pipeline execution ARNs using the lineage APIs described in this notebook. CleanupIn this section we will clean up the resources created in this notebook.
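The train and validation CSVs that were copied into the session's default bucket can be removed as well. The cell below is an optional addition; it assumes the objects still live under the `experiments-demo/` prefix used when they were uploaded.

###Code
import boto3

# Remove the dataset copies uploaded to the default bucket earlier.
s3_resource = boto3.resource("s3")
for key in ("experiments-demo/train.csv", "experiments-demo/validation.csv"):
    s3_resource.Object(default_bucket, key).delete()
print("Removed uploaded dataset copies from", default_bucket)

###Output
_____no_output_____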
###Code # Delete endpoint sm_client.delete_endpoint(EndpointName=endpoint_name) # # Delete the model package sm_client.delete_model_package(ModelPackageName=model_package.model_package_arn) # Delete the model package group sm_client.delete_model_package_group(ModelPackageGroupName=model_package_group_name) # Delete the experiment and trial within it import time def delete_experiment(experiment): for trial_summary in experiment.list_trials(): trial = Trial.load(trial_name=trial_summary.trial_name) for trial_component_summary in trial.list_trial_components(): tc = TrialComponent.load( trial_component_name=trial_component_summary.trial_component_name ) trial.remove_trial_component(tc) try: # comment out to keep trial components tc.delete() except: # tc is associated with another trial continue # to prevent throttling time.sleep(0.5) trial.delete() experiment_name = experiment.experiment_name experiment.delete() print(f"\nExperiment {experiment_name} deleted") # Delete the Experiment and Trials within it experiment = Experiment.load(experiment_name=exp.experiment_name) delete_experiment(experiment) ###Output _____no_output_____ ###Markdown Amazon SageMaker Multi-hop Lineage QueriesAmazon SageMaker Lineage tracks events that happen within SageMaker allowing the relationships between them to be traced via a graph structure. SageMaker Lineage introduces a new API called `LineageQuery` that allows customers to query the lineage graph structure to discover relationship across their Machine Learning entities. Your machine learning workflows can generate deeply nested relationships, the lineage APIs allow you to answer questions about these relationships. For example find all Data Sets that trained the model deployed to a given Endpoint or find all Models trained by a Data Set.The lineage graph is created automatically by SageMaker and you can directly create or modify your own lineage.In addition to the `LineageQuery` API, the SageMaker SDK provides wrapper functions that make it easy to run queries that span across multiple hops of the entity relationship graph. These APIs and helper functions are described in this notebook. Key Concepts* **Lineage Graph** - A connected graph tracing your machine learning workflow end to end. * **Artifacts** - Represents a URI addressable object or data. Artifacts are typically inputs or outputs to Actions. * **Actions** - Represents an action taken such as a computation, transformation, or job. * **Contexts** - Provides a method to logically group other entities.* **Associations** - A directed edge in the lineage graph that links two entities.* **Lineage Traversal** - Starting from an arbitrary point trace the lineage graph to discover and analyze relationships between steps in your workflow.* **Experiments** - Experiment entites (Experiments, Trials, and Trial Components) are also part of the lineage graph and can be associated wtih Artifacts, Actions, or Contexts. Prequisites[`sagemaker-experiments`](https://github.com/aws/sagemaker-experiments) and [`pyvis`]((https://pyvis.readthedocs.io/en/latest/)) are two Python libraries that need to be installed as part of this notebook execution. `pyvis` is a library designed for interactive network visualization and `sagemaker-experiments` gives users the ability to use SageMaker's Experiment Tracking capabilities. This notebook should be run with `Python 3.9` using the SageMaker Studio `Python3 (Data Science)` kernel. 
The `sagemaker` sdk version required for this notebook is `>2.70.0`.If running in SageMaker Classic Notebooks, use the `conda_python3` kernel. The AWS account running this notebook should have access to provision 2 instances of type `ml.m5.xlarge`. These instances are used for training and deploying a model. Let's start by installing preview wheels of the Python SDK, boto and aws cli ###Code # Fallback in case wheels are unavailable ! pip install sagemaker botocore boto3 awscli --upgrade import subprocess def execute_cmd(cmd): print(cmd) output = subprocess.getstatusoutput(cmd) return output def _download_from_s3(_file_path): _path = f"s3://reinvent21-sm-rc-wheels/{_file_path}" print(f"Path is {_path}") ls_cmd = f"aws s3 ls {_path}" print(execute_cmd(ls_cmd)) cmd = f"aws s3 cp {_path} /tmp/" print("Downloading: ", cmd) return execute_cmd(cmd) def _install_wheel(wheel_name): cmd = f"pip install --no-deps --log /tmp/output3.log /tmp/{wheel_name} --force-reinstall" ret = execute_cmd(cmd) _name = wheel_name.split(".")[0] _, _version = execute_cmd(f"python -c 'import {_name}; print({_name}.__version__)'") for package in ["botocore", "sagemaker", "boto3", "awscli"]: print(execute_cmd(f"python -c 'import {package}; print({package}.__version__)'")) print(f"Installed {_name}:{_version}") return ret def install_sm_py_sdk(): pySDK_name = "sagemaker.tar.gz" exit_code, _ = _download_from_s3("dist/sagemaker.tar.gz") if not exit_code: _install_wheel(pySDK_name) else: print(f"'{pySDK_name}' is not present in S3 Bucket. Installing from public PyPi...") execute_cmd("pip install sagemaker") def install_boto_wheels(): WHEELS = ["botocore.tar.gz", "boto3.tar.gz", "awscli.tar.gz"] for wheel_name in WHEELS: _path = f"boto3/{wheel_name}" exit_code, _ = _download_from_s3(_path) if not exit_code: _install_wheel(wheel_name) else: print(f"'{wheel_name}' is not present in S3 Bucket. Ignoring...") install_boto_wheels() install_sm_py_sdk() !pip install sagemaker-experiments pyvis ###Output _____no_output_____ ###Markdown Notebook OverviewThis notebook demonstrates how to use SageMaker Lineage APIs to query multi-hop relationships across the lineage graph. Multi-hop relationships are those that span beyond single entity relationships, e.g. Model -> Endpoint, Training Job -> Model. Multi-hop queries allow users to search for distant relationships across the Lineage Graph such as Endpoint -> Data Set.To demonstrate these capabilities, in this notebook we create a training job, register a model to the Model Registry, and deploy the model to an Endpoint. 
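Before any resources are created, it can help to double-check which AWS account and Region this kernel is operating in, since the training job, model package, and endpoint will all live there. The cell below is an optional addition using standard `boto3`/STS calls.

###Code
import boto3

# Show the account and Region this notebook will create resources in.
identity = boto3.client("sts").get_caller_identity()
print("Account:", identity["Account"])
print("Region :", boto3.session.Session().region_name)

###Output
_____no_output_____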
###Code import os import boto3 import sagemaker import pprint from botocore.config import Config boto_session = boto3.Session() config = Config(retries={"max_attempts": 50, "mode": "adaptive"}) sm_client = boto3.client("sagemaker", config=config) region = boto_session.region_name sagemaker_session = sagemaker.Session(sagemaker_client=sm_client, boto_session=boto_session) default_bucket = sagemaker_session.default_bucket() role = sagemaker.get_execution_role() # Helper function to print query outputs pp = pprint.PrettyPrinter() from datetime import datetime training_instance_type = "ml.m5.xlarge" inference_instance_type = "ml.m5.xlarge" s3_prefix = "multihop-example" unique_id = str(datetime.now().timestamp()).split(".")[0] ###Output _____no_output_____ ###Markdown Create an Experiment and Trial for a training job ###Code from smexperiments.experiment import Experiment from smexperiments.trial import Trial from smexperiments.trial_component import TrialComponent experiment_name = f"MultihopQueryExperiment-{unique_id}" exp = Experiment.create(experiment_name=experiment_name, sagemaker_boto_client=sm_client) trial = Trial.create( experiment_name=exp.experiment_name, trial_name=f"MultihopQueryTrial-{unique_id}", sagemaker_boto_client=sm_client, ) print(exp.experiment_name) print(trial.trial_name) ###Output _____no_output_____ ###Markdown Training DataUpload the training data provided in `data/` to S3 so that it can be used in the training job. The data in the folder has been created by preprocessing the [UCI Abalone](https://archive.ics.uci.edu/ml/datasets/abalone) dataset using the preprocessing script defined in [this](https://github.com/aws/amazon-sagemaker-examples/blob/master/sagemaker-pipelines/tabular/abalone_build_train_deploy/sagemaker-pipelines-preprocess-train-evaluate-batch-transform.ipynb) notebook. ###Code # Download the processed abalone dataset files !aws s3 cp s3://sagemaker-sample-files/datasets/tabular/uci_abalone/preprocessed/train.csv data/train.csv !aws s3 cp s3://sagemaker-sample-files/datasets/tabular/uci_abalone/preprocessed/test.csv data/test.csv !aws s3 cp s3://sagemaker-sample-files/datasets/tabular/uci_abalone/preprocessed/validation.csv data/validation.csv # Upload the datasets to the SageMaker session default bucket !aws s3 cp data/train.csv s3://{default_bucket}/experiments-demo/train.csv !aws s3 cp data/validation.csv s3://{default_bucket}/experiments-demo/validation.csv training_data = f"s3://{default_bucket}/experiments-demo/train.csv" validation_data = f"s3://{default_bucket}/experiments-demo/validation.csv" ###Output _____no_output_____ ###Markdown Create a training jobWe train a simple XGBoost model on the [Abalone dataset](https://www.google.com/search?client=firefox-b-1-d&q=abalone+dataset). `sagemaker.image_uris.retrieve()` is used to get the sagemaker container for XGBoost so that it can be used in the Estimator. In the `.fit()` function, we pass in a training and validation dataset along with an `experiment_config`. The `experiment_config` ensures that the metrics, parameters, and artifats associated with this training job are logged to the experiment and trial created above. 
###Code from sagemaker.estimator import Estimator model_path = f"s3://{default_bucket}/{s3_prefix}/xgb_model" training_instance_type = "ml.m5.large" image_uri = sagemaker.image_uris.retrieve( framework="xgboost", region=region, version="1.0-1", py_version="py3", instance_type=training_instance_type, ) xgb_train = Estimator( image_uri=image_uri, instance_type=training_instance_type, instance_count=1, output_path=model_path, sagemaker_session=sagemaker_session, role=role, ) xgb_train.set_hyperparameters( objective="reg:linear", num_round=50, max_depth=5, eta=0.2, gamma=4, min_child_weight=6, subsample=0.7, silent=0, ) from sagemaker.inputs import TrainingInput xgb_train.fit( inputs={ "train": TrainingInput( s3_data=training_data, content_type="text/csv", ), "validation": TrainingInput( s3_data=validation_data, content_type="text/csv", ), }, experiment_config={ "ExperimentName": experiment_name, "TrialName": trial.trial_name, "TrialComponentDisplayName": "MultiHopQueryTrialComponent", }, ) ###Output _____no_output_____ ###Markdown Create a Model Package Group for the trained model to be registeredCreate a new Model Package Group or use an existing one to register the model ###Code model_package_group_name = "lineage-test-" + unique_id mpg = sm_client.create_model_package_group(ModelPackageGroupName=model_package_group_name) mpg_arn = mpg["ModelPackageGroupArn"] ###Output _____no_output_____ ###Markdown Register the model in the Model RegistryOnce the model is registered, you will see it in the Model Registry tab of the SageMaker Studio UI. The model is registered with the `approval_status` set to "Approved". By default, the model is registered with the `approval_status` set to "PendingManualApproval". Users can then navigate to the Model Registry to manually approve the model based on any criteria set for model evaluation or this can be done via API. ###Code inference_instance_type = "ml.m5.xlarge" model_package = xgb_train.register( model_package_group_name=mpg_arn, inference_instances=[inference_instance_type], transform_instances=[inference_instance_type], content_types=["text/csv"], response_types=["text/csv"], approval_status="Approved", ) model_package_arn = model_package.model_package_arn print("Model Package ARN : ", model_package_arn) ###Output _____no_output_____ ###Markdown Deploy the model to a SageMaker EndpointA SageMaker Endpoint is used to host a model that can be used for inference. The type of endpoint deployed in this notebook is a real time inference endpoint. This is ideal for inference workloads where you have real-time, interactive, low latency requirements. ###Code endpoint_name = "lineage-test-endpoint-" + unique_id model_package.deploy( endpoint_name=endpoint_name, initial_instance_count=1, instance_type=inference_instance_type, ) # Get the endpoint ARN endpoint_arn = sm_client.describe_endpoint(EndpointName=endpoint_name)["EndpointArn"] print(endpoint_arn) ###Output _____no_output_____ ###Markdown SageMaker Lineage QueriesWe explore SageMaker's lineage capabilities to traverse the relationships between the entities created in this notebook - datasets, model, endpoint, and training job. 
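The queries in the following cells work hop by hop through the lineage graph; it can also be instructive to look at the raw single-hop edges directly. The cell below is an optional illustration using `Association.list`; it assumes the endpoint's lineage context already exists, and the attribute names follow the association summaries returned by recent SDK versions, so adjust if your version differs.

###Code
from sagemaker.lineage.context import Context
from sagemaker.lineage.association import Association

# Look up the lineage context backing the endpoint, then print its incoming edges.
ctx_summary = next(iter(Context.list(source_uri=endpoint_arn)), None)
if ctx_summary is None:
    print("No lineage context found yet for", endpoint_arn)
else:
    for assoc in Association.list(destination_arn=ctx_summary.context_arn):
        print(f"{assoc.source_arn} -> {assoc.destination_arn} ({assoc.association_type})")

###Output
_____no_output_____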
###Code from sagemaker.lineage.context import Context, EndpointContext from sagemaker.lineage.action import Action from sagemaker.lineage.association import Association from sagemaker.lineage.artifact import Artifact, ModelArtifact, DatasetArtifact from sagemaker.lineage.query import ( LineageQuery, LineageFilter, LineageSourceEnum, LineageEntityEnum, LineageQueryDirectionEnum, ) ###Output _____no_output_____ ###Markdown Using the LineageQuery API to find entity associationsIn this section we use two APIs, `LineageQuery` and `LineageFilter` to construct queries to answer questions about the Lineage Graph and extract entity relationships. LineageQuery parameters:* `start_arns`: A list of ARNs that will be used as the starting point for the query.* `direction`: The direction of the query.* `include_edges`: If true, return edges in addition to vertices.* `query_filter`: The query filter.LineageFilter paramters:* `entities`: A list of entity types (Artifact, Association, Action) to filter for when returning the results on LineageQuery* `sources`: A list of source types (Endpoint, Model, Dataset) to filter for when returning the results of LineageQueryA `Context` is automatically created when a SageMaker Endpoint is created, an `Artifact` is automatically created when a Model is created in SageMaker. ###Code # Find the endpoint context and model artifact that should be used for the lineage queries. contexts = Context.list(source_uri=endpoint_arn) context_name = list(contexts)[0].context_name endpoint_context = EndpointContext.load(context_name=context_name) ###Output _____no_output_____ ###Markdown Find all datasets associated with an Endpoint ###Code # Define the LineageFilter to look for entities of type `ARTIFACT` and the source of type `DATASET`. query_filter = LineageFilter( entities=[LineageEntityEnum.ARTIFACT], sources=[LineageSourceEnum.DATASET] ) # Providing this `LineageFilter` to the `LineageQuery` will construct a query that traverses through the given context `endpoint_context` # and find all datasets. query_result = LineageQuery(sagemaker_session).query( start_arns=[endpoint_context.context_arn], query_filter=query_filter, direction=LineageQueryDirectionEnum.ASCENDANTS, include_edges=False, ) # Parse through the query results to get the lineage objects corresponding to the datasets dataset_artifacts = [] for vertex in query_result.vertices: dataset_artifacts.append(vertex.to_lineage_object().source.source_uri) pp.pprint(dataset_artifacts) ###Output _____no_output_____ ###Markdown Find the models associated with an Endpoint ###Code # Define the LineageFilter to look for entities of type `ARTIFACT` and the source of type `MODEL`. query_filter = LineageFilter( entities=[LineageEntityEnum.ARTIFACT], sources=[LineageSourceEnum.MODEL] ) # Providing this `LineageFilter` to the `LineageQuery` will construct a query that traverses through the given context `endpoint_context` # and find all datasets. 
query_result = LineageQuery(sagemaker_session).query( start_arns=[endpoint_context.context_arn], query_filter=query_filter, direction=LineageQueryDirectionEnum.ASCENDANTS, include_edges=False, ) # Parse through the query results to get the lineage objects corresponding to the model model_artifacts = [] for vertex in query_result.vertices: model_artifacts.append(vertex.to_lineage_object().source.source_uri) # The results of the `LineageQuery` API call return the ARN of the model deployed to the endpoint along with # the S3 URI to the model.tar.gz file associated with the model pp.pprint(model_artifacts) ###Output _____no_output_____ ###Markdown Find the trial components associated with the endpoint ###Code # Define the LineageFilter to look for entities of type `TRIAL_COMPONENT` and the source of type `TRAINING_JOB`. query_filter = LineageFilter( entities=[LineageEntityEnum.TRIAL_COMPONENT], sources=[LineageSourceEnum.TRAINING_JOB], ) # Providing this `LineageFilter` to the `LineageQuery` will construct a query that traverses through the given context `endpoint_context` # and find all datasets. query_result = LineageQuery(sagemaker_session).query( start_arns=[endpoint_context.context_arn], query_filter=query_filter, direction=LineageQueryDirectionEnum.ASCENDANTS, include_edges=False, ) # Parse through the query results to get the ARNs of the training jobs associated with this Endpoint trial_components = [] for vertex in query_result.vertices: trial_components.append(vertex.arn) pp.pprint(trial_components) ###Output _____no_output_____ ###Markdown Changing the focal point of lineageThe `LineageQuery` can be modified to have different `start_arns` which will change the focal point of lineage. In addition, the `LineageFilter` can take multiple sources and entities to expand the scope of the query. **Here we use the model as the lineage focal point and find the Endpoints and Datasets associated with it.** ###Code # Get the ModelArtifact model_artifact_summary = list(Artifact.list(source_uri=model_package_arn))[0] model_artifact = ModelArtifact.load(artifact_arn=model_artifact_summary.artifact_arn) query_filter = LineageFilter( entities=[LineageEntityEnum.ARTIFACT], sources=[LineageSourceEnum.ENDPOINT, LineageSourceEnum.DATASET], ) query_result = LineageQuery(sagemaker_session).query( start_arns=[model_artifact.artifact_arn], # Model is the starting artifact query_filter=query_filter, # Find all the entities that descend from the model, i.e. the endpoint direction=LineageQueryDirectionEnum.DESCENDANTS, include_edges=False, ) associations = [] for vertex in query_result.vertices: associations.append(vertex.to_lineage_object().source.source_uri) query_result = LineageQuery(sagemaker_session).query( start_arns=[model_artifact.artifact_arn], # Model is the starting artifact query_filter=query_filter, # Find all the entities that ascend from the model, i.e. the datasets direction=LineageQueryDirectionEnum.ASCENDANTS, include_edges=False, ) for vertex in query_result.vertices: associations.append(vertex.to_lineage_object().source.source_uri) pp.pprint(associations) ###Output _____no_output_____ ###Markdown Using LineageQueryDirectionEnum.BOTHWhen the direction is set to `BOTH`, when the query traverses the graph to find ascendant and descendant relationships, the traversal will take place not only from the starting node, but from each node that is visited. e.g. 
If the training job is run twice and both models generated by the training job are deployed to endpoints, this result of the query with direction set to `BOTH` will show both endpoints. This is because the same image is used for training and deploying the model. Since the image is common to the model (`start_arn`) and both the endpoints, it will appear in the query result. ###Code query_filter = LineageFilter( entities=[LineageEntityEnum.ARTIFACT], sources=[LineageSourceEnum.ENDPOINT, LineageSourceEnum.DATASET], ) query_result = LineageQuery(sagemaker_session).query( start_arns=[model_artifact.artifact_arn], # Model is the starting artifact query_filter=query_filter, # This specifies that the query should look for associations both ascending and descending for the start direction=LineageQueryDirectionEnum.BOTH, include_edges=False, ) associations = [] for vertex in query_result.vertices: associations.append(vertex.to_lineage_object().source.source_uri) pp.pprint(associations) ###Output _____no_output_____ ###Markdown Directions in `LineageQuery` - `ASCENDANTS` vs. `DESCENDANTS`To understand the direction in the Lineage Graph, take the following entity relationship graph - Dataset -> Training Job -> Model -> EndpointThe endpoint is a **descendant** of the model, and the model is a **descendant** of the dataset. Similarly, the model is an **ascendant** of the endpoint The `direction` parameter can be used to specify whether the query should return entities that are descendants or ascendants of the entity in start_arns. If `start_arns` contains a model and the direction is `DESCENDANTS`, the query will return the endpoint. If the direction is `ASCENDANTS`, the query will return the dataset." ###Code # In this example, we'll look at the impact of specifying the direction as ASCENDANT or DESCENDANT in a `LineageQuery`. query_filter = LineageFilter( entities=[LineageEntityEnum.ARTIFACT], sources=[ LineageSourceEnum.ENDPOINT, LineageSourceEnum.MODEL, LineageSourceEnum.DATASET, LineageSourceEnum.TRAINING_JOB, ], ) query_result = LineageQuery(sagemaker_session).query( start_arns=[model_artifact.artifact_arn], query_filter=query_filter, direction=LineageQueryDirectionEnum.ASCENDANTS, include_edges=False, ) ascendant_artifacts = [] # The lineage entity returned for the Training Job is a TrialComponent which can't be converted to a # lineage object using the method `to_lineage_object()` so we extract the TrialComponent ARN. for vertex in query_result.vertices: try: ascendant_artifacts.append(vertex.to_lineage_object().source.source_uri) except: ascendant_artifacts.append(vertex.arn) print("Ascendant artifacts : ") pp.pprint(ascendant_artifacts) query_result = LineageQuery(sagemaker_session).query( start_arns=[model_artifact.artifact_arn], query_filter=query_filter, direction=LineageQueryDirectionEnum.DESCENDANTS, include_edges=False, ) descendant_artifacts = [] for vertex in query_result.vertices: try: descendant_artifacts.append(vertex.to_lineage_object().source.source_uri) except: # Handling TrialComponents. descendant_artifacts.append(vertex.arn) print("Descendant artifacts : ") pp.pprint(descendant_artifacts) ###Output _____no_output_____ ###Markdown SDK helper FunctionsThe classes `EndpointContext`, `ModelArtifact`, and `DatasetArtifact`have helper functions that are wrappers over the `LineageQuery` API to make certain lineage queries easier to leverage. 
###Code # Find all the datasets associated with this endpoint datasets = [] dataset_artifacts = endpoint_context.dataset_artifacts() for dataset in dataset_artifacts: datasets.append(dataset.source.source_uri) print("Datasets : ", datasets) # Find the training jobs associated with the endpoint training_job_artifacts = endpoint_context.training_job_arns() training_jobs = [] for training_job in training_job_artifacts: training_jobs.append(training_job) print("Training Jobs : ", training_jobs) # Get the ARN for the pipeline execution associated with this endpoint (if any) pipeline_executions = endpoint_context.pipeline_execution_arn() if pipeline_executions: for pipeline in pipelines_executions: print(pipeline) # Here we use the `ModelArtifact` class to find all the datasets and endpoints associated with the model dataset_artifacts = model_artifact.dataset_artifacts() endpoint_contexts = model_artifact.endpoint_contexts() datasets = [dataset.source.source_uri for dataset in dataset_artifacts] endpoints = [endpoint.source.source_uri for endpoint in endpoint_contexts] print("Datasets associated with this model : ") pp.pprint(datasets) print("Endpoints associated with this model : ") pp.pprint(endpoints) # Here we use the `DatasetArtifact` class to find all the endpoints hosting models that were trained with a particular dataset # Find the artifact associated with the dataset dataset_artifact_arn = list(Artifact.list(source_uri=training_data))[0].artifact_arn dataset_artifact = DatasetArtifact.load(artifact_arn=dataset_artifact_arn) # Find the endpoints that used this training dataset endpoint_contexts = dataset_artifact.endpoint_contexts() endpoints = [endpoint.source.source_uri for endpoint in endpoint_contexts] print("Endpoints associated with the training dataset {}".format(training_data)) pp.pprint(endpoints) ###Output _____no_output_____ ###Markdown Lineage Graph VisualizationA helper class `Visualizer()` is provided in `visualizer.py` to help plot the lineage graph. When the query response is rendered, a graph with the lineage relationships from the `StartArns` will be displayed. From the `StartArns` the visualization will show the relationships with the other lineage entities returned in the `query_lineage` API call. ###Code # Graph APIs # Here we use the boto3 `query_lineage` API to generate the query response to plot. from visualizer import Visualizer query_response = sm_client.query_lineage( StartArns=[endpoint_context.context_arn], Direction="Ascendants", IncludeEdges=True ) viz = Visualizer() viz.render(query_response, "Endpoint") query_response = sm_client.query_lineage( StartArns=[model_artifact.artifact_arn], Direction="Ascendants", IncludeEdges=True ) viz.render(query_response, "Model") ###Output _____no_output_____ ###Markdown ConclusionThis notebook demostrated the capabilities of SageMaker Lineage that make it easy for users to keep track of their complex ML workflows. Users can construct their own lineage queries using the `LineageQuery` API and `LineageFilter` or they can use the functions provided on the `EndpointContext`, `ModelArtifact`, and `DatasetArtifact` classes. In addition, the responses from lineage queries can be plotting using the helper class `Visualizer()` to better understand the relationship between the lineage entities. When using SageMaker Pipelines as part of their ML workflows, users can find Pipeline execution ARNs using the lineage APIs described in this notebook. CleanupIn this section we will cleanup the resources created in this notebook. 
###Code # Delete endpoint sm_client.delete_endpoint(EndpointName=endpoint_name) # # Delete the model package sm_client.delete_model_package(ModelPackageName=model_package.model_package_arn) # Delete the model package group sm_client.delete_model_package_group(ModelPackageGroupName=model_package_group_name) # Delete the experiment and trial within it import time def delete_experiment(experiment): for trial_summary in experiment.list_trials(): trial = Trial.load(trial_name=trial_summary.trial_name) for trial_component_summary in trial.list_trial_components(): tc = TrialComponent.load( trial_component_name=trial_component_summary.trial_component_name ) trial.remove_trial_component(tc) try: # comment out to keep trial components tc.delete() except: # tc is associated with another trial continue # to prevent throttling time.sleep(0.5) trial.delete() experiment_name = experiment.experiment_name experiment.delete() print(f"\nExperiment {experiment_name} deleted") # Delete the Experiment and Trials within it experiment = Experiment.load(experiment_name=exp.experiment_name) delete_experiment(experiment) ###Output _____no_output_____ ###Markdown Changing the focal point of lineageThe `LineageQuery` can be modified to have different `start_arns` which will change the focal point of lineage. In addition, the `LineageFilter` can take multiple sources and entities to expand the scope of the query. **Here we use the model as the lineage focal point and find the Endpoints and Datasets associated with it.** ###Code # Get the ModelArtifact model_artifact_summary = list(Artifact.list(source_uri=model_package_arn))[0] model_artifact = ModelArtifact.load(artifact_arn=model_artifact_summary.artifact_arn) query_filter = LineageFilter( entities=[LineageEntityEnum.ARTIFACT], sources=[LineageSourceEnum.ENDPOINT, LineageSourceEnum.DATASET], ) query_result = LineageQuery(sagemaker_session).query( start_arns=[model_artifact.artifact_arn], # Model is the starting artifact query_filter=query_filter, # Find all the entities that descend from the model, i.e. the endpoint direction=LineageQueryDirectionEnum.DESCENDANTS, include_edges=False, ) associations = [] for vertex in query_result.vertices: associations.append(vertex.to_lineage_object().source.source_uri) query_result = LineageQuery(sagemaker_session).query( start_arns=[model_artifact.artifact_arn], # Model is the starting artifact query_filter=query_filter, # Find all the entities that ascend from the model, i.e. the datasets direction=LineageQueryDirectionEnum.ASCENDANTS, include_edges=False, ) for vertex in query_result.vertices: associations.append(vertex.to_lineage_object().source.source_uri) pp.pprint(associations) ###Output _____no_output_____ ###Markdown Using LineageQueryDirectionEnum.BOTHWhen the direction is set to `BOTH`, when the query traverses the graph to find ascendant and descendant relationships, the traversal will take place not only from the starting node, but from each node that is visited. e.g. If the training job is run twice and both models generated by the training job are deployed to endpoints, this result of the query with direction set to `BOTH` will show both endpoints. This is because the same image is used for training and deploying the model. Since the image is common to the model (`start_arn`) and both the endpoints, it will appear in the query result. 
###Code query_filter = LineageFilter( entities=[LineageEntityEnum.ARTIFACT], sources=[LineageSourceEnum.ENDPOINT, LineageSourceEnum.DATASET], ) query_result = LineageQuery(sagemaker_session).query( start_arns=[model_artifact.artifact_arn], # Model is the starting artifact query_filter=query_filter, # This specifies that the query should look for associations both ascending and descending for the start direction=LineageQueryDirectionEnum.BOTH, include_edges=False, ) associations = [] for vertex in query_result.vertices: associations.append(vertex.to_lineage_object().source.source_uri) pp.pprint(associations) ###Output _____no_output_____ ###Markdown Directions in `LineageQuery` - `ASCENDANTS` vs. `DESCENDANTS`To understand the direction in the Lineage Graph, take the following entity relationship graph - Dataset -> Training Job -> Model -> EndpointThe endpoint is a **descendant** of the model, and the model is a **descendant** of the dataset. Similarly, the model is an **ascendant** of the endpoint The `direction` parameter can be used to specify whether the query should return entities that are descendants or ascendants of the entity in start_arns. If `start_arns` contains a model and the direction is `DESCENDANTS`, the query will return the endpoint. If the direction is `ASCENDANTS`, the query will return the dataset." ###Code # In this example, we'll look at the impact of specifying the direction as ASCENDANT or DESCENDANT in a `LineageQuery`. query_filter = LineageFilter( entities=[LineageEntityEnum.ARTIFACT], sources=[ LineageSourceEnum.ENDPOINT, LineageSourceEnum.MODEL, LineageSourceEnum.DATASET, LineageSourceEnum.TRAINING_JOB, ], ) query_result = LineageQuery(sagemaker_session).query( start_arns=[model_artifact.artifact_arn], query_filter=query_filter, direction=LineageQueryDirectionEnum.ASCENDANTS, include_edges=False, ) ascendant_artifacts = [] # The lineage entity returned for the Training Job is a TrialComponent which can't be converted to a # lineage object using the method `to_lineage_object()` so we extract the TrialComponent ARN. for vertex in query_result.vertices: try: ascendant_artifacts.append(vertex.to_lineage_object().source.source_uri) except: ascendant_artifacts.append(vertex.arn) print("Ascendant artifacts : ") pp.pprint(ascendant_artifacts) query_result = LineageQuery(sagemaker_session).query( start_arns=[model_artifact.artifact_arn], query_filter=query_filter, direction=LineageQueryDirectionEnum.DESCENDANTS, include_edges=False, ) descendant_artifacts = [] for vertex in query_result.vertices: try: descendant_artifacts.append(vertex.to_lineage_object().source.source_uri) except: # Handling TrialComponents. descendant_artifacts.append(vertex.arn) print("Descendant artifacts : ") pp.pprint(descendant_artifacts) ###Output _____no_output_____ ###Markdown SDK helper FunctionsThe classes `EndpointContext`, `ModelArtifact`, and `DatasetArtifact`have helper functions that are wrappers over the `LineageQuery` API to make certain lineage queries easier to leverage. 
###Code # Find all the datasets associated with this endpoint datasets = [] dataset_artifacts = endpoint_context.dataset_artifacts() for dataset in dataset_artifacts: datasets.append(dataset.source.source_uri) print("Datasets : ", datasets) # Find the training jobs associated with the endpoint training_job_artifacts = endpoint_context.training_job_arns() training_jobs = [] for training_job in training_job_artifacts: training_jobs.append(training_job) print("Training Jobs : ", training_jobs) # Get the ARN for the pipeline execution associated with this endpoint (if any) pipeline_executions = endpoint_context.pipeline_execution_arn() if pipeline_executions: for pipeline in pipelines_executions: print(pipeline) # Here we use the `ModelArtifact` class to find all the datasets and endpoints associated with the model dataset_artifacts = model_artifact.dataset_artifacts() endpoint_contexts = model_artifact.endpoint_contexts() datasets = [dataset.source.source_uri for dataset in dataset_artifacts] endpoints = [endpoint.source.source_uri for endpoint in endpoint_contexts] print("Datasets associated with this model : ") pp.pprint(datasets) print("Endpoints associated with this model : ") pp.pprint(endpoints) # Here we use the `DatasetArtifact` class to find all the endpoints hosting models that were trained with a particular dataset # Find the artifact associated with the dataset dataset_artifact_arn = list(Artifact.list(source_uri=training_data))[0].artifact_arn dataset_artifact = DatasetArtifact.load(artifact_arn=dataset_artifact_arn) # Find the endpoints that used this training dataset endpoint_contexts = dataset_artifact.endpoint_contexts() endpoints = [endpoint.source.source_uri for endpoint in endpoint_contexts] print("Endpoints associated with the training dataset {}".format(training_data)) pp.pprint(endpoints) ###Output _____no_output_____ ###Markdown Lineage Graph VisualizationA helper class `Visualizer()` is provided in `visualizer.py` to help plot the lineage graph. When the query response is rendered, a graph with the lineage relationships from the `StartArns` will be displayed. From the `StartArns` the visualization will show the relationships with the other lineage entities returned in the `query_lineage` API call. ###Code # Graph APIs # Here we use the boto3 `query_lineage` API to generate the query response to plot. from visualizer import Visualizer query_response = sm_client.query_lineage( StartArns=[endpoint_context.context_arn], Direction="Ascendants", IncludeEdges=True ) viz = Visualizer() viz.render(query_response, "Endpoint") query_response = sm_client.query_lineage( StartArns=[model_artifact.artifact_arn], Direction="Ascendants", IncludeEdges=True ) viz.render(query_response, "Model") ###Output _____no_output_____ ###Markdown ConclusionThis notebook demostrated the capabilities of SageMaker Lineage that make it easy for users to keep track of their complex ML workflows. Users can construct their own lineage queries using the `LineageQuery` API and `LineageFilter` or they can use the functions provided on the `EndpointContext`, `ModelArtifact`, and `DatasetArtifact` classes. In addition, the responses from lineage queries can be plotting using the helper class `Visualizer()` to better understand the relationship between the lineage entities. When using SageMaker Pipelines as part of their ML workflows, users can find Pipeline execution ARNs using the lineage APIs described in this notebook. CleanupIn this section we will cleanup the resources created in this notebook. 
###Code # Delete endpoint sm_client.delete_endpoint(EndpointName=endpoint_name) # # Delete the model package sm_client.delete_model_package(ModelPackageName=model_package.model_package_arn) # Delete the model package group sm_client.delete_model_package_group(ModelPackageGroupName=model_package_group_name) # Delete the experiment and trial within it import time def delete_experiment(experiment): for trial_summary in experiment.list_trials(): trial = Trial.load(trial_name=trial_summary.trial_name) for trial_component_summary in trial.list_trial_components(): tc = TrialComponent.load( trial_component_name=trial_component_summary.trial_component_name ) trial.remove_trial_component(tc) try: # comment out to keep trial components tc.delete() except: # tc is associated with another trial continue # to prevent throttling time.sleep(0.5) trial.delete() experiment_name = experiment.experiment_name experiment.delete() print(f"\nExperiment {experiment_name} deleted") # Delete the Experiment and Trials within it experiment = Experiment.load(experiment_name=exp.experiment_name) delete_experiment(experiment) ###Output _____no_output_____ ###Markdown Amazon SageMaker Multi-hop Lineage QueriesAmazon SageMaker Lineage tracks events that happen within SageMaker allowing the relationships between them to be traced via a graph structure. SageMaker Lineage introduces a new API called `LineageQuery` that allows customers to query the lineage graph structure to discover relationship across their Machine Learning entities. Your machine learning workflows can generate deeply nested relationships, the lineage APIs allow you to answer questions about these relationships. For example find all Data Sets that trained the model deployed to a given Endpoint or find all Models trained by a Data Set.The lineage graph is created automatically by SageMaker and you can directly create or modify your own lineage.In addition to the `LineageQuery` API, the SageMaker SDK provides wrapper functions that make it easy to run queries that span across multiple hops of the entity relationship graph. These APIs and helper functions are described in this notebook. Key Concepts* **Lineage Graph** - A connected graph tracing your machine learning workflow end to end. * **Artifacts** - Represents a URI addressable object or data. Artifacts are typically inputs or outputs to Actions. * **Actions** - Represents an action taken such as a computation, transformation, or job. * **Contexts** - Provides a method to logically group other entities.* **Associations** - A directed edge in the lineage graph that links two entities.* **Lineage Traversal** - Starting from an arbitrary point trace the lineage graph to discover and analyze relationships between steps in your workflow.* **Experiments** - Experiment entites (Experiments, Trials, and Trial Components) are also part of the lineage graph and can be associated wtih Artifacts, Actions, or Contexts. Prequisites[`sagemaker-experiments`](https://github.com/aws/sagemaker-experiments) and [`pyvis`]((https://pyvis.readthedocs.io/en/latest/)) are two Python libraries that need to be installed as part of this notebook execution. `pyvis` is a library designed for interactive network visualization and `sagemaker-experiments` gives users the ability to use SageMaker's Experiment Tracking capabilities. This notebook should be run with `Python 3.9` using the SageMaker Studio `Python3 (Data Science)` kernel. 
The `sagemaker` sdk version required for this notebook is `>2.70.0`.If running in SageMaker Classic Notebooks, use the `conda_python3` kernel. The AWS account running this notebook should have access to provision 2 instances of type `ml.m5.xlarge`. These instances are used for training and deploying a model. Let's start by installing preview wheels of the Python SDK, boto and aws cli ###Code # Fallback in case wheels are unavailable ! pip install sagemaker botocore boto3 awscli --upgrade import subprocess def execute_cmd(cmd): print(cmd) output = subprocess.getstatusoutput(cmd) return output def _download_from_s3(_file_path): _path = f"s3://reinvent21-sm-rc-wheels/{_file_path}" print(f"Path is {_path}") ls_cmd = f"aws s3 ls {_path}" print(execute_cmd(ls_cmd)) cmd = f"aws s3 cp {_path} /tmp/" print("Downloading: ", cmd) return execute_cmd(cmd) def _install_wheel(wheel_name): cmd = f"pip install --no-deps --log /tmp/output3.log /tmp/{wheel_name} --force-reinstall" ret = execute_cmd(cmd) _name = wheel_name.split(".")[0] _, _version = execute_cmd(f"python -c 'import {_name}; print({_name}.__version__)'") for package in ["botocore", "sagemaker", "boto3", "awscli"]: print(execute_cmd(f"python -c 'import {package}; print({package}.__version__)'")) print(f"Installed {_name}:{_version}") return ret def install_sm_py_sdk(): pySDK_name = "sagemaker.tar.gz" exit_code, _ = _download_from_s3("dist/sagemaker.tar.gz") if not exit_code: _install_wheel(pySDK_name) else: print(f"'{pySDK_name}' is not present in S3 Bucket. Installing from public PyPi...") execute_cmd("pip install sagemaker") def install_boto_wheels(): WHEELS = ["botocore.tar.gz", "boto3.tar.gz", "awscli.tar.gz"] for wheel_name in WHEELS: _path = f"boto3/{wheel_name}" exit_code, _ = _download_from_s3(_path) if not exit_code: _install_wheel(wheel_name) else: print(f"'{wheel_name}' is not present in S3 Bucket. Ignoring...") install_boto_wheels() install_sm_py_sdk() !pip install sagemaker-experiments pyvis ###Output _____no_output_____ ###Markdown Notebook OverviewThis notebook demonstrates how to use SageMaker Lineage APIs to query multi-hop relationships across the lineage graph. Multi-hop relationships are those that span beyond single entity relationships, e.g. Model -> Endpoint, Training Job -> Model. Multi-hop queries allow users to search for distant relationships across the Lineage Graph such as Endpoint -> Data Set.To demonstrate these capabilities, in this notebook we create a training job, register a model to the Model Registry, and deploy the model to an Endpoint. 
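Before moving on, it can help to confirm that the installed SDK actually satisfies the version requirement stated in the prerequisites. The short check below is an addition (not part of the original walkthrough) and assumes the `packaging` package is available in the kernel, which is typically the case in SageMaker images.
###Code
# Sanity check (an addition): verify the SageMaker SDK meets the >2.70.0
# requirement noted in the prerequisites before running the rest of the notebook.
import sagemaker
from packaging import version

print("sagemaker version:", sagemaker.__version__)
assert version.parse(sagemaker.__version__) > version.parse("2.70.0"), \
    "Upgrade the sagemaker SDK to a version newer than 2.70.0."
###Output _____no_output_____ ###Markdown The next cell sets up the boto3 clients, SageMaker session, and constants used throughout the notebook.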
###Code import os import boto3 import sagemaker import pprint from botocore.config import Config boto_session = boto3.Session() config = Config(retries={"max_attempts": 50, "mode": "adaptive"}) sm_client = boto3.client("sagemaker", config=config) region = boto_session.region_name sagemaker_session = sagemaker.Session(sagemaker_client=sm_client, boto_session=boto_session) default_bucket = sagemaker_session.default_bucket() role = sagemaker.get_execution_role() # Helper function to print query outputs pp = pprint.PrettyPrinter() from datetime import datetime training_instance_type = "ml.m5.xlarge" inference_instance_type = "ml.m5.xlarge" s3_prefix = "multihop-example" unique_id = str(datetime.now().timestamp()).split(".")[0] ###Output _____no_output_____ ###Markdown Create an Experiment and Trial for a training job ###Code from smexperiments.experiment import Experiment from smexperiments.trial import Trial from smexperiments.trial_component import TrialComponent experiment_name = f"MultihopQueryExperiment-{unique_id}" exp = Experiment.create(experiment_name=experiment_name, sagemaker_boto_client=sm_client) trial = Trial.create( experiment_name=exp.experiment_name, trial_name=f"MultihopQueryTrial-{unique_id}", sagemaker_boto_client=sm_client, ) print(exp.experiment_name) print(trial.trial_name) ###Output _____no_output_____ ###Markdown Training DataCreating a `data/` directory to store the preprocessed [UCI Abalone](https://archive.ics.uci.edu/ml/datasets/abalone) dataset. The preprocessing is done using the preprocessing script defined in [this](https://github.com/aws/amazon-sagemaker-examples/blob/master/sagemaker-pipelines/tabular/abalone_build_train_deploy/sagemaker-pipelines-preprocess-train-evaluate-batch-transform.ipynb) notebook. Then training and validation data is uploaded to S3 so that it can be used in the training and inference job. ###Code default_bucket if not os.path.exists("./data/"): os.makedirs("./data/") print("Directory Created ") else: print("Directory already exists") # Download the processed abalone dataset files s3 = boto3.client("s3") s3.download_file( f"sagemaker-sample-files", "datasets/tabular/uci_abalone/preprocessed/test.csv", "./data/test.csv", ) s3.download_file( f"sagemaker-sample-files", "datasets/tabular/uci_abalone/preprocessed/train.csv", "./data/train.csv", ) s3.download_file( f"sagemaker-sample-files", "datasets/tabular/uci_abalone/preprocessed/validation.csv", "./data/validation.csv", ) # Upload the datasets to the SageMaker session default bucket boto3.Session().resource("s3").Bucket(default_bucket).Object( "experiments-demo/train.csv" ).upload_file("data/train.csv") boto3.Session().resource("s3").Bucket(default_bucket).Object( "experiments-demo/validation.csv" ).upload_file("data/validation.csv") training_data = f"s3://{default_bucket}/experiments-demo/train.csv" validation_data = f"s3://{default_bucket}/experiments-demo/validation.csv" ###Output _____no_output_____ ###Markdown Create a training jobWe train a simple XGBoost model on the [Abalone dataset](https://www.google.com/search?client=firefox-b-1-d&q=abalone+dataset). `sagemaker.image_uris.retrieve()` is used to get the sagemaker container for XGBoost so that it can be used in the Estimator. In the `.fit()` function, we pass in a training and validation dataset along with an `experiment_config`. The `experiment_config` ensures that the metrics, parameters, and artifats associated with this training job are logged to the experiment and trial created above. 
###Code from sagemaker.estimator import Estimator model_path = f"s3://{default_bucket}/{s3_prefix}/xgb_model" training_instance_type = "ml.m5.large" image_uri = sagemaker.image_uris.retrieve( framework="xgboost", region=region, version="1.0-1", py_version="py3", instance_type=training_instance_type, ) xgb_train = Estimator( image_uri=image_uri, instance_type=training_instance_type, instance_count=1, output_path=model_path, sagemaker_session=sagemaker_session, role=role, ) xgb_train.set_hyperparameters( objective="reg:linear", num_round=50, max_depth=5, eta=0.2, gamma=4, min_child_weight=6, subsample=0.7, silent=0, ) from sagemaker.inputs import TrainingInput xgb_train.fit( inputs={ "train": TrainingInput( s3_data=training_data, content_type="text/csv", ), "validation": TrainingInput( s3_data=validation_data, content_type="text/csv", ), }, experiment_config={ "ExperimentName": experiment_name, "TrialName": trial.trial_name, "TrialComponentDisplayName": "MultiHopQueryTrialComponent", }, ) ###Output _____no_output_____ ###Markdown Create a Model Package Group for the trained model to be registeredCreate a new Model Package Group or use an existing one to register the model ###Code model_package_group_name = "lineage-test-" + unique_id mpg = sm_client.create_model_package_group(ModelPackageGroupName=model_package_group_name) mpg_arn = mpg["ModelPackageGroupArn"] ###Output _____no_output_____ ###Markdown Register the model in the Model RegistryOnce the model is registered, you will see it in the Model Registry tab of the SageMaker Studio UI. The model is registered with the `approval_status` set to "Approved". By default, the model is registered with the `approval_status` set to "PendingManualApproval". Users can then navigate to the Model Registry to manually approve the model based on any criteria set for model evaluation or this can be done via API. ###Code inference_instance_type = "ml.m5.xlarge" model_package = xgb_train.register( model_package_group_name=mpg_arn, inference_instances=[inference_instance_type], transform_instances=[inference_instance_type], content_types=["text/csv"], response_types=["text/csv"], approval_status="Approved", ) model_package_arn = model_package.model_package_arn print("Model Package ARN : ", model_package_arn) ###Output _____no_output_____ ###Markdown Deploy the model to a SageMaker EndpointA SageMaker Endpoint is used to host a model that can be used for inference. The type of endpoint deployed in this notebook is a real time inference endpoint. This is ideal for inference workloads where you have real-time, interactive, low latency requirements. ###Code endpoint_name = "lineage-test-endpoint-" + unique_id model_package.deploy( endpoint_name=endpoint_name, initial_instance_count=1, instance_type=inference_instance_type, ) # Get the endpoint ARN endpoint_arn = sm_client.describe_endpoint(EndpointName=endpoint_name)["EndpointArn"] print(endpoint_arn) ###Output _____no_output_____ ###Markdown SageMaker Lineage QueriesWe explore SageMaker's lineage capabilities to traverse the relationships between the entities created in this notebook - datasets, model, endpoint, and training job. 
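Before running the queries, you can optionally confirm that the deployed endpoint is actually serving predictions. The cell below is a sketch rather than part of the original flow: it assumes the endpoint above has finished creating, that the `data/validation.csv` file downloaded earlier is still present, and that its first column holds the label (the layout XGBoost expects for CSV training), which is why that column is dropped before invoking the endpoint.
###Code
# Optional sanity check (a sketch): invoke the real-time endpoint once with a
# single row taken from the local validation file.
import boto3

runtime = boto3.client("sagemaker-runtime")

with open("data/validation.csv") as f:
    row = f.readline().strip()

# Assumption: the first column is the target, so drop it for inference.
payload = ",".join(row.split(",")[1:])

response = runtime.invoke_endpoint(
    EndpointName=endpoint_name, ContentType="text/csv", Body=payload
)
print("Prediction:", response["Body"].read().decode())
###Output _____no_output_____ ###Markdown First, import the lineage entity and query classes used throughout this section.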
###Code from sagemaker.lineage.context import Context, EndpointContext from sagemaker.lineage.action import Action from sagemaker.lineage.association import Association from sagemaker.lineage.artifact import Artifact, ModelArtifact, DatasetArtifact from sagemaker.lineage.query import ( LineageQuery, LineageFilter, LineageSourceEnum, LineageEntityEnum, LineageQueryDirectionEnum, ) ###Output _____no_output_____ ###Markdown Using the LineageQuery API to find entity associationsIn this section we use two APIs, `LineageQuery` and `LineageFilter` to construct queries to answer questions about the Lineage Graph and extract entity relationships. LineageQuery parameters:* `start_arns`: A list of ARNs that will be used as the starting point for the query.* `direction`: The direction of the query.* `include_edges`: If true, return edges in addition to vertices.* `query_filter`: The query filter.LineageFilter paramters:* `entities`: A list of entity types (Artifact, Association, Action) to filter for when returning the results on LineageQuery* `sources`: A list of source types (Endpoint, Model, Dataset) to filter for when returning the results of LineageQueryA `Context` is automatically created when a SageMaker Endpoint is created, an `Artifact` is automatically created when a Model is created in SageMaker. ###Code # Find the endpoint context and model artifact that should be used for the lineage queries. contexts = Context.list(source_uri=endpoint_arn) context_name = list(contexts)[0].context_name endpoint_context = EndpointContext.load(context_name=context_name) ###Output _____no_output_____ ###Markdown Find all datasets associated with an Endpoint ###Code # Define the LineageFilter to look for entities of type `ARTIFACT` and the source of type `DATASET`. query_filter = LineageFilter( entities=[LineageEntityEnum.ARTIFACT], sources=[LineageSourceEnum.DATASET] ) # Providing this `LineageFilter` to the `LineageQuery` will construct a query that traverses through the given context `endpoint_context` # and find all datasets. query_result = LineageQuery(sagemaker_session).query( start_arns=[endpoint_context.context_arn], query_filter=query_filter, direction=LineageQueryDirectionEnum.ASCENDANTS, include_edges=False, ) # Parse through the query results to get the lineage objects corresponding to the datasets dataset_artifacts = [] for vertex in query_result.vertices: dataset_artifacts.append(vertex.to_lineage_object().source.source_uri) pp.pprint(dataset_artifacts) ###Output _____no_output_____ ###Markdown Find the models associated with an Endpoint ###Code # Define the LineageFilter to look for entities of type `ARTIFACT` and the source of type `MODEL`. query_filter = LineageFilter( entities=[LineageEntityEnum.ARTIFACT], sources=[LineageSourceEnum.MODEL] ) # Providing this `LineageFilter` to the `LineageQuery` will construct a query that traverses through the given context `endpoint_context` # and find all datasets. 
query_result = LineageQuery(sagemaker_session).query( start_arns=[endpoint_context.context_arn], query_filter=query_filter, direction=LineageQueryDirectionEnum.ASCENDANTS, include_edges=False, ) # Parse through the query results to get the lineage objects corresponding to the model model_artifacts = [] for vertex in query_result.vertices: model_artifacts.append(vertex.to_lineage_object().source.source_uri) # The results of the `LineageQuery` API call return the ARN of the model deployed to the endpoint along with # the S3 URI to the model.tar.gz file associated with the model pp.pprint(model_artifacts) ###Output _____no_output_____ ###Markdown Find the trial components associated with the endpoint ###Code # Define the LineageFilter to look for entities of type `TRIAL_COMPONENT` and the source of type `TRAINING_JOB`. query_filter = LineageFilter( entities=[LineageEntityEnum.TRIAL_COMPONENT], sources=[LineageSourceEnum.TRAINING_JOB], ) # Providing this `LineageFilter` to the `LineageQuery` will construct a query that traverses through the given context `endpoint_context` # and find all datasets. query_result = LineageQuery(sagemaker_session).query( start_arns=[endpoint_context.context_arn], query_filter=query_filter, direction=LineageQueryDirectionEnum.ASCENDANTS, include_edges=False, ) # Parse through the query results to get the ARNs of the training jobs associated with this Endpoint trial_components = [] for vertex in query_result.vertices: trial_components.append(vertex.arn) pp.pprint(trial_components) ###Output _____no_output_____ ###Markdown Changing the focal point of lineageThe `LineageQuery` can be modified to have different `start_arns` which will change the focal point of lineage. In addition, the `LineageFilter` can take multiple sources and entities to expand the scope of the query. **Here we use the model as the lineage focal point and find the Endpoints and Datasets associated with it.** ###Code # Get the ModelArtifact model_artifact_summary = list(Artifact.list(source_uri=model_package_arn))[0] model_artifact = ModelArtifact.load(artifact_arn=model_artifact_summary.artifact_arn) query_filter = LineageFilter( entities=[LineageEntityEnum.ARTIFACT], sources=[LineageSourceEnum.ENDPOINT, LineageSourceEnum.DATASET], ) query_result = LineageQuery(sagemaker_session).query( start_arns=[model_artifact.artifact_arn], # Model is the starting artifact query_filter=query_filter, # Find all the entities that descend from the model, i.e. the endpoint direction=LineageQueryDirectionEnum.DESCENDANTS, include_edges=False, ) associations = [] for vertex in query_result.vertices: associations.append(vertex.to_lineage_object().source.source_uri) query_result = LineageQuery(sagemaker_session).query( start_arns=[model_artifact.artifact_arn], # Model is the starting artifact query_filter=query_filter, # Find all the entities that ascend from the model, i.e. the datasets direction=LineageQueryDirectionEnum.ASCENDANTS, include_edges=False, ) for vertex in query_result.vertices: associations.append(vertex.to_lineage_object().source.source_uri) pp.pprint(associations) ###Output _____no_output_____ ###Markdown Using LineageQueryDirectionEnum.BOTHWhen the direction is set to `BOTH`, when the query traverses the graph to find ascendant and descendant relationships, the traversal will take place not only from the starting node, but from each node that is visited. e.g. 
If the training job is run twice and both models generated by the training job are deployed to endpoints, this result of the query with direction set to `BOTH` will show both endpoints. This is because the same image is used for training and deploying the model. Since the image is common to the model (`start_arn`) and both the endpoints, it will appear in the query result. ###Code query_filter = LineageFilter( entities=[LineageEntityEnum.ARTIFACT], sources=[LineageSourceEnum.ENDPOINT, LineageSourceEnum.DATASET], ) query_result = LineageQuery(sagemaker_session).query( start_arns=[model_artifact.artifact_arn], # Model is the starting artifact query_filter=query_filter, # This specifies that the query should look for associations both ascending and descending for the start direction=LineageQueryDirectionEnum.BOTH, include_edges=False, ) associations = [] for vertex in query_result.vertices: associations.append(vertex.to_lineage_object().source.source_uri) pp.pprint(associations) ###Output _____no_output_____ ###Markdown Directions in `LineageQuery` - `ASCENDANTS` vs. `DESCENDANTS`To understand the direction in the Lineage Graph, take the following entity relationship graph - Dataset -> Training Job -> Model -> EndpointThe endpoint is a **descendant** of the model, and the model is a **descendant** of the dataset. Similarly, the model is an **ascendant** of the endpoint The `direction` parameter can be used to specify whether the query should return entities that are descendants or ascendants of the entity in start_arns. If `start_arns` contains a model and the direction is `DESCENDANTS`, the query will return the endpoint. If the direction is `ASCENDANTS`, the query will return the dataset." ###Code # In this example, we'll look at the impact of specifying the direction as ASCENDANT or DESCENDANT in a `LineageQuery`. query_filter = LineageFilter( entities=[LineageEntityEnum.ARTIFACT], sources=[ LineageSourceEnum.ENDPOINT, LineageSourceEnum.MODEL, LineageSourceEnum.DATASET, LineageSourceEnum.TRAINING_JOB, ], ) query_result = LineageQuery(sagemaker_session).query( start_arns=[model_artifact.artifact_arn], query_filter=query_filter, direction=LineageQueryDirectionEnum.ASCENDANTS, include_edges=False, ) ascendant_artifacts = [] # The lineage entity returned for the Training Job is a TrialComponent which can't be converted to a # lineage object using the method `to_lineage_object()` so we extract the TrialComponent ARN. for vertex in query_result.vertices: try: ascendant_artifacts.append(vertex.to_lineage_object().source.source_uri) except: ascendant_artifacts.append(vertex.arn) print("Ascendant artifacts : ") pp.pprint(ascendant_artifacts) query_result = LineageQuery(sagemaker_session).query( start_arns=[model_artifact.artifact_arn], query_filter=query_filter, direction=LineageQueryDirectionEnum.DESCENDANTS, include_edges=False, ) descendant_artifacts = [] for vertex in query_result.vertices: try: descendant_artifacts.append(vertex.to_lineage_object().source.source_uri) except: # Handling TrialComponents. descendant_artifacts.append(vertex.arn) print("Descendant artifacts : ") pp.pprint(descendant_artifacts) ###Output _____no_output_____ ###Markdown SDK helper FunctionsThe classes `EndpointContext`, `ModelArtifact`, and `DatasetArtifact`have helper functions that are wrappers over the `LineageQuery` API to make certain lineage queries easier to leverage. 
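As a quick illustration before the full cells below, the pipeline-execution helper can be exercised on its own. This notebook does not create a SageMaker Pipeline, so nothing is expected to print; the value is shown as returned, since the helper may yield a single ARN rather than a list, and the same name should then be used in the `if` check and any loop that consumes it.
###Code
# Minimal sketch of the pipeline-execution lookup. No SageMaker Pipeline was
# created in this notebook, so this is expected to print nothing.
pipeline_executions = endpoint_context.pipeline_execution_arn()
if pipeline_executions:
    print(pipeline_executions)
###Output _____no_output_____ ###Markdown The cells below use the helper functions directly.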
###Code # Find all the datasets associated with this endpoint datasets = [] dataset_artifacts = endpoint_context.dataset_artifacts() for dataset in dataset_artifacts: datasets.append(dataset.source.source_uri) print("Datasets : ", datasets) # Find the training jobs associated with the endpoint training_job_artifacts = endpoint_context.training_job_arns() training_jobs = [] for training_job in training_job_artifacts: training_jobs.append(training_job) print("Training Jobs : ", training_jobs) # Get the ARN for the pipeline execution associated with this endpoint (if any) pipeline_executions = endpoint_context.pipeline_execution_arn() if pipeline_executions: for pipeline in pipelines_executions: print(pipeline) # Here we use the `ModelArtifact` class to find all the datasets and endpoints associated with the model dataset_artifacts = model_artifact.dataset_artifacts() endpoint_contexts = model_artifact.endpoint_contexts() datasets = [dataset.source.source_uri for dataset in dataset_artifacts] endpoints = [endpoint.source.source_uri for endpoint in endpoint_contexts] print("Datasets associated with this model : ") pp.pprint(datasets) print("Endpoints associated with this model : ") pp.pprint(endpoints) # Here we use the `DatasetArtifact` class to find all the endpoints hosting models that were trained with a particular dataset # Find the artifact associated with the dataset dataset_artifact_arn = list(Artifact.list(source_uri=training_data))[0].artifact_arn dataset_artifact = DatasetArtifact.load(artifact_arn=dataset_artifact_arn) # Find the endpoints that used this training dataset endpoint_contexts = dataset_artifact.endpoint_contexts() endpoints = [endpoint.source.source_uri for endpoint in endpoint_contexts] print("Endpoints associated with the training dataset {}".format(training_data)) pp.pprint(endpoints) ###Output _____no_output_____ ###Markdown Lineage Graph VisualizationA helper class `Visualizer()` is provided in `visualizer.py` to help plot the lineage graph. When the query response is rendered, a graph with the lineage relationships from the `StartArns` will be displayed. From the `StartArns` the visualization will show the relationships with the other lineage entities returned in the `query_lineage` API call. ###Code # Graph APIs # Here we use the boto3 `query_lineage` API to generate the query response to plot. from visualizer import Visualizer query_response = sm_client.query_lineage( StartArns=[endpoint_context.context_arn], Direction="Ascendants", IncludeEdges=True ) viz = Visualizer() viz.render(query_response, "Endpoint") query_response = sm_client.query_lineage( StartArns=[model_artifact.artifact_arn], Direction="Ascendants", IncludeEdges=True ) viz.render(query_response, "Model") ###Output _____no_output_____ ###Markdown ConclusionThis notebook demostrated the capabilities of SageMaker Lineage that make it easy for users to keep track of their complex ML workflows. Users can construct their own lineage queries using the `LineageQuery` API and `LineageFilter` or they can use the functions provided on the `EndpointContext`, `ModelArtifact`, and `DatasetArtifact` classes. In addition, the responses from lineage queries can be plotting using the helper class `Visualizer()` to better understand the relationship between the lineage entities. When using SageMaker Pipelines as part of their ML workflows, users can find Pipeline execution ARNs using the lineage APIs described in this notebook. CleanupIn this section we will cleanup the resources created in this notebook. 
###Code # Delete endpoint sm_client.delete_endpoint(EndpointName=endpoint_name) # # Delete the model package sm_client.delete_model_package(ModelPackageName=model_package.model_package_arn) # Delete the model package group sm_client.delete_model_package_group(ModelPackageGroupName=model_package_group_name) # Delete the experiment and trial within it import time def delete_experiment(experiment): for trial_summary in experiment.list_trials(): trial = Trial.load(trial_name=trial_summary.trial_name) for trial_component_summary in trial.list_trial_components(): tc = TrialComponent.load( trial_component_name=trial_component_summary.trial_component_name ) trial.remove_trial_component(tc) try: # comment out to keep trial components tc.delete() except: # tc is associated with another trial continue # to prevent throttling time.sleep(0.5) trial.delete() experiment_name = experiment.experiment_name experiment.delete() print(f"\nExperiment {experiment_name} deleted") # Delete the Experiment and Trials within it experiment = Experiment.load(experiment_name=exp.experiment_name) delete_experiment(experiment) ###Output _____no_output_____
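###Markdown Optionally, you can also remove the lineage entities that SageMaker created automatically during this walkthrough. The sketch below is an addition, not part of the original cleanup: it assumes `endpoint_context` from earlier cells is still defined, and it only deletes the endpoint context and the associations pointing at it (pagination and other artifacts/contexts are intentionally left out).
###Code
# Optional extra cleanup (a sketch): delete associations that point at the
# endpoint context, then delete the context itself. Artifacts are left intact.
assocs = sm_client.list_associations(DestinationArn=endpoint_context.context_arn)
for assoc in assocs["AssociationSummaries"]:
    sm_client.delete_association(
        SourceArn=assoc["SourceArn"], DestinationArn=assoc["DestinationArn"]
    )
sm_client.delete_context(ContextName=endpoint_context.context_name)
###Output _____no_output_____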
NeolithicMath.ipynb
###Markdown [Digital Mathematics Curriculum](http://wikieducator.org/Digital_Math) Neolithic MathNeolithic Math provides a way to get back to basics, but without sacrificing the heuristics now believed to have guided many an ancient culture. Even though the tools were relatively primitive, we have ample evidence that geographic and astronomic information was embedded in these arts and crafts. We look at Stonehenge and further back to early hominids to establish our direction in time (towards the past); however, our historical approach is welcome to bring the storylines right up to the present. Then we switch to science fiction mode to speculate about the future, in Martian Math. In each age along the timeline, civilization takes risks ([Casino Math](CasinoMath.ipynb)) and expresses some economy or logistics capability ([Supermarket Math](SuperMarketMath.ipynb)). Given the historical approach, expect to focus on the evolution of mathematical concepts and notations, with attention to how they get passed on through various institutions (not only schools). Time & SpaceOur work here involves keeping track of time using calendars, and relating these calendars to astronomical cycles and relationships. With astronomical considerations come geography and geodesy, and the history of map making. What is a map? What is a calendar? We will study the Gregorian Calendar, but also several others. We will not neglect to share multiple cosmologies right up to those of the present day. ###Code import datetime jan1_0001 = datetime.datetime(1,1,1) jan1_0001.toordinal() # uncomment the line below to see source code ?? jan1_0001 ###Output _____no_output_____
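###Markdown As a small extension of the cell above (an addition, not part of the original notebook), proleptic Gregorian ordinals turn calendar questions into integer arithmetic, which is convenient when comparing dates across long spans of time.
###Code
# Day arithmetic with ordinals: every date maps to an integer (1 Jan 0001 -> 1),
# so the span between two dates is just a subtraction.
import datetime

today = datetime.date.today()
print(today.toordinal())                                        # days since 0001-01-01
print(today.toordinal() - datetime.date(1, 1, 1).toordinal())   # elapsed days
print(datetime.date.fromordinal(730000))                        # round-trip back to a date
###Output _____no_output_____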
notebooks_to_get_image_data_for_cnns/get_images_for_both.ipynb
###Markdown Create new directory to store images, store image file names as indices to correspond to dataframe entries ###Code # import urllib.request # first_reset = False # anger_count = 0 # non_anger_count = 0 # for i, row in df_anger.iterrows(): # print('image # : ' + str(i) + ' ' + row['image']) # print(row['label numerical']) # # train # if i < 2410: # if row['label numerical']: # urllib.request.urlretrieve(row['image'], 'train/anger/anger_{}.jpg'.format(str(anger_count))) # anger_count += 1 # else: # urllib.request.urlretrieve(row['image'], 'train/non_anger/non_anger_{}.jpg'.format(str(non_anger_count))) # non_anger_count += 1 # # test # else: # if first_reset == False: # first_reset = True # anger_count = 0 # non_anger_count = 0 # if row ['label numerical']: # urllib.request.urlretrieve(row['image'], 'validation/anger/anger_{}.jpg'.format(str(anger_count))) # anger_count += 1 # else: # urllib.request.urlretrieve(row['image'], 'validation/non_anger/non_anger_{}.jpg'.format(str(non_anger_count))) # non_anger_count += 1 # # urllib.request.urlretrieve(row['image'], 'images_from_dataset/{}.jpg'.format(str(i))) import urllib.request first_reset = False both_count = 0 non_both_count = 0 for i, row in df_both.iterrows(): print('image # : ' + str(i) + ' ' + row['image']) print(row['label numerical']) if row['label numerical']: urllib.request.urlretrieve(row['image'], 'both_classification/both/both_{}.jpg'.format(str(both_count))) both_count += 1 else: urllib.request.urlretrieve(row['image'], 'both_classification/non_both/non_both_{}.jpg'.format(str(non_both_count))) non_both_count += 1 # from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img # train_datagen = ImageDataGenerator( # rotation_range = 40, # width_shift_range = 0.2, # height_shift_range = 0.2, # rescale = 1./255, # shear_range = 0.2, # zoom_range = 0.2, # horizontal_flip = True) # validation_datagen = ImageDataGenerator(rescale=1./255) # train_generator = train_datagen.flow_from_directory( # 'train', # batch_size=32, # class_mode='binary') # validation_generator = validation_datagen.flow_from_directory( # 'validation', # batch_size=32, # class_mode='binary') ###Output _____no_output_____
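###Markdown A possible next step (an addition, not from the original notebook): point a Keras `ImageDataGenerator` at the images downloaded above. This is only a sketch; it assumes the `both_classification/both` and `both_classification/non_both` folders are populated and uses `validation_split` instead of the separate train/validation directories referenced in the commented-out code.
###Code
# A sketch for feeding the downloaded images to a CNN. Assumes the directory
# layout written above: both_classification/{both,non_both}/*.jpg
from keras.preprocessing.image import ImageDataGenerator

datagen = ImageDataGenerator(rescale=1./255, validation_split=0.2)

train_generator = datagen.flow_from_directory(
    'both_classification', target_size=(150, 150),
    batch_size=32, class_mode='binary', subset='training')

validation_generator = datagen.flow_from_directory(
    'both_classification', target_size=(150, 150),
    batch_size=32, class_mode='binary', subset='validation')
###Output _____no_output_____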
docs/r2/image_summaries.ipynb
###Markdown Copyright 2019 The TensorFlow Authors. ###Code #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ###Output _____no_output_____ ###Markdown Displaying image data in TensorBoard View on TensorFlow.org Run in Google Colab View source on GitHub OverviewUsing the **TensorFlow Image Summary API,** you can easily log tensors and arbitrary images and view them in TensorBoard. This can be extremely helpful to sample and examine your input data, or to [visualize layer weights](http://cs231n.github.io/understanding-cnn/) and [generated tensors](https://hub.packtpub.com/generative-adversarial-networks-using-keras/). You can also log diagnostic data as images that can be helpful in the course of your model development.In this tutorial, you will use learn how to use the Image Summary API to visualize tensors as images. You will also learn how to take an arbitrary image, convert it to a tensor, and visualize it in TensorBoard. You will work through a simple but real example that uses Image Summaries to help you understand how your model is performing. Setup ###Code # Ensure TensorFlow 2.0 is installed. !pip install -q tf-nightly-2.0-preview # Load the TensorBoard notebook extension. %load_ext tensorboard.notebook from __future__ import absolute_import from __future__ import division from __future__ import print_function from datetime import datetime import io import itertools from packaging import version from six.moves import range import tensorflow as tf from tensorflow import keras import matplotlib.pyplot as plt import numpy as np import sklearn.metrics print("TensorFlow version: ", tf.__version__) assert version.parse(tf.__version__).release[0] >= 2, \ "This notebook requires TensorFlow 2.0 or above." ###Output TensorFlow version: 2.0.0-dev20190228 ###Markdown Download the Fashion-MNIST datasetYou're going to construct a simple neural network to classify images in the the [Fashion-MNIST](https://research.zalando.com/welcome/mission/research-projects/fashion-mnist/) dataset. This dataset consist of 70,000 28x28 grayscale images of fashion products from 10 categories, with 7,000 images per category.First, download the data: ###Code # Download the data. The data is already divided into train and test. # The labels are integers representing classes. fashion_mnist = keras.datasets.fashion_mnist (train_images, train_labels), (test_images, test_labels) = \ fashion_mnist.load_data() # Names of the integer classes, i.e., 0 -> T-short/top, 1 -> Trouser, etc. 
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot'] ###Output Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/train-labels-idx1-ubyte.gz 32768/29515 [=================================] - 0s 0us/step Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/train-images-idx3-ubyte.gz 26427392/26421880 [==============================] - 0s 0us/step Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/t10k-labels-idx1-ubyte.gz 8192/5148 [===============================================] - 0s 0us/step Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/t10k-images-idx3-ubyte.gz 4423680/4422102 [==============================] - 0s 0us/step ###Markdown Visualizing a single imageTo understand how the Image Summary API works, you're now going to simply log the first training image in your training set in TensorBoard.Before you do that, examine the shape of your training data: ###Code print("Shape: ", train_images[0].shape) print("Label: ", train_labels[0], "->", class_names[train_labels[0]]) ###Output Shape: (28, 28) Label: 9 -> Ankle boot ###Markdown Notice that the shape of each image in the data set is a rank-2 tensor of shape (28, 28), representing the height and the width.However, ```tf.summary.image()``` expects a rank-4 tensor containing ```(batch_size, height, width, channels)```. Therefore, the tensors need to be reshaped. You're logging only one image, so ```batch_size``` is 1. The images are grayscale, so set ```channels``` to 1. ###Code # Reshape the image for the Summary API. img = np.reshape(train_images[0], (-1, 28, 28, 1)) ###Output _____no_output_____ ###Markdown You're now ready to log this image and view it in TensorBoard. ###Code # Clear out any prior log data. !rm -rf logs # Sets up a timestamped log directory. logdir = "logs/train_data/" + datetime.now().strftime("%Y%m%d-%H%M%S") # Creates a file writer for the log directory. file_writer = tf.summary.create_file_writer(logdir) # Using the file writer, log the reshaped image. with file_writer.as_default(): tf.summary.image("Training data", img, step=0) ###Output _____no_output_____ ###Markdown Now, use TensorBoard to examine the image. Wait a few seconds for the UI to spin up. ###Code %tensorboard --logdir logs/train_data ###Output _____no_output_____ ###Markdown The "Images" tab displays the image you just logged. It's an "ankle boot". The image is scaled to a default size for easier viewing. If you want to view the unscaled original image, check "Show actual image size" at the upper left. Play with the brightness and contrast sliders to see how they affect the image pixels. Visualizing multiple imagesLogging one tensor is great, but what if you wanted to log multiple training examples?Simply specify the number of images you want to log when passing data to ```tf.summary.image()```. ###Code with file_writer.as_default(): # Don't forget to reshape. 
images = np.reshape(train_images[0:25], (-1, 28, 28, 1)) tf.summary.image("25 training data examples", images, max_outputs=25, step=0) %tensorboard --logdir logs/train_data ###Output _____no_output_____ ###Markdown Logging arbitrary image dataWhat if you want to visualize an image that's not a tensor, such as an image generated by [matplotlib](https://matplotlib.org/)?You need some boilerplate code to convert the plot to a tensor, but after that, you're good to go.In the code below, you'll log the first 25 images as a nice grid using matplotlib's ```subplot()``` function. You'll then view the grid in TensorBoard: ###Code # Clear out prior logging data. !rm -rf logs/plots logdir = "logs/plots/" + datetime.now().strftime("%Y%m%d-%H%M%S") file_writer = tf.summary.create_file_writer(logdir) def plot_to_image(figure): """Converts the matplotlib plot specified by 'figure' to a PNG image and returns it. The supplied figure is closed and inaccessible after this call.""" # Save the plot to a PNG in memory. buf = io.BytesIO() plt.savefig(buf, format='png') # Closing the figure prevents it from being displayed directly inside # the notebook. plt.close(figure) buf.seek(0) # Convert PNG buffer to TF image image = tf.image.decode_png(buf.getvalue(), channels=4) # Add the batch dimension image = tf.expand_dims(image, 0) return image def image_grid(): """Return a 5x5 grid of the MNIST images as a matplotlib figure.""" # Create a figure to contain the plot. figure = plt.figure(figsize=(10,10)) for i in range(25): # Start next subplot. plt.subplot(5, 5, i + 1, title=class_names[train_labels[i]]) plt.xticks([]) plt.yticks([]) plt.grid(False) plt.imshow(train_images[i], cmap=plt.cm.binary) return figure # Prepare the plot figure = image_grid() # Convert to image and log with file_writer.as_default(): tf.summary.image("Training data", plot_to_image(figure), step=0) %tensorboard --logdir logs/plots ###Output _____no_output_____ ###Markdown Building an image classifierNow put this all together with a real example. After all, you're here to do machine learning and not plot pretty pictures!You're going to use image summaries to understand how well your model is doing while training a simple classifier for the Fashion-MNIST dataset. First, create a very simple model and compile it, setting up the optimizer and loss function. The compile step also specifies that you want to log the accuracy of the classifier along the way. ###Code model = keras.models.Sequential([ keras.layers.Flatten(input_shape=(28, 28)), keras.layers.Dense(32, activation='relu'), keras.layers.Dense(10, activation='softmax') ]) model.compile( optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'] ) ###Output _____no_output_____ ###Markdown When training a classifier, it's useful to see the [confusion matrix](https://en.wikipedia.org/wiki/Confusion_matrix). The confusion matrix gives you detailed knowledge of how your classifier is performing on test data.Define a function that calculates the confusion matrix. You'll use a convenient [Scikit-learn](https://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html) function to do this, and then plot it using matplotlib. ###Code def plot_confusion_matrix(cm, class_names): """ Returns a matplotlib figure containing the plotted confusion matrix. 
Args: cm (array, shape = [n, n]): a confusion matrix of integer classes class_names (array, shape = [n]): String names of the integer classes """ figure = plt.figure(figsize=(8, 8)) plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues) plt.title("Confusion matrix") plt.colorbar() tick_marks = np.arange(len(class_names)) plt.xticks(tick_marks, class_names, rotation=45) plt.yticks(tick_marks, class_names) # Normalize the confusion matrix. cm = np.around(cm.astype('float') / cm.sum(axis=1)[:, np.newaxis], decimals=2) # Use white text if squares are dark; otherwise black. threshold = cm.max() / 2. for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])): color = "white" if cm[i, j] > threshold else "black" plt.text(j, i, cm[i, j], horizontalalignment="center", color=color) plt.tight_layout() plt.ylabel('True label') plt.xlabel('Predicted label') return figure ###Output _____no_output_____ ###Markdown You're now ready to train the classifier and regularly log the confusion matrix along the way.Here's what you'll do:1. Create the [Keras TensorBoard callback](https://www.tensorflow.org/api_docs/python/tf/keras/callbacks/TensorBoard) to log basic metrics2. Create a [Keras LambdaCallback](https://www.tensorflow.org/api_docs/python/tf/keras/callbacks/LambdaCallback) to log the confusion matrix at the end of every epoch3. Train the model using Model.fit(), making sure to pass both callbacksAs training progresses, scroll down to see TensorBoard start up. ###Code # Clear out prior logging data. !rm -rf logs/image logdir = "logs/image/" + datetime.now().strftime("%Y%m%d-%H%M%S") # Define the basic TensorBoard callback. tensorboard_callback = keras.callbacks.TensorBoard(log_dir=logdir) file_writer_cm = tf.summary.create_file_writer(logdir + '/cm') def log_confusion_matrix(epoch, logs): # Use the model to predict the values from the validation dataset. test_pred_raw = model.predict(test_images) test_pred = np.argmax(test_pred_raw, axis=1) # Calculate the confusion matrix. cm = sklearn.metrics.confusion_matrix(test_labels, test_pred) # Log the confusion matrix as an image summary. figure = plot_confusion_matrix(cm, class_names=class_names) cm_image = plot_to_image(figure) # Log the confusion matrix as an image summary. with file_writer_cm.as_default(): tf.summary.image("Confusion Matrix", cm_image, step=epoch) # Define the per-epoch callback. cm_callback = keras.callbacks.LambdaCallback(on_epoch_end=log_confusion_matrix) # Start TensorBoard. %tensorboard --logdir logs/image # Train the classifier. model.fit( train_images, train_labels, epochs=5, verbose=0, # Suppress chatty output callbacks=[tensorboard_callback, cm_callback], validation_data=(test_images, test_labels), ) ###Output _____no_output_____ ###Markdown Copyright 2019 The TensorFlow Authors. ###Code #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
###Output _____no_output_____ ###Markdown Displaying image data in TensorBoard View on TensorFlow.org Run in Google Colab View source on GitHub OverviewUsing the **TensorFlow Image Summary API,** you can easily log tensors and arbitrary images and view them in TensorBoard. This can be extremely helpful to sample and examine your input data, or to [visualize layer weights](http://cs231n.github.io/understanding-cnn/) and [generated tensors](https://hub.packtpub.com/generative-adversarial-networks-using-keras/). You can also log diagnostic data as images that can be helpful in the course of your model development.In this tutorial, you will use learn how to use the Image Summary API to visualize tensors as images. You will also learn how to take an arbitrary image, convert it to a tensor, and visualize it in TensorBoard. You will work through a simple but real example that uses Image Summaries to help you understand how your model is performing. Setup ###Code # Ensure TensorFlow 2.0 is installed. !pip install -q tf-nightly-2.0-preview # Load the TensorBoard notebook extension. %load_ext tensorboard from __future__ import absolute_import from __future__ import division from __future__ import print_function from datetime import datetime import io import itertools from packaging import version from six.moves import range import tensorflow as tf from tensorflow import keras import matplotlib.pyplot as plt import numpy as np import sklearn.metrics print("TensorFlow version: ", tf.__version__) assert version.parse(tf.__version__).release[0] >= 2, \ "This notebook requires TensorFlow 2.0 or above." ###Output TensorFlow version: 2.0.0-dev20190228 ###Markdown Download the Fashion-MNIST datasetYou're going to construct a simple neural network to classify images in the the [Fashion-MNIST](https://research.zalando.com/welcome/mission/research-projects/fashion-mnist/) dataset. This dataset consist of 70,000 28x28 grayscale images of fashion products from 10 categories, with 7,000 images per category.First, download the data: ###Code # Download the data. The data is already divided into train and test. # The labels are integers representing classes. fashion_mnist = keras.datasets.fashion_mnist (train_images, train_labels), (test_images, test_labels) = \ fashion_mnist.load_data() # Names of the integer classes, i.e., 0 -> T-short/top, 1 -> Trouser, etc. 
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot'] ###Output Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/train-labels-idx1-ubyte.gz 32768/29515 [=================================] - 0s 0us/step Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/train-images-idx3-ubyte.gz 26427392/26421880 [==============================] - 0s 0us/step Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/t10k-labels-idx1-ubyte.gz 8192/5148 [===============================================] - 0s 0us/step Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/t10k-images-idx3-ubyte.gz 4423680/4422102 [==============================] - 0s 0us/step ###Markdown Visualizing a single imageTo understand how the Image Summary API works, you're now going to simply log the first training image in your training set in TensorBoard.Before you do that, examine the shape of your training data: ###Code print("Shape: ", train_images[0].shape) print("Label: ", train_labels[0], "->", class_names[train_labels[0]]) ###Output Shape: (28, 28) Label: 9 -> Ankle boot ###Markdown Notice that the shape of each image in the data set is a rank-2 tensor of shape (28, 28), representing the height and the width.However, ```tf.summary.image()``` expects a rank-4 tensor containing ```(batch_size, height, width, channels)```. Therefore, the tensors need to be reshaped. You're logging only one image, so ```batch_size``` is 1. The images are grayscale, so set ```channels``` to 1. ###Code # Reshape the image for the Summary API. img = np.reshape(train_images[0], (-1, 28, 28, 1)) ###Output _____no_output_____ ###Markdown You're now ready to log this image and view it in TensorBoard. ###Code # Clear out any prior log data. !rm -rf logs # Sets up a timestamped log directory. logdir = "logs/train_data/" + datetime.now().strftime("%Y%m%d-%H%M%S") # Creates a file writer for the log directory. file_writer = tf.summary.create_file_writer(logdir) # Using the file writer, log the reshaped image. with file_writer.as_default(): tf.summary.image("Training data", img, step=0) ###Output _____no_output_____ ###Markdown Now, use TensorBoard to examine the image. Wait a few seconds for the UI to spin up. ###Code %tensorboard --logdir logs/train_data ###Output _____no_output_____ ###Markdown The "Images" tab displays the image you just logged. It's an "ankle boot". The image is scaled to a default size for easier viewing. If you want to view the unscaled original image, check "Show actual image size" at the upper left. Play with the brightness and contrast sliders to see how they affect the image pixels. Visualizing multiple imagesLogging one tensor is great, but what if you wanted to log multiple training examples?Simply specify the number of images you want to log when passing data to ```tf.summary.image()```. ###Code with file_writer.as_default(): # Don't forget to reshape. 
images = np.reshape(train_images[0:25], (-1, 28, 28, 1)) tf.summary.image("25 training data examples", images, max_outputs=25, step=0) %tensorboard --logdir logs/train_data ###Output _____no_output_____ ###Markdown Logging arbitrary image dataWhat if you want to visualize an image that's not a tensor, such as an image generated by [matplotlib](https://matplotlib.org/)?You need some boilerplate code to convert the plot to a tensor, but after that, you're good to go.In the code below, you'll log the first 25 images as a nice grid using matplotlib's ```subplot()``` function. You'll then view the grid in TensorBoard: ###Code # Clear out prior logging data. !rm -rf logs/plots logdir = "logs/plots/" + datetime.now().strftime("%Y%m%d-%H%M%S") file_writer = tf.summary.create_file_writer(logdir) def plot_to_image(figure): """Converts the matplotlib plot specified by 'figure' to a PNG image and returns it. The supplied figure is closed and inaccessible after this call.""" # Save the plot to a PNG in memory. buf = io.BytesIO() plt.savefig(buf, format='png') # Closing the figure prevents it from being displayed directly inside # the notebook. plt.close(figure) buf.seek(0) # Convert PNG buffer to TF image image = tf.image.decode_png(buf.getvalue(), channels=4) # Add the batch dimension image = tf.expand_dims(image, 0) return image def image_grid(): """Return a 5x5 grid of the MNIST images as a matplotlib figure.""" # Create a figure to contain the plot. figure = plt.figure(figsize=(10,10)) for i in range(25): # Start next subplot. plt.subplot(5, 5, i + 1, title=class_names[train_labels[i]]) plt.xticks([]) plt.yticks([]) plt.grid(False) plt.imshow(train_images[i], cmap=plt.cm.binary) return figure # Prepare the plot figure = image_grid() # Convert to image and log with file_writer.as_default(): tf.summary.image("Training data", plot_to_image(figure), step=0) %tensorboard --logdir logs/plots ###Output _____no_output_____ ###Markdown Building an image classifierNow put this all together with a real example. After all, you're here to do machine learning and not plot pretty pictures!You're going to use image summaries to understand how well your model is doing while training a simple classifier for the Fashion-MNIST dataset. First, create a very simple model and compile it, setting up the optimizer and loss function. The compile step also specifies that you want to log the accuracy of the classifier along the way. ###Code model = keras.models.Sequential([ keras.layers.Flatten(input_shape=(28, 28)), keras.layers.Dense(32, activation='relu'), keras.layers.Dense(10, activation='softmax') ]) model.compile( optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'] ) ###Output _____no_output_____ ###Markdown When training a classifier, it's useful to see the [confusion matrix](https://en.wikipedia.org/wiki/Confusion_matrix). The confusion matrix gives you detailed knowledge of how your classifier is performing on test data.Define a function that calculates the confusion matrix. You'll use a convenient [Scikit-learn](https://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html) function to do this, and then plot it using matplotlib. ###Code def plot_confusion_matrix(cm, class_names): """ Returns a matplotlib figure containing the plotted confusion matrix. 
Args: cm (array, shape = [n, n]): a confusion matrix of integer classes class_names (array, shape = [n]): String names of the integer classes """ figure = plt.figure(figsize=(8, 8)) plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues) plt.title("Confusion matrix") plt.colorbar() tick_marks = np.arange(len(class_names)) plt.xticks(tick_marks, class_names, rotation=45) plt.yticks(tick_marks, class_names) # Normalize the confusion matrix. cm = np.around(cm.astype('float') / cm.sum(axis=1)[:, np.newaxis], decimals=2) # Use white text if squares are dark; otherwise black. threshold = cm.max() / 2. for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])): color = "white" if cm[i, j] > threshold else "black" plt.text(j, i, cm[i, j], horizontalalignment="center", color=color) plt.tight_layout() plt.ylabel('True label') plt.xlabel('Predicted label') return figure ###Output _____no_output_____ ###Markdown You're now ready to train the classifier and regularly log the confusion matrix along the way.Here's what you'll do:1. Create the [Keras TensorBoard callback](https://www.tensorflow.org/api_docs/python/tf/keras/callbacks/TensorBoard) to log basic metrics2. Create a [Keras LambdaCallback](https://www.tensorflow.org/api_docs/python/tf/keras/callbacks/LambdaCallback) to log the confusion matrix at the end of every epoch3. Train the model using Model.fit(), making sure to pass both callbacksAs training progresses, scroll down to see TensorBoard start up. ###Code # Clear out prior logging data. !rm -rf logs/image logdir = "logs/image/" + datetime.now().strftime("%Y%m%d-%H%M%S") # Define the basic TensorBoard callback. tensorboard_callback = keras.callbacks.TensorBoard(log_dir=logdir) file_writer_cm = tf.summary.create_file_writer(logdir + '/cm') def log_confusion_matrix(epoch, logs): # Use the model to predict the values from the validation dataset. test_pred_raw = model.predict(test_images) test_pred = np.argmax(test_pred_raw, axis=1) # Calculate the confusion matrix. cm = sklearn.metrics.confusion_matrix(test_labels, test_pred) # Log the confusion matrix as an image summary. figure = plot_confusion_matrix(cm, class_names=class_names) cm_image = plot_to_image(figure) # Log the confusion matrix as an image summary. with file_writer_cm.as_default(): tf.summary.image("Confusion Matrix", cm_image, step=epoch) # Define the per-epoch callback. cm_callback = keras.callbacks.LambdaCallback(on_epoch_end=log_confusion_matrix) # Start TensorBoard. %tensorboard --logdir logs/image # Train the classifier. model.fit( train_images, train_labels, epochs=5, verbose=0, # Suppress chatty output callbacks=[tensorboard_callback, cm_callback], validation_data=(test_images, test_labels), ) ###Output _____no_output_____ ###Markdown Copyright 2019 The TensorFlow Authors. ###Code #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
###Output _____no_output_____ ###Markdown Displaying image data in TensorBoard View on TensorFlow.org Run in Google Colab View source on GitHub OverviewUsing the **TensorFlow Image Summary API,** you can easily log tensors and arbitrary images and view them in TensorBoard. This can be extremely helpful to sample and examine your input data, or to [visualize layer weights](http://cs231n.github.io/understanding-cnn/) and [generated tensors](https://hub.packtpub.com/generative-adversarial-networks-using-keras/). You can also log diagnostic data as images that can be helpful in the course of your model development.In this tutorial, you will use learn how to use the Image Summary API to visualize tensors as images. You will also learn how to take an arbitrary image, convert it to a tensor, and visualize it in TensorBoard. You will work through a simple but real example that uses Image Summaries to help you understand how your model is performing. Setup ###Code # Ensure TensorFlow 2.0 is installed. !pip install -q tf-nightly-2.0-preview # Load the TensorBoard notebook extension. %load_ext tensorboard.notebook from __future__ import absolute_import from __future__ import division from __future__ import print_function from datetime import datetime import io import itertools from packaging import version from six.moves import range import tensorflow as tf from tensorflow import keras import matplotlib.pyplot as plt import numpy as np import sklearn.metrics print("TensorFlow version: ", tf.__version__) assert version.parse(tf.__version__).release[0] >= 2, \ "This notebook requires TensorFlow 2.0 or above." ###Output TensorFlow version: 2.0.0-dev20190228 ###Markdown Download the Fashion-MNIST datasetYou're going to construct a simple neural network to classify images in the the [Fashion-MNIST](https://research.zalando.com/welcome/mission/research-projects/fashion-mnist/) dataset. This dataset consist of 70,000 28x28 grayscale images of fashion products from 10 categories, with 7,000 images per category.First, download the data: ###Code # Download the data. The data is already divided into train and test. # The labels are integers representing classes. fashion_mnist = keras.datasets.fashion_mnist (train_images, train_labels), (test_images, test_labels) = \ fashion_mnist.load_data() # Names of the integer classes, i.e., 0 -> T-short/top, 1 -> Trouser, etc. 
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot'] ###Output Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/train-labels-idx1-ubyte.gz 32768/29515 [=================================] - 0s 0us/step Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/train-images-idx3-ubyte.gz 26427392/26421880 [==============================] - 0s 0us/step Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/t10k-labels-idx1-ubyte.gz 8192/5148 [===============================================] - 0s 0us/step Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/t10k-images-idx3-ubyte.gz 4423680/4422102 [==============================] - 0s 0us/step ###Markdown Visualizing a single imageTo understand how the Image Summary API works, you're now going to simply log the first training image in your training set in TensorBoard.Before you do that, examine the shape of your training data: ###Code print("Shape: ", train_images[0].shape) print("Label: ", train_labels[0], "->", class_names[train_labels[0]]) ###Output Shape: (28, 28) Label: 9 -> Ankle boot ###Markdown Notice that the shape of each image in the data set is a rank-2 tensor of shape (28, 28), representing the height and the width.However, ```tf.summary.image()``` expects a rank-4 tensor containing ```(batch_size, height, width, channels)```. Therefore, the tensors need to be reshaped. You're logging only one image, so ```batch_size``` is 1. The images are grayscale, so set ```channels``` to 1. ###Code # Reshape the image for the Summary API. img = np.reshape(train_images[0], (-1, 28, 28, 1)) ###Output _____no_output_____ ###Markdown You're now ready to log this image and view it in TensorBoard. ###Code # Clear out any prior log data. !rm -rf logs # Sets up a timestamped log directory. logdir = "logs/train_data/" + datetime.now().strftime("%Y%m%d-%H%M%S") # Creates a file writer for the log directory. file_writer = tf.summary.create_file_writer(logdir) # Using the file writer, log the reshaped image. with file_writer.as_default(): tf.summary.image("Training data", img, step=0) ###Output _____no_output_____ ###Markdown Now, use TensorBoard to examine the image. Wait a few seconds for the UI to spin up. ###Code %tensorboard --logdir logs/train_data ###Output _____no_output_____ ###Markdown The "Images" tab displays the image you just logged. It's an "ankle boot". The image is scaled to a default size for easier viewing. If you want to view the unscaled original image, check "Show actual image size" at the upper left. Play with the brightness and contrast sliders to see how they affect the image pixels. Visualizing multiple imagesLogging one tensor is great, but what if you wanted to log multiple training examples?Simply specify the number of images you want to log when passing data to ```tf.summary.image()```. ###Code with file_writer.as_default(): # Don't forget to reshape. 
images = np.reshape(train_images[0:25], (-1, 28, 28, 1)) tf.summary.image("25 training data examples", images, max_outputs=25, step=0) %tensorboard --logdir logs/train_data ###Output _____no_output_____ ###Markdown Logging arbitrary image dataWhat if you want to visualize an image that's not a tensor, such as an image generated by [matplotlib](https://matplotlib.org/)?You need some boilerplate code to convert the plot to a tensor, but after that, your're good to go.In the code below, you'll log the first 25 images as a nice grid using matplotlib's ```subplot()``` function. You'll then view the grid in TensorBoard: ###Code # Clear out prior logging data. !rm -rf logs/plots logdir = "logs/plots/" + datetime.now().strftime("%Y%m%d-%H%M%S") file_writer = tf.summary.create_file_writer(logdir) def plot_to_image(figure): """Converts the matplotlib plot specified by 'figure' to a PNG image and returns it. The supplied figure is closed and inaccessible after this call.""" # Save the plot to a PNG in memory. buf = io.BytesIO() plt.savefig(buf, format='png') # Closing the figure prevents it from being displayed directly inside # the notebook. plt.close(figure) buf.seek(0) # Convert PNG buffer to TF image image = tf.image.decode_png(buf.getvalue(), channels=4) # Add the batch dimension image = tf.expand_dims(image, 0) return image def image_grid(): """Return a 5x5 grid of the MNIST images as a matplotlib figure.""" # Create a figure to contain the plot. figure = plt.figure(figsize=(10,10)) for i in range(25): # Start next subplot. plt.subplot(5, 5, i + 1, title=class_names[train_labels[i]]) plt.xticks([]) plt.yticks([]) plt.grid(False) plt.imshow(train_images[i], cmap=plt.cm.binary) return figure # Prepare the plot figure = image_grid() # Convert to image and log with file_writer.as_default(): tf.summary.image("Training data", plot_to_image(figure), step=0) %tensorboard --logdir logs/plots ###Output _____no_output_____ ###Markdown Building an image classifierNow put this all together with a real example. After all, you're here to do machine learning and not plot pretty pictures!You're going to use image summaries to understand how well your model is doing while training a simple classifier for the Fashion-MNIST dataset. First, create a very simple model and compile it, setting up the optimizer and loss function. The compile step also specifies that you want to log the accuracy of the classifier along the way. ###Code model = keras.models.Sequential([ keras.layers.Flatten(input_shape=(28, 28)), keras.layers.Dense(32, activation='relu'), keras.layers.Dense(10, activation='softmax') ]) model.compile( optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'] ) ###Output _____no_output_____ ###Markdown When training a classifier, it's useful to see the [confusion matrix](https://en.wikipedia.org/wiki/Confusion_matrix). The confusion matrix gives you detailed knowledge of how your classifier is performing on test data.Define a function that calculates the confusion matrix. You'll use a convenient [Scikit-learn](https://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html) function to do this, and then plot it using matplotlib. ###Code def plot_confusion_matrix(cm, class_names): """ Returns a matplotlib figure containing the plotted confusion matrix. 
Args: cm (array, shape = [n, n]): a confusion matrix of integer classes class_names (array, shape = [n]): String names of the integer classes """ figure = plt.figure(figsize=(8, 8)) plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues) plt.title("Confusion matrix") plt.colorbar() tick_marks = np.arange(len(class_names)) plt.xticks(tick_marks, class_names, rotation=45) plt.yticks(tick_marks, class_names) # Normalize the confusion matrix. cm = np.around(cm.astype('float') / cm.sum(axis=1)[:, np.newaxis], decimals=2) # Use white text if squares are dark; otherwise black. threshold = cm.max() / 2. for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])): color = "white" if cm[i, j] > threshold else "black" plt.text(j, i, cm[i, j], horizontalalignment="center", color=color) plt.tight_layout() plt.ylabel('True label') plt.xlabel('Predicted label') return figure ###Output _____no_output_____ ###Markdown You're now ready to train the classifier and regularly log the confusion matrix along the way.Here's what you'll do:1. Create the [Keras TensorBoard callback](https://www.tensorflow.org/api_docs/python/tf/keras/callbacks/TensorBoard) to log basic metrics2. Create a [Keras LambdaCallback](https://www.tensorflow.org/api_docs/python/tf/keras/callbacks/LambdaCallback) to log the confusion matrix at the end of every epoch3. Train the model using Model.fit(), making sure to pass both callbacksAs training progresses, scroll down to see TensorBoard start up. ###Code # Clear out prior logging data. !rm -rf logs/image logdir = "logs/image/" + datetime.now().strftime("%Y%m%d-%H%M%S") # Define the basic TensorBoard callback. tensorboard_callback = keras.callbacks.TensorBoard(log_dir=logdir) file_writer_cm = tf.summary.create_file_writer(logdir + '/cm') def log_confusion_matrix(epoch, logs): # Use the model to predict the values from the validation dataset. test_pred_raw = model.predict(test_images) test_pred = np.argmax(test_pred_raw, axis=1) # Calculate the confusion matrix. cm = sklearn.metrics.confusion_matrix(test_labels, test_pred) # Log the confusion matrix as an image summary. figure = plot_confusion_matrix(cm, class_names=class_names) cm_image = plot_to_image(figure) # Log the confusion matrix as an image summary. with file_writer_cm.as_default(): tf.summary.image("Confusion Matrix", cm_image, step=epoch) # Define the per-epoch callback. cm_callback = keras.callbacks.LambdaCallback(on_epoch_end=log_confusion_matrix) # Start TensorBoard. %tensorboard --logdir logs/image # Train the classifier. model.fit( train_images, train_labels, epochs=5, verbose=0, # Suppress chatty output callbacks=[tensorboard_callback, cm_callback], validation_data=(test_images, test_labels), ) ###Output _____no_output_____
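###Markdown The row-wise normalization inside `plot_confusion_matrix` is easy to sanity-check on a toy matrix. The sketch below uses a made-up 3x3 count matrix (purely illustrative, not real model output) and applies the same normalization step, so each row ends up summing to roughly one.
###Code
import numpy as np

# Hypothetical confusion matrix of raw counts; rows are true labels.
toy_cm = np.array([[50,  2,  3],
                   [ 4, 60,  1],
                   [ 2,  5, 70]])

# Same row-wise normalization used in plot_confusion_matrix above.
toy_cm_norm = np.around(toy_cm.astype('float') / toy_cm.sum(axis=1)[:, np.newaxis], decimals=2)
print(toy_cm_norm)
print(toy_cm_norm.sum(axis=1))  # each row is approximately 1.0
###Output _____no_output_____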
finalproject/irmiger_006165257/irmiger_006165257.ipynb
###Markdown Next few inputs are getting general picture of my data ###Code print(df.target.value_counts()) sns.countplot(x="target", data=df, palette="bwr") plt.show() #1 = male, 0 = female print(df.sex.value_counts()) gen = sns.FacetGrid(df) gen.map(plt.hist,"sex") print(df.age.describe()) sns.set() ages = sns.FacetGrid(df) ages.map(plt.hist,"age", color="orange") plt.figure(figsize=(16, 8)) plt.scatter( df['chol'], df['cp'], c='black' ) plt.xlabel("cholesterol") plt.ylabel("chest pain type") plt.show() g = sns.lmplot(x="thal", y="target", data=df, y_jitter=.02, logistic=True) #g.set(xlim=(0, 80), ylim=(-.05, 1.05)) g = sns.lmplot(x="chol", y="target", data=df, y_jitter=.02, logistic=True) g = sns.lmplot(x="thalach", y="target", data=df, y_jitter=.02, logistic=True) g = sns.lmplot(x="exang", y="target", data=df, y_jitter=.02, logistic=True) g = sns.lmplot(x="slope", y="target", data=df, y_jitter=.02, logistic=True) g = sns.lmplot(x="ca", y="target", data=df, y_jitter=.02, logistic=True) g = sns.lmplot(x="trestbps", y="target", data=df, y_jitter=.02, logistic=True) g = sns.lmplot(x="fbs", y="target", data=df, y_jitter=.02, logistic=True) g = sns.lmplot(x="oldpeak", y="target", data=df, y_jitter=.02, logistic=True) g = sns.lmplot(x="restecg", y="target", data=df, y_jitter=.02, logistic=True) g = sns.lmplot(x="sex", y="target", data=df, y_jitter=.02, logistic=True) g = sns.lmplot(x="age", y="target", data=df, y_jitter=.02, logistic=True) g = sns.lmplot(x="restecg", y="target", data=df, y_jitter=.02, logistic=True) cols_to_keep =['target', 'age','oldpeak','ca','thal','chol', 'thalach', 'trestbps'] data=df[cols_to_keep] data.head(5) #set independent variables xData = data[data.columns[1:]] yData = data['target'] logit = sm.Logit(yData,xData) result = logit.fit() print(result.summary()) #oods ratios params = result.params conf = result.conf_int() conf['Odds Ratios'] = params conf.columns = ['2.5%','97.5%','Odds Ratios'] print(np.exp(conf)) ###Output 2.5% 97.5% Odds Ratios age 0.952641 1.015873 0.983749 oldpeak 1.433580 2.686392 1.962437 ca 1.559020 2.984194 2.156946 thal 1.940516 5.179187 3.170220 chol 0.998258 1.011059 1.004638 thalach 0.950650 0.975131 0.962813 trestbps 0.994029 1.027458 1.010605
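###Markdown A natural follow-up to the odds ratios above is to check how well the fitted logit separates the two classes on the training rows themselves. The sketch below only reuses objects already defined in this notebook (`result`, `xData`, `yData`); the 0.5 cutoff is an arbitrary choice rather than anything dictated by the model, and the in-sample accuracy it prints is optimistic by construction.
###Code
# Predicted probabilities of target = 1 from the fitted Logit results.
pred_prob = result.predict(xData)

# Hard labels via an arbitrary 0.5 threshold.
pred_label = (pred_prob > 0.5).astype(int)

# In-sample accuracy (same rows used for fitting, so treat with caution).
print("in-sample accuracy:", (pred_label == yData).mean())
###Output _____no_output_____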
python-qutip/opensystem.ipynb
###Markdown H: Hamiltonian; W: oscillator frequency; here W = 1.\begin{equation}H=-\frac{W}{2}\sigma_{z}\end{equation} ###Code H=-(1/2)*sigmaz() # Hamiltonian H = (-W/2)*sigmaz with oscillator frequency W = 1 P=0.36 H rho=P*fock_dm(2,0)+(1-P)*fock_dm(2,1) psi0=fock(2,0) rho times = np.linspace(0.0, 10.0, 100) result = mesolve(H, psi0, times, [], []) result.states result2= mesolve(H, rho, times, [], []) result2.states ket=basis(2,0) ket1=basis(2,1) jumpOP=ket*ket1.dag() jumpOP result3= mesolve(H, rho, times, [jumpOP], []) result3.states result3= mesolve(H, rho, times, [np.sqrt(0.0004) *jumpOP], [sigmax(),sigmay(),sigmaz()]) fig, ax = plt.subplots() ax.plot(times, result3.expect[2]) fig, ax = plt.subplots() ax.plot(times, result3.expect[2]) ###Output _____no_output_____
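###Markdown Since the last `mesolve` call already requests the expectation values of sigma_x, sigma_y and sigma_z, all three Bloch-vector components can be drawn on one figure. A minimal sketch using only objects defined above:
###Code
# result3.expect holds one array per expectation operator passed to mesolve.
fig, ax = plt.subplots()
for values, label in zip(result3.expect, ['<sigma_x>', '<sigma_y>', '<sigma_z>']):
    ax.plot(times, values, label=label)
ax.set_xlabel('time')
ax.set_ylabel('expectation value')
ax.legend()
###Output _____no_output_____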
Incremental Clustering Assignment/Incremental Clustering.ipynb
###Markdown Imports ###Code from sklearn.cluster import MiniBatchKMeans from sklearn.datasets import load_iris import numpy as np import cv2 ###Output _____no_output_____ ###Markdown Iris ###Code data = load_iris() X = data.data y = data.target y_names = data.target_names print('X =>', X) print('y =>', y) print('y_names =>', y_names) ###Output X => [[5.1 3.5 1.4 0.2] [4.9 3. 1.4 0.2] [4.7 3.2 1.3 0.2] [4.6 3.1 1.5 0.2] [5. 3.6 1.4 0.2] [5.4 3.9 1.7 0.4] [4.6 3.4 1.4 0.3] [5. 3.4 1.5 0.2] [4.4 2.9 1.4 0.2] [4.9 3.1 1.5 0.1] [5.4 3.7 1.5 0.2] [4.8 3.4 1.6 0.2] [4.8 3. 1.4 0.1] [4.3 3. 1.1 0.1] [5.8 4. 1.2 0.2] [5.7 4.4 1.5 0.4] [5.4 3.9 1.3 0.4] [5.1 3.5 1.4 0.3] [5.7 3.8 1.7 0.3] [5.1 3.8 1.5 0.3] [5.4 3.4 1.7 0.2] [5.1 3.7 1.5 0.4] [4.6 3.6 1. 0.2] [5.1 3.3 1.7 0.5] [4.8 3.4 1.9 0.2] [5. 3. 1.6 0.2] [5. 3.4 1.6 0.4] [5.2 3.5 1.5 0.2] [5.2 3.4 1.4 0.2] [4.7 3.2 1.6 0.2] [4.8 3.1 1.6 0.2] [5.4 3.4 1.5 0.4] [5.2 4.1 1.5 0.1] [5.5 4.2 1.4 0.2] [4.9 3.1 1.5 0.2] [5. 3.2 1.2 0.2] [5.5 3.5 1.3 0.2] [4.9 3.6 1.4 0.1] [4.4 3. 1.3 0.2] [5.1 3.4 1.5 0.2] [5. 3.5 1.3 0.3] [4.5 2.3 1.3 0.3] [4.4 3.2 1.3 0.2] [5. 3.5 1.6 0.6] [5.1 3.8 1.9 0.4] [4.8 3. 1.4 0.3] [5.1 3.8 1.6 0.2] [4.6 3.2 1.4 0.2] [5.3 3.7 1.5 0.2] [5. 3.3 1.4 0.2] [7. 3.2 4.7 1.4] [6.4 3.2 4.5 1.5] [6.9 3.1 4.9 1.5] [5.5 2.3 4. 1.3] [6.5 2.8 4.6 1.5] [5.7 2.8 4.5 1.3] [6.3 3.3 4.7 1.6] [4.9 2.4 3.3 1. ] [6.6 2.9 4.6 1.3] [5.2 2.7 3.9 1.4] [5. 2. 3.5 1. ] [5.9 3. 4.2 1.5] [6. 2.2 4. 1. ] [6.1 2.9 4.7 1.4] [5.6 2.9 3.6 1.3] [6.7 3.1 4.4 1.4] [5.6 3. 4.5 1.5] [5.8 2.7 4.1 1. ] [6.2 2.2 4.5 1.5] [5.6 2.5 3.9 1.1] [5.9 3.2 4.8 1.8] [6.1 2.8 4. 1.3] [6.3 2.5 4.9 1.5] [6.1 2.8 4.7 1.2] [6.4 2.9 4.3 1.3] [6.6 3. 4.4 1.4] [6.8 2.8 4.8 1.4] [6.7 3. 5. 1.7] [6. 2.9 4.5 1.5] [5.7 2.6 3.5 1. ] [5.5 2.4 3.8 1.1] [5.5 2.4 3.7 1. ] [5.8 2.7 3.9 1.2] [6. 2.7 5.1 1.6] [5.4 3. 4.5 1.5] [6. 3.4 4.5 1.6] [6.7 3.1 4.7 1.5] [6.3 2.3 4.4 1.3] [5.6 3. 4.1 1.3] [5.5 2.5 4. 1.3] [5.5 2.6 4.4 1.2] [6.1 3. 4.6 1.4] [5.8 2.6 4. 1.2] [5. 2.3 3.3 1. ] [5.6 2.7 4.2 1.3] [5.7 3. 4.2 1.2] [5.7 2.9 4.2 1.3] [6.2 2.9 4.3 1.3] [5.1 2.5 3. 1.1] [5.7 2.8 4.1 1.3] [6.3 3.3 6. 2.5] [5.8 2.7 5.1 1.9] [7.1 3. 5.9 2.1] [6.3 2.9 5.6 1.8] [6.5 3. 5.8 2.2] [7.6 3. 6.6 2.1] [4.9 2.5 4.5 1.7] [7.3 2.9 6.3 1.8] [6.7 2.5 5.8 1.8] [7.2 3.6 6.1 2.5] [6.5 3.2 5.1 2. ] [6.4 2.7 5.3 1.9] [6.8 3. 5.5 2.1] [5.7 2.5 5. 2. ] [5.8 2.8 5.1 2.4] [6.4 3.2 5.3 2.3] [6.5 3. 5.5 1.8] [7.7 3.8 6.7 2.2] [7.7 2.6 6.9 2.3] [6. 2.2 5. 1.5] [6.9 3.2 5.7 2.3] [5.6 2.8 4.9 2. ] [7.7 2.8 6.7 2. ] [6.3 2.7 4.9 1.8] [6.7 3.3 5.7 2.1] [7.2 3.2 6. 1.8] [6.2 2.8 4.8 1.8] [6.1 3. 4.9 1.8] [6.4 2.8 5.6 2.1] [7.2 3. 5.8 1.6] [7.4 2.8 6.1 1.9] [7.9 3.8 6.4 2. ] [6.4 2.8 5.6 2.2] [6.3 2.8 5.1 1.5] [6.1 2.6 5.6 1.4] [7.7 3. 6.1 2.3] [6.3 3.4 5.6 2.4] [6.4 3.1 5.5 1.8] [6. 3. 4.8 1.8] [6.9 3.1 5.4 2.1] [6.7 3.1 5.6 2.4] [6.9 3.1 5.1 2.3] [5.8 2.7 5.1 1.9] [6.8 3.2 5.9 2.3] [6.7 3.3 5.7 2.5] [6.7 3. 5.2 2.3] [6.3 2.5 5. 1.9] [6.5 3. 5.2 2. ] [6.2 3.4 5.4 2.3] [5.9 3. 
5.1 1.8]] y => [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2] y_names => ['setosa' 'versicolor' 'virginica'] ###Markdown Incremental Clustering for IRIS dataset ###Code minibatchKmeans = MiniBatchKMeans(n_clusters=3,random_state=0,batch_size=6) minibatchKmeans.fit(X) y_pred = minibatchKmeans.predict(X) print("accuracy:", (1-(np.count_nonzero(y_pred == y) / len(y))) * 100) ###Output accuracy: 91.33333333333333 ###Markdown Intrusion Detection ###Code def detect(): font = cv2.FONT_HERSHEY_SIMPLEX cam = cv2.VideoCapture(1) cam.set(3, 640) cam.set(4, 480) image_list = [] count = 0 while(True): ret, img = cam.read() gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) if count >= 1: temp = res_list minibatchKmeans = MiniBatchKMeans(n_clusters=3,random_state=0) res_list = minibatchKmeans.fit(gray) if count < 1: temp = res_list count+=1 pred = temp.predict(gray) if False in (temp.labels_ == pred): cv2.putText(img, "Detected", (320, 240), font, 1, (255,255,255), 2) else: cv2.putText(img, "Clean", (320, 240), font, 1, (255,255,255), 2) cv2.imshow('image', img) k = cv2.waitKey(100) & 0xff if k == 27: break cam.release() cv2.destroyAllWindows() detect() ###Output _____no_output_____ ###Markdown Extras ###Code minibatchKmeans = MiniBatchKMeans(n_clusters=3,random_state=0) image1 = cv2.imread('1.jpg') gray1 = cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY) image2 = cv2.imread('2.jpg') gray2 = cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY) res_list = minibatchKmeans.fit(gray1) #static pred = minibatchKmeans.predict(gray2) #with person False in (res_list.labels_ == pred) res_list.labels_ == pred ###Output _____no_output_____
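###Markdown The `detect()` loop above fits a fresh `MiniBatchKMeans` on every frame. scikit-learn also offers `partial_fit`, which updates one and the same model a mini-batch at a time and is closer to the incremental-clustering idea in the title. A rough sketch on the iris features `X` loaded earlier (the chunk size of 30 is arbitrary):
###Code
from sklearn.cluster import MiniBatchKMeans

# Update a single model incrementally, one chunk of rows at a time.
incremental = MiniBatchKMeans(n_clusters=3, random_state=0)
for start in range(0, len(X), 30):
    incremental.partial_fit(X[start:start + 30])

print(incremental.cluster_centers_)
###Output _____no_output_____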
notebooks/classifier_scikitlearn_BaggingClassifier.ipynb
###Markdown Adversarial-Robustness-Toolbox for scikit-learn DecisionTreeClassifier ###Code from sklearn.ensemble import BaggingClassifier from sklearn.datasets import load_iris import numpy as np from matplotlib import pyplot as plt from art.classifiers import SklearnClassifier from art.attacks import ZooAttack from art.utils import load_mnist import warnings warnings.filterwarnings('ignore') ###Output _____no_output_____ ###Markdown 1 Training scikit-learn BaggingClassifier and attacking with ART Zeroth Order Optimization attack ###Code def get_adversarial_examples(x_train, y_train): # Fit BaggingClassifier model = BaggingClassifier() model.fit(X=x_train, y=y_train) # Create ART classifier for scikit-learn BaggingClassifier art_classifier = SklearnClassifier(model=model) # Create ART Zeroth Order Optimization attack zoo = ZooAttack(classifier=art_classifier, confidence=0.0, targeted=False, learning_rate=1e-1, max_iter=20, binary_search_steps=10, initial_const=1e-3, abort_early=True, use_resize=False, use_importance=False, nb_parallel=1, batch_size=1, variable_h=0.2) # Generate adversarial samples with ART Zeroth Order Optimization attack x_train_adv = zoo.generate(x_train) return x_train_adv, model ###Output _____no_output_____ ###Markdown 1.1 Utility functions ###Code def get_data(num_classes): x_train, y_train = load_iris(return_X_y=True) x_train = x_train[y_train < num_classes][:, [0, 1]] y_train = y_train[y_train < num_classes] x_train[:, 0][y_train == 0] *= 2 x_train[:, 1][y_train == 2] *= 2 x_train[:, 0][y_train == 0] -= 3 x_train[:, 1][y_train == 2] -= 2 x_train[:, 0] = (x_train[:, 0] - 4) / (9 - 4) x_train[:, 1] = (x_train[:, 1] - 1) / (6 - 1) return x_train, y_train def plot_results(model, x_train, y_train, x_train_adv, num_classes): fig, axs = plt.subplots(1, num_classes, figsize=(num_classes * 5, 5)) colors = ['orange', 'blue', 'green'] for i_class in range(num_classes): # Plot difference vectors for i in range(y_train[y_train == i_class].shape[0]): x_1_0 = x_train[y_train == i_class][i, 0] x_1_1 = x_train[y_train == i_class][i, 1] x_2_0 = x_train_adv[y_train == i_class][i, 0] x_2_1 = x_train_adv[y_train == i_class][i, 1] if x_1_0 != x_2_0 or x_1_1 != x_2_1: axs[i_class].plot([x_1_0, x_2_0], [x_1_1, x_2_1], c='black', zorder=1) # Plot benign samples for i_class_2 in range(num_classes): axs[i_class].scatter(x_train[y_train == i_class_2][:, 0], x_train[y_train == i_class_2][:, 1], s=20, zorder=2, c=colors[i_class_2]) axs[i_class].set_aspect('equal', adjustable='box') # Show predicted probability as contour plot h = .01 x_min, x_max = 0, 1 y_min, y_max = 0, 1 xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h)) Z_proba = model.predict_proba(np.c_[xx.ravel(), yy.ravel()]) Z_proba = Z_proba[:, i_class].reshape(xx.shape) im = axs[i_class].contourf(xx, yy, Z_proba, levels=[0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0], vmin=0, vmax=1) if i_class == num_classes - 1: cax = fig.add_axes([0.95, 0.2, 0.025, 0.6]) plt.colorbar(im, ax=axs[i_class], cax=cax) # Plot adversarial samples for i in range(y_train[y_train == i_class].shape[0]): x_1_0 = x_train[y_train == i_class][i, 0] x_1_1 = x_train[y_train == i_class][i, 1] x_2_0 = x_train_adv[y_train == i_class][i, 0] x_2_1 = x_train_adv[y_train == i_class][i, 1] if x_1_0 != x_2_0 or x_1_1 != x_2_1: axs[i_class].scatter(x_2_0, x_2_1, zorder=2, c='red', marker='X') axs[i_class].set_xlim((x_min, x_max)) axs[i_class].set_ylim((y_min, y_max)) axs[i_class].set_title('class ' + str(i_class)) 
axs[i_class].set_xlabel('feature 1') axs[i_class].set_ylabel('feature 2') ###Output _____no_output_____ ###Markdown 2 Example: Iris dataset legend- colored background: probability of class i- orange circles: class 1- blue circles: class 2- green circles: class 3- red crosses: adversarial samples for class i ###Code num_classes = 2 x_train, y_train = get_data(num_classes=num_classes) x_train_adv, model = get_adversarial_examples(x_train, y_train) plot_results(model, x_train, y_train, x_train_adv, num_classes) num_classes = 3 x_train, y_train = get_data(num_classes=num_classes) x_train_adv, model = get_adversarial_examples(x_train, y_train) plot_results(model, x_train, y_train, x_train_adv, num_classes) ###Output _____no_output_____ ###Markdown 3 Example: MNIST 3.1 Load and transform MNIST dataset ###Code (x_train, y_train), (x_test, y_test), min_, max_ = load_mnist() n_samples_train = x_train.shape[0] n_features_train = x_train.shape[1] * x_train.shape[2] * x_train.shape[3] n_samples_test = x_test.shape[0] n_features_test = x_test.shape[1] * x_test.shape[2] * x_test.shape[3] x_train = x_train.reshape(n_samples_train, n_features_train) x_test = x_test.reshape(n_samples_test, n_features_test) y_train = np.argmax(y_train, axis=1) y_test = np.argmax(y_test, axis=1) n_samples_max = 200 x_train = x_train[0:n_samples_max] y_train = y_train[0:n_samples_max] x_test = x_test[0:n_samples_max] y_test = y_test[0:n_samples_max] ###Output _____no_output_____ ###Markdown 3.2 Train BaggingClassifier classifier ###Code model = BaggingClassifier(base_estimator=None, n_estimators=10, max_samples=1.0, max_features=1.0, bootstrap=True, bootstrap_features=False, oob_score=False, warm_start=False, n_jobs=None, random_state=None, verbose=0) model.fit(X=x_train, y=y_train) ###Output _____no_output_____ ###Markdown 3.3 Create and apply Zeroth Order Optimization Attack with ART ###Code art_classifier = SklearnClassifier(model=model) zoo = ZooAttack(classifier=art_classifier, confidence=0.0, targeted=False, learning_rate=1e-1, max_iter=100, binary_search_steps=20, initial_const=1e-3, abort_early=True, use_resize=False, use_importance=False, nb_parallel=10, batch_size=1, variable_h=0.25) x_train_adv = zoo.generate(x_train) x_test_adv = zoo.generate(x_test) ###Output _____no_output_____ ###Markdown 3.4 Evaluate BaggingClassifier on benign and adversarial samples ###Code score = model.score(x_train, y_train) print("Benign Training Score: %.4f" % score) plt.matshow(x_train[0, :].reshape((28, 28))) plt.clim(0, 1) prediction = model.predict(x_train[0:1, :])[0] print("Benign Training Predicted Label: %i" % prediction) score = model.score(x_train_adv, y_train) print("Adversarial Training Score: %.4f" % score) plt.matshow(x_train_adv[0, :].reshape((28, 28))) plt.clim(0, 1) prediction = model.predict(x_train_adv[0:1, :])[0] print("Adversarial Training Predicted Label: %i" % prediction) score = model.score(x_test, y_test) print("Benign Test Score: %.4f" % score) plt.matshow(x_test[0, :].reshape((28, 28))) plt.clim(0, 1) prediction = model.predict(x_test[0:1, :])[0] print("Benign Test Predicted Label: %i" % prediction) score = model.score(x_test_adv, y_test) print("Adversarial Test Score: %.4f" % score) plt.matshow(x_test_adv[0, :].reshape((28, 28))) plt.clim(0, 1) prediction = model.predict(x_test_adv[0:1, :])[0] print("Adversarial Test Predicted Label: %i" % prediction) ###Output Adversarial Test Predicted Label: 6 ###Markdown Adversarial-Robustness-Toolbox for scikit-learn DecisionTreeClassifier ###Code from sklearn.ensemble 
import BaggingClassifier from sklearn.datasets import load_iris import numpy as np from matplotlib import pyplot as plt from art.estimators.classification import SklearnClassifier from art.attacks.evasion import ZooAttack from art.utils import load_mnist import warnings warnings.filterwarnings('ignore') ###Output _____no_output_____ ###Markdown 1 Training scikit-learn BaggingClassifier and attacking with ART Zeroth Order Optimization attack ###Code def get_adversarial_examples(x_train, y_train): # Fit BaggingClassifier model = BaggingClassifier() model.fit(X=x_train, y=y_train) # Create ART classifier for scikit-learn BaggingClassifier art_classifier = SklearnClassifier(model=model) # Create ART Zeroth Order Optimization attack zoo = ZooAttack(classifier=art_classifier, confidence=0.0, targeted=False, learning_rate=1e-1, max_iter=20, binary_search_steps=10, initial_const=1e-3, abort_early=True, use_resize=False, use_importance=False, nb_parallel=1, batch_size=1, variable_h=0.2) # Generate adversarial samples with ART Zeroth Order Optimization attack x_train_adv = zoo.generate(x_train) return x_train_adv, model ###Output _____no_output_____ ###Markdown 1.1 Utility functions ###Code def get_data(num_classes): x_train, y_train = load_iris(return_X_y=True) x_train = x_train[y_train < num_classes][:, [0, 1]] y_train = y_train[y_train < num_classes] x_train[:, 0][y_train == 0] *= 2 x_train[:, 1][y_train == 2] *= 2 x_train[:, 0][y_train == 0] -= 3 x_train[:, 1][y_train == 2] -= 2 x_train[:, 0] = (x_train[:, 0] - 4) / (9 - 4) x_train[:, 1] = (x_train[:, 1] - 1) / (6 - 1) return x_train, y_train def plot_results(model, x_train, y_train, x_train_adv, num_classes): fig, axs = plt.subplots(1, num_classes, figsize=(num_classes * 5, 5)) colors = ['orange', 'blue', 'green'] for i_class in range(num_classes): # Plot difference vectors for i in range(y_train[y_train == i_class].shape[0]): x_1_0 = x_train[y_train == i_class][i, 0] x_1_1 = x_train[y_train == i_class][i, 1] x_2_0 = x_train_adv[y_train == i_class][i, 0] x_2_1 = x_train_adv[y_train == i_class][i, 1] if x_1_0 != x_2_0 or x_1_1 != x_2_1: axs[i_class].plot([x_1_0, x_2_0], [x_1_1, x_2_1], c='black', zorder=1) # Plot benign samples for i_class_2 in range(num_classes): axs[i_class].scatter(x_train[y_train == i_class_2][:, 0], x_train[y_train == i_class_2][:, 1], s=20, zorder=2, c=colors[i_class_2]) axs[i_class].set_aspect('equal', adjustable='box') # Show predicted probability as contour plot h = .01 x_min, x_max = 0, 1 y_min, y_max = 0, 1 xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h)) Z_proba = model.predict_proba(np.c_[xx.ravel(), yy.ravel()]) Z_proba = Z_proba[:, i_class].reshape(xx.shape) im = axs[i_class].contourf(xx, yy, Z_proba, levels=[0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0], vmin=0, vmax=1) if i_class == num_classes - 1: cax = fig.add_axes([0.95, 0.2, 0.025, 0.6]) plt.colorbar(im, ax=axs[i_class], cax=cax) # Plot adversarial samples for i in range(y_train[y_train == i_class].shape[0]): x_1_0 = x_train[y_train == i_class][i, 0] x_1_1 = x_train[y_train == i_class][i, 1] x_2_0 = x_train_adv[y_train == i_class][i, 0] x_2_1 = x_train_adv[y_train == i_class][i, 1] if x_1_0 != x_2_0 or x_1_1 != x_2_1: axs[i_class].scatter(x_2_0, x_2_1, zorder=2, c='red', marker='X') axs[i_class].set_xlim((x_min, x_max)) axs[i_class].set_ylim((y_min, y_max)) axs[i_class].set_title('class ' + str(i_class)) axs[i_class].set_xlabel('feature 1') axs[i_class].set_ylabel('feature 2') ###Output _____no_output_____ ###Markdown 2 
Example: Iris dataset legend- colored background: probability of class i- orange circles: class 1- blue circles: class 2- green circles: class 3- red crosses: adversarial samples for class i ###Code num_classes = 2 x_train, y_train = get_data(num_classes=num_classes) x_train_adv, model = get_adversarial_examples(x_train, y_train) plot_results(model, x_train, y_train, x_train_adv, num_classes) num_classes = 3 x_train, y_train = get_data(num_classes=num_classes) x_train_adv, model = get_adversarial_examples(x_train, y_train) plot_results(model, x_train, y_train, x_train_adv, num_classes) ###Output ZOO: 100%|██████████| 150/150 [00:24<00:00, 6.23it/s] ###Markdown 3 Example: MNIST 3.1 Load and transform MNIST dataset ###Code (x_train, y_train), (x_test, y_test), min_, max_ = load_mnist() n_samples_train = x_train.shape[0] n_features_train = x_train.shape[1] * x_train.shape[2] * x_train.shape[3] n_samples_test = x_test.shape[0] n_features_test = x_test.shape[1] * x_test.shape[2] * x_test.shape[3] x_train = x_train.reshape(n_samples_train, n_features_train) x_test = x_test.reshape(n_samples_test, n_features_test) y_train = np.argmax(y_train, axis=1) y_test = np.argmax(y_test, axis=1) n_samples_max = 200 x_train = x_train[0:n_samples_max] y_train = y_train[0:n_samples_max] x_test = x_test[0:n_samples_max] y_test = y_test[0:n_samples_max] ###Output _____no_output_____ ###Markdown 3.2 Train BaggingClassifier classifier ###Code model = BaggingClassifier(base_estimator=None, n_estimators=10, max_samples=1.0, max_features=1.0, bootstrap=True, bootstrap_features=False, oob_score=False, warm_start=False, n_jobs=None, random_state=None, verbose=0) model.fit(X=x_train, y=y_train) ###Output _____no_output_____ ###Markdown 3.3 Create and apply Zeroth Order Optimization Attack with ART ###Code art_classifier = SklearnClassifier(model=model) zoo = ZooAttack(classifier=art_classifier, confidence=0.0, targeted=False, learning_rate=1e-1, max_iter=100, binary_search_steps=20, initial_const=1e-3, abort_early=True, use_resize=False, use_importance=False, nb_parallel=10, batch_size=1, variable_h=0.25) x_train_adv = zoo.generate(x_train) x_test_adv = zoo.generate(x_test) ###Output ZOO: 100%|██████████| 200/200 [05:53<00:00, 1.77s/it] ###Markdown 3.4 Evaluate BaggingClassifier on benign and adversarial samples ###Code score = model.score(x_train, y_train) print("Benign Training Score: %.4f" % score) plt.matshow(x_train[0, :].reshape((28, 28))) plt.clim(0, 1) prediction = model.predict(x_train[0:1, :])[0] print("Benign Training Predicted Label: %i" % prediction) score = model.score(x_train_adv, y_train) print("Adversarial Training Score: %.4f" % score) plt.matshow(x_train_adv[0, :].reshape((28, 28))) plt.clim(0, 1) prediction = model.predict(x_train_adv[0:1, :])[0] print("Adversarial Training Predicted Label: %i" % prediction) score = model.score(x_test, y_test) print("Benign Test Score: %.4f" % score) plt.matshow(x_test[0, :].reshape((28, 28))) plt.clim(0, 1) prediction = model.predict(x_test[0:1, :])[0] print("Benign Test Predicted Label: %i" % prediction) score = model.score(x_test_adv, y_test) print("Adversarial Test Score: %.4f" % score) plt.matshow(x_test_adv[0, :].reshape((28, 28))) plt.clim(0, 1) prediction = model.predict(x_test_adv[0:1, :])[0] print("Adversarial Test Predicted Label: %i" % prediction) ###Output Adversarial Test Predicted Label: 6
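###Markdown A quick way to put the accuracy drop in context is to measure how large the ZOO perturbations actually are. The sketch below compares benign and adversarial test samples with a simple L2 norm; any other norm would serve equally well.
###Code
import numpy as np

# Per-sample L2 distance between each benign image and its adversarial counterpart.
l2 = np.linalg.norm(x_test_adv - x_test, axis=1)
print("mean L2 perturbation:", l2.mean())
print("max L2 perturbation:", l2.max())
###Output _____no_output_____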
notebooks/1.0_DataCompile.ipynb
###Markdown Load the hourly historical data ###Code %load_ext autoreload %autoreload 2 %matplotlib inline import sys sys.path.append('../') import pandas as pd from matplotlib import pyplot as plt import seaborn as sns import os from dotenv import load_dotenv, find_dotenv # data directories load_dotenv(find_dotenv(), verbose=True) dir_project = os.getenv('PROJECT_ROOT') dir_data_raw = os.path.join(dir_project, 'data/raw/') dir_data_interim = os.path.join(dir_project, 'data/interim/') dir_data_processed = os.path.join(dir_project, 'data/processed/') dir_data_external = os.path.join(dir_project, 'data/external/') dir_models = os.path.join(dir_project, 'models/') all_dfs_hourly = pd.read_excel(os.path.join(dir_data_raw, 'DATA HYDRO IVADO Data VF 2017-2020YTD.xlsx'), sheet_name=None) ###Output _____no_output_____ ###Markdown The original excel file has several sheets: ###Code all_dfs_hourly.keys() for df_key in all_dfs_hourly: print(df_key, len(all_dfs_hourly[df_key])) ###Output Demand 30824 Wind 30828 Zonal Demand 30828 Weather 30816 HOEP Price 30820 Intertie Flow 30816 Generator Output 30816 Hourly by Generator Name 30816 ###Markdown Sheet-wise preprocess Some of the sheets have different time format, we unify them first: - `Weather` ###Code df_weather = all_dfs_hourly['Weather'] # Fusion date and hour df_weather['Fusionné'] = df_weather['Date'].dt.strftime('%m/%d/%Y') + ' ' + (df_weather['Hour']-1).astype(str) + ':00' df_weather['Fusionné'] = pd.to_datetime(df_weather['Fusionné']) df_weather.head() ###Output _____no_output_____ ###Markdown - `Intertie Flow` & `Hourly by Generator Name` Similar to the above: ###Code sheet = 'Intertie Flow' df_temp = all_dfs_hourly[sheet] df_temp['Fusionné'] = df_temp['date'].dt.strftime('%m/%d/%Y') + ' ' + (df_temp['hour'].astype(int)-1).astype(str) + ':00' df_temp['Fusionné'] = pd.to_datetime(df_temp['Fusionné']) sheet = 'Hourly by Generator Name' df_temp = all_dfs_hourly[sheet] df_temp['Fusionné'] = df_temp['Date'].dt.strftime('%m/%d/%Y') + ' ' + (df_temp['Hour']-1).astype(str) + ':00' df_temp['Fusionné'] = pd.to_datetime(df_temp['Fusionné']) ###Output _____no_output_____ ###Markdown - Unify the col name for `Wind` ###Code df_wind = all_dfs_hourly['Wind'] df_wind.rename({'Date': 'Fusionné'}, axis='columns', inplace=True) # clarify the col name df_wind.rename({'Réalisé': 'wind_realized', 'Prévisionnel': 'wind_provisional'}, axis='columns', inplace=True) ###Output _____no_output_____ ###Markdown - Drop duplicated `Ontario Demand` from one of the `Demand` and `Zonal Demand` sheets. ###Code df_demand = all_dfs_hourly['Demand'] df_demand.drop(columns=['Ontario Demand'], inplace=True) ###Output _____no_output_____ ###Markdown Concatenate different sheets (variables) ###Code all_dfs_hourly.pop('key Generator name - fuel type') from functools import reduce df_merged = reduce(lambda left,right: pd.merge(left,right,on=['Fusionné'], how='outer'), all_dfs_hourly.values()) df_merged.rename({'Fusionné': 'date'}, axis='columns', inplace=True) len(df_merged.columns) len(df_merged) ###Output _____no_output_____ ###Markdown Output: ###Code df_merged.to_csv(os.path.join(dir_data_interim, 'hourly_data_merged_all_variables.csv'), index=False) ###Output _____no_output_____
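###Markdown Before handing `df_merged` to downstream notebooks, a quick sanity check that the hourly timestamps are unique and contiguous can save debugging later. A small sketch (it assumes the merged data is meant to be strictly hourly):
###Code
# Duplicated timestamps introduced by the outer merge, if any.
print("duplicated timestamps:", df_merged['date'].duplicated().sum())

# Compare against a complete hourly range to spot missing hours.
full_range = pd.date_range(df_merged['date'].min(), df_merged['date'].max(), freq='H')
print("missing hours:", len(full_range.difference(pd.DatetimeIndex(df_merged['date']))))
###Output _____no_output_____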
week05_nlp/part2_pytorch.ipynb
###Markdown Natural Language Processing with Deep Learning (7 points) Today we're gonna apply the newly learned DL tools for sequence processing to the task of predicting job salary. Special thanks to [Oleg Vasilev](https://github.com/Omrigan/) for the assignment core (originally written for theano/tensorflow). A pretty good notebook; I will need to come back to it later and finish it. ###Code import numpy as np import pandas as pd import matplotlib.pyplot as plt %matplotlib inline ###Output _____no_output_____ ###Markdown About the challenge For starters, let's download the data from __[here](https://yadi.sk/d/vVEOWPFY3NruT7)__. You can also get it from the competition [page](https://www.kaggle.com/c/job-salary-prediction/data) (in that case, pick `Train_rev1.*`). Our task is to predict one number, __SalaryNormalized__, in the sense of minimizing __Mean Absolute Error__. To do so, our model can access a number of features:* Free text: __`Title`__ and __`FullDescription`__* Categorical: __`Category`__, __`Company`__, __`LocationNormalized`__, __`ContractType`__, and __`ContractTime`__. You can read more [in the official description](https://www.kaggle.com/c/job-salary-predictiondescription). ###Code data = pd.read_csv("./Train_rev1.csv", index_col=None) data['Log1pSalary'] = np.log1p(data['SalaryNormalized']).astype('float32') text_columns = ["Title", "FullDescription"] categorical_columns = ["Category", "Company", "LocationNormalized", "ContractType", "ContractTime"] target_column = "Log1pSalary" data[categorical_columns] = data[categorical_columns].fillna('NaN') # cast nan to string data.sample(3) ###Output _____no_output_____ ###Markdown The NLP part To even begin training our neural network, we're gonna need to preprocess the text features: tokenize them and build the token vocabularies. Since it is not an NLP course, we're gonna use simple built-in NLTK tokenization. ###Code print("Before") print(data["Title"][::100000]) import nltk tokenizer = nltk.tokenize.WordPunctTokenizer() for col in text_columns: data[col] = data[col].apply(lambda l: ' '.join(tokenizer.tokenize(str(l).lower()))) ###Output _____no_output_____ ###Markdown Now we can assume that our text is a space-separated list of tokens: ###Code print("After") print(data["Title"][::100000]) ###Output _____no_output_____ ###Markdown Not all words are equally useful. Some of them are typos or rare words that are only present a few times. Let's see how many times each word is present in the data so that we can build a "white list" of known words. ###Code from collections import Counter token_counts = Counter() # Count how many times each token occurs in "Title" and "FullDescription" <YOUR CODE HERE> print("Total unique tokens :", len(token_counts)) print('\n'.join(map(str, token_counts.most_common(n=5)))) print('...') print('\n'.join(map(str, token_counts.most_common()[-3:]))) assert token_counts.most_common(1)[0][1] in range(2600000, 2700000) assert len(token_counts) in range(200000, 210000) print('Correct!') # Let's see how many words there are for each count _=plt.hist(list(token_counts.values()), range=[0, 10**4], bins=50, log=True) plt.xlabel("Counts") ###Output _____no_output_____ ###Markdown __Task 1.1__ Get a list of all tokens that occur at least 10 times. 
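###Markdown As a reminder of the `Counter` pattern this task relies on, here is a toy sketch of filtering by count; the words and numbers are made up, and the graded cell below still defines the actual requirements.
###Code
from collections import Counter

toy_counts = Counter({'engineer': 12, 'zzz_typo': 1, 'manager': 40})

# Keep only the keys whose count clears a threshold.
frequent = [w for w, c in toy_counts.items() if c >= 10]
print(frequent)  # ['engineer', 'manager']
###Output _____no_output_____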
###Code min_count = 10 # tokens from token_counts keys that had at least min_count occurrences throughout the dataset tokens = <YOUR CODE HERE> # Add a special tokens for unknown and empty words UNK, PAD = "UNK", "PAD" tokens = [UNK, PAD] + tokens print("Tokens left:", len(tokens)) assert type(tokens)==list assert len(tokens) in range(32000,35000) assert 'me' in tokens assert UNK in tokens print("Correct!") ###Output _____no_output_____ ###Markdown __Task 1.2__ Build an inverse token index: a dictionary from token(string) to it's index in `tokens` (int) ###Code token_to_id = <your code here> assert isinstance(token_to_id, dict) assert len(token_to_id) == len(tokens) for tok in tokens: assert tokens[token_to_id[tok]] == tok print("Correct!") ###Output _____no_output_____ ###Markdown And finally, let's use the vocabulary you've built to map text lines into torch-digestible matrices. ###Code UNK_IX, PAD_IX = map(token_to_id.get, [UNK, PAD]) def as_matrix(sequences, max_len=None): """ Convert a list of tokens into a matrix with padding """ if isinstance(sequences[0], str): sequences = list(map(str.split, sequences)) max_len = min(max(map(len, sequences)), max_len or float('inf')) matrix = np.full((len(sequences), max_len), np.int32(PAD_IX)) for i,seq in enumerate(sequences): row_ix = [token_to_id.get(word, UNK_IX) for word in seq[:max_len]] matrix[i, :len(row_ix)] = row_ix return matrix #### print("Lines:") print('\n'.join(data["Title"][::100000].values), end='\n\n') print("Matrix:") print(as_matrix(data["Title"][::100000])) ###Output _____no_output_____ ###Markdown Now let's encode the categirical data we have.As usual, we shall use one-hot encoding for simplicity. Kudos if you implement tf-idf, target averaging or pseudo-counter-based encoding. ###Code from sklearn.feature_extraction import DictVectorizer # we only consider top-1k most frequent companies to minimize memory usage top_companies, top_counts = zip(*Counter(data['Company']).most_common(1000)) recognized_companies = set(top_companies) data["Company"] = data["Company"].apply(lambda comp: comp if comp in recognized_companies else "Other") categorical_vectorizer = DictVectorizer(dtype=np.float32, sparse=False) categorical_vectorizer.fit(data[categorical_columns].apply(dict, axis=1)) ###Output _____no_output_____ ###Markdown The data science partOnce we've learned to tokenize the data, let's design a machine learning experiment.As before, we won't focus too much on validation, opting for a simple train-test split.__To be completely rigorous,__ we've comitted a small crime here: we used the whole data for tokenization and vocabulary building. A more strict way would be to do that part on training set only. You may want to do that and measure the magnitude of changes. ###Code from sklearn.model_selection import train_test_split data_train, data_val = train_test_split(data, test_size=0.1, random_state=42) print("Train size = ", len(data_train)) print("Validation size = ", len(data_val)) def generate_batch(data, batch_size=None, replace=True, max_len=None): """ Creates a pytorch-friendly dict from the batch data. 
    :returns: a dict with {'Title': int64 [batch, title_max_len], 'FullDescription': int64 [batch, desc_max_len],
              'Categorical': float32 [batch, n_cat_features], 'Log1pSalary': float32 [batch] (if available)}
    """
    if batch_size is not None:
        data = data.sample(batch_size, replace=replace)

    batch = {}
    for col in text_columns:
        batch[col] = as_matrix(data[col].values, max_len)

    batch['Categorical'] = categorical_vectorizer.transform(data[categorical_columns].apply(dict, axis=1))

    if target_column in data.columns:
        batch[target_column] = data[target_column].values

    return batch

generate_batch(data_train, 3, max_len=10)
###Output
_____no_output_____
###Markdown
Finally, let's talk deep learning
Our model consists of three branches:
* Title encoder
* Description encoder
* Categorical features encoder
We will then feed all 3 branches into one common network that predicts salary.
![scheme](https://github.com/yandexdataschool/Practical_DL/raw/master/homework04/conv_salary_architecture.png)
By default, both text vectorizers shall use 1d convolutions, followed by global pooling over time.
###Code
import torch, torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable

class GlobalMaxPooling(nn.Module):
    def __init__(self, dim=-1):
        super(self.__class__, self).__init__()
        self.dim = dim

    def forward(self, x):
        return x.max(dim=self.dim)[0]

class TitleEncoder(nn.Module):
    def __init__(self, n_tokens=len(tokens), out_size=64):
        """ A simple sequential encoder for titles.
        x -> emb -> conv -> global_max -> relu -> dense
        """
        super(self.__class__, self).__init__()
        self.emb = nn.Embedding(n_tokens, 64, padding_idx=PAD_IX)
        self.conv1 = nn.Conv1d(64, out_size, kernel_size=3, padding=1)
        self.pool1 = GlobalMaxPooling()
        self.dense = nn.Linear(out_size, out_size)

    def forward(self, text_ix):
        """
        :param text_ix: int64 Variable of shape [batch_size, max_len]
        :returns: float32 Variable of shape [batch_size, out_size]
        """
        h = self.emb(text_ix)

        # we transpose from [batch, time, units] to [batch, units, time] to fit Conv1d dim order
        h = torch.transpose(h, 1, 2)

        # Apply the layers as defined above. Add some ReLUs before dense.
        <YOUR CODE>

        return <YOUR CODE>

title_encoder = TitleEncoder(out_size=64)

dummy_x = Variable(torch.LongTensor(generate_batch(data_train, 3)['Title']))
dummy_v = title_encoder(dummy_x)
assert isinstance(dummy_v, Variable)
assert tuple(dummy_v.shape) == (dummy_x.shape[0], 64)
del title_encoder
print("Seems fine")
###Output
_____no_output_____
###Markdown
__Task 2.1__ Create description encoder
###Code
# Define an encoder for job descriptions.
# Use any means you want as long as it's a torch.nn.Module.
<YOUR CODE HERE>

desc_encoder = <Create description encoder>

dummy_x = Variable(torch.LongTensor(generate_batch(data_train, 3)['FullDescription']))
dummy_v = desc_encoder(dummy_x)
assert isinstance(dummy_v, Variable)
assert tuple(dummy_v.shape) == (dummy_x.shape[0], 64)
del desc_encoder
print("Seems fine too")
###Output
_____no_output_____
###Markdown
__Task 2.2__ Build one network ~~to rule them all~~
###Code
class FullNetwork(nn.Module):
    """
    This class does all the steps from (title, desc, categorical) features -> predicted target.
    It unites the title & desc encoders you defined above as well as some layers for the head and the categorical branch.
    """
    def __init__(self, n_tokens=len(tokens), n_cat_features=len(categorical_vectorizer.vocabulary_)):
        super(self.__class__, self).__init__()
        self.title_encoder = TitleEncoder(out_size=64)
        self.desc_encoder = <YOUR CODE>

        # define layers for categorical features. A few dense layers would do.
        <YOUR CODE>

        # define "output" layers that turn the three encoded vectors into the answer
        <YOUR CODE>

    def forward(self, title_ix, desc_ix, cat_features):
        """
        :param title_ix: int32 Variable [batch, title_len], job titles encoded by as_matrix
        :param desc_ix: int32 Variable [batch, desc_len], job descriptions encoded by as_matrix
        :param cat_features: float32 Variable [batch, n_cat_features]
        :returns: float32 Variable 1d [batch], predicted log1p-salary
        """
        # process each data source with its respective encoder
        title_h = self.title_encoder(title_ix)
        desc_h = <YOUR CODE>

        # apply categorical encoder
        cat_h = <YOUR CODE>

        # concatenate all vectors together...
        joint_h = torch.cat([title_h, desc_h, cat_h], dim=1)

        # ... and stack a few more layers at the top
        <YOUR CODE>
        # Note 1: do not forget to select the first column, [:, 0], to get 1d outputs
        # Note 2: please do not use output nonlinearities.

        return <YOUR CODE>

model = FullNetwork()
opt = torch.optim.Adam(model.parameters(), lr=1e-3)

# test it on one batch
batch = generate_batch(data_train, 32)
title_ix = Variable(torch.LongTensor(batch["Title"]))
desc_ix = Variable(torch.LongTensor(batch["FullDescription"]))
cat_features = Variable(torch.FloatTensor(batch["Categorical"]))
reference = Variable(torch.FloatTensor(batch[target_column]))

prediction = model(title_ix, desc_ix, cat_features)
assert len(prediction.shape) == 1 and prediction.shape[0] == title_ix.shape[0]

def compute_loss(reference, prediction):
    """
    Computes the objective for minimization.
    By default we minimize MSE, but you are encouraged to try mixing MSE, MAE, Huber loss, etc.
    """
    return torch.mean((prediction - reference) ** 2)

def compute_mae(reference, prediction):
    """ Compute MAE on actual salary, assuming your model outputs log1p(salary) """
    # expm1 inverts the log1p transform (the original exp(x - 1) did not)
    return torch.abs(torch.expm1(reference) - torch.expm1(prediction)).mean()

loss = compute_loss(reference, prediction)
dummy_grads = torch.autograd.grad(loss, model.parameters(), retain_graph=True)
for grad in dummy_grads:
    assert grad is not None and not (grad == 0).all(), "Some model parameters received zero grads. " \
                                                       "Double-check that your model uses all its layers."
###Output
_____no_output_____
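###Markdown
The `compute_loss` docstring above invites mixing several objectives. As a hedged illustration (not part of the assignment's provided code), one possible blend of MSE and Huber loss could look like the sketch below; the function name `compute_loss_mixed` and the weight `alpha` are made up for the example.
###Code
import torch.nn.functional as F

def compute_loss_mixed(reference, prediction, alpha=0.5):
    """ Sketch: alpha * MSE + (1 - alpha) * smooth L1 (Huber) on the log1p-salary targets. """
    mse = F.mse_loss(prediction, reference)
    huber = F.smooth_l1_loss(prediction, reference)
    return alpha * mse + (1 - alpha) * huber
###Output
_____no_output_____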
###Markdown
Let's train it!
###Code
from tqdm import tnrange

def iterate_minibatches(data, batch_size=32, max_len=None, max_batches=None, shuffle=True, verbose=True):
    indices = np.arange(len(data))
    if shuffle:
        indices = np.random.permutation(indices)
    if max_batches is not None:
        indices = indices[: batch_size * max_batches]

    irange = tnrange if verbose else range
    for start in irange(0, len(indices), batch_size):
        yield generate_batch(data.iloc[indices[start : start + batch_size]], max_len=max_len)

num_epochs = 100
max_len = 100
batch_size = 32
batches_per_epoch = 100

for epoch_i in range(num_epochs):
    print("Training:")
    train_loss = train_mae = train_batches = 0
    model.train(True)
    for batch in iterate_minibatches(data_train, max_batches=batches_per_epoch):
        title_ix = Variable(torch.LongTensor(batch["Title"]))
        desc_ix = Variable(torch.LongTensor(batch["FullDescription"]))
        cat_features = Variable(torch.FloatTensor(batch["Categorical"]))
        reference = Variable(torch.FloatTensor(batch[target_column]))

        prediction = model(title_ix, desc_ix, cat_features)
        loss = compute_loss(reference, prediction)

        loss.backward()
        opt.step()
        opt.zero_grad()

        train_loss += loss.data.numpy()
        train_mae += compute_mae(reference, prediction).data.numpy()
        train_batches += 1

    print("\tLoss:\t%.5f" % (train_loss / train_batches))
    print("\tMAE:\t%.5f" % (train_mae / train_batches))
    print('\n\n')

    print("Validation:")
    val_loss = val_mae = val_batches = 0
    model.train(False)
    with torch.no_grad():
        for batch in iterate_minibatches(data_val, shuffle=False):
            title_ix = Variable(torch.LongTensor(batch["Title"]))
            desc_ix = Variable(torch.LongTensor(batch["FullDescription"]))
            cat_features = Variable(torch.FloatTensor(batch["Categorical"]))
            reference = Variable(torch.FloatTensor(batch[target_column]))

            prediction = model(title_ix, desc_ix, cat_features)
            loss = compute_loss(reference, prediction)

            val_loss += loss.data.numpy()
            val_mae += compute_mae(reference, prediction).data.numpy()
            val_batches += 1

    print("\tLoss:\t%.5f" % (val_loss / val_batches))
    print("\tMAE:\t%.5f" % (val_mae / val_batches))
    print('\n\n')

print("Final eval:")
val_loss = val_mae = val_batches = 0
with torch.no_grad():
    for batch in iterate_minibatches(data_val, shuffle=False):
        title_ix = Variable(torch.LongTensor(batch["Title"]))
        desc_ix = Variable(torch.LongTensor(batch["FullDescription"]))
        cat_features = Variable(torch.FloatTensor(batch["Categorical"]))
        reference = Variable(torch.FloatTensor(batch[target_column]))

        prediction = model(title_ix, desc_ix, cat_features)
        loss = compute_loss(reference, prediction)

        val_loss += loss.data.numpy()
        val_mae += compute_mae(reference, prediction).data.numpy()
        val_batches += 1

print("\tLoss:\t%.5f" % (val_loss / val_batches))
print("\tMAE:\t%.5f" % (val_mae / val_batches))
print('\n\n')
###Output
_____no_output_____
###Markdown
Natural Language Processing with Deep Learning (7 points)
Today we're gonna apply the newly learned DL tools for sequence processing to the task of predicting job salary.
Special thanks to [Oleg Vasilev](https://github.com/Omrigan/) for the assignment core (originally written for theano/tensorflow).
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline

from google.colab import drive
drive.mount('/content/drive', force_remount=True)
path = '/content/drive/My Drive/PracticalDL/week05_nlp/'
###Output
Mounted at /content/drive
###Markdown
About the challenge
For starters, let's download the data from __[here](https://yadi.sk/d/vVEOWPFY3NruT7)__.
You can also get it from the competition [page](https://www.kaggle.com/c/job-salary-prediction/data) (in that case, pick `Train_rev1.*`).
Our task is to predict one number, __SalaryNormalized__, in the sense of minimizing __Mean Absolute Error__.
To do so, our model can access a number of features:
* Free text: __`Title`__ and __`FullDescription`__
* Categorical: __`Category`__, __`Company`__, __`LocationNormalized`__, __`ContractType`__, and __`ContractTime`__.
You can read more [in the official description](https://www.kaggle.com/c/job-salary-predictiondescription).
###Code
data = pd.read_csv(path + "Train_rev1.csv", index_col=None)
data['Log1pSalary'] = np.log1p(data['SalaryNormalized']).astype('float32')

text_columns = ["Title", "FullDescription"]
categorical_columns = ["Category", "Company", "LocationNormalized", "ContractType", "ContractTime"]
target_column = "Log1pSalary"

data[categorical_columns] = data[categorical_columns].fillna('NaN')  # cast nan to string

data.sample(3)
###Output
_____no_output_____
###Markdown
The NLP part
To even begin training our neural network, we're gonna need to preprocess the text features: tokenize them and build the token vocabularies.
Since it is not an NLP course, we're gonna use simple built-in NLTK tokenization.
###Code
print("Before")
print(data["Title"][::100000])

import nltk
tokenizer = nltk.tokenize.WordPunctTokenizer()

for col in text_columns:
    data[col] = data[col].apply(lambda l: ' '.join(tokenizer.tokenize(str(l).lower())))
###Output
_____no_output_____
###Markdown
Now we can assume that our text is a space-separated list of tokens:
###Code
print("After")
print(data["Title"][::100000])
print(type(data["Title"]))
###Output
After
0         engineering systems analyst
100000    hr assistant
200000    senior ec & i engineer
Name: Title, dtype: object
<class 'pandas.core.series.Series'>
###Markdown
Not all words are equally useful. Some of them are typos or rare words that are only present a few times.
Let's see how many times each word is present in the data so that we can build a "white list" of known words.
###Code
from collections import Counter
token_counts = Counter()

# Count how many times each token occurs in "Title" and "FullDescription"
full_list = data["Title"].tolist()
descriptions = data["FullDescription"].tolist()
full_list.extend(descriptions)
token_counts = Counter(' '.join(full_list).split())

print("Total unique tokens :", len(token_counts))
print('\n'.join(map(str, token_counts.most_common(n=5))))
print('...')
print('\n'.join(map(str, token_counts.most_common()[-3:])))

assert token_counts.most_common(1)[0][1] in range(2600000, 2700000)
assert len(token_counts) in range(200000, 210000)
print('Correct!')

# Let's see how many words are there for each count
_ = plt.hist(list(token_counts.values()), range=[0, 10**4], bins=50, log=True)
plt.xlabel("Counts")
###Output
_____no_output_____
###Markdown
__Task 1.1__ Get a list of all tokens that occur at least 10 times.
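###Markdown
A small aside before the graded cell: the filter/map one-liner used in the next cell can equivalently be written as a list comprehension, which some find easier to read. This is just an alternative sketch, not the author's code.
###Code
# equivalent alternative to the filter/map version in the next cell
min_count = 10
tokens = [tok for tok, cnt in token_counts.items() if cnt >= min_count]
###Output
_____no_output_____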
###Code
min_count = 10

# tokens from token_counts keys that had at least min_count occurrences throughout the dataset
tokens = list(map(lambda pair: pair[0], filter(lambda x: x[1] >= min_count, token_counts.items())))

# Add special tokens for unknown and empty words
UNK, PAD = "UNK", "PAD"
tokens = [UNK, PAD] + tokens

print("Tokens left:", len(tokens))
assert type(tokens) == list
assert len(tokens) in range(32000, 35000)
assert 'me' in tokens
assert UNK in tokens
print("Correct!")
###Output
Tokens left: 34158
Correct!
###Markdown
__Task 1.2__ Build an inverse token index: a dictionary from token (string) to its index in `tokens` (int)
###Code
token_to_id = {}
for i in range(len(tokens)):
    token_to_id[tokens[i]] = i

assert isinstance(token_to_id, dict)
assert len(token_to_id) == len(tokens)
for tok in tokens:
    assert tokens[token_to_id[tok]] == tok
print("Correct!")
###Output
Correct!
###Markdown
And finally, let's use the vocabulary you've built to map text lines into torch-digestible matrices.
###Code
UNK_IX, PAD_IX = map(token_to_id.get, [UNK, PAD])

def as_matrix(sequences, max_len=None):
    """ Convert a list of tokens into a matrix with padding """
    if isinstance(sequences[0], str):
        sequences = list(map(str.split, sequences))

    max_len = min(max(map(len, sequences)), max_len or float('inf'))

    matrix = np.full((len(sequences), max_len), np.int32(PAD_IX))
    for i, seq in enumerate(sequences):
        row_ix = [token_to_id.get(word, UNK_IX) for word in seq[:max_len]]
        matrix[i, :len(row_ix)] = row_ix

    return matrix

####
print("Lines:")
print('\n'.join(data["Title"][::100000].values), end='\n\n')
print("Matrix:")
print(as_matrix(data["Title"][::100000]))
###Output
engineering systems analyst
hr assistant
senior ec & i engineer

Matrix:
[[   2    3    4    1    1]
 [ 998  176    1    1    1]
 [  18 3472  242   59    6]]
###Markdown
Now let's encode the categorical data we have.
As usual, we shall use one-hot encoding for simplicity. Kudos if you implement tf-idf, target averaging or pseudo-counter-based encoding.
###Code
from sklearn.feature_extraction import DictVectorizer

# we only consider top-1k most frequent companies to minimize memory usage
top_companies, top_counts = zip(*Counter(data['Company']).most_common(1000))
recognized_companies = set(top_companies)
data["Company"] = data["Company"].apply(lambda comp: comp if comp in recognized_companies else "Other")

categorical_vectorizer = DictVectorizer(dtype=np.float32, sparse=False)
categorical_vectorizer.fit(data[categorical_columns].apply(dict, axis=1))
###Output
_____no_output_____
###Markdown
The data science part
Once we've learned to tokenize the data, let's design a machine learning experiment.
As before, we won't focus too much on validation, opting for a simple train-test split.
__To be completely rigorous,__ we've committed a small crime here: we used the whole data for tokenization and vocabulary building. A stricter way would be to do that part on the training set only. You may want to do that and measure the magnitude of changes.
###Code
from sklearn.model_selection import train_test_split
data_train, data_val = train_test_split(data, test_size=0.1, random_state=42)
print("Train size = ", len(data_train))
print("Validation size = ", len(data_val))

def generate_batch(data, batch_size=None, replace=True, max_len=None):
    """
    Creates a pytorch-friendly dict from the batch data.
    :returns: a dict with {'Title': int64 [batch, title_max_len], 'FullDescription': int64 [batch, desc_max_len],
              'Categorical': float32 [batch, n_cat_features], 'Log1pSalary': float32 [batch] (if available)}
    """
    if batch_size is not None:
        data = data.sample(batch_size, replace=replace)

    batch = {}
    for col in text_columns:
        batch[col] = as_matrix(data[col].values, max_len)

    batch['Categorical'] = categorical_vectorizer.transform(data[categorical_columns].apply(dict, axis=1))

    if target_column in data.columns:
        batch[target_column] = data[target_column].values

    return batch

generate_batch(data_train, 3, max_len=10)
###Output
_____no_output_____
###Markdown
Finally, let's talk deep learning
Our model consists of three branches:
* Title encoder
* Description encoder
* Categorical features encoder
We will then feed all 3 branches into one common network that predicts salary.
![scheme](https://github.com/yandexdataschool/Practical_DL/raw/master/homework04/conv_salary_architecture.png)
By default, both text vectorizers shall use 1d convolutions, followed by global pooling over time.
###Code
import torch, torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable

class GlobalMaxPooling(nn.Module):
    def __init__(self, dim=-1):
        super(self.__class__, self).__init__()
        self.dim = dim

    def forward(self, x):
        return x.max(dim=self.dim)[0]

class AveragePooling(nn.Module):
    def __init__(self, dim=-1):
        super(self.__class__, self).__init__()
        self.dim = dim

    def forward(self, x):
        # note: this zeroes out activations that are exactly equal to 1 (in place) before averaging over time
        x[x == 1] = 0
        return torch.mean(x, dim=self.dim)

class TitleEncoder(nn.Module):
    def __init__(self, n_tokens=len(tokens), out_size=48):
        """ A simple sequential encoder for titles.
        x -> emb -> conv -> bn -> conv -> bn -> relu -> avg_pool -> dense
        """
        super(self.__class__, self).__init__()
        self.emb = nn.Embedding(n_tokens, 48, padding_idx=PAD_IX)
        self.conv1 = nn.Conv1d(48, out_size, kernel_size=3, padding=1)
        self.batchnorm1 = nn.BatchNorm1d(out_size)
        self.conv2 = nn.Conv1d(out_size, out_size, kernel_size=3, padding=1)
        self.batchnorm2 = nn.BatchNorm1d(out_size)
        self.relu1 = nn.ReLU()
        self.pool1 = AveragePooling()
        self.dense = nn.Linear(out_size, out_size)

    def forward(self, text_ix):
        """
        :param text_ix: int64 Variable of shape [batch_size, max_len]
        :returns: float32 Variable of shape [batch_size, out_size]
        """
        h = self.emb(text_ix)

        # we transpose from [batch, time, units] to [batch, units, time] to fit Conv1d dim order
        h = torch.transpose(h, 1, 2)

        # Apply the layers as defined above. Add some ReLUs before dense.
        result = self.dense(self.pool1(self.relu1(self.batchnorm2(self.conv2(self.batchnorm1(self.conv1(h)))))))
        return result

title_encoder = TitleEncoder(out_size=64)

dummy_x = Variable(torch.LongTensor(generate_batch(data_train, 3)['Title']))
dummy_v = title_encoder(dummy_x)
assert isinstance(dummy_v, Variable)
assert tuple(dummy_v.shape) == (dummy_x.shape[0], 64)
del title_encoder
print("Seems fine")
###Output
Seems fine
###Markdown
__Task 2.1__ Create description encoder
###Code
# Define an encoder for job descriptions.
# Use any means you want as long as it's a torch.nn.Module.
class DescriptionEncoder(nn.Module):
    def __init__(self, n_tokens=len(tokens), out_size=48):
        """ A simple sequential encoder for descriptions.
        x -> emb -> conv -> bn -> conv -> bn -> relu -> avg_pool -> dense
        """
        super(self.__class__, self).__init__()
        self.emb = nn.Embedding(n_tokens, 48, padding_idx=PAD_IX)
        self.conv1 = nn.Conv1d(48, out_size, kernel_size=3, padding=1)
        self.batchnorm1 = nn.BatchNorm1d(out_size)
        self.conv2 = nn.Conv1d(out_size, out_size, kernel_size=3, padding=1)
        self.batchnorm2 = nn.BatchNorm1d(out_size)
        self.relu1 = nn.ReLU()
        self.pool1 = AveragePooling()
        self.dense = nn.Linear(out_size, out_size)

    def forward(self, text_ix):
        """
        :param text_ix: int64 Variable of shape [batch_size, max_len]
        :returns: float32 Variable of shape [batch_size, out_size]
        """
        h = self.emb(text_ix)

        # we transpose from [batch, time, units] to [batch, units, time] to fit Conv1d dim order
        h = torch.transpose(h, 1, 2)

        # Apply the layers as defined above. Add some ReLUs before dense.
        result = self.dense(self.pool1(self.relu1(self.batchnorm2(self.conv2(self.batchnorm1(self.conv1(h)))))))
        return result

desc_encoder = DescriptionEncoder(out_size=64)

dummy_x = Variable(torch.LongTensor(generate_batch(data_train, 3)['FullDescription']))
dummy_v = desc_encoder(dummy_x)
assert isinstance(dummy_v, Variable)
assert tuple(dummy_v.shape) == (dummy_x.shape[0], 64)
del desc_encoder
print("Seems fine too")
###Output
Seems fine too
###Markdown
__Task 2.2__ Build one network ~~to rule them all~~
###Code
class FullNetwork(nn.Module):
    """
    This class does all the steps from (title, desc, categorical) features -> predicted target.
    It unites the title & desc encoders you defined above as well as some layers for the head and the categorical branch.
    """
    def __init__(self, n_tokens=len(tokens), n_cat_features=len(categorical_vectorizer.vocabulary_)):
        super(self.__class__, self).__init__()
        self.title_encoder = TitleEncoder(out_size=48)
        self.desc_encoder = DescriptionEncoder(out_size=48)

        # define layers for categorical features. A few dense layers would do.
        self.dense1 = nn.Linear(n_cat_features, 128)
        self.dropout1 = nn.Dropout(p=0.2)
        self.relu1 = nn.ReLU()
        self.dense2 = nn.Linear(128, 64)
        self.dropout2 = nn.Dropout(p=0.2)
        self.relu2 = nn.ReLU()
        self.dense3 = nn.Linear(64, 48)
        self.relu3 = nn.ReLU()

        # define "output" layers that turn the three encoded vectors into the answer
        self.dense4 = nn.Linear(144, 48)
        self.relu4 = nn.ReLU()
        self.dense5 = nn.Linear(48, 1)

    def forward(self, title_ix, desc_ix, cat_features):
        """
        :param title_ix: int32 Variable [batch, title_len], job titles encoded by as_matrix
        :param desc_ix: int32 Variable [batch, desc_len], job descriptions encoded by as_matrix
        :param cat_features: float32 Variable [batch, n_cat_features]
        :returns: float32 Variable 1d [batch], predicted log1p-salary
        """
        # process each data source with its respective encoder
        title_h = self.title_encoder(title_ix)
        desc_h = self.desc_encoder(desc_ix)

        # apply categorical encoder
        cat_h = self.relu3(self.dense3(self.relu2(self.dropout2(self.dense2(self.relu1(self.dropout1(self.dense1(cat_features))))))))

        # concatenate all vectors together...
        joint_h = torch.cat([title_h, desc_h, cat_h], dim=1)

        # ... and stack a few more layers at the top
        result = self.dense5(self.relu4(self.dense4(joint_h)))[:, 0]
        # Note 1: do not forget to select the first column, [:, 0], to get 1d outputs
        # Note 2: please do not use output nonlinearities.
        return result

model = FullNetwork()
opt = torch.optim.Adam(model.parameters(), lr=1e-3)

# test it on one batch
batch = generate_batch(data_train, 32)
title_ix = Variable(torch.LongTensor(batch["Title"]))
desc_ix = Variable(torch.LongTensor(batch["FullDescription"]))
cat_features = Variable(torch.FloatTensor(batch["Categorical"]))
reference = Variable(torch.FloatTensor(batch[target_column]))

prediction = model(title_ix, desc_ix, cat_features)
assert len(prediction.shape) == 1 and prediction.shape[0] == title_ix.shape[0]

def compute_loss(reference, prediction):
    """
    Computes the objective for minimization.
    By default we minimize MSE, but you are encouraged to try mixing MSE, MAE, Huber loss, etc.
    """
    return torch.mean((prediction - reference) ** 2)

def compute_mae(reference, prediction):
    """ Compute MAE on actual salary, assuming your model outputs log1p(salary) """
    # expm1 inverts the log1p transform (the original exp(x - 1) did not)
    return torch.abs(torch.expm1(reference) - torch.expm1(prediction)).mean()

loss = compute_loss(reference, prediction)
dummy_grads = torch.autograd.grad(loss, model.parameters(), retain_graph=True)
for grad in dummy_grads:
    assert grad is not None and not (grad == 0).all(), "Some model parameters received zero grads. " \
                                                       "Double-check that your model uses all its layers."
###Output
_____no_output_____
###Markdown
Let's train it!
###Code
from tqdm import tnrange

def iterate_minibatches(data, batch_size=32, max_len=None, max_batches=None, shuffle=True, verbose=True):
    indices = np.arange(len(data))
    if shuffle:
        indices = np.random.permutation(indices)
    if max_batches is not None:
        indices = indices[: batch_size * max_batches]

    irange = tnrange if verbose else range
    for start in irange(0, len(indices), batch_size):
        yield generate_batch(data.iloc[indices[start : start + batch_size]], max_len=max_len)

num_epochs = 10
max_len = 100
batch_size = 32
batches_per_epoch = 100

for epoch_i in range(num_epochs):
    print("Training:")
    train_loss = train_mae = train_batches = 0
    model.train(True)
    for batch in iterate_minibatches(data_train, max_batches=batches_per_epoch):
        title_ix = Variable(torch.LongTensor(batch["Title"]))
        desc_ix = Variable(torch.LongTensor(batch["FullDescription"]))
        cat_features = Variable(torch.FloatTensor(batch["Categorical"]))
        reference = Variable(torch.FloatTensor(batch[target_column]))

        prediction = model(title_ix, desc_ix, cat_features)
        loss = compute_loss(reference, prediction)

        loss.backward()
        opt.step()
        opt.zero_grad()

        train_loss += loss.data.numpy()
        train_mae += compute_mae(reference, prediction).data.numpy()
        train_batches += 1

    print("\tLoss:\t%.5f" % (train_loss / train_batches))
    print("\tMAE:\t%.5f" % (train_mae / train_batches))
    print('\n\n')

    print("Validation:")
    val_loss = val_mae = val_batches = 0
    model.train(False)
    with torch.no_grad():
        for batch in iterate_minibatches(data_val, shuffle=False):
            title_ix = Variable(torch.LongTensor(batch["Title"]))
            desc_ix = Variable(torch.LongTensor(batch["FullDescription"]))
            cat_features = Variable(torch.FloatTensor(batch["Categorical"]))
            reference = Variable(torch.FloatTensor(batch[target_column]))

            prediction = model(title_ix, desc_ix, cat_features)
            loss = compute_loss(reference, prediction)

            val_loss += loss.data.numpy()
            val_mae += compute_mae(reference, prediction).data.numpy()
            val_batches += 1

    print("\tLoss:\t%.5f" % (val_loss / val_batches))
    print("\tMAE:\t%.5f" % (val_mae / val_batches))
    print('\n\n')

print("Final eval:")
val_loss = val_mae = val_batches = 0
with torch.no_grad():
    for batch in iterate_minibatches(data_val, shuffle=False):
        title_ix = Variable(torch.LongTensor(batch["Title"]))
        desc_ix = Variable(torch.LongTensor(batch["FullDescription"]))
        cat_features = Variable(torch.FloatTensor(batch["Categorical"]))
        reference = Variable(torch.FloatTensor(batch[target_column]))

        prediction = model(title_ix, desc_ix, cat_features)
        loss = compute_loss(reference, prediction)

        val_loss += loss.data.numpy()
        val_mae += compute_mae(reference, prediction).data.numpy()
        val_batches += 1

print("\tLoss:\t%.5f" % (val_loss / val_batches))
print("\tMAE:\t%.5f" % (val_mae / val_batches))
print('\n\n')
###Output
Final eval:
###Markdown
Task 3.2: Actually make it work
Your main task is to use some of the tricks you've learned on the network and analyze if you can improve __validation MAE__.
Try __at least 3 options__ from the list below for a passing grade; more if you're into it.

A) CNN architecture
All the tricks you know about dense and convolutional neural networks apply here as well.
* Dropout. Nuff said.
* Batch Norm. This time it's `nn.BatchNorm1d`
* Parallel convolution layers. The idea is that you apply several nn.Conv1d to the same embeddings and concatenate output channels.
* More layers, more neurons, ya know...

B) Play with pooling
There's more than one way to do max pooling:
* Max over time - our `GlobalMaxPooling`
* Average over time (excluding PAD)
* Softmax-pooling:
$$ out_i = \sum_t h_{i,t} \cdot \frac{e^{h_{i,t}}}{\sum_\tau e^{h_{i,\tau}}} $$
* Attentive pooling (a sketch of one possible module follows this section):
$$ out_i = \sum_t h_{i,t} \cdot Attn(h_t) $$
, where
$$ Attn(h_t) = \frac{e^{NN_{attn}(h_t)}}{\sum_\tau e^{NN_{attn}(h_\tau)}} $$
and $NN_{attn}$ is a small neural network.
The optimal score is usually achieved by concatenating several different poolings, including several attentive poolings with different $NN_{attn}$.

C) Fun with embeddings
It's not always a good idea to train embeddings from scratch. Here are a few tricks:
* Use a pre-trained word2vec from [here](http://ahogrammer.com/2017/01/20/the-list-of-pretrained-word-embeddings/) or [here](http://mccormickml.com/2016/04/12/googles-pretrained-word2vec-model-in-python/).
* Start with pre-trained embeddings, then fine-tune them with gradient descent
* Use the same embedding matrix in the title and desc vectorizers

D) Going recurrent
We've already learned that recurrent networks can do cool stuff in sequence modelling. Turns out, they're not useless for classification either. With some tricks, of course...
* Like convolutional layers, the LSTM output should be pooled into a fixed-size vector with one of the poolings.
* Please bear in mind that while convolution uses [batch, units, time] dim order, recurrent units are built for [batch, time, unit]. You may need to `torch.transpose`.
* Since you know all the text in advance, use a bidirectional RNN:
  * Run one LSTM from left to right
  * Run another in parallel from right to left
  * Concatenate their output sequences along the unit axis (dim=-1)
* It might be a good idea to mix convolutions and recurrent layers differently for title and description

E) Optimizing seriously
* You don't necessarily need 100 epochs. Use early stopping. If you've never done this before, take a look at [keras](https://github.com/keras-team/keras/blob/master/keras/callbacks.pyL461) for inspiration.
  * In short, train until you notice that the validation error stops improving
  * Maintain the best-on-validation snapshot via `model.state_dict`
  * Plotting learning curves is usually a good idea
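###Markdown
To make option B above a bit more concrete, here is a minimal sketch of an attentive pooling module in the spirit of the formulas above. It is an illustration under assumptions, not the assignment's reference solution: the class name, the hidden size of $NN_{attn}$, and the decision not to mask PAD positions are choices made up for the example.
###Code
import torch, torch.nn as nn

class AttentivePooling(nn.Module):
    """ Sketch: pools [batch, units, time] -> [batch, units] with learned attention weights. """
    def __init__(self, n_units, hidden=64):
        super().__init__()
        # NN_attn: a small network that scores each time step
        self.attn = nn.Sequential(nn.Linear(n_units, hidden), nn.Tanh(), nn.Linear(hidden, 1))

    def forward(self, h):
        # h: [batch, units, time], the Conv1d dim order used by the encoders above
        h_t = h.transpose(1, 2)                  # [batch, time, units]
        scores = self.attn(h_t)                  # [batch, time, 1]
        weights = torch.softmax(scores, dim=1)   # softmax over time = Attn(h_t)
        return (h_t * weights).sum(dim=1)        # [batch, units]
###Output
_____no_output_____
###Markdown
Several such modules with independent $NN_{attn}$ networks can be concatenated with max and average pooling, as the list above suggests; a full implementation would probably also mask PAD positions before the softmax.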
###Code ###Output _____no_output_____ ###Markdown Natural Language Processing with Deep Learning (7 points)Today we're gonna apply the newly learned DL tools for sequence processing to the task of predicting job salary.Special thanks to [Oleg Vasilev](https://github.com/Omrigan/) for the assignment core (orignally written for theano/tensorflow). ###Code import numpy as np import pandas as pd import matplotlib.pyplot as plt %matplotlib inline ###Output _____no_output_____ ###Markdown About the challengeFor starters, let's download the data from __[here](https://yadi.sk/d/vVEOWPFY3NruT7)__.You can also get it from the competition [page](https://www.kaggle.com/c/job-salary-prediction/data) (in that case, pick `Train_rev1.*`).Our task is to predict one number, __SalaryNormalized__, in the sense of minimizing __Mean Absolute Error__.To do so, our model ca access a number of features:* Free text: __`Title`__ and __`FullDescription`__* Categorical: __`Category`__, __`Company`__, __`LocationNormalized`__, __`ContractType`__, and __`ContractTime`__.You can read more [in the official description](https://www.kaggle.com/c/job-salary-predictiondescription). ###Code data = pd.read_csv("./Train_rev1.csv", index_col=None) data['Log1pSalary'] = np.log1p(data['SalaryNormalized']).astype('float32') text_columns = ["Title", "FullDescription"] categorical_columns = ["Category", "Company", "LocationNormalized", "ContractType", "ContractTime"] target_column = "Log1pSalary" data[categorical_columns] = data[categorical_columns].fillna('NaN') # cast nan to string data.sample(3) ###Output _____no_output_____ ###Markdown The NLP partTo even begin training our neural network, we're gonna need to preprocess the text features: tokenize it and build the token vocabularies.Since it is not an NLP course, we're gonna use simple built-in NLTK tokenization. ###Code print("Before") print(data["Title"][::100000]) import nltk tokenizer = nltk.tokenize.WordPunctTokenizer() for col in text_columns: data[col] = data[col].apply(lambda l: ' '.join(tokenizer.tokenize(str(l).lower()))) ###Output _____no_output_____ ###Markdown Now we can assume that our text is a space-separated list of tokens: ###Code print("After") print(data["Title"][::100000]) ###Output _____no_output_____ ###Markdown Not all words are equally useful. Some of them are typos or rare words that are only present a few times. Let's see how many times is each word present in the data so that we can build a "white list" of known words. ###Code from collections import Counter token_counts = Counter() # Count how many times does each token occur in "Title" and "FullDescription" <YOUR CODE HERE> print("Total unique tokens :", len(token_counts)) print('\n'.join(map(str, token_counts.most_common(n=5)))) print('...') print('\n'.join(map(str, token_counts.most_common()[-3:]))) assert token_counts.most_common(1)[0][1] in range(2600000, 2700000) assert len(token_counts) in range(200000, 210000) print('Correct!') # Let's see how many words are there for each count _=plt.hist(list(token_counts.values()), range=[0, 10**4], bins=50, log=True) plt.xlabel("Counts") ###Output _____no_output_____ ###Markdown __Task 1.1__ Get a list of all tokens that occur at least 10 times. 
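One way the token-counting cell above (the `<YOUR CODE HERE>` after `token_counts = Counter()`) could be completed; this is only a sketch, and any equivalent counting works because the text columns were already lower-cased and space-joined:

```python
# count how often each token occurs across both text columns
for col in text_columns:
    for line in data[col]:
        token_counts.update(line.split())
```

The `tokens` list asked for in Task 1.1 can then be built in the next cell by keeping the keys of `token_counts` whose counts reach `min_count`.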
###Code min_count = 10 # tokens from token_counts keys that had at least min_count occurrences throughout the dataset tokens = <YOUR CODE HERE> # Add a special tokens for unknown and empty words UNK, PAD = "UNK", "PAD" tokens = [UNK, PAD] + tokens print("Tokens left:", len(tokens)) assert type(tokens)==list assert len(tokens) in range(32000,35000) assert 'me' in tokens assert UNK in tokens print("Correct!") ###Output _____no_output_____ ###Markdown __Task 1.2__ Build an inverse token index: a dictionary from token(string) to it's index in `tokens` (int) ###Code token_to_id = <your code here> assert isinstance(token_to_id, dict) assert len(token_to_id) == len(tokens) for tok in tokens: assert tokens[token_to_id[tok]] == tok print("Correct!") ###Output _____no_output_____ ###Markdown And finally, let's use the vocabulary you've built to map text lines into torch-digestible matrices. ###Code UNK_IX, PAD_IX = map(token_to_id.get, [UNK, PAD]) def as_matrix(sequences, max_len=None): """ Convert a list of tokens into a matrix with padding """ if isinstance(sequences[0], str): sequences = list(map(str.split, sequences)) max_len = min(max(map(len, sequences)), max_len or float('inf')) matrix = np.full((len(sequences), max_len), np.int32(PAD_IX)) for i,seq in enumerate(sequences): row_ix = [token_to_id.get(word, UNK_IX) for word in seq[:max_len]] matrix[i, :len(row_ix)] = row_ix return matrix #### print("Lines:") print('\n'.join(data["Title"][::100000].values), end='\n\n') print("Matrix:") print(as_matrix(data["Title"][::100000])) ###Output _____no_output_____ ###Markdown Now let's encode the categirical data we have.As usual, we shall use one-hot encoding for simplicity. Kudos if you implement tf-idf, target averaging or pseudo-counter-based encoding. ###Code from sklearn.feature_extraction import DictVectorizer # we only consider top-1k most frequent companies to minimize memory usage top_companies, top_counts = zip(*Counter(data['Company']).most_common(1000)) recognized_companies = set(top_companies) data["Company"] = data["Company"].apply(lambda comp: comp if comp in recognized_companies else "Other") categorical_vectorizer = DictVectorizer(dtype=np.float32, sparse=False) categorical_vectorizer.fit(data[categorical_columns].apply(dict, axis=1)) ###Output _____no_output_____ ###Markdown The data science partOnce we've learned to tokenize the data, let's design a machine learning experiment.As before, we won't focus too much on validation, opting for a simple train-test split.__To be completely rigorous,__ we've comitted a small crime here: we used the whole data for tokenization and vocabulary building. A more strict way would be to do that part on training set only. You may want to do that and measure the magnitude of changes. ###Code from sklearn.model_selection import train_test_split data_train, data_val = train_test_split(data, test_size=0.1, random_state=42) print("Train size = ", len(data_train)) print("Validation size = ", len(data_val)) def generate_batch(data, batch_size=None, replace=True, max_len=None): """ Creates a pytorch-friendly dict from the batch data. 
:returns: a dict with {'title' : int64[batch, title_max_len] """ if batch_size is not None: data = data.sample(batch_size, replace=replace) batch = {} for col in text_columns: batch[col] = as_matrix(data[col].values, max_len) batch['Categorical'] = categorical_vectorizer.transform(data[categorical_columns].apply(dict, axis=1)) if target_column in data.columns: batch[target_column] = data[target_column].values return batch generate_batch(data_train, 3, max_len=10) ###Output _____no_output_____ ###Markdown Finally, let's talk deep learningOut model consists of three branches:* Title encoder* Description encoder* Categorical features encoderWe will then feed all 3 branches into one common network that predicts salary.![scheme](https://github.com/yandexdataschool/Practical_DL/raw/master/homework04/conv_salary_architecture.png) By default, both text vectorizers shall use 1d convolutions, followed by global pooling over time. ###Code import torch, torch.nn as nn import torch.nn.functional as F from torch.autograd import Variable class GlobalMaxPooling(nn.Module): def __init__(self, dim=-1): super(self.__class__, self).__init__() self.dim = dim def forward(self, x): return x.max(dim=self.dim)[0] class TitleEncoder(nn.Module): def __init__(self, n_tokens=len(tokens), out_size=64): """ A simple sequential encoder for titles. x -> emb -> conv -> global_max -> relu -> dense """ super(self.__class__, self).__init__() self.emb = nn.Embedding(n_tokens, 64, padding_idx=PAD_IX) self.conv1 = nn.Conv1d(64, out_size, kernel_size=3, padding=1) self.pool1 = GlobalMaxPooling() self.dense = nn.Linear(out_size, out_size) def forward(self, text_ix): """ :param text_ix: int64 Variable of shape [batch_size, max_len] :returns: float32 Variable of shape [batch_size, out_size] """ h = self.emb(text_ix) # we transpose from [batch, time, units] to [batch, units, time] to fit Conv1d dim order h = torch.transpose(h, 1, 2) # Apply the layers as defined above. Add some ReLUs before dense. <YOUR CODE> return <YOUR CODE> title_encoder = TitleEncoder(out_size=64) dummy_x = Variable(torch.LongTensor(generate_batch(data_train, 3)['Title'])) dummy_v = title_encoder(dummy_x) assert isinstance(dummy_v, Variable) assert tuple(dummy_v.shape) == (dummy_x.shape[0], 64) del title_encoder print("Seems fine") ###Output _____no_output_____ ###Markdown __Task 2.1__ Create description encoder ###Code # Define an encoder for job descriptions. # Use any means you want so long as it's torch.nn.Module. <YOUR CODE HERE> desc_encoder = <Create description encoder> dummy_x = Variable(torch.LongTensor(generate_batch(data_train, 3)['FullDescription'])) dummy_v = desc_encoder(dummy_x) assert isinstance(dummy_v, Variable) assert tuple(dummy_v.shape) == (dummy_x.shape[0], 64) del desc_encoder print("Seems fine too") ###Output _____no_output_____ ###Markdown __ Task 2.2__ Build one network ~~to rule them all~~ ###Code class FullNetwork(nn.Module): """ This class does all the steps from (title, desc, categorical) features -> predicted target It unites title & desc encoders you defined above as long as some layers for head and categorical branch. """ def __init__(self, n_tokens=len(tokens), n_cat_features=len(categorical_vectorizer.vocabulary_)): super(self.__class__, self).__init__() self.title_encoder = TitleEncoder(out_size=64) self.desc_encoder = <YOUR CODE> # define layers for categorical features. A few dense layers would do. 
<YOUR CODE> # define "output" layers that process depend the three encoded vectors into answer <YOUR CODE> def forward(self, title_ix, desc_ix, cat_features): """ :param title_ix: int32 Variable [batch, title_len], job titles encoded by as_matrix :param desc_ix: int32 Variable [batch, desc_len] , job descriptions encoded by as_matrix :param cat_features: float32 Variable [batch, n_cat_features] :returns: float32 Variable 1d [batch], predicted log1p-salary """ # process each data source with it's respective encoder title_h = self.title_encoder(title_ix) desc_h = <YOUR CODE> # apply categorical encoder cat_h = <YOUR CODE> # concatenate all vectors together... joint_h = torch.cat([title_h, desc_h, cat_h], dim=1) # ... and stack a few more layers at the top <YOUR CODE> # Note 1: do not forget to select first columns, [:, 0], to get to 1d outputs # Note 2: please do not use output nonlinearities. return <YOUR CODE> model = FullNetwork() opt = torch.optim.Adam(model.parameters(), lr=1e-3) # test it on one batch batch = generate_batch(data_train, 32) title_ix = Variable(torch.LongTensor(batch["Title"])) desc_ix = Variable(torch.LongTensor(batch["FullDescription"])) cat_features = Variable(torch.FloatTensor(batch["Categorical"])) reference = Variable(torch.FloatTensor(batch[target_column])) prediction = model(title_ix, desc_ix, cat_features) assert len(prediction.shape) == 1 and prediction.shape[0] == title_ix.shape[0] def compute_loss(reference, prediction): """ Computes objective for minimization. By deafult we minimize MSE, but you are encouraged to try mix up MSE, MAE, huber loss, etc. """ return torch.mean((prediction - reference) ** 2) def compute_mae(reference, prediction): """ Compute MAE on actual salary, assuming your model outputs log1p(salary)""" return torch.abs(torch.exp(reference - 1) - torch.exp(prediction - 1)).mean() loss = compute_loss(reference, prediction) dummy_grads = torch.autograd.grad(loss, model.parameters(), retain_graph=True) for grad in dummy_grads: assert grad is not None and not (grad == 0).all(), "Some model parameters received zero grads. " \ "Double-check that your model uses all it's layers." ###Output _____no_output_____ ###Markdown Let's train it! 
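Before training, here is one plausible way to fill in the description encoder from Task 2.1 above, shown only as a sketch: it simply mirrors `TitleEncoder` (embedding, 1d convolution, global max pooling, dense), the class name and layer sizes are arbitrary, and any `nn.Module` that maps `[batch, desc_len]` token indices to a `[batch, 64]` vector would do. It reuses `tokens`, `PAD_IX`, `GlobalMaxPooling`, and `F` defined earlier in the notebook.

```python
class DescriptionEncoder(nn.Module):
    """ Same emb -> conv -> global max pool -> relu -> dense pattern as TitleEncoder. """
    def __init__(self, n_tokens=len(tokens), out_size=64, emb_size=64):
        super().__init__()
        self.emb = nn.Embedding(n_tokens, emb_size, padding_idx=PAD_IX)
        self.conv1 = nn.Conv1d(emb_size, out_size, kernel_size=3, padding=1)
        self.pool1 = GlobalMaxPooling()
        self.dense = nn.Linear(out_size, out_size)

    def forward(self, text_ix):
        h = self.emb(text_ix)            # [batch, time, emb_size]
        h = torch.transpose(h, 1, 2)     # [batch, emb_size, time] to fit Conv1d dim order
        h = F.relu(self.conv1(h))
        h = self.pool1(h)                # [batch, out_size]
        return self.dense(h)
```

A larger kernel size, or several parallel convolutions, is usually worth trying for the much longer description field.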
###Code from tqdm import tnrange def iterate_minibatches(data, batch_size=32, max_len=None, max_batches=None, shuffle=True, verbose=True): indices = np.arange(len(data)) if shuffle: indices = np.random.permutation(indices) if max_batches is not None: indices = indices[: batch_size * max_batches] irange = tnrange if verbose else range for start in irange(0, len(indices), batch_size): yield generate_batch(data.iloc[indices[start : start + batch_size]], max_len=max_len) num_epochs = 100 max_len = 100 batch_size = 32 batches_per_epoch = 100 for epoch_i in range(num_epochs): print("Training:") train_loss = train_mae = train_batches = 0 model.train(True) for batch in iterate_minibatches(data_train, max_batches=batches_per_epoch): title_ix = Variable(torch.LongTensor(batch["Title"])) desc_ix = Variable(torch.LongTensor(batch["FullDescription"])) cat_features = Variable(torch.FloatTensor(batch["Categorical"])) reference = Variable(torch.FloatTensor(batch[target_column])) prediction = model(title_ix, desc_ix, cat_features) loss = compute_loss(reference, prediction) loss.backward() opt.step() opt.zero_grad() train_loss += loss.data.numpy()[0] train_mae += compute_mae(reference, prediction).data.numpy()[0] train_batches += 1 print("\tLoss:\t%.5f" % (train_loss / train_batches)) print("\tMAE:\t%.5f" % (train_mae / train_batches)) print('\n\n') print("Validation:") val_loss = val_mae = val_batches = 0 model.train(False) for batch in iterate_minibatches(data_val, shuffle=False): title_ix = Variable(torch.LongTensor(batch["Title"]), volatile=True) desc_ix = Variable(torch.LongTensor(batch["FullDescription"]), volatile=True) cat_features = Variable(torch.FloatTensor(batch["Categorical"]), volatile=True) reference = Variable(torch.FloatTensor(batch[target_column]), volatile=True) prediction = model(title_ix, desc_ix, cat_features) loss = compute_loss(reference, prediction) val_loss += loss.data.numpy()[0] val_mae += compute_mae(reference, prediction).data.numpy()[0] val_batches += 1 print("\tLoss:\t%.5f" % (val_loss / val_batches)) print("\tMAE:\t%.5f" % (val_mae / val_batches)) print('\n\n') print("Final eval:") val_loss = val_mae = val_batches = 0 for batch in iterate_minibatches(data_val, shuffle=False): title_ix = Variable(torch.LongTensor(batch["Title"]), volatile=True) desc_ix = Variable(torch.LongTensor(batch["FullDescription"]), volatile=True) cat_features = Variable(torch.FloatTensor(batch["Categorical"]), volatile=True) reference = Variable(torch.FloatTensor(batch[target_column]), volatile=True) prediction = model(title_ix, desc_ix, cat_features) loss = compute_loss(reference, prediction) val_loss += loss.data.numpy()[0] val_mae += compute_mae(reference, prediction).data.numpy()[0] val_batches += 1 print("\tLoss:\t%.5f" % (val_loss / val_batches)) print("\tMAE:\t%.5f" % (val_mae / val_batches)) print('\n\n') ###Output _____no_output_____
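A small caveat about the template loop above: `volatile=True` and reading scalars via `loss.data.numpy()[0]` are idioms from PyTorch 0.3 and earlier. On PyTorch 0.4 and later, the validation and final-eval passes would roughly take the following shape instead (with the counters initialized as in the cell above):

```python
model.train(False)
with torch.no_grad():                      # replaces volatile=True
    for batch in iterate_minibatches(data_val, shuffle=False):
        title_ix = torch.LongTensor(batch["Title"])
        desc_ix = torch.LongTensor(batch["FullDescription"])
        cat_features = torch.FloatTensor(batch["Categorical"])
        reference = torch.FloatTensor(batch[target_column])
        prediction = model(title_ix, desc_ix, cat_features)
        val_loss += compute_loss(reference, prediction).item()   # .item() replaces .data.numpy()[0]
        val_mae += compute_mae(reference, prediction).item()
        val_batches += 1
```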
train_fasttext_model_with_biome_text2.ipynb
###Markdown Installing *biome.text* ###Code !pip install -U pip !pip install -U biome-text exit(0) ###Output Collecting pip [?25l Downloading https://files.pythonhosted.org/packages/fe/ef/60d7ba03b5c442309ef42e7d69959f73aacccd0d86008362a681c4698e83/pip-21.0.1-py3-none-any.whl (1.5MB)  |████████████████████████████████| 1.5MB 6.8MB/s [?25hInstalling collected packages: pip Found existing installation: pip 19.3.1 Uninstalling pip-19.3.1: Successfully uninstalled pip-19.3.1 Successfully installed pip-21.0.1 Collecting biome-text Downloading biome_text-2.0.0-py3-none-any.whl (1.8 MB)  |████████████████████████████████| 1.8 MB 6.9 MB/s [?25hCollecting s3fs~=0.4.0 Downloading s3fs-0.4.2-py3-none-any.whl (19 kB) Collecting mlflow~=1.9.0 Downloading mlflow-1.9.1-py3-none-any.whl (11.9 MB)  |████████████████████████████████| 11.9 MB 13.2 MB/s [?25hCollecting xlrd~=1.2.0 Downloading xlrd-1.2.0-py2.py3-none-any.whl (103 kB)  |████████████████████████████████| 103 kB 53.8 MB/s [?25hCollecting gevent~=20.9.0 Downloading gevent-20.9.0-cp36-cp36m-manylinux2010_x86_64.whl (5.3 MB)  |████████████████████████████████| 5.3 MB 51.0 MB/s [?25hRequirement already satisfied: flask~=1.1.2 in /usr/local/lib/python3.6/dist-packages (from biome-text) (1.1.2) Collecting spacy~=2.3.0 Downloading spacy-2.3.5-cp36-cp36m-manylinux2014_x86_64.whl (10.4 MB)  |████████████████████████████████| 10.4 MB 50.3 MB/s [?25hCollecting ray[tune]~=1.0.0 Downloading ray-1.0.1.post1-cp36-cp36m-manylinux1_x86_64.whl (23.1 MB)  |████████████████████████████████| 23.1 MB 55.0 MB/s [?25hCollecting flask-cors~=3.0.8 Downloading Flask_Cors-3.0.10-py2.py3-none-any.whl (14 kB) Collecting ipywidgets~=7.5.1 Downloading ipywidgets-7.5.1-py2.py3-none-any.whl (121 kB)  |████████████████████████████████| 121 kB 34.8 MB/s [?25hCollecting flatdict~=4.0.0 Downloading flatdict-4.0.1.tar.gz (8.3 kB) Collecting beautifulsoup4~=4.9.0 Downloading beautifulsoup4-4.9.3-py3-none-any.whl (115 kB)  |████████████████████████████████| 115 kB 40.4 MB/s [?25hCollecting captum~=0.2.0 Downloading captum-0.2.0-py3-none-any.whl (1.4 MB)  |████████████████████████████████| 1.4 MB 42.4 MB/s [?25hRequirement already satisfied: click~=7.1.0 in /usr/local/lib/python3.6/dist-packages (from biome-text) (7.1.2) Collecting cachey~=0.2.0 Downloading cachey-0.2.1-py3-none-any.whl (6.4 kB) Collecting distributed~=2.17.0 Downloading distributed-2.17.0-py3-none-any.whl (638 kB)  |████████████████████████████████| 638 kB 46.8 MB/s [?25hCollecting tqdm>=4.49.0 Downloading tqdm-4.56.2-py2.py3-none-any.whl (72 kB)  |████████████████████████████████| 72 kB 953 kB/s [?25hCollecting fastapi~=0.55.0 Downloading fastapi-0.55.1-py3-none-any.whl (48 kB)  |████████████████████████████████| 48 kB 4.8 MB/s [?25hCollecting datasets~=1.1.2 Downloading datasets-1.1.3-py3-none-any.whl (153 kB)  |████████████████████████████████| 153 kB 55.2 MB/s [?25hCollecting allennlp~=1.3.0 Downloading allennlp-1.3.0-py3-none-any.whl (506 kB)  |████████████████████████████████| 506 kB 47.8 MB/s [?25hRequirement already satisfied: pandas~=1.1.0 in /usr/local/lib/python3.6/dist-packages (from biome-text) (1.1.5) Collecting lxml~=4.5.0 Downloading lxml-4.5.2-cp36-cp36m-manylinux1_x86_64.whl (5.5 MB)  |████████████████████████████████| 5.5 MB 44.2 MB/s [?25hCollecting uvicorn~=0.11.0 Downloading uvicorn-0.11.8-py3-none-any.whl (43 kB)  |████████████████████████████████| 43 kB 1.4 MB/s [?25hCollecting elasticsearch<7.5.0,>=6.8.0 Downloading elasticsearch-7.1.0-py2.py3-none-any.whl (83 kB)  
|████████████████████████████████| 83 kB 1.2 MB/s [?25hRequirement already satisfied: scipy in /usr/local/lib/python3.6/dist-packages (from allennlp~=1.3.0->biome-text) (1.4.1) Collecting overrides==3.1.0 Downloading overrides-3.1.0.tar.gz (11 kB) Collecting tensorboardX>=1.2 Downloading tensorboardX-2.1-py2.py3-none-any.whl (308 kB)  |████████████████████████████████| 308 kB 50.2 MB/s [?25hRequirement already satisfied: pytest in /usr/local/lib/python3.6/dist-packages (from allennlp~=1.3.0->biome-text) (3.6.4) Collecting transformers<4.1,>=4.0 Downloading transformers-4.0.1-py3-none-any.whl (1.4 MB)  |████████████████████████████████| 1.4 MB 44.8 MB/s [?25hRequirement already satisfied: torch<1.8.0,>=1.6.0 in /usr/local/lib/python3.6/dist-packages (from allennlp~=1.3.0->biome-text) (1.7.0+cu101) Collecting jsonpickle Downloading jsonpickle-2.0.0-py2.py3-none-any.whl (37 kB) Collecting boto3<2.0,>=1.14 Downloading boto3-1.17.8-py2.py3-none-any.whl (130 kB)  |████████████████████████████████| 130 kB 54.0 MB/s [?25hRequirement already satisfied: filelock<3.1,>=3.0 in /usr/local/lib/python3.6/dist-packages (from allennlp~=1.3.0->biome-text) (3.0.12) Collecting sentencepiece Downloading sentencepiece-0.1.95-cp36-cp36m-manylinux2014_x86_64.whl (1.2 MB)  |████████████████████████████████| 1.2 MB 31.4 MB/s [?25hCollecting jsonnet>=0.10.0 Downloading jsonnet-0.17.0.tar.gz (259 kB)  |████████████████████████████████| 259 kB 53.2 MB/s [?25hRequirement already satisfied: dataclasses in /usr/local/lib/python3.6/dist-packages (from allennlp~=1.3.0->biome-text) (0.8) Requirement already satisfied: nltk in /usr/local/lib/python3.6/dist-packages (from allennlp~=1.3.0->biome-text) (3.2.5) Requirement already satisfied: h5py in /usr/local/lib/python3.6/dist-packages (from allennlp~=1.3.0->biome-text) (2.10.0) Requirement already satisfied: requests>=2.18 in /usr/local/lib/python3.6/dist-packages (from allennlp~=1.3.0->biome-text) (2.23.0) Requirement already satisfied: scikit-learn in /usr/local/lib/python3.6/dist-packages (from allennlp~=1.3.0->biome-text) (0.22.2.post1) Requirement already satisfied: numpy in /usr/local/lib/python3.6/dist-packages (from allennlp~=1.3.0->biome-text) (1.19.5) Collecting soupsieve>1.2 Downloading soupsieve-2.2-py3-none-any.whl (33 kB) Collecting jmespath<1.0.0,>=0.7.1 Downloading jmespath-0.10.0-py2.py3-none-any.whl (24 kB) Collecting botocore<1.21.0,>=1.20.8 Downloading botocore-1.20.8-py2.py3-none-any.whl (7.2 MB)  |████████████████████████████████| 7.2 MB 47.4 MB/s [?25hCollecting s3transfer<0.4.0,>=0.3.0 Downloading s3transfer-0.3.4-py2.py3-none-any.whl (69 kB)  |████████████████████████████████| 69 kB 6.4 MB/s [?25hCollecting urllib3<1.27,>=1.25.4 Downloading urllib3-1.26.3-py2.py3-none-any.whl (137 kB)  |████████████████████████████████| 137 kB 54.0 MB/s [?25hRequirement already satisfied: python-dateutil<3.0.0,>=2.1 in /usr/local/lib/python3.6/dist-packages (from botocore<1.21.0,>=1.20.8->boto3<2.0,>=1.14->allennlp~=1.3.0->biome-text) (2.8.1) Requirement already satisfied: heapdict in /usr/local/lib/python3.6/dist-packages (from cachey~=0.2.0->biome-text) (1.0.1) Requirement already satisfied: matplotlib in /usr/local/lib/python3.6/dist-packages (from captum~=0.2.0->biome-text) (3.2.2) Requirement already satisfied: multiprocess in /usr/local/lib/python3.6/dist-packages (from datasets~=1.1.2->biome-text) (0.70.11.1) Collecting pyarrow>=0.17.1 Downloading pyarrow-3.0.0-cp36-cp36m-manylinux2014_x86_64.whl (20.7 MB)  |████████████████████████████████| 20.7 MB 1.4 MB/s 
[?25hCollecting xxhash Downloading xxhash-2.0.0-cp36-cp36m-manylinux2010_x86_64.whl (242 kB)  |████████████████████████████████| 242 kB 54.5 MB/s [?25hCollecting tqdm>=4.49.0 Downloading tqdm-4.49.0-py2.py3-none-any.whl (69 kB)  |████████████████████████████████| 69 kB 6.6 MB/s [?25hRequirement already satisfied: dill in /usr/local/lib/python3.6/dist-packages (from datasets~=1.1.2->biome-text) (0.3.3) Requirement already satisfied: tornado>=5 in /usr/local/lib/python3.6/dist-packages (from distributed~=2.17.0->biome-text) (5.1.1) Requirement already satisfied: zict>=0.1.3 in /usr/local/lib/python3.6/dist-packages (from distributed~=2.17.0->biome-text) (2.0.0) Requirement already satisfied: pyyaml in /usr/local/lib/python3.6/dist-packages (from distributed~=2.17.0->biome-text) (3.13) Requirement already satisfied: sortedcontainers!=2.0.0,!=2.0.1 in /usr/local/lib/python3.6/dist-packages (from distributed~=2.17.0->biome-text) (2.3.0) Requirement already satisfied: dask>=2.9.0 in /usr/local/lib/python3.6/dist-packages (from distributed~=2.17.0->biome-text) (2.12.0) Requirement already satisfied: setuptools in /usr/local/lib/python3.6/dist-packages (from distributed~=2.17.0->biome-text) (53.0.0) Requirement already satisfied: cloudpickle>=1.3.0 in /usr/local/lib/python3.6/dist-packages (from distributed~=2.17.0->biome-text) (1.3.0) Requirement already satisfied: tblib>=1.6.0 in /usr/local/lib/python3.6/dist-packages (from distributed~=2.17.0->biome-text) (1.7.0) Requirement already satisfied: psutil>=5.0 in /usr/local/lib/python3.6/dist-packages (from distributed~=2.17.0->biome-text) (5.4.8) Collecting contextvars Downloading contextvars-2.4.tar.gz (9.6 kB) Requirement already satisfied: toolz>=0.8.2 in /usr/local/lib/python3.6/dist-packages (from distributed~=2.17.0->biome-text) (0.11.1) Requirement already satisfied: msgpack>=0.6.0 in /usr/local/lib/python3.6/dist-packages (from distributed~=2.17.0->biome-text) (1.0.2) Collecting starlette==0.13.2 Downloading starlette-0.13.2-py3-none-any.whl (59 kB)  |████████████████████████████████| 59 kB 5.6 MB/s [?25hCollecting pydantic<2.0.0,>=0.32.2 Downloading pydantic-1.7.3-cp36-cp36m-manylinux2014_x86_64.whl (9.2 MB)  |████████████████████████████████| 9.2 MB 41.4 MB/s [?25hRequirement already satisfied: itsdangerous>=0.24 in /usr/local/lib/python3.6/dist-packages (from flask~=1.1.2->biome-text) (1.1.0) Requirement already satisfied: Jinja2>=2.10.1 in /usr/local/lib/python3.6/dist-packages (from flask~=1.1.2->biome-text) (2.11.3) Requirement already satisfied: Werkzeug>=0.15 in /usr/local/lib/python3.6/dist-packages (from flask~=1.1.2->biome-text) (1.0.1) Requirement already satisfied: Six in /usr/local/lib/python3.6/dist-packages (from flask-cors~=3.0.8->biome-text) (1.15.0) Collecting zope.interface Downloading zope.interface-5.2.0-cp36-cp36m-manylinux2010_x86_64.whl (236 kB)  |████████████████████████████████| 236 kB 53.2 MB/s [?25hCollecting greenlet>=0.4.17 Downloading greenlet-1.0.0-cp36-cp36m-manylinux2010_x86_64.whl (156 kB)  |████████████████████████████████| 156 kB 54.4 MB/s [?25hCollecting zope.event Downloading zope.event-4.5.0-py2.py3-none-any.whl (6.8 kB) Requirement already satisfied: traitlets>=4.3.1 in /usr/local/lib/python3.6/dist-packages (from ipywidgets~=7.5.1->biome-text) (4.3.3) Requirement already satisfied: nbformat>=4.2.0 in /usr/local/lib/python3.6/dist-packages (from ipywidgets~=7.5.1->biome-text) (5.1.2) Requirement already satisfied: ipykernel>=4.5.1 in /usr/local/lib/python3.6/dist-packages (from 
ipywidgets~=7.5.1->biome-text) (4.10.1) Requirement already satisfied: widgetsnbextension~=3.5.0 in /usr/local/lib/python3.6/dist-packages (from ipywidgets~=7.5.1->biome-text) (3.5.1) Requirement already satisfied: ipython>=4.0.0 in /usr/local/lib/python3.6/dist-packages (from ipywidgets~=7.5.1->biome-text) (5.5.0) Requirement already satisfied: jupyter-client in /usr/local/lib/python3.6/dist-packages (from ipykernel>=4.5.1->ipywidgets~=7.5.1->biome-text) (5.3.5) Requirement already satisfied: pygments in /usr/local/lib/python3.6/dist-packages (from ipython>=4.0.0->ipywidgets~=7.5.1->biome-text) (2.6.1) Requirement already satisfied: decorator in /usr/local/lib/python3.6/dist-packages (from ipython>=4.0.0->ipywidgets~=7.5.1->biome-text) (4.4.2) Requirement already satisfied: simplegeneric>0.8 in /usr/local/lib/python3.6/dist-packages (from ipython>=4.0.0->ipywidgets~=7.5.1->biome-text) (0.8.1) Requirement already satisfied: pexpect in /usr/local/lib/python3.6/dist-packages (from ipython>=4.0.0->ipywidgets~=7.5.1->biome-text) (4.8.0) Requirement already satisfied: prompt-toolkit<2.0.0,>=1.0.4 in /usr/local/lib/python3.6/dist-packages (from ipython>=4.0.0->ipywidgets~=7.5.1->biome-text) (1.0.18) Requirement already satisfied: pickleshare in /usr/local/lib/python3.6/dist-packages (from ipython>=4.0.0->ipywidgets~=7.5.1->biome-text) (0.7.5) Requirement already satisfied: MarkupSafe>=0.23 in /usr/local/lib/python3.6/dist-packages (from Jinja2>=2.10.1->flask~=1.1.2->biome-text) (1.1.1) Collecting gorilla Downloading gorilla-0.3.0-py2.py3-none-any.whl (11 kB) Requirement already satisfied: entrypoints in /usr/local/lib/python3.6/dist-packages (from mlflow~=1.9.0->biome-text) (0.3) Requirement already satisfied: sqlparse in /usr/local/lib/python3.6/dist-packages (from mlflow~=1.9.0->biome-text) (0.4.1) Collecting azure-storage-blob>=12.0 Downloading azure_storage_blob-12.7.1-py2.py3-none-any.whl (339 kB)  |████████████████████████████████| 339 kB 54.5 MB/s [?25hRequirement already satisfied: protobuf>=3.6.0 in /usr/local/lib/python3.6/dist-packages (from mlflow~=1.9.0->biome-text) (3.12.4) Collecting alembic Downloading alembic-1.5.4.tar.gz (1.1 MB)  |████████████████████████████████| 1.1 MB 35.9 MB/s [?25hCollecting gunicorn Downloading gunicorn-20.0.4-py2.py3-none-any.whl (77 kB)  |████████████████████████████████| 77 kB 5.3 MB/s [?25hCollecting gitpython>=2.1.0 Downloading GitPython-3.1.13-py3-none-any.whl (159 kB)  |████████████████████████████████| 159 kB 53.4 MB/s [?25hCollecting sqlalchemy<=1.3.13 Downloading SQLAlchemy-1.3.13.tar.gz (6.0 MB)  |████████████████████████████████| 6.0 MB 43.0 MB/s [?25hCollecting querystring-parser Downloading querystring_parser-1.2.4-py2.py3-none-any.whl (7.9 kB) Collecting prometheus-flask-exporter Downloading prometheus_flask_exporter-0.18.1.tar.gz (21 kB) Collecting databricks-cli>=0.8.7 Downloading databricks-cli-0.14.1.tar.gz (54 kB)  |████████████████████████████████| 54 kB 2.3 MB/s [?25hCollecting docker>=4.0.0 Downloading docker-4.4.2-py2.py3-none-any.whl (146 kB)  |████████████████████████████████| 146 kB 46.3 MB/s [?25hCollecting azure-core<2.0.0,>=1.10.0 Downloading azure_core-1.11.0-py2.py3-none-any.whl (127 kB)  |████████████████████████████████| 127 kB 53.0 MB/s [?25hCollecting msrest>=0.6.18 Downloading msrest-0.6.21-py2.py3-none-any.whl (85 kB)  |████████████████████████████████| 85 kB 3.6 MB/s [?25hCollecting cryptography>=2.1.4 Downloading cryptography-3.4.5-cp36-abi3-manylinux2014_x86_64.whl (3.2 MB)  |████████████████████████████████| 3.2 
MB 42.6 MB/s [?25hRequirement already satisfied: cffi>=1.12 in /usr/local/lib/python3.6/dist-packages (from cryptography>=2.1.4->azure-storage-blob>=12.0->mlflow~=1.9.0->biome-text) (1.14.4) Requirement already satisfied: pycparser in /usr/local/lib/python3.6/dist-packages (from cffi>=1.12->cryptography>=2.1.4->azure-storage-blob>=12.0->mlflow~=1.9.0->biome-text) (2.20) Requirement already satisfied: tabulate>=0.7.7 in /usr/local/lib/python3.6/dist-packages (from databricks-cli>=0.8.7->mlflow~=1.9.0->biome-text) (0.8.7) Collecting websocket-client>=0.32.0 Downloading websocket_client-0.57.0-py2.py3-none-any.whl (200 kB)  |████████████████████████████████| 200 kB 54.9 MB/s [?25hCollecting gitdb<5,>=4.0.1 Downloading gitdb-4.0.5-py3-none-any.whl (63 kB)  |████████████████████████████████| 63 kB 1.6 MB/s [?25hCollecting smmap<4,>=3.0.1 Downloading smmap-3.0.5-py2.py3-none-any.whl (25 kB) Requirement already satisfied: requests-oauthlib>=0.5.0 in /usr/local/lib/python3.6/dist-packages (from msrest>=0.6.18->azure-storage-blob>=12.0->mlflow~=1.9.0->biome-text) (1.3.0) Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.6/dist-packages (from msrest>=0.6.18->azure-storage-blob>=12.0->mlflow~=1.9.0->biome-text) (2020.12.5) Collecting isodate>=0.6.0 Downloading isodate-0.6.0-py2.py3-none-any.whl (45 kB)  |████████████████████████████████| 45 kB 2.5 MB/s [?25hRequirement already satisfied: ipython-genutils in /usr/local/lib/python3.6/dist-packages (from nbformat>=4.2.0->ipywidgets~=7.5.1->biome-text) (0.2.0) Requirement already satisfied: jupyter-core in /usr/local/lib/python3.6/dist-packages (from nbformat>=4.2.0->ipywidgets~=7.5.1->biome-text) (4.7.1) Requirement already satisfied: jsonschema!=2.5.0,>=2.4 in /usr/local/lib/python3.6/dist-packages (from nbformat>=4.2.0->ipywidgets~=7.5.1->biome-text) (2.6.0) Requirement already satisfied: pytz>=2017.2 in /usr/local/lib/python3.6/dist-packages (from pandas~=1.1.0->biome-text) (2018.9) Requirement already satisfied: wcwidth in /usr/local/lib/python3.6/dist-packages (from prompt-toolkit<2.0.0,>=1.0.4->ipython>=4.0.0->ipywidgets~=7.5.1->biome-text) (0.2.5) Requirement already satisfied: prometheus-client>=0.7.1 in /usr/local/lib/python3.6/dist-packages (from ray[tune]~=1.0.0->biome-text) (0.9.0) Collecting colorful Downloading colorful-0.5.4-py2.py3-none-any.whl (201 kB)  |████████████████████████████████| 201 kB 46.6 MB/s [?25hCollecting gpustat Downloading gpustat-0.6.0.tar.gz (78 kB)  |████████████████████████████████| 78 kB 4.5 MB/s [?25hRequirement already satisfied: google in /usr/local/lib/python3.6/dist-packages (from ray[tune]~=1.0.0->biome-text) (2.0.3) Requirement already satisfied: grpcio>=1.28.1 in /usr/local/lib/python3.6/dist-packages (from ray[tune]~=1.0.0->biome-text) (1.32.0) Collecting aiohttp-cors Downloading aiohttp_cors-0.7.0-py3-none-any.whl (27 kB) Collecting redis<3.5.0,>=3.3.2 Downloading redis-3.4.1-py2.py3-none-any.whl (71 kB)  |████████████████████████████████| 71 kB 6.3 MB/s [?25hCollecting opencensus Downloading opencensus-0.7.12-py2.py3-none-any.whl (127 kB)  |████████████████████████████████| 127 kB 48.0 MB/s [?25hCollecting colorama Downloading colorama-0.4.4-py2.py3-none-any.whl (16 kB) Collecting py-spy>=0.2.0 Downloading py_spy-0.3.4-py2.py3-none-manylinux1_x86_64.whl (3.2 MB)  |████████████████████████████████| 3.2 MB 34.7 MB/s [?25hCollecting aioredis Downloading aioredis-1.3.1-py3-none-any.whl (65 kB)  |████████████████████████████████| 65 kB 2.4 MB/s [?25hCollecting aiohttp Downloading 
aiohttp-3.7.3-cp36-cp36m-manylinux2014_x86_64.whl (1.3 MB)  |████████████████████████████████| 1.3 MB 31.5 MB/s [?25hCollecting urllib3<1.27,>=1.25.4 Downloading urllib3-1.25.11-py2.py3-none-any.whl (127 kB)  |████████████████████████████████| 127 kB 50.1 MB/s [?25hRequirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.6/dist-packages (from requests>=2.18->allennlp~=1.3.0->biome-text) (3.0.4) Requirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.6/dist-packages (from requests>=2.18->allennlp~=1.3.0->biome-text) (2.10) Requirement already satisfied: oauthlib>=3.0.0 in /usr/local/lib/python3.6/dist-packages (from requests-oauthlib>=0.5.0->msrest>=0.6.18->azure-storage-blob>=12.0->mlflow~=1.9.0->biome-text) (3.1.0) Collecting fsspec>=0.6.0 Downloading fsspec-0.8.5-py3-none-any.whl (98 kB)  |████████████████████████████████| 98 kB 5.4 MB/s [?25hRequirement already satisfied: wasabi<1.1.0,>=0.4.0 in /usr/local/lib/python3.6/dist-packages (from spacy~=2.3.0->biome-text) (0.8.2) Requirement already satisfied: plac<1.2.0,>=0.9.6 in /usr/local/lib/python3.6/dist-packages (from spacy~=2.3.0->biome-text) (1.1.3) Requirement already satisfied: cymem<2.1.0,>=2.0.2 in /usr/local/lib/python3.6/dist-packages (from spacy~=2.3.0->biome-text) (2.0.5) Requirement already satisfied: murmurhash<1.1.0,>=0.28.0 in /usr/local/lib/python3.6/dist-packages (from spacy~=2.3.0->biome-text) (1.0.5) Collecting thinc<7.5.0,>=7.4.1 Downloading thinc-7.4.5-cp36-cp36m-manylinux2014_x86_64.whl (1.1 MB)  |████████████████████████████████| 1.1 MB 42.0 MB/s [?25hRequirement already satisfied: blis<0.8.0,>=0.4.0 in /usr/local/lib/python3.6/dist-packages (from spacy~=2.3.0->biome-text) (0.4.1) Requirement already satisfied: srsly<1.1.0,>=1.0.2 in /usr/local/lib/python3.6/dist-packages (from spacy~=2.3.0->biome-text) (1.0.5) Requirement already satisfied: preshed<3.1.0,>=3.0.2 in /usr/local/lib/python3.6/dist-packages (from spacy~=2.3.0->biome-text) (3.0.5) Requirement already satisfied: catalogue<1.1.0,>=0.0.7 in /usr/local/lib/python3.6/dist-packages (from spacy~=2.3.0->biome-text) (1.0.0) Requirement already satisfied: importlib-metadata>=0.20 in /usr/local/lib/python3.6/dist-packages (from catalogue<1.1.0,>=0.0.7->spacy~=2.3.0->biome-text) (3.4.0) Requirement already satisfied: zipp>=0.5 in /usr/local/lib/python3.6/dist-packages (from importlib-metadata>=0.20->catalogue<1.1.0,>=0.0.7->spacy~=2.3.0->biome-text) (3.4.0) Requirement already satisfied: typing-extensions>=3.6.4 in /usr/local/lib/python3.6/dist-packages (from importlib-metadata>=0.20->catalogue<1.1.0,>=0.0.7->spacy~=2.3.0->biome-text) (3.7.4.3) Requirement already satisfied: future in /usr/local/lib/python3.6/dist-packages (from torch<1.8.0,>=1.6.0->allennlp~=1.3.0->biome-text) (0.16.0) Collecting sacremoses Downloading sacremoses-0.0.43.tar.gz (883 kB)  |████████████████████████████████| 883 kB 47.8 MB/s [?25hRequirement already satisfied: packaging in /usr/local/lib/python3.6/dist-packages (from transformers<4.1,>=4.0->allennlp~=1.3.0->biome-text) (20.9) Requirement already satisfied: regex!=2019.12.17 in /usr/local/lib/python3.6/dist-packages (from transformers<4.1,>=4.0->allennlp~=1.3.0->biome-text) (2019.12.20) Collecting tokenizers==0.9.4 Downloading tokenizers-0.9.4-cp36-cp36m-manylinux2010_x86_64.whl (2.9 MB)  |████████████████████████████████| 2.9 MB 42.1 MB/s [?25hCollecting uvloop>=0.14.0 Downloading uvloop-0.15.1.tar.gz (2.1 MB)  |████████████████████████████████| 2.1 MB 37.6 MB/s WARNING: Discarding 
https://files.pythonhosted.org/packages/94/98/9dc814f391b2293ecc790b9752e005296c69c3694fd9975b6cb77c448135/uvloop-0.15.1.tar.gz#sha256=7846828112bfb49abc5fdfc47d0e4dfd7402115c9fde3c14c31818cfbeeb63dc (from https://pypi.org/simple/uvloop/). Command errored out with exit status 1: python setup.py egg_info Check the logs for full command output. [?25h Downloading uvloop-0.15.0.tar.gz (2.1 MB)  |████████████████████████████████| 2.1 MB 35.2 MB/s WARNING: Discarding https://files.pythonhosted.org/packages/df/fb/0e1b479ac5502f3d4531a2fc3f046312616f1ad020c686da353c2ff3bbc6/uvloop-0.15.0.tar.gz#sha256=1a503d5b49da6e3dd5607d6e533a5315b1caedbf629901807c65a23a09cad065 (from https://pypi.org/simple/uvloop/). Command errored out with exit status 1: python setup.py egg_info Check the logs for full command output. [?25h Downloading uvloop-0.14.0-cp36-cp36m-manylinux2010_x86_64.whl (3.9 MB)  |████████████████████████████████| 3.9 MB 37.7 MB/s [?25hCollecting h11<0.10,>=0.8 Downloading h11-0.9.0-py2.py3-none-any.whl (53 kB)  |████████████████████████████████| 53 kB 2.0 MB/s [?25hCollecting httptools==0.1.* Downloading httptools-0.1.1-cp36-cp36m-manylinux1_x86_64.whl (216 kB)  |████████████████████████████████| 216 kB 56.2 MB/s [?25hCollecting websockets==8.* Downloading websockets-8.1-cp36-cp36m-manylinux2010_x86_64.whl (78 kB)  |████████████████████████████████| 78 kB 6.9 MB/s [?25hRequirement already satisfied: notebook>=4.4.1 in /usr/local/lib/python3.6/dist-packages (from widgetsnbextension~=3.5.0->ipywidgets~=7.5.1->biome-text) (5.3.1) Requirement already satisfied: terminado>=0.8.1 in /usr/local/lib/python3.6/dist-packages (from notebook>=4.4.1->widgetsnbextension~=3.5.0->ipywidgets~=7.5.1->biome-text) (0.9.2) Requirement already satisfied: nbconvert in /usr/local/lib/python3.6/dist-packages (from notebook>=4.4.1->widgetsnbextension~=3.5.0->ipywidgets~=7.5.1->biome-text) (5.6.1) Requirement already satisfied: Send2Trash in /usr/local/lib/python3.6/dist-packages (from notebook>=4.4.1->widgetsnbextension~=3.5.0->ipywidgets~=7.5.1->biome-text) (1.5.0) Requirement already satisfied: pyzmq>=13 in /usr/local/lib/python3.6/dist-packages (from jupyter-client->ipykernel>=4.5.1->ipywidgets~=7.5.1->biome-text) (22.0.2) Requirement already satisfied: ptyprocess in /usr/local/lib/python3.6/dist-packages (from terminado>=0.8.1->notebook>=4.4.1->widgetsnbextension~=3.5.0->ipywidgets~=7.5.1->biome-text) (0.7.0) Collecting yarl<2.0,>=1.0 Downloading yarl-1.6.3-cp36-cp36m-manylinux2014_x86_64.whl (293 kB)  |████████████████████████████████| 293 kB 55.5 MB/s [?25hCollecting idna-ssl>=1.0 Downloading idna-ssl-1.1.0.tar.gz (3.4 kB) Collecting multidict<7.0,>=4.5 Downloading multidict-5.1.0-cp36-cp36m-manylinux2014_x86_64.whl (141 kB)  |████████████████████████████████| 141 kB 49.6 MB/s [?25hRequirement already satisfied: attrs>=17.3.0 in /usr/local/lib/python3.6/dist-packages (from aiohttp->ray[tune]~=1.0.0->biome-text) (20.3.0) Collecting async-timeout<4.0,>=3.0 Downloading async_timeout-3.0.1-py3-none-any.whl (8.2 kB) Collecting hiredis Downloading hiredis-1.1.0-cp36-cp36m-manylinux2010_x86_64.whl (61 kB)  |████████████████████████████████| 61 kB 7.4 MB/s [?25hCollecting Mako Downloading Mako-1.1.4.tar.gz (479 kB)  |████████████████████████████████| 479 kB 46.4 MB/s [?25hCollecting python-editor>=0.3 Downloading python_editor-1.0.4-py3-none-any.whl (4.9 kB) Collecting immutables>=0.9 Downloading immutables-0.15-cp36-cp36m-manylinux1_x86_64.whl (100 kB)  |████████████████████████████████| 100 kB 10.1 MB/s 
[?25hRequirement already satisfied: nvidia-ml-py3>=7.352.0 in /usr/local/lib/python3.6/dist-packages (from gpustat->ray[tune]~=1.0.0->biome-text) (7.352.0) Collecting blessings>=1.6 Downloading blessings-1.7-py3-none-any.whl (18 kB) Requirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib->captum~=0.2.0->biome-text) (2.4.7) Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.6/dist-packages (from matplotlib->captum~=0.2.0->biome-text) (0.10.0) Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib->captum~=0.2.0->biome-text) (1.3.1) Requirement already satisfied: pandocfilters>=1.4.1 in /usr/local/lib/python3.6/dist-packages (from nbconvert->notebook>=4.4.1->widgetsnbextension~=3.5.0->ipywidgets~=7.5.1->biome-text) (1.4.3) Requirement already satisfied: defusedxml in /usr/local/lib/python3.6/dist-packages (from nbconvert->notebook>=4.4.1->widgetsnbextension~=3.5.0->ipywidgets~=7.5.1->biome-text) (0.6.0) Requirement already satisfied: testpath in /usr/local/lib/python3.6/dist-packages (from nbconvert->notebook>=4.4.1->widgetsnbextension~=3.5.0->ipywidgets~=7.5.1->biome-text) (0.4.4) Requirement already satisfied: bleach in /usr/local/lib/python3.6/dist-packages (from nbconvert->notebook>=4.4.1->widgetsnbextension~=3.5.0->ipywidgets~=7.5.1->biome-text) (3.3.0) Requirement already satisfied: mistune<2,>=0.8.1 in /usr/local/lib/python3.6/dist-packages (from nbconvert->notebook>=4.4.1->widgetsnbextension~=3.5.0->ipywidgets~=7.5.1->biome-text) (0.8.4) Requirement already satisfied: webencodings in /usr/local/lib/python3.6/dist-packages (from bleach->nbconvert->notebook>=4.4.1->widgetsnbextension~=3.5.0->ipywidgets~=7.5.1->biome-text) (0.5.1) Requirement already satisfied: google-api-core<2.0.0,>=1.0.0 in /usr/local/lib/python3.6/dist-packages (from opencensus->ray[tune]~=1.0.0->biome-text) (1.16.0) Collecting opencensus-context==0.1.2 Downloading opencensus_context-0.1.2-py2.py3-none-any.whl (4.4 kB) Requirement already satisfied: google-auth<2.0dev,>=0.4.0 in /usr/local/lib/python3.6/dist-packages (from google-api-core<2.0.0,>=1.0.0->opencensus->ray[tune]~=1.0.0->biome-text) (1.25.0) Requirement already satisfied: googleapis-common-protos<2.0dev,>=1.6.0 in /usr/local/lib/python3.6/dist-packages (from google-api-core<2.0.0,>=1.0.0->opencensus->ray[tune]~=1.0.0->biome-text) (1.52.0) Requirement already satisfied: cachetools<5.0,>=2.0.0 in /usr/local/lib/python3.6/dist-packages (from google-auth<2.0dev,>=0.4.0->google-api-core<2.0.0,>=1.0.0->opencensus->ray[tune]~=1.0.0->biome-text) (4.2.1) Requirement already satisfied: pyasn1-modules>=0.2.1 in /usr/local/lib/python3.6/dist-packages (from google-auth<2.0dev,>=0.4.0->google-api-core<2.0.0,>=1.0.0->opencensus->ray[tune]~=1.0.0->biome-text) (0.2.8) Requirement already satisfied: rsa<5,>=3.1.4 in /usr/local/lib/python3.6/dist-packages (from google-auth<2.0dev,>=0.4.0->google-api-core<2.0.0,>=1.0.0->opencensus->ray[tune]~=1.0.0->biome-text) (4.7) Requirement already satisfied: pyasn1<0.5.0,>=0.4.6 in /usr/local/lib/python3.6/dist-packages (from pyasn1-modules>=0.2.1->google-auth<2.0dev,>=0.4.0->google-api-core<2.0.0,>=1.0.0->opencensus->ray[tune]~=1.0.0->biome-text) (0.4.8) Requirement already satisfied: py>=1.5.0 in /usr/local/lib/python3.6/dist-packages (from pytest->allennlp~=1.3.0->biome-text) (1.10.0) Requirement already satisfied: more-itertools>=4.0.0 in /usr/local/lib/python3.6/dist-packages (from 
pytest->allennlp~=1.3.0->biome-text) (8.7.0) Requirement already satisfied: pluggy<0.8,>=0.5 in /usr/local/lib/python3.6/dist-packages (from pytest->allennlp~=1.3.0->biome-text) (0.7.1) Requirement already satisfied: atomicwrites>=1.0 in /usr/local/lib/python3.6/dist-packages (from pytest->allennlp~=1.3.0->biome-text) (1.4.0) Requirement already satisfied: joblib in /usr/local/lib/python3.6/dist-packages (from sacremoses->transformers<4.1,>=4.0->allennlp~=1.3.0->biome-text) (1.0.0) Building wheels for collected packages: overrides, flatdict, jsonnet, databricks-cli, sqlalchemy, idna-ssl, alembic, contextvars, gpustat, Mako, prometheus-flask-exporter, sacremoses Building wheel for overrides (setup.py) ... [?25l[?25hdone Created wheel for overrides: filename=overrides-3.1.0-py3-none-any.whl size=10175 sha256=3a7538da9d27180b4bc6ecb7c33268bbe2969dc082d3d0fa1fe72b6a49cd6450 Stored in directory: /root/.cache/pip/wheels/e6/3b/34/ae59fc8d35c37f01099425ab73599e45e9b9b599a7ccc2c45f Building wheel for flatdict (setup.py) ... [?25l[?25hdone Created wheel for flatdict: filename=flatdict-4.0.1-py3-none-any.whl size=6931 sha256=0dd28f38f87788a9a9d3ea59ca745c3a235d3f75b47263e6161e7f52262f5d52 Stored in directory: /root/.cache/pip/wheels/8c/0b/50/16de22650bd1c28ef15caa790b1b61847aef6c1b57fcb5fe3a Building wheel for jsonnet (setup.py) ... [?25l[?25hdone Created wheel for jsonnet: filename=jsonnet-0.17.0-cp36-cp36m-linux_x86_64.whl size=3387860 sha256=3181e05711275244d03128d7e730c8c5a5ebb33c45a7da5906d650de4e630eda Stored in directory: /root/.cache/pip/wheels/7e/ad/c9/995f065cc9d62d8a6e39ed458050b2c085429afc651e62bf73 Building wheel for databricks-cli (setup.py) ... [?25l[?25hdone Created wheel for databricks-cli: filename=databricks_cli-0.14.1-py3-none-any.whl size=100577 sha256=277971e85d745198bc798c03343c24964b27493c823ad055d23214db96c9105c Stored in directory: /root/.cache/pip/wheels/36/2c/2e/09bcfa0bdb7005b96213ff0967f9ab2697b8d07196d1edeeeb Building wheel for sqlalchemy (setup.py) ... [?25l[?25hdone Created wheel for sqlalchemy: filename=SQLAlchemy-1.3.13-cp36-cp36m-linux_x86_64.whl size=1217230 sha256=39fe531ca08cc2387ccf0c02554e9841467ac60bed7744d4cf232fbcaef3933c Stored in directory: /root/.cache/pip/wheels/28/3e/f9/8eca04781258bb6956ffba37e4e6e6951e5b3a16d4494b91cb Building wheel for idna-ssl (setup.py) ... [?25l[?25hdone Created wheel for idna-ssl: filename=idna_ssl-1.1.0-py3-none-any.whl size=3160 sha256=69ae8b8ec4f392f494b6e9a2ef3a0d2c53109ad046bea891edfe99778ae9da05 Stored in directory: /root/.cache/pip/wheels/6a/f5/9c/f8331a854f7a8739cf0e74c13854e4dd7b1af11b04fe1dde13 Building wheel for alembic (setup.py) ... [?25l[?25hdone Created wheel for alembic: filename=alembic-1.5.4-py2.py3-none-any.whl size=156314 sha256=5aa2c51659c3db05860f1345ac45f4c243c51a3ca2371b1185edcced42551605 Stored in directory: /root/.cache/pip/wheels/97/72/33/933963de9d1c3bb66e4442a9fd0726e0082ea361a87d7ec815 Building wheel for contextvars (setup.py) ... [?25l[?25hdone Created wheel for contextvars: filename=contextvars-2.4-py3-none-any.whl size=7665 sha256=a82def709b5036d5ca919d2f4e8ea663fd151a60e431b2a4e8c2cc3c18edf576 Stored in directory: /root/.cache/pip/wheels/41/11/53/911724983aa48deb94792432e14e518447212dd6c5477d49d3 Building wheel for gpustat (setup.py) ... 
[?25l[?25hdone Created wheel for gpustat: filename=gpustat-0.6.0-py3-none-any.whl size=12617 sha256=7c0953fb444bb64b127ffe14c86aaa870c1e4d9c4939c7a8107e906f03f5176d Stored in directory: /root/.cache/pip/wheels/50/da/35/fe2cfb3bc47822299f5e124a599d56f00b30ec0b328db16b9f Building wheel for Mako (setup.py) ... [?25l[?25hdone Created wheel for Mako: filename=Mako-1.1.4-py2.py3-none-any.whl size=75675 sha256=757f954d5cef850071c2e008ca25bde8bc22dd2772d249b8483f47a20142cb6b Stored in directory: /root/.cache/pip/wheels/3c/ee/c2/9651c6b977f9d2a1bb766970d190f71213e2ca47b36d8dc488 Building wheel for prometheus-flask-exporter (setup.py) ... [?25l[?25hdone Created wheel for prometheus-flask-exporter: filename=prometheus_flask_exporter-0.18.1-py3-none-any.whl size=17158 sha256=bb2b7a6eedc2f4ddd48f52c9b46f1b40c4e58ee5ff005ce45266091aaac381a9 Stored in directory: /root/.cache/pip/wheels/fe/70/a9/22af6c68f513e58533fb7fd649f4cc5e2a27c24422a41a1bfa Building wheel for sacremoses (setup.py) ... [?25l[?25hdone Created wheel for sacremoses: filename=sacremoses-0.0.43-py3-none-any.whl size=893258 sha256=252cc74ff717686c582999e941ced9c01322b71bf10d4938c9fad68e408c53fe Stored in directory: /root/.cache/pip/wheels/49/25/98/cdea9c79b2d9a22ccc59540b1784b67f06b633378e97f58da2 Successfully built overrides flatdict jsonnet databricks-cli sqlalchemy idna-ssl alembic contextvars gpustat Mako prometheus-flask-exporter sacremoses Installing collected packages: urllib3, multidict, immutables, yarl, soupsieve, jmespath, idna-ssl, contextvars, async-timeout, tqdm, smmap, opencensus-context, isodate, hiredis, botocore, blessings, beautifulsoup4, aiohttp, websocket-client, tokenizers, thinc, sqlalchemy, sacremoses, s3transfer, redis, python-editor, py-spy, opencensus, msrest, Mako, gpustat, gitdb, cryptography, colorful, colorama, azure-core, aioredis, aiohttp-cors, zope.interface, zope.event, xxhash, websockets, uvloop, transformers, tensorboardX, starlette, spacy, sentencepiece, ray, querystring-parser, pydantic, pyarrow, prometheus-flask-exporter, overrides, jsonpickle, jsonnet, httptools, h11, gunicorn, greenlet, gorilla, gitpython, fsspec, docker, databricks-cli, boto3, azure-storage-blob, alembic, xlrd, uvicorn, s3fs, mlflow, lxml, ipywidgets, gevent, flatdict, flask-cors, fastapi, elasticsearch, distributed, datasets, captum, cachey, allennlp, biome-text Attempting uninstall: urllib3 Found existing installation: urllib3 1.24.3 Uninstalling urllib3-1.24.3: Successfully uninstalled urllib3-1.24.3 Attempting uninstall: tqdm Found existing installation: tqdm 4.41.1 Uninstalling tqdm-4.41.1: Successfully uninstalled tqdm-4.41.1 Attempting uninstall: beautifulsoup4 Found existing installation: beautifulsoup4 4.6.3 Uninstalling beautifulsoup4-4.6.3: Successfully uninstalled beautifulsoup4-4.6.3 Attempting uninstall: thinc Found existing installation: thinc 7.4.0 Uninstalling thinc-7.4.0: Successfully uninstalled thinc-7.4.0 Attempting uninstall: sqlalchemy Found existing installation: SQLAlchemy 1.3.23 Uninstalling SQLAlchemy-1.3.23: Successfully uninstalled SQLAlchemy-1.3.23 Attempting uninstall: spacy Found existing installation: spacy 2.2.4 Uninstalling spacy-2.2.4: Successfully uninstalled spacy-2.2.4 Attempting uninstall: pyarrow Found existing installation: pyarrow 0.14.1 Uninstalling pyarrow-0.14.1: Successfully uninstalled pyarrow-0.14.1 Attempting uninstall: xlrd Found existing installation: xlrd 1.1.0 Uninstalling xlrd-1.1.0: Successfully uninstalled xlrd-1.1.0 Attempting uninstall: lxml Found existing installation: 
lxml 4.2.6 Uninstalling lxml-4.2.6: Successfully uninstalled lxml-4.2.6 Attempting uninstall: ipywidgets Found existing installation: ipywidgets 7.6.3 Uninstalling ipywidgets-7.6.3: Successfully uninstalled ipywidgets-7.6.3 Attempting uninstall: distributed Found existing installation: distributed 1.25.3 Uninstalling distributed-1.25.3: Successfully uninstalled distributed-1.25.3 ERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts. datascience 0.10.6 requires folium==0.2.1, but you have folium 0.8.3 which is incompatible. Successfully installed Mako-1.1.4 aiohttp-3.7.3 aiohttp-cors-0.7.0 aioredis-1.3.1 alembic-1.5.4 allennlp-1.3.0 async-timeout-3.0.1 azure-core-1.11.0 azure-storage-blob-12.7.1 beautifulsoup4-4.9.3 biome-text-2.0.0 blessings-1.7 boto3-1.17.8 botocore-1.20.8 cachey-0.2.1 captum-0.2.0 colorama-0.4.4 colorful-0.5.4 contextvars-2.4 cryptography-3.4.5 databricks-cli-0.14.1 datasets-1.1.3 distributed-2.17.0 docker-4.4.2 elasticsearch-7.1.0 fastapi-0.55.1 flask-cors-3.0.10 flatdict-4.0.1 fsspec-0.8.5 gevent-20.9.0 gitdb-4.0.5 gitpython-3.1.13 gorilla-0.3.0 gpustat-0.6.0 greenlet-1.0.0 gunicorn-20.0.4 h11-0.9.0 hiredis-1.1.0 httptools-0.1.1 idna-ssl-1.1.0 immutables-0.15 ipywidgets-7.5.1 isodate-0.6.0 jmespath-0.10.0 jsonnet-0.17.0 jsonpickle-2.0.0 lxml-4.5.2 mlflow-1.9.1 msrest-0.6.21 multidict-5.1.0 opencensus-0.7.12 opencensus-context-0.1.2 overrides-3.1.0 prometheus-flask-exporter-0.18.1 py-spy-0.3.4 pyarrow-3.0.0 pydantic-1.7.3 python-editor-1.0.4 querystring-parser-1.2.4 ray-1.0.1.post1 redis-3.4.1 s3fs-0.4.2 s3transfer-0.3.4 sacremoses-0.0.43 sentencepiece-0.1.95 smmap-3.0.5 soupsieve-2.2 spacy-2.3.5 sqlalchemy-1.3.13 starlette-0.13.2 tensorboardX-2.1 thinc-7.4.5 tokenizers-0.9.4 tqdm-4.49.0 transformers-4.0.1 urllib3-1.25.11 uvicorn-0.11.8 uvloop-0.14.0 websocket-client-0.57.0 websockets-8.1 xlrd-1.2.0 xxhash-2.0.0 yarl-1.6.3 zope.event-4.5.0 zope.interface-5.2.0 ###Markdown Downloading the dataHere we download the preprocessed data and the pre trained word vectors. ###Code !git clone https://github.com/recognai/cantemist-ner/ !wget https://dl.fbaipublicfiles.com/fasttext/vectors-crawl/cc.es.300.vec.gz ###Output --2021-02-16 09:26:27-- https://dl.fbaipublicfiles.com/fasttext/vectors-crawl/cc.es.300.vec.gz Resolving dl.fbaipublicfiles.com (dl.fbaipublicfiles.com)... 104.22.75.142, 104.22.74.142, 172.67.9.4, ... Connecting to dl.fbaipublicfiles.com (dl.fbaipublicfiles.com)|104.22.75.142|:443... connected. HTTP request sent, awaiting response... 200 OK Length: 1285580896 (1.2G) [binary/octet-stream] Saving to: ‘cc.es.300.vec.gz’ cc.es.300.vec.gz 100%[===================>] 1.20G 11.8MB/s in 1m 45s 2021-02-16 09:28:13 (11.7 MB/s) - ‘cc.es.300.vec.gz’ saved [1285580896/1285580896] ###Markdown Training the system ###Code from biome.text import Pipeline, Dataset, VocabularyConfiguration, TrainerConfiguration ###Output _____no_output_____ ###Markdown Loading the datasetsThese datasets were created using the *NER_dataprep.ipynb* and *NER_dataprep_test.ipynb* notebooks in our [cantemist-ner](https://github.com/recognai/cantemist-ner) repo. 
###Code train_ds = Dataset.from_json("cantemist-ner/data/NER/train_full.json") # This test dataset was released after the competition ended: test_ds = Dataset.from_json("cantemist-ner/data/NER/gold_test.json") # for biome.text v2 we need to rename the "labels" column containing the NER tags train_ds.rename_column_("labels", "tags") test_ds.rename_column_("labels", "tags") ###Output _____no_output_____ ###Markdown Defining the pipelineThe architecture and hyerparameters were found by means of a random search HPO. ###Code pipeline_config = { 'name': 'candemist-ner-first-hpo', 'features': { 'word': { 'embedding_dim': 300, 'lowercase_tokens': True, 'trainable': True, 'weights_file': "/content/cc.es.300.vec.gz" }, 'char': { 'embedding_dim': 64, 'lowercase_characters': True, 'encoder': { 'bidirectional': True, 'hidden_size': 128, 'num_layers': 1, 'type': 'gru' }, 'dropout': 0.16517050992687604 }, }, 'encoder': { 'bidirectional': True, 'hidden_size': 512, 'input_size': 556, 'num_layers': 1, 'type': 'lstm' }, 'head': { 'dropout': 0.2689579604286324, 'labels': ['MORFOLOGIA_NEOPLASIA'], 'type': 'TokenClassification' }, } pl = Pipeline.from_config(pipeline_config) ###Output ✔ Download and installation successful You can now load the model via spacy.load('en_core_web_sm') ✔ Linking successful /usr/local/lib/python3.6/dist-packages/en_core_web_sm --> /usr/local/lib/python3.6/dist-packages/spacy/data/en You can now load the model via spacy.load('en') ###Markdown Defining the vocabularyOnly include words that appear at least two times in the `train_ds` dataset. ###Code vocab_config = VocabularyConfiguration( datasets=[train_ds], min_count={"word": 2} ) ###Output _____no_output_____ ###Markdown Defining the trainerThe hyerparameters were found by means of a random search HPO. ###Code trainer_dict={ "optimizer": { "type": "adamw", "lr": 0.0038931174186587806, "weight_decay": 0.01, }, "learning_rate_scheduler": { "type": "step", "step_size":2, "gamma":0.1 }, "batch_size": 32, "num_epochs": 4, "validation_metric": "+f1-measure-overall", "patience":3 } trainer_config = TrainerConfiguration(**trainer_dict) ###Output _____no_output_____ ###Markdown Training the pipeline ###Code pl.train( output="output", training=train_ds, test=test_ds, trainer=trainer_config, vocab_config=vocab_config, ) ###Output 2021-02-16 09:40:03,935 - allennlp.data.vocabulary - INFO - Fitting token dictionary from dataset. building vocab: 0it [00:00, ?it/s] ###Markdown Appendix: BETO model with an f1 score of 0.861In a quick follow-up work, we experimented with the pretrained "*BETO: Spanish Bert*" model, used by the winner (f1: 0.87) and the runner-up (f1: 0.869) of the Cantemist NER competition, and were able to achieve similar results. 
###Code pipeline_dict = { "name": "", "features": { "transformers": { "model_name": "dccuchile/bert-base-spanish-wwm-cased", #"mismatched": True, # False for wordpiece tokens, True for word tokens "trainable": True, "max_length": 512 }, }, "head": { "type": "TokenClassification", "labels": ["MORFOLOGIA_NEOPLASIA"] } } pl = Pipeline.from_config(pipeline_dict) # The hyperparameters were found by means of a random search HPO trainer_dict = { "optimizer": { "type": "adamw", "lr": 3e-5 }, "batch_size": 8, "patience": 2, "num_epochs": 8, "validation_metric" : "+f1-measure-overall", "learning_rate_scheduler": { "type": "linear_with_warmup", "num_epochs": 8, "num_steps_per_epoch": 4485, "warmup_steps": 100, } } trainer = TrainerConfiguration(**trainer_dict) pl.train( output="output_beto", training=train_ds, test=test_ds, trainer=trainer, ) ###Output _____no_output_____
notebooks/MILP_for_experimental_design.ipynb
###Markdown Design of Complex Neuroscience Experiments using Mixed Integer Linear ProgrammingThis notebook provides example implementations for the four case studies in the article ["Design of Complex Experiments using Mixed Integer Linear Programming"](https://arxiv.org/abs/2012.02361). Each case study aims to demonstrate how Mixed Integer Linear Programming (MILP) can be used to address real-world experimental design challenges. This notebook reproduces the figures related to each case study. Full details of each case study can be found in the main article. The article also contains an introduction to the mathematical foundations of MILP. Code Organization The `milp` PackageFor clarity, some code has been has been omitted from this notebook and placed in the `milp` python package. This includes code related to solving each program and visualizing each program solution. The `milp` package can be found in the root directory of this repository and can be installed using the included `setup.py` file. The `milp` WorkflowEach example is solved by creating mixed integer linear programs. Each mixed integer linear program is constructed using the same basic workflow: 1. Initialize Program`milp.program.initialize_program()` initializes a dictionary that represents a mixed integer linear program. This dictionary will be updated to include the program's variables, linear constraints, and cost function terms.```pythonimport milpprogram = milp.program.initialize_program()``` 2. Add Variables`milp.program.add_variable()` adds a variable to a program. The name given to each variable will be used to specify its constraints and cost function coefficients. Whether a varaible is real, integer, or boolean can be specified by setting `variable_type` to `float`, `int`, or `bool`.```pythonmilp.program.add_variable( program=program, name='a', variable_type=bool,)milp.program.add_variable( program=program, name='b', variable_type=int, lower_bound=float('-inf'), upper_bound=float('inf'),)milp.program.add_variable( program=program, name='c', variable_type=float,)``` 3. Add Linear Constraints`milp.program.add_constraint()` adds a linear constraint to program. An equality constraint can be specified by using arguments `A_eq` and `b_eq`. The value of `A_eq` should be a dictionary whose keys are variable names and whose values are coefficients of those variables. Inequality constraints can be specified by using `A_lt` and `b_lt`. ```pythonmilp.program.add_constraint( program=program, A_eq={'a': 1, 'b': 1}, b_eq=0,)milp.program.add_constraint( program=program, A_lt={'a': 1, 'b': 2, 'c': 3}, b_lt=3,)``` 4. Specify Cost Function`milp.program.add_cost_terms` is used to specify the program's cost function. The value of `coefficients` should be a dictionary whose keys are variable names and whose values are coefficients of those variables.```pythonmilp.program.add_cost_terms( program=program, coefficients={'a': -1, 'b': 1},)``` 5. Solve Program`milp.program.solve_program()` solves the program using an external library (by default uses [Gurobi](https://www.gurobi.com)). It does this by 1) converting the program into the library-compatible representation, 2) running its solver, and 3) returning the solution. 
This solution specifies an experimental design that optimally conforms to the design constraints of the program.```pythonsolution = milp.program.solve_MILP(program=program)print(solution['variables'])``````> {'a': True, 'b': -1, 'c': 0.0}```Taken together these code snippets have represented and solved the following simple program:$$\min{b - a} \\a + b = 0 \\a + 2 b + 3 c \leq 3 \\\\a \in \mathbb{B}, b \in \mathbb{Z}, c \in \mathbb{R}^+$$Some additional functions are used to implement the design patterns discussed in **Section 3.2** of the main article. For further details refer to the source code and docstrings of `milp`. Software Licensing- All code in this repository, including the code in the `milp` package and the code in this Jupyter notebook, is licensed under a BSD 2-clause license.- By default the `milp` package solves programs using the external library [Gurobi](https://www.gurobi.com). Gurobi offers free academic licenses, available [here](https://www.gurobi.com/downloads/end-user-license-agreement-academic/). Instructions for installing Gurobi can be found on the Gurobi website.- Alternative solvers can be used instead of Gurobi. The `milp` also contains an adapter for [CPLEX](https://www.ibm.com/analytics/cplex-optimizer), used by specifying `solve_MILP(..., solver='cplex')`. ###Code import random import matplotlib.pyplot as plt import numpy as np import scipy.stats import milp from milp.examples import balanced_grouping from milp.examples import stimulus_task_pairing from milp.examples import structured_sampling from milp.examples import trajectory_design # setup plot formatting %matplotlib inline milp.formatting.setup_plot_formatting() # initialize software license milp.program.initialize_license() ###Output Using license file /home/jlg/storm/bin/gurobi902/gurobi.lic Academic license - for non-commercial use only ###Markdown Example Problem 1: Balanced GroupingA common challenge when designing experiments is to distribute conditions across blocks or sessions in a balanced way. Perhaps a researcher requires that each block of the experiment must have a similar number of exemplars of some stimulus category. Perhaps a researcher must collect data on a large battery of tasks, and the task distribution should be similar across each scanning session. Perhaps a researcher needs to evenly distribute content across trials, blocks, and sessions simultaneously. All of these scenarios can be addressed by MILP in a similar manner. Design SpecificationsIn this first example we will cover the simple case of trying to balance the mean luminance of visual stimuli across scanning runs. Suppose a researcher is designing a vision study using 360 short videos. Each video is 20 seconds long and has its own mean luminance. For data collection, the videos must be grouped into 12 separate runs of 10 minutes each, with each video appearing in exactly one run. Importantly, the researcher plans to perform an analysis that requires all of the runs to have the same mean luminance. Although this may seem like a trivial requirement, it can be extremely challenging in practice. There are 10^367 possible groupings that split 360 items into 12 equally sized groups. This space is far too large to test every possible grouping. The solution space is also discrete, so gradient-based methods cannot be used. **Figure 2B** shows a synthetic distribution of luminances that we have created for the videos. MILP FormulationIt is straightforward to express this problem as a mixed integer linear program. 
For notation, let $V$ be the number of videos, $R$ be the number of runs, $v$ be the index over videos, and $r$ be the index over runs. The first step is to define the variables of the solution space. Introduce binary variables $X$ to represent possible pairings between runs and videos:> **VARIABLES**$$X \in \mathbb{B}^{V \times R} \\$$$$\begin{aligned}x_{v,r} &= 1 \rightarrow \text{ video } v \text{ is in run } r \\ &= 0 \rightarrow \text{ video }v \text{ is not in run } r\end{aligned}$$All of the desired grouping properties in this problem can be expressed as linear equalities over elements of $X$. To specify that each run must contain $V / R = 30$ videos, introduce a constraint for each run:> **CONSTRAINTS**$$\sum_v x_{v,r} = 30 \quad \forall \ r \in \{1, ..., R\}$$To specify that each video appears in exactly one run, introduce a constraint for each video:> **CONSTRAINTS**$$\sum_r x_{v,r} = 1 \quad \forall \ v \in \{1, ..., V\}$$Any value of $X$ that satisfies all of the above constraints is a feasible grouping that produces a valid experiment.A cost function can now be designed to select the experiment that best balances luminance. Assume that the mean luminance of each video is stored in a vector $f$. This vector is predetermined by the video dataset. Use a cost function that is the sum of absolute deviations between the global mean and the mean of each run.> **COST FUNCTION**$$\begin{aligned}\mu &= \frac{1}{V} \sum_{v} f_v \\&= \text{mean luminance across all videos} \\\mu_r &= \frac{R}{V} \sum_v f_v \ x_{v,r} \\&= \text{mean luminance within run } r \\\end{aligned}$$$$ $$$$\min \sum_r | \mu - \mu_r |$$ ###Code def create_group_balance_problem(I, G, F): """create program to solve group balance problem ## Parameters - I: int number of items - G: int number of groups - F: (n_features, I)-shaped array of features """ s = int(I / float(G)) if s * G != I: raise Exception('G does not evenly divide I') program = milp.program.initialize_program() # decision variables for i in range(I): for g in range(G): milp.program.add_variable( program=program, name='X_{i},{g}'.format(i=i, g=g), variable_type=bool, ) # each item should be placed in one group for i in range(I): coefficients = {'X_{i},{g}'.format(i=i, g=g): 1 for g in range(G)} milp.program.add_constraint(program, A_eq=coefficients, b_eq=1) # groups should have sizes s1, ..., sg for g in range(G): coefficients = {'X_{i},{g}'.format(i=i, g=g): 1 for i in range(I)} milp.program.add_constraint(program, A_eq=coefficients, b_eq=s) # mean group features for f in range(F.shape[0]): for g in range(G): m_name = 'm_{f},{g}'.format(f=f, g=g) milp.program.add_variable( program=program, name=m_name, variable_type=float, ) coefficients = { 'X_{i},{g}'.format(i=i, g=g): F[f, i] for i in range(I) } coefficients[m_name] = -s milp.program.add_constraint( program=program, A_eq=coefficients, b_eq=0, ) # global means global_means = [] for f in range(F.shape[0]): global_mean = (1 / float(I)) * sum(F[f, i] for i in range(I)) global_means.append(global_mean) milp.program.store_constant( program, 'global_feature_mean_' + str(f), global_means[-1], ) # balance terms in cost function for f in range(F.shape[0]): for g in range(G): coefficients = {'m_{f},{g}'.format(f=f, g=g): 1} milp.program.add_abs_cost_term( program=program, coefficients=coefficients, constant=-global_means[f], ) return program ###Output _____no_output_____ ###Markdown create program ###Code np.random.seed(0) I = 360 G = 12 n_f = 1 # power distribution F = np.zeros([n_f, I]) F[0, :] = 1 - 
np.random.power(3, size=(I)) F = (F * 1000) F_reduce = F.astype(int) program = create_group_balance_problem( I=I, G=G, F=F_reduce, ) ###Output _____no_output_____ ###Markdown solve program ###Code solution = milp.program.solve_MILP(program) X_solution = milp.program.get_solution_variable(solution, 'X') m_solution = milp.program.get_solution_variable(solution, 'm') global_feature_mean = milp.program.get_solution_constant( solution, 'global_feature_mean', ) print() print('X_solution', X_solution.shape) ###Output program size: - n_variables: 4344 - n_constraints: 408 - n_cost_function_terms: 12 X_solution (360, 12) ###Markdown find randomized solutions ###Code # n = 1e6 n = 1e8 X_solution = X_solution.T.copy().T balanced_grouping.set_comparison_context(F, X_solution, F.mean(1)) best_randomization = balanced_grouping.compute_randomization_solutions( F, n, processes=20, ) randomization_ds = best_randomization['randomization_ds'] ###Output _____no_output_____ ###Markdown visualize result ###Code balanced_grouping.plot_grouping_separate_plots( F, X_solution, randomization_ds, n=n, figsize=[6, 6], randomization_axis=[-50, 650, 0, n / 7.0], show_title=True, show_labels=True, show_legend=True, ) ###Output Figure 2B: ###Markdown How much better is MILP? ###Code X_groups = [np.nonzero(X_solution[:, g])[0] for g in range(G)] global_feature_means = F.mean(1) group_evaluation = balanced_grouping.evaluate_groups( X_groups, F, global_feature_means ) d_milp = group_evaluation['d_total'] ratio = best_randomization['d'] / d_milp print( 'the MILP solution has', ratio, 'times smaller error than the best randomized solution', ) ###Output the MILP solution has 116.23892083863578 times smaller error than the best randomized solution ###Markdown **Figure 2C** and **Figure 2D** show a solution to this program. As guaranteed by the MILP solver, this solution achieves the lowest cost function value of any X in the feasible set. **Figure 2C** shows the degree to which the mean luminance of each run deviates from the global mean luminance. This value is small compared to the overall range of luminances. **Figure 2D** compares the solution found using MILP to 108 solutions found using randomization. Each randomization solution is generated by simply shuffling the videos and then splitting into 12 sequential groups. The graph shows that the solution found using MILP is substantially more balanced than any solution that can be found by randomization. The best randomized solution is the one with the lowest total balance error. As shown in in Figure 2D, the best randomized solution still has a total balance error that is 116 times larger than the solution found using MILP. Example Problem 2: Stimulus-Task PairingAnother common neuroimaging challenge is designing top-down attention experiments. In these studies, the subject’s attention state is varied systematically to demonstrate the effect of attention on brain activity. If similar stimuli are presented during each attention condition, then attention is assumed to be the operative factor underlying any differences in brain activity.Visual attention experiments are typically limited to a small number of attention conditions. Each additional condition requires more data to be collected and also adds complexity to the overall design. Each attention condition in an experiment might also have its own idiosyncratic requirements. For example, some visual tasks (e.g. object identification) can only be performed on certain types of images (e.g. images containing objects). 
These experiments must be carefully balanced to ensure that the only meaningful difference across each condition is the subject's attentional state. In this case study, we demonstrate how MILP can address these challenges to create designs that have large numbers of attention conditions. Suppose a researcher is designing a visual attention experiment in a manner similar to (Clark et al. 1997), (O'Craven et al. 1999), or (Harel et al. 2014). In each of these studies, the subject’s attention state is varied independently from a visual stimulus to demonstrate the effect of attention on brain activity. These studies used 2, 3, and 6 attention states, respectively. For this example, suppose the researcher would like to significantly increase the number of attention conditions in an attempt to build a richer and more complete model of how attention affects brain activity. Design SpecificationsThe researcher would like the experiment to consist of many individual trials. During each trial, the subject will first be cued with a visual search target, such as an object category, a scene category, or a color. Then, an image will briefly flash. Finally, the subject will have a response period to indicate whether they detected the search target in the image. The researcher has allotted time for 2800 trials evenly split across 14 different search conditions, resulting in 200 trials per condition.The researcher has a stimulus dataset of 700 images. Each trial will use one of these images. Each of the images has been labeled along each of the 14 different search dimensions with one of three values. A “0” indicates that the image definitely does not contain the search target, a “1” indicates that the image might contain the search target, and a “2” indicates that the image definitely contains the search target. For this example we will create a synthetic dataset of image labels generated from a multinomial distribution where feature values [0, 1, 2] have probabilities [0.5, 0.25, 0.25].The main challenge that the researcher faces is deciding which images to use with each attention condition. There are three different types of balance that the researcher would like to impose. First, to reduce the effect of the subject memorizing the images, the researcher would like each image to appear in an equal number of trials throughout the experiment. This results in 4 trials per image (= 2800 trials / 700 images). Each image should also be paired with each attention condition no more than once. Second, the researcher would like to control for the effects of target detection (Guo et al. 2012; Çukur et al. 2013). To this end, the trials within each task should be evenly balanced across the 3 detection levels, meaning that ⅓ of trials should definitely contain the search target, ⅓ should ambiguously contain the search target, and ⅓ should definitely not contain the search target. Finally, to ensure that the stimulus feature distributions are similar across tasks, the researcher would like the feature distribution of each task to resemble the global feature distribution. More specifically, the mean value of each of the 14 features should be approximately equal across conditions. MILP FormulationThis problem can be seen as a variant of the previous balanced grouping problem in **Section 4.1** where the groups are now task conditions rather than runs. 
The main differences are that: 1) each group has its own unique constraints, 2) stimuli are allowed to appear in more than 1 group, and 3) multiple features are being balanced across groups.Use $i$ to index images, $t$ to index tasks, and $f$ to index stimulus features. The main variable of interest is the pairing of stimuli with tasks. Introduce binary variables $X$ to represent these pairings:> **VARIABLES**$$X \in \mathbb{B}^{700 \times 14} \\$$$$\begin{aligned}x_{i,t} &= 1 \rightarrow \text{ image } i \text{ is paired with task } t \\ &= 0 \rightarrow \text{ image } i \text{ is not paired with task } t\end{aligned}$$It is simple to constrain each task to have the same number of trials (2800 / 14 = 200)> **CONSTRAINTS**$$\sum_i x_{i,t} = 200 \quad \quad \quad \forall \ t \in \{ 1, ..., 14 \}$$It is also simple to require that all images appear an equal number of times throughout the experiment. For 2800 trials and 700 images, each image should be used 2800 / 700 = 4 times.> **CONSTRAINTS**$$\sum_t x_{i,t} = 4 \quad \quad \quad \forall \ i \in \{ 1, ..., 700 \}$$For each task, there should be an equal number of trials where the search target is present, ambiguous, or absent. Thus each of these three feature levels should have 200 × ⅓ = 66.67 trials. Since this is not an integer, constraints can be constructed using the integral floor and ceiling of this number. Let $S_{t,v}$ be the set of stimulus indices that have feature value $v$ for task $t$.> **CONSTRAINTS**$$\begin{aligned}\sum_{i \in S_{t,v}} x_{i,t} \ge 66 \quad \quad \quad & \forall \ t \in \{1, ..., 14\} \text{ and } v \in \{ 1, 2, 3 \} \\\sum_{i \in S_{t,v}} x_{i,t} \le 67 \quad \quad \quad & \forall \ t \in \{1, ..., 14\} \text{ and } v \in \{ 1, 2, 3 \}\end{aligned}$$The final part of the program is a cost function that promotes a similar stimulus feature distribution within each task. Use a matrix $L$ to refer to the feature values of each stimulus, where $L_{i,t}$ is the value of feature $t$ for image $i$. Also, let $μ_f$ be the global mean value of feature $f$ across all images. $L$ and $μ_f$ are constants predetermined by the image dataset. 
Use a cost function that minimizes the deviations between the global feature means $μ_f$ and the feature means of each task $m_{f,t}$.> **COST FUNCTION**$$\begin{aligned}m_{f,t} &= \frac{1}{200} \sum_i x_{i,t}\ L_{i,f} \\ &= \text{mean value of feature } f \text{ across trials of task } t\end{aligned}$$$$\\$$$$\min \sum_{f,t \atop {f \neq t}} | m_{f,t} - \mu_f |$$ ###Code def create_stimulus_condition_pairing_problem( n_trials, L, feature_probabilities, ): """create program to solve stimulus condition pairing problem ## Parameters - n_trials: int number of items - L: (n_features, n_images)-shaped array of stimulus tags - feature_probabilities: dict of probably for each detection level """ n_features, n_stimuli = L.shape n_tasks = n_features program = milp.program.initialize_program() # decision variables for i in range(n_stimuli): for g in range(n_tasks): milp.program.add_variable( program, 'X_{i},{g}'.format(i=i, g=g), variable_type=bool, ) trials_per_condition = int(n_trials / n_tasks) assert trials_per_condition * n_tasks == n_trials # constraint: match feature probabilities for g in range(n_tasks): for feature_value, feature_probability in feature_probabilities.items(): feature_count = feature_probability * trials_per_condition variables_of_feature_value = {} for i in range(n_stimuli): if L[g, i] == feature_value: variables_of_feature_value['X_{i},{g}'.format(i=i, g=g)] = 1 mode = 'ceil_floor' if mode == 'exact': assert feature_count == int(feature_count) milp.program.add_constraint( program, A_eq=variables_of_feature_value, b_eq=feature_count, ) elif mode == 'ceil_floor': ceil = int(np.ceil(feature_count)) floor = int(np.floor(feature_count)) milp.program.add_constraint( program, A_lt=variables_of_feature_value, b_lt=ceil, ) milp.program.add_constraint( program, A_lt={k: -v for k, v in variables_of_feature_value.items()}, b_lt=-floor, ) else: raise Exception(mode) # hard constrain equal usages per stimulus target_stimulus_repeats = (n_trials / float(n_stimuli)) milp.program.store_constant( program=program, name='target_stimulus_repeats', value=target_stimulus_repeats, ) for i in range(n_stimuli): coefficients = {} for g in range(n_tasks): coefficients['X_{i},{g}'.format(i=i, g=g)] = 1 milp.program.add_constraint( program, A_eq=coefficients, b_eq=target_stimulus_repeats, ) # for each task, balance the feature distributions to the population mean for g in range(n_tasks): for f in range(n_features): if g == f: continue coefficients = {} for i in range(n_stimuli): coefficients['X_{i},{g}'.format(i=i, g=g)] = L[f, i] milp.program.add_abs_cost_term( program, coefficients=coefficients, constant=-trials_per_condition * L[f, :].mean(), ) return program np.random.seed(0) n_tasks = 14 n_stimuli = 700 n_trials = 2800 F_feature_values = [0, 1, 2] F_feature_probabilities = [0.5, 0.25, 0.25] L = np.random.choice( F_feature_values, p=np.array(F_feature_probabilities), size=(n_tasks, n_stimuli), ) n_feature_values = len(F_feature_values) feature_probabilities = { F_feature_values[i]: 1.0 / n_feature_values for i in range(n_feature_values) } program = create_stimulus_condition_pairing_problem( n_trials=n_trials, L=L, feature_probabilities=feature_probabilities, ) ###Output _____no_output_____ ###Markdown solve program ###Code solution = milp.program.solve_MILP(program) X_solution = milp.program.get_solution_variable(solution, 'X') print() print('X_solution', X_solution.shape) ###Output program size: - n_variables: 9982 - n_constraints: 1148 - n_cost_function_terms: 182 X_solution (700, 14) ###Markdown 
visualize result ###Code print('MILP solution') stimulus_task_pairing.plot_as_separate_figures( X_solution, L, figsize=[6, 6], show_title=True, show_labels=True, ) ###Output MILP solution ###Markdown Example Problem 3: Structured Hierarchical SamplingAnother common neuroimaging design challenge is sampling stimuli from a highly structured space, such as the space of natural language. Such spaces are difficult to sample because samples must obey strict rules rather than being drawn from a simple probability distribution. Natural language stimuli cannot be generated by simply combining random words. For language to be intelligible, words must be jointly compatible in a meaningful way, obeying rules of grammar, syntax, and semantics. To sample from this type of space, one must be able to efficiently represent and navigate the rules of the space.In this example, suppose a researcher would like to generate natural language stimuli in the form of questions about concrete nouns. These questions will be used in an experiment that asks a large number of questions about a large number of concrete nouns, in a manner similar to (Sudre et al. 2012). In this previous study, the authors asked 20 questions about each of 60 nouns in approximately 1 hour of scanning time. In this example we will use a sparse sampling strategy to increase the scope of this experiment to 5x the number of nouns and 6x the number of questions while only using 2x the trials. We will also allow the researcher to specify rules about which nouns are semantically compatible with each question.Questions are formed by pairing a single concrete noun (e.g. a car, a phone, a flower) with a question template (e.g. How heavy is ? When was invented? What color is ?). Concrete nouns are organized into a semantic hierarchy (**Figure 4B**). Unlike the (Sudre et al. 2012) study, each question template is only applicable to nouns from a particular portion of this hierarchy (**Figure 4C**). For example, questions related to object affordances might only be compatible with nouns from the “Inanimate Object” portion of the tree. Questions related to social behavior might only be compatible with the “People” portion of the tree. The researcher would like to independently model the effects of questions and concrete nouns. To this end they will place constraints on how often each is sampled, and the manner in which they are allowed to combine. Design SpecificationsIn this example the researcher has allotted scanning time for 2400 trials. There are a total of 300 concrete nouns organized into a 15-group semantic hierarchy shown in **Figure 4B**. Each concrete noun exists in exactly one of the 10 leaf groups, and each leaf group contains exactly 30 concrete nouns. For each of the 15 example groups shown, the researcher has 8 question templates, for a total of 120 question templates. Each question template is applicable to some subset of the semantic hierarchy. Question templates related to a non-leaf group are compatible with any nouns for which that non-leaf group is an ancestor.The main challenge that the researcher faces is deciding which concrete nouns to pair with each question template. Each noun should be paired with each question template no more than 1 time, and all pairings should respect the compatibility constraints of the semantic hierarchy. Each of the 120 question templates should be used the same number of times across the 2400 trials. 
Because each noun is compatible with a different number of question templates, it is not possible to exactly balance the number of times each concrete noun is used. However, noun usage should still be balanced as much as possible. Finally, for question templates that are compatible with multiple leaf groups in the semantic hierarchy, the researcher would like to balance the number of times each template is paired with each compatible leaf group.

MILP Formulation

Index question templates with $t$, concrete nouns with $n$, and noun leaf groups with $g$. Store information about template-group compatibility in a matrix $C$

>$$C \in \mathbb{B} ^ {120 \times 10} \\$$$$\begin{aligned}c_{t, g} &= 1 \rightarrow \text{ question template } t \text{ compatible with group } g \\&= 0 \rightarrow \text{ question template } t \text{ not compatible with group } g\end{aligned}$$

Store information about noun-group membership in a matrix $M$

> $$M \in \mathbb{B} ^ {10 \times 300} \\$$$$\begin{aligned}m_{g, n} &= 1 \rightarrow \text{ noun } n \text{ is in group } g \\ &= 0 \rightarrow \text{ noun } n \text{ is not in group } g\end{aligned}$$

Matrices $C$ and $M$ are constants that are predetermined by the given semantic tree. The main variables to be decided are pairings between question templates and concrete nouns. Introduce variables $P$ to represent these pairings

> **VARIABLES** $$P \in \mathbb{B} ^ {120 \times 300} $$$$\begin{aligned}p_{t,n} &= 1 \rightarrow \text{ question } t \text{ is paired with noun } n \\ &= 0 \rightarrow \text{ question } t \text{ is not paired with noun } n\end{aligned}$$

Pairs $(t, n)$ for which $(CM)_{t,n} = 0$ are invalid pairings. The corresponding $p_{t,n}$ variables can be set to $0$ to reduce the size of the program. Each question template should be used 2400 / 120 = 20 times

> **CONSTRAINT**$$\sum_n p_{t,n} = 20 \quad \forall \ t \in \{1, ..., 120 \}$$

The next step is to balance the number of times that each question template is paired with each of its compatible noun groups. The number of times that template $t$ is paired with group $g$ is given by an element of the matrix product $(PM^T)_{t,g}$. Let $v_t$ be the number of noun groups that are compatible with template $t$. Since each question template is to be used 20 times, each question template $t$ should be used $(20 / v_t)$ times with each of its compatible noun groups. Since this quotient is not necessarily integral, $(PM^T)_{t,g}$ can be constrained to fall within its integral floor and ceiling.

> **CONSTRAINT**$$v_t = \sum_g c_{t,g} \\ \\$$$$\forall t \in \{ 1, ..., 120 \} \text{ and } g \text{ such that } c_{t,g} = 1 \\\begin{aligned}\quad \quad \quad \quad \quad \quad (PM^T)_{t,g} &\ge floor \left ( \frac{20}{v_t} \right ) \\\text{ and } \\\quad \quad \quad \quad \quad \quad (PM^T)_{t,g} &\leq ceil \left ( \frac{20}{v_t} \right )\end{aligned}$$

Finally, a cost function can be created to balance the number of times each concrete noun is used. There are many ways this can be achieved. The number of times each noun is used is given by $a_n$, defined in the cost function below. Suppose the researcher would like to avoid outliers, and thus wishes to minimize the range of noun occurrence counts.
As explained in the **Section 3**, this range can be minimized as:> **COST FUNCTION**$$\begin{aligned}a_n &= \sum_t p_{t,n} \\ &= \text{number of times noun } n \text{ is used}\end{aligned}$$$$ \\ $$$$ \\ \\ \min \left \{ \max_n(a_n) - \min_n(a_n) \right \}$$ ###Code def create_structured_hierarchical_sampling_problem( noun_semantic_tree, n_total_trials, n_question_templates, n_nouns_per_leaf_group, ): """create program to solve structured hierarchical sampling problem ## Parameters - noun_semantic_tree: dict of nested noun semantic tree - n_total_trials: int number of total trials - n_question_templates: int number of question templates - n_nouns_per_leaf_group: int number of nouns per leaf group """ n_appearances_per_question_template = int( n_total_trials / n_question_templates ) C, M = structured_sampling.get_coding_matrices( noun_semantic_tree=noun_semantic_tree, n_total_trials=n_total_trials, n_question_templates=n_question_templates, n_nouns_per_leaf_group=n_nouns_per_leaf_group, ) CM = C.dot(M) T, G = C.shape G, N = M.shape program = milp.program.initialize_program() # decision variables for t in range(T): for n in range(N): if CM[t, n]: milp.program.add_variable( program, 'P_{t},{n}'.format(t=t, n=n), variable_type=bool, ) # number of times each question template is used for t in range(T): A_eq = {} for variable in milp.program.get_variables(program, 'P', t, None): A_eq[variable] = 1 milp.program.add_constraint( program, A_eq=A_eq, b_eq=n_appearances_per_question_template, ) # minimize max(noun_usage) - min(noun_usage) milp.program.add_variable(program, 'maximum_noun_usage', int) milp.program.add_variable(program, 'minimum_noun_usage', int) for n in range(N): coefficients = {'maximum_noun_usage': -1} coefficients_negative = {'minimum_noun_usage': 1} for variable in milp.program.get_variables(program, 'P', None, n): coefficients[variable] = 1 coefficients_negative[variable] = -1 milp.program.add_constraint(program, A_lt=coefficients, b_lt=0) milp.program.add_constraint(program, A_lt=coefficients_negative, b_lt=0) milp.program.add_cost_terms( program=program, coefficients={'maximum_noun_usage': 1}, ) milp.program.add_cost_terms( program=program, coefficients={'minimum_noun_usage': -1}, ) # balance number of times each group is paired with each question for t in range(T): n_template_groups = C[t, :].sum() target_pairings_per_group = ( n_appearances_per_question_template / float(n_template_groups) ) for g in range(G): if C[t, g]: coefficients = {} for n in range(N): if M[g, n]: coefficients['P_{t},{n}'.format(t=t, n=n)] = 1 if np.isclose( target_pairings_per_group, int(target_pairings_per_group), ): milp.program.add_constraint( program, A_eq=coefficients, b_eq=target_pairings_per_group, ) else: ceil = np.ceil(target_pairings_per_group) floor = np.floor(target_pairings_per_group) milp.program.add_constraint( program, A_lt=coefficients, b_lt=ceil, ) milp.program.add_constraint( program, A_lt={k: -v for k, v in coefficients.items()}, b_lt=-floor, ) return program ###Output _____no_output_____ ###Markdown define parameters ###Code noun_semantic_tree = { 'Entities': { 'People': { 'Famous People': {}, 'People that the Subject Knows': {}, }, 'Animals': {}, 'Corporations': {}, }, 'Inanimate Objects': { 'Vehicles': {}, 'Handheld Tools': {}, 'Clothing Items': {}, }, 'Places': { 'Countries': {}, 'Buildings': { 'Famous Buildings': {}, 'Generic Buildings': {}, }, }, } n_total_trials = 2400 n_question_templates = 120 n_nouns_per_leaf_group = 30 C, M = structured_sampling.get_coding_matrices( 
noun_semantic_tree=noun_semantic_tree, n_total_trials=n_total_trials, n_question_templates=n_question_templates, n_nouns_per_leaf_group=n_nouns_per_leaf_group, ) program = create_structured_hierarchical_sampling_problem( noun_semantic_tree=noun_semantic_tree, n_total_trials=n_total_trials, n_question_templates=n_question_templates, n_nouns_per_leaf_group=n_nouns_per_leaf_group, ) ###Output _____no_output_____ ###Markdown solve program ###Code solution = milp.program.solve_MILP(program) P_solution = milp.program.get_solution_variable(solution, 'P') print() print('P_solution', P_solution.shape) CM = C.dot(M) P = milp.program.get_solution_variable(solution=solution, name='P') PMT = P.astype(int).dot(M.T) ###Output _____no_output_____ ###Markdown inspect solution ###Code structured_sampling.plot_solution_summary_as_separate_figures( C, M, P, figsize=[6, 6], show_title=True, show_labels=True, ) ###Output Figure 5A: ###Markdown Example Problem 4: Sequence Design For NavigationOur final case study will demonstrate how MILP can address challenges associated with designing navigation experiments. Navigation is a rapidly advancing field of neuroscience featuring studies that are increasingly rich and naturalistic (Spiers and Maguire 2006; Suthana et al. 2011; Nielson et al. 2015). As these studies grow more complex, so do the design constraints that must be integrated into experimental designs. Here we show how MILP is a natural fit for representing and optimizing the structure of such experiments.Suppose a researcher is designing a neuroimaging experiment where subjects must navigate a complex, naturalistic environment. Subjects will perform a “taxi driver” task where they are successively cued to drive to various locations on a map. Each time they reach a destination, a new destination cue will appear. Subjects will perform many of these trials throughout the course of the experiment.An important aspect of this design is the sequence of cued destinations. This sequence will determine the particular distribution of navigational phenomena that the subject encounters throughout the experiment (Hartley et al. 2003; Xu et al. 2010). In the simplest case, this sequence could be generated randomly. However, this misses an opportunity to control the conditions measured by the experiment. Optimal selection of this sequence might require special consideration of the particular map being used and the hypotheses being tested. Design SpecificationsFor this example, suppose a map has 25 possible destinations. The researcher would like to collect 80 trials per subject over the course of 40 minutes, resulting in a mean trial time of 30 seconds. To prevent memory effects related to the lengths of trials, the researcher would like path lengths of each trial to approximate an exponential distribution. To prevent memory effects related to repeatedly visiting locations, the researcher would like the number of times each location is visited to assume a geometric distribution. MILP formulationWe will formulate this as a graph traversal problem where each location is a node and each route between locations is edge. This is similar to the classic traveling salesman problem, where the goal is to find a sequence that both visits every node once and minimizes total distance traveled. However, the goal here is instead to find a sequence whose edge length distribution maximally conforms to the target exponential distribution. 
Another difference is that we would like to allow each destination to be visited more than once. We will use notation $N_i$ to refer to node $i$, and $E_{i,j}$ to refer to the edge that connects $N_i$ to $N_j$. For simplicity, we will first formulate the problem where each node is visited at most once. Also for simplicity, we will randomly choose nodes $N_I$ and $N_F$ to be the initial and final nodes in the sequence. Introduce a binary variable to track which edges are used in the sequence

> **VARIABLES**$$X \in \mathbb{B}^{25 \times 25} \\$$$$\begin{aligned}X_{i,j} &= 0 \text{ if edge } E_{i,j} \text{ is not used} \\ &= 1 \text{ if edge } E_{i,j} \text{ is used}\end{aligned}$$

Assume $X_{i,i} = 0$ for all $i$, so that self-edges are never used. We can constrain the number of edges in the sequence to equal the number of trials

> **CONSTRAINTS**$$\sum_{i,j} X_{i,j} = \text{number of trials}$$

The number of times the subject enters and leaves each destination are given by the sums

> $$\sum_i X_{i,j} = \text{ number of times entering } N_j$$$$\sum_j X_{i,j} = \text{ number of times leaving } N_i$$

For a well-formed sequence, $N_I$ should be left once, $N_F$ should be entered once

> **CONSTRAINTS**$$\begin{aligned}\sum_i X_{i, F} &= 1 \\\sum_j X_{F, j} &= 0 \\\sum_j X_{I, j} &= 1 \\\sum_i X_{i, I} &= 0 \\\end{aligned}$$

Other nodes should be entered and left an equal number of times

> **CONSTRAINTS**$$\sum_i X_{i,h} = \sum_j X_{h,j} \quad \quad \quad \quad \forall \ h \notin \{ I, F \}$$

To constrain trial length so that it is distributed exponentially, we will discretize the exponential distribution into a 10 bin histogram, as shown in **Figure 6C**. Each bin represents a specific range of trial lengths $R_b$ and has a target number of trials $T_b$ that should fall in that range. Let $L_{i,j}$ be the length of edge $E_{i,j}$. Let $A_b$ be the number of trials that fall within the range of bin $b$. Our cost function is then the deviation between the actual and target number of trials within each bin

> **COST FUNCTION**$$B_b = \left \{ (i,j) | L_{i,j} \in R_b \right \} \\$$$$A_b = \sum_{ (i,j) \in B_b} X_{i,j}$$$$\min \sum_b | T_b - A_b |$$

These constraints produce a sequence that is formed from $N_I$ to $N_F$. However, they also allow for the inclusion of "subtours", which are additional unconnected cyclic paths that exist alongside the main sequence. Since we want a single, acyclic sequence, we will utilize a common technique called "subtour elimination". Eliminating all subtours outright would require an intractably large number of constraints. Much more efficient is to iteratively solve a series of MILP programs, and successively add constraints until a solution free of subtours is found. Details of subtour elimination and iterative solving can be found in (Laporte and Nobert 1983). For each subtour detected in the intermediate solutions (a short sketch of subtour detection is given after this formulation), we will add a constraint

> **CONSTRAINT**$$S = \text{ the set of edges in subtour} \\$$$$\sum_{(i,j) \in S , i \neq j} X_{i, j} \leq |S| - 1$$

Finally, to allow each node to be visited multiple times, we will simply stack multiple copies of the original graph. Each node will be connected to all copies of all nodes other than itself. Any route through this augmented graph can be transformed into a route on the original graph by simply combining all copies of each node into a single node.
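For illustration, here is a minimal sketch of how subtour detection can work (a simplified stand-in, not the routine used inside the `milp.examples.trajectory_design` module). Given the edges selected in a candidate solution, we group them into connected components; every component that does not contain the start node is a subtour whose edges would receive a new elimination constraint in the next iteration.

```python
def find_subtours(edges, start_node):
    """Return the edge sets of components that do not contain start_node."""
    # build an undirected adjacency structure over the nodes that appear
    adjacency = {}
    for i, j in edges:
        adjacency.setdefault(i, set()).add(j)
        adjacency.setdefault(j, set()).add(i)

    seen, subtours = set(), []
    for root in adjacency:
        if root in seen:
            continue
        # depth-first search collects one connected component
        stack, component = [root], set()
        while stack:
            node = stack.pop()
            if node in component:
                continue
            component.add(node)
            stack.extend(adjacency[node] - component)
        seen |= component
        if start_node not in component:
            # these edges define one subtour elimination constraint
            subtours.append([(i, j) for (i, j) in edges if i in component])
    return subtours

# toy example: a main path 0 -> 1 -> 2 plus a disconnected cycle 3 -> 4 -> 3
print(find_subtours([(0, 1), (1, 2), (3, 4), (4, 3)], start_node=0))
```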
###Code np.random.seed(0) n_nodes = 25 x = np.random.rand(n_nodes) y = np.random.rand(n_nodes) distances = ((x - x[:, np.newaxis]) ** 2 + (y - y[:, np.newaxis]) ** 2) ** 0.5 t_per_distance = 2.0 t_mean_trial = 0.5 t_per_run = 10.0 n_runs = 4 n_trials = (n_runs * t_per_run) / float(t_mean_trial) # n_trials = 100 mean_repeats_per_node = n_trials / float(n_nodes) mean_trial_distance = t_mean_trial / t_per_distance print('n_trials:', n_trials) print('mean_repeats_per_node:', mean_repeats_per_node) print('mean_trial_distance:', mean_trial_distance) trajectory_design.plot_distance_distribution(distances) ###Output n_trials: 80.0 mean_repeats_per_node: 3.2 mean_trial_distance: 0.25 Figure 6D: ###Markdown repeats per node ###Code print('compute repeats per node') print() n_counts = 30 geometric_parameter = 1 / mean_repeats_per_node n_repeats = np.arange(n_counts) n_repeats_pmf = scipy.stats.geom.pmf(n_repeats, geometric_parameter) repeat_counts = trajectory_design.compute_repeats(n_counts, n_nodes, n_trials) visits_per_location = trajectory_design.compute_visits_per_location( repeat_counts ) print('mean', (n_repeats_pmf * n_repeats).sum()) print() print('count sum:', repeat_counts.sum()) print( 'weighted count sum:', (repeat_counts * np.arange(repeat_counts.shape[0])).sum(), ) trajectory_design.plot_node_repeats_pmf(n_repeats, n_repeats_pmf) plt.show() trajectory_design.plot_node_repeats_denormalized( n_repeats, n_repeats_pmf, n_nodes, ) plt.show() trajectory_design.plot_target_vs_actual_repeats_per_node( repeat_counts, n_repeats_pmf, n_nodes, ) plt.show() trajectory_design.plot_locations( x, y, colors=visits_per_location, title=visits_per_location, cmap='nipy_spectral', ) plt.show() ###Output compute repeats per node mean 3.199385156983983 count sum: 25 weighted count sum: 80 ###Markdown path length distribution ###Code n_bins = 10 distance_pdf = trajectory_design.compute_target_path_length_pdf( distances, mean_trial_distance, ) distance_pmf = trajectory_design.compute_target_path_length_pmf( n_bins, distances, distance_pdf, ) discrete_path_distribution = trajectory_design.compute_target_path_length_discrete( distance_pmf, n_trials, ) print('target mean:', mean_trial_distance) print('mean:', distance_pdf['scipy_distribution'].mean()) trajectory_design.plot_distance_pdf(distance_pdf) plt.show() trajectory_design.plot_distance_pmf(distance_pmf) plt.show() trajectory_design.plot_discrete_path_distribution(discrete_path_distribution) plt.show() ###Output program size: - n_variables: 20 - n_constraints: 21 - n_cost_function_terms: 10 target mean: 0.25 mean: 0.25 ###Markdown compute path ###Code trajectory = trajectory_design.compute_trajectory( visits_per_location=visits_per_location, distances=distances, target_path_length_distribution=discrete_path_distribution, ) ###Output program size: - n_variables: 6170 - n_constraints: 182 - n_cost_function_terms: 10 eliminating subtours iteration 0 (sizes = [11, 41, 3, 17, 9, 3, 3]) eliminating subtours iteration 1 (sizes = [18, 60, 3, 3]) eliminating subtours iteration 2 (sizes = [76, 4, 3]) eliminating subtours iteration 3 (sizes = [54, 12, 4, 3, 3, 3, 3, 3, 4]) eliminating subtours iteration 4 (sizes = [71, 7, 5]) eliminating subtours iteration 5 (sizes = [22, 9, 37, 10, 3, 3, 3]) eliminating subtours iteration 6 (sizes = [26, 29, 5, 3, 3, 17, 4]) eliminating subtours iteration 7 (sizes = [48, 12, 21, 3]) eliminating subtours iteration 8 (sizes = [52, 23, 3, 3, 4]) eliminating subtours iteration 9 (sizes = [34, 41, 4, 3, 3]) eliminating subtours iteration 
10 (sizes = [45, 5, 25, 3, 5, 3]) eliminating subtours iteration 11 (sizes = [17, 40, 9, 4, 13, 3]) eliminating subtours iteration 12 (sizes = [13, 52, 8, 3, 7, 3]) eliminating subtours iteration 13 (sizes = [42, 4, 14, 18, 7]) eliminating subtours iteration 14 (sizes = [53, 22, 4, 3, 3]) eliminating subtours iteration 15 (sizes = [38, 15, 14, 4, 12, 3]) eliminating subtours iteration 16 (sizes = [34, 24, 23, 3]) eliminating subtours iteration 17 (sizes = [11, 44, 22, 5, 3]) eliminating subtours iteration 18 (sizes = [3, 27, 10, 27, 11, 3, 6]) eliminating subtours iteration 19 (sizes = [43, 14, 14, 13]) eliminating subtours iteration 20 (sizes = [25, 53, 5]) eliminating subtours iteration 21 (sizes = [71, 3, 4, 3, 4]) eliminating subtours iteration 22 (sizes = [75, 5, 3]) eliminating subtours iteration 23 (sizes = [41, 41]) eliminating subtours iteration 24 (sizes = [20, 3, 3, 56, 3]) eliminating subtours iteration 25 (sizes = [15, 42, 26]) eliminating subtours iteration 26 (sizes = [67, 5, 6, 6]) eliminating subtours iteration 27 (sizes = [70, 7, 4, 3]) eliminating subtours iteration 28 (sizes = [74, 3, 6]) eliminating subtours iteration 29 (sizes = [10, 46, 7, 16, 3, 4]) eliminating subtours iteration 30 (sizes = [19, 21, 3, 27, 3, 5, 3, 3, 5]) eliminating subtours iteration 31 (sizes = [44, 21, 5, 3, 4, 9]) eliminating subtours iteration 32 (sizes = [25, 26, 11, 16, 7]) eliminating subtours iteration 33 (sizes = [46, 12, 20, 4, 3]) eliminating subtours iteration 34 (sizes = [52, 24, 3, 5]) eliminating subtours iteration 35 (sizes = [18, 37, 20, 7, 3]) eliminating subtours iteration 36 (sizes = [7, 11, 8, 27, 21, 3, 8, 3]) eliminating subtours iteration 37 (sizes = [66, 7, 4, 7]) eliminating subtours iteration 38 (sizes = [12, 16, 24, 15, 16, 3]) eliminating subtours iteration 39 (sizes = [51, 4, 4, 7, 10, 8, 3]) eliminating subtours iteration 40 (sizes = [22, 30, 15, 11, 3, 3, 3]) eliminating subtours iteration 41 (sizes = [58, 22, 3]) eliminating subtours iteration 42 (sizes = [53, 19, 3, 7, 3]) eliminating subtours iteration 43 (sizes = [40, 28, 9, 7]) eliminating subtours iteration 44 (sizes = [48, 5, 3, 14, 9, 5, 3]) eliminating subtours iteration 45 (sizes = [66, 5, 4, 3, 3, 3, 3]) eliminating subtours iteration 46 (sizes = [46, 12, 3, 6, 9, 3, 4, 5]) eliminating subtours iteration 47 (sizes = [63, 3, 4, 5, 3, 4, 5]) eliminating subtours iteration 48 (sizes = [50, 11, 18, 3, 3]) eliminating subtours iteration 49 (sizes = [54, 6, 21, 3]) eliminating subtours iteration 50 (sizes = [10, 62, 3, 6, 4]) eliminating subtours iteration 51 (sizes = [24, 3, 44, 5, 9]) eliminating subtours iteration 52 (sizes = [74, 4, 3, 3]) eliminating subtours iteration 53 (sizes = [36, 16, 5, 3, 6, 3, 12, 5, 3]) eliminating subtours iteration 54 (sizes = [79, 3]) eliminating subtours iteration 55 (sizes = [73, 3, 3, 5]) eliminating subtours iteration 56 (sizes = [41, 32, 6, 5]) eliminating subtours iteration 57 (sizes = [56, 7, 3, 9, 3, 8]) eliminating subtours iteration 58 (sizes = [28, 34, 4, 6, 11, 3]) eliminating subtours iteration 59 (sizes = [71, 7, 3, 3]) eliminating subtours iteration 60 (sizes = [23, 25, 17, 16, 4]) eliminating subtours iteration 61 (sizes = [76, 3, 4]) eliminating subtours iteration 62 (sizes = [15, 44, 3, 12, 3, 6, 4]) eliminating subtours iteration 63 (sizes = [25, 8, 3, 24, 15, 4, 6, 3]) eliminating subtours iteration 64 (sizes = [7, 62, 8, 5, 3]) eliminating subtours iteration 65 (sizes = [28, 42, 4, 8, 3]) eliminating subtours iteration 66 (sizes = [59, 3, 17, 3, 3]) 
eliminating subtours iteration 67 (sizes = [34, 7, 7, 13, 5, 10, 4, 5, 4]) eliminating subtours iteration 68 (sizes = [48, 31, 4]) eliminating subtours iteration 69 (sizes = [3, 23, 41, 12, 3, 4]) eliminating subtours iteration 70 (sizes = [73, 3, 5, 3]) eliminating subtours iteration 71 (sizes = [56, 4, 6, 9, 4, 3, 5]) eliminating subtours iteration 72 (sizes = [43, 8, 19, 7, 5, 4]) eliminating subtours iteration 73 (sizes = [8, 38, 23, 15]) eliminating subtours iteration 74 (sizes = [64, 12, 3, 5]) eliminating subtours iteration 75 (sizes = [67, 7, 5, 3, 3]) eliminating subtours iteration 76 (sizes = [73, 5, 5]) eliminating subtours iteration 77 (sizes = [5, 14, 47, 11, 4, 5]) eliminating subtours iteration 78 (sizes = [15, 49, 17, 3]) eliminating subtours iteration 79 (sizes = [24, 26, 5, 23, 3, 5]) eliminating subtours iteration 80 (sizes = [72, 8, 3]) eliminating subtours iteration 81 (sizes = [19, 52, 3, 4, 3, 3, 3]) eliminating subtours iteration 82 (sizes = [3, 70, 8, 3]) eliminating subtours iteration 83 (sizes = [46, 12, 9, 3, 5, 4, 8]) eliminating subtours iteration 84 (sizes = [14, 40, 18, 8, 3, 3]) eliminating subtours iteration 85 (sizes = [14, 11, 15, 20, 14, 3, 6, 3, 3]) eliminating subtours iteration 86 (sizes = [68, 7, 5, 4]) eliminating subtours iteration 87 (sizes = [32, 14, 35, 3]) eliminating subtours iteration 88 (sizes = [68, 4, 4, 8]) eliminating subtours iteration 89 (sizes = [37, 36, 3, 3, 6]) eliminating subtours iteration 90 (sizes = [44, 10, 7, 10, 8, 5, 3]) eliminating subtours iteration 91 (sizes = [50, 4, 13, 4, 10, 5]) eliminating subtours iteration 92 (sizes = [26, 38, 3, 3, 4, 3, 3, 3, 6]) eliminating subtours iteration 93 (sizes = [56, 6, 15, 7]) eliminating subtours iteration 94 (sizes = [34, 31, 4, 6, 3, 8]) eliminating subtours iteration 95 (sizes = [70, 8, 3, 3]) eliminating subtours iteration 96 (sizes = [63, 15, 5]) eliminating subtours iteration 97 (sizes = [73, 5, 5]) eliminating subtours iteration 98 (sizes = [34, 11, 23, 16]) eliminating subtours iteration 99 (sizes = [17, 26, 26, 5, 3, 7, 3]) eliminating subtours iteration 100 (sizes = [17, 30, 13, 16, 9]) eliminating subtours iteration 101 (sizes = [33, 34, 6, 7, 3, 3]) eliminating subtours iteration 102 (sizes = [72, 6, 5]) final trajectory: [19, 10, 17, 8, 10, 2, 18, 23, 7, 10, 12, 11, 3, 4, 3, 11, 3, 14, 2, 23, 1, 21, 4, 3, 2, 14, 15, 24, 15, 16, 14, 11, 3, 12, 19, 10, 19, 10, 7, 19, 10, 19, 10, 23, 15, 24, 15, 22, 3, 23, 1, 10, 19, 7, 19, 10, 2, 22, 9, 13, 20, 2, 13, 2, 6, 11, 12, 3, 5, 23, 1, 18, 1, 18, 16, 15, 16, 14, 2, 0, 2] ###Markdown compute random paths ###Code n_random = 10000000 # n_random = 100000 random_paths = trajectory_design.compute_random_paths( n_random=n_random, n_trials=n_trials, distances=distances, visits_per_location=visits_per_location, **discrete_path_distribution ) for key, value in random_paths.items(): print(key + ':', value) ###Output best_path: [19 16 15 14 19 15 15 7 14 2 5 1 21 13 19 8 23 24 13 19 7 14 1 18 1 2 2 3 23 7 14 0 23 3 10 16 23 2 9 10 12 11 3 12 22 1 18 15 24 10 3 3 11 2 4 23 10 2 20 10 15 16 19 17 19 10 19 10 10 2 2 6 11 10 3 22 11 4 18 12 3] best_counts: [21 7 14 8 5 3 7 11 3 1] best_error: 36 n_random: 10000000 ###Markdown Plot Results ###Code figsize = [6, 6] n_shown_repeats = 15 trajectory_design.plot_summary( distances=distances, n_repeats=n_repeats, n_repeats_pmf=n_repeats_pmf, n_nodes=n_nodes, repeat_counts=repeat_counts, n_shown_repeats=n_shown_repeats, x=x, y=y, visits_per_location=visits_per_location, distance_pdf=distance_pdf, 
discrete_path_distribution=discrete_path_distribution, distance_pmf=distance_pmf, trajectory=trajectory, random_paths=random_paths, figsize=figsize, show_title=True, show_labels=True, show_legend=True, show_colorbar=True, ) ###Output Figure 6D:
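###Markdown As a final check, one can compare the realized trial lengths of the solution against the target distribution. The sketch below assumes that `trajectory` is the ordered list of visited node indices printed above and that `distances` is the pairwise distance matrix defined earlier; it simply recovers the length of each trial (each consecutive pair of destinations) and bins those lengths.

```python
import numpy as np

# length of each trial = distance between consecutive destinations
edge_lengths = np.array([
    distances[a, b] for a, b in zip(trajectory[:-1], trajectory[1:])
])
counts, bin_edges = np.histogram(edge_lengths, bins=10)
print("trials per length bin:", counts)
print("mean trial length:", edge_lengths.mean())
```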
doc/LectureNotes/_build/jupyter_execute/statistics.ipynb
###Markdown Elements of Probability Theory and Statistical Data Analysis

Domains and probabilities
Consider the following simple example, namely the tossing of two dice, resulting in the following possible values $$\{2,3,4,5,6,7,8,9,10,11,12\}.$$ These values are called the *domain*. To this domain we have the corresponding *probabilities* $$\{1/36,2/36,3/36,4/36,5/36,6/36,5/36,4/36,3/36,2/36,1/36\}.$$

Tossing the dice
The numbers in the domain are the outcomes of the physical process of tossing, say, two dice. We cannot tell beforehand whether the outcome is 3 or 5 or any other number in this domain. This defines the randomness of the outcome, or unexpectedness, or any other synonymous word which encompasses the uncertainty of the final outcome. The only thing we can tell beforehand is that, say, the outcome 2 has a certain probability. If our favorite hobby is to spend an hour every evening throwing dice and registering the sequence of outcomes, we will note that the numbers in the above domain $$\{2,3,4,5,6,7,8,9,10,11,12\},$$ appear in a random order. After 11 throws the results may look like $$\{10,8,6,3,6,9,11,8,12,4,5\}.$$

Stochastic variables
**Random variables are characterized by a domain which contains all possible values that the random variable may take. This domain has a corresponding probability distribution function (PDF)**.

Stochastic variables and the main concepts, the discrete case
There are two main concepts associated with a stochastic variable. The *domain* is the set $\mathbb D = \{x\}$ of all accessible values the variable can assume, so that $X \in \mathbb D$. An example of a discrete domain is the set of six different numbers that we may get by throwing a die, $x\in\{1,\,2,\,3,\,4,\,5,\,6\}$. The *probability distribution function (PDF)* is a function $p(x)$ on the domain which, in the discrete case, gives us the probability or relative frequency with which these values of $X$ occur $$p(x) = \mathrm{Prob}(X=x).$$

Stochastic variables and the main concepts, the continuous case
In the continuous case, the PDF does not directly depict the actual probability. Instead we define the probability for the stochastic variable to assume any value on an infinitesimal interval around $x$ to be $p(x)dx$. The continuous function $p(x)$ then gives us the *density* of the probability rather than the probability itself. The probability for a stochastic variable to assume any value on a non-infinitesimal interval $[a,\,b]$ is then just the integral $$\mathrm{Prob}(a\leq X\leq b) = \int_a^b p(x)dx.$$ Qualitatively speaking, a stochastic variable represents the values of numbers chosen as if by chance from some specified PDF so that the selection of a large set of these numbers reproduces this PDF.

The cumulative probability
Of interest to us is the *cumulative probability distribution function* (**CDF**), $P(x)$, which is just the probability for a stochastic variable $X$ to assume any value less than $x$ $$P(x)=\mathrm{Prob}(X\leq x) =\int_{-\infty}^x p(x^{\prime})dx^{\prime}.$$ The relation between a CDF and its corresponding PDF is then $$p(x) = \frac{d}{dx}P(x).$$

Properties of PDFs
There are two properties that all PDFs must satisfy. The first one is positivity (assuming that the PDF is normalized) $$0 \leq p(x) \leq 1.$$ Naturally, it would be nonsensical for any of the values of the domain to occur with a probability greater than $1$ or less than $0$. Also, the PDF must be normalized. That is, all the probabilities must add up to unity. The probability of "anything" to happen is always unity. 
Forboth discrete and continuous PDFs, this condition is $$\begin{align*}\sum_{x_i\in\mathbb D} p(x_i) & = 1,\\\int_{x\in\mathbb D} p(x)\,dx & = 1.\end{align*}$$ Important distributions, the uniform distributionThe first oneis the most basic PDF; namely the uniform distribution $$\begin{equation}p(x) = \frac{1}{b-a}\theta(x-a)\theta(b-x).\label{eq:unifromPDF} \tag{1}\end{equation}$$ For $a=0$ and $b=1$ we have $$\begin{array}{ll}p(x)dx = dx & \in [0,1].\end{array}$$ The latter distribution is used to generate random numbers. For other PDFs, one needs normally a mapping from this distribution to say for example the exponential distribution. Gaussian distributionThe second one is the Gaussian Distribution $$p(x) = \frac{1}{\sigma\sqrt{2\pi}} \exp{(-\frac{(x-\mu)^2}{2\sigma^2})},$$ with mean value $\mu$ and standard deviation $\sigma$. If $\mu=0$ and $\sigma=1$, it is normally called the **standard normal distribution** $$p(x) = \frac{1}{\sqrt{2\pi}} \exp{(-\frac{x^2}{2})},$$ The following simple Python code plots the above distribution for different values of $\mu$ and $\sigma$. ###Code %matplotlib inline import numpy as np from math import acos, exp, sqrt from matplotlib import pyplot as plt from matplotlib import rc, rcParams import matplotlib.units as units import matplotlib.ticker as ticker rc('text',usetex=True) rc('font',**{'family':'serif','serif':['Gaussian distribution']}) font = {'family' : 'serif', 'color' : 'darkred', 'weight' : 'normal', 'size' : 16, } pi = acos(-1.0) mu0 = 0.0 sigma0 = 1.0 mu1= 1.0 sigma1 = 2.0 mu2 = 2.0 sigma2 = 4.0 x = np.linspace(-20.0, 20.0) v0 = np.exp(-(x*x-2*x*mu0+mu0*mu0)/(2*sigma0*sigma0))/sqrt(2*pi*sigma0*sigma0) v1 = np.exp(-(x*x-2*x*mu1+mu1*mu1)/(2*sigma1*sigma1))/sqrt(2*pi*sigma1*sigma1) v2 = np.exp(-(x*x-2*x*mu2+mu2*mu2)/(2*sigma2*sigma2))/sqrt(2*pi*sigma2*sigma2) plt.plot(x, v0, 'b-', x, v1, 'r-', x, v2, 'g-') plt.title(r'{\bf Gaussian distributions}', fontsize=20) plt.text(-19, 0.3, r'Parameters: $\mu = 0$, $\sigma = 1$', fontdict=font) plt.text(-19, 0.18, r'Parameters: $\mu = 1$, $\sigma = 2$', fontdict=font) plt.text(-19, 0.08, r'Parameters: $\mu = 2$, $\sigma = 4$', fontdict=font) plt.xlabel(r'$x$',fontsize=20) plt.ylabel(r'$p(x)$ [MeV]',fontsize=20) # Tweak spacing to prevent clipping of ylabel plt.subplots_adjust(left=0.15) plt.savefig('gaussian.pdf', format='pdf') plt.show() ###Output _____no_output_____ ###Markdown Exponential distributionAnother important distribution in science is the exponential distribution $$p(x) = \alpha\exp{-(\alpha x)}.$$ Expectation valuesLet $h(x)$ be an arbitrary continuous function on the domain of the stochasticvariable $X$ whose PDF is $p(x)$. We define the *expectation value*of $h$ with respect to $p$ as follows $$\begin{equation}\langle h \rangle_X \equiv \int\! h(x)p(x)\,dx\label{eq:expectation_value_of_h_wrt_p} \tag{2}\end{equation}$$ Whenever the PDF is known implicitly, like in this case, we will dropthe index $X$ for clarity. A particularly useful class of special expectation values are the*moments*. The $n$-th moment of the PDF $p$ is defined asfollows $$\langle x^n \rangle \equiv \int\! x^n p(x)\,dx$$ Stochastic variables and the main concepts, mean valuesThe zero-th moment $\langle 1\rangle$ is just the normalization condition of$p$. 
The first moment, $\langle x\rangle$, is called the *mean* of $p$and often denoted by the letter $\mu$ $$\langle x\rangle = \mu \equiv \int x p(x)dx,$$ for a continuous distribution and $$\langle x\rangle = \mu \equiv \sum_{i=1}^N x_i p(x_i),$$ for a discrete distribution. Qualitatively it represents the centroid or the average value of thePDF and is therefore simply called the expectation value of $p(x)$. Stochastic variables and the main concepts, central moments, the varianceA special version of the moments is the set of *central moments*, the n-th central moment defined as $$\langle (x-\langle x\rangle )^n\rangle \equiv \int\! (x-\langle x\rangle)^n p(x)\,dx$$ The zero-th and first central moments are both trivial, equal $1$ and$0$, respectively. But the second central moment, known as the*variance* of $p$, is of particular interest. For the stochasticvariable $X$, the variance is denoted as $\sigma^2_X$ or $\mathrm{Var}(X)$ $$\begin{align*}\sigma^2_X &=\mathrm{Var}(X) = \langle (x-\langle x\rangle)^2\rangle =\int (x-\langle x\rangle)^2 p(x)dx\\& = \int\left(x^2 - 2 x \langle x\rangle^{2} +\langle x\rangle^2\right)p(x)dx\\& = \langle x^2\rangle - 2 \langle x\rangle\langle x\rangle + \langle x\rangle^2\\& = \langle x^2 \rangle - \langle x\rangle^2\end{align*}$$ The square root of the variance, $\sigma =\sqrt{\langle (x-\langle x\rangle)^2\rangle}$ is called the **standard deviation** of $p$. It is the RMS (root-mean-square)value of the deviation of the PDF from its mean value, interpretedqualitatively as the "spread" of $p$ around its mean. Probability Distribution FunctionsThe following table collects properties of probability distribution functions.In our notation we reserve the label $p(x)$ for the probability of a certain event,while $P(x)$ is the cumulative probability. Discrete PDF Continuous PDF Domain $\left\{x_1, x_2, x_3, \dots, x_N\right\}$ $[a,b]$ Probability $p(x_i)$ $p(x)dx$ Cumulative $P_i=\sum_{l=1}^ip(x_l)$ $P(x)=\int_a^xp(t)dt$ Positivity $0 \le p(x_i) \le 1$ $p(x) \ge 0$ Positivity $0 \le P_i \le 1$ $0 \le P(x) \le 1$ Monotonic $P_i \ge P_j$ if $x_i \ge x_j$ $P(x_i) \ge P(x_j)$ if $x_i \ge x_j$ Normalization $P_N=1$ $P(b)=1$ Probability Distribution FunctionsWith a PDF we can compute expectation values of selected quantities such as $$\langle x^k\rangle=\sum_{i=1}^{N}x_i^kp(x_i),$$ if we have a discrete PDF or $$\langle x^k\rangle=\int_a^b x^kp(x)dx,$$ in the case of a continuous PDF. We have already defined the mean value $\mu$and the variance $\sigma^2$. The three famous Probability Distribution FunctionsThere are at least three PDFs which one may encounter. 
These are the**Uniform distribution** $$p(x)=\frac{1}{b-a}\Theta(x-a)\Theta(b-x),$$ yielding probabilities different from zero in the interval $[a,b]$.**The exponential distribution** $$p(x)=\alpha \exp{(-\alpha x)},$$ yielding probabilities different from zero in the interval $[0,\infty)$ and with mean value $$\mu = \int_0^{\infty}xp(x)dx=\int_0^{\infty}x\alpha \exp{(-\alpha x)}dx=\frac{1}{\alpha},$$ with variance $$\sigma^2=\int_0^{\infty}x^2p(x)dx-\mu^2 = \frac{1}{\alpha^2}.$$ Probability Distribution Functions, the normal distributionFinally, we have the so-called univariate normal distribution, or just the **normal distribution** $$p(x)=\frac{1}{b\sqrt{2\pi}}\exp{\left(-\frac{(x-a)^2}{2b^2}\right)}$$ with probabilities different from zero in the interval $(-\infty,\infty)$.The integral $\int_{-\infty}^{\infty}\exp{\left(-(x^2\right)}dx$ appears in many calculations, its valueis $\sqrt{\pi}$, a result we will need when we compute the mean value and the variance.The mean value is $$\mu = \int_0^{\infty}xp(x)dx=\frac{1}{b\sqrt{2\pi}}\int_{-\infty}^{\infty}x \exp{\left(-\frac{(x-a)^2}{2b^2}\right)}dx,$$ which becomes with a suitable change of variables $$\mu =\frac{1}{b\sqrt{2\pi}}\int_{-\infty}^{\infty}b\sqrt{2}(a+b\sqrt{2}y)\exp{-y^2}dy=a.$$ Probability Distribution Functions, the normal distributionSimilarly, the variance becomes $$\sigma^2 = \frac{1}{b\sqrt{2\pi}}\int_{-\infty}^{\infty}(x-\mu)^2 \exp{\left(-\frac{(x-a)^2}{2b^2}\right)}dx,$$ and inserting the mean value and performing a variable change we obtain $$\sigma^2 = \frac{1}{b\sqrt{2\pi}}\int_{-\infty}^{\infty}b\sqrt{2}(b\sqrt{2}y)^2\exp{\left(-y^2\right)}dy=\frac{2b^2}{\sqrt{\pi}}\int_{-\infty}^{\infty}y^2\exp{\left(-y^2\right)}dy,$$ and performing a final integration by parts we obtain the well-known result $\sigma^2=b^2$.It is useful to introduce the standard normal distribution as well, defined by $\mu=a=0$, viz. a distributioncentered around zero and with a variance $\sigma^2=1$, leading to $$\begin{equation} p(x)=\frac{1}{\sqrt{2\pi}}\exp{\left(-\frac{x^2}{2}\right)}.\label{_auto1} \tag{3}\end{equation}$$ Probability Distribution Functions, the cumulative distributionThe exponential and uniform distributions have simple cumulative functions,whereas the normal distribution does not, being proportional to the so-callederror function $erf(x)$, given by $$P(x) = \frac{1}{\sqrt{2\pi}}\int_{-\infty}^x\exp{\left(-\frac{t^2}{2}\right)}dt,$$ which is difficult to evaluate in a quick way. Probability Distribution Functions, other important distributionSome other PDFs which one encounters often in the natural sciences are the binomial distribution $$p(x) = \left(\begin{array}{c} n \\ x\end{array}\right)y^x(1-y)^{n-x} \hspace{0.5cm}x=0,1,\dots,n,$$ where $y$ is the probability for a specific event, such as the tossing of a coin or moving left or rightin case of a random walker. Note that $x$ is a discrete stochastic variable. The sequence of binomial trials is characterized by the following definitions * Every experiment is thought to consist of $N$ independent trials. * In every independent trial one registers if a specific situation happens or not, such as the jump to the left or right of a random walker. * The probability for every outcome in a single trial has the same value, for example the outcome of tossing (either heads or tails) a coin is always $1/2$. 
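A quick numerical illustration of such a sequence of binomial trials may be useful; the sketch below (NumPy only, with an arbitrary number of trials and the coin-toss probability $y=1/2$) compares the observed frequencies of $x$ with the binomial PDF given above.
###Code
# Sketch: simulate N independent binomial experiments (n trials each, y = 1/2,
# as for a fair coin or a symmetric random walker) and compare the observed
# frequencies of x with p(x) = C(n, x) y^x (1-y)^(n-x).
import numpy as np
from math import comb

np.random.seed(0)
n, y, N = 10, 0.5, 100000
x_samples = np.random.binomial(n, y, size=N)

for x in range(n + 1):
    observed = np.mean(x_samples == x)
    expected = comb(n, x) * y**x * (1 - y)**(n - x)
    print(x, observed, expected)
###Markdown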
Probability Distribution Functions, the binomial distributionIn order to compute the mean and variance we need to recall Newton's binomialformula $$(a+b)^m=\sum_{n=0}^m \left(\begin{array}{c} m \\ n\end{array}\right)a^nb^{m-n},$$ which can be used to show that $$\sum_{x=0}^n\left(\begin{array}{c} n \\ x\end{array}\right)y^x(1-y)^{n-x} = (y+1-y)^n = 1,$$ the PDF is normalized to one. The mean value is $$\mu = \sum_{x=0}^n x\left(\begin{array}{c} n \\ x\end{array}\right)y^x(1-y)^{n-x} =\sum_{x=0}^n x\frac{n!}{x!(n-x)!}y^x(1-y)^{n-x},$$ resulting in $$\mu = \sum_{x=0}^n x\frac{(n-1)!}{(x-1)!(n-1-(x-1))!}y^{x-1}(1-y)^{n-1-(x-1)},$$ which we rewrite as $$\mu=ny\sum_{\nu=0}^n\left(\begin{array}{c} n-1 \\ \nu\end{array}\right)y^{\nu}(1-y)^{n-1-\nu} =ny(y+1-y)^{n-1}=ny.$$ The variance is slightly trickier to get. It reads $\sigma^2=ny(1-y)$. Probability Distribution Functions, Poisson's distributionAnother important distribution with discrete stochastic variables $x$ is the Poisson model, which resembles the exponential distribution and reads $$p(x) = \frac{\lambda^x}{x!} e^{-\lambda} \hspace{0.5cm}x=0,1,\dots,;\lambda > 0.$$ In this case both the mean value and the variance are easier to calculate, $$\mu = \sum_{x=0}^{\infty} x \frac{\lambda^x}{x!} e^{-\lambda} = \lambda e^{-\lambda}\sum_{x=1}^{\infty}\frac{\lambda^{x-1}}{(x-1)!}=\lambda,$$ and the variance is $\sigma^2=\lambda$. Probability Distribution Functions, Poisson's distributionAn example of applications of the Poisson distribution could be the countingof the number of $\alpha$-particles emitted from a radioactive source in a given time interval.In the limit of $n\rightarrow \infty$ and for small probabilities $y$, the binomial distributionapproaches the Poisson distribution. Setting $\lambda = ny$, with $y$ the probability for an event inthe binomial distribution we can show that $$\lim_{n\rightarrow \infty}\left(\begin{array}{c} n \\ x\end{array}\right)y^x(1-y)^{n-x} e^{-\lambda}=\sum_{x=1}^{\infty}\frac{\lambda^x}{x!} e^{-\lambda}.$$ Meet the covariance!An important quantity in a statistical analysis is the so-called covariance. Consider the set $\{X_i\}$ of $n$stochastic variables (not necessarily uncorrelated) with themultivariate PDF $P(x_1,\dots,x_n)$. The *covariance* of twoof the stochastic variables, $X_i$ and $X_j$, is defined as follows $$\begin{equation}\mathrm{Cov}(X_i,\,X_j) = \langle (x_i-\langle x_i\rangle)(x_j-\langle x_j\rangle)\rangle \label{_auto2} \tag{4}\end{equation}$$ $$\begin{equation} =\int\cdots\int (x_i-\langle x_i\rangle)(x_j-\langle x_j\rangle)P(x_1,\dots,x_n)\,dx_1\dots dx_n,\label{eq:def_covariance} \tag{5}\end{equation}$$ with $$\langle x_i\rangle =\int\cdots\int x_i P(x_1,\dots,x_n)\,dx_1\dots dx_n.$$ Meet the covariance in matrix disguiseIf we consider the above covariance as a matrix $$C_{ij} =\mathrm{Cov}(X_i,\,X_j),$$ then the diagonal elements are just the familiarvariances, $C_{ii} = \mathrm{Cov}(X_i,\,X_i) = \mathrm{Var}(X_i)$. It turns out thatall the off-diagonal elements are zero if the stochastic variables areuncorrelated. 
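This last statement is easy to probe numerically. The sketch below (a rough check with arbitrary sample sizes) builds the covariance matrix of three independently drawn variables; the diagonal reproduces the individual variances, while the off-diagonal elements come out close to zero and shrink as the sample grows.
###Code
# Sketch: covariance matrix of independent stochastic variables.
# The off-diagonal elements should be close to zero; the diagonal gives the variances.
import numpy as np

np.random.seed(0)
n = 100000
x1 = np.random.normal(0.0, 1.0, size=n)   # Var = 1
x2 = np.random.normal(0.0, 2.0, size=n)   # Var = 4
x3 = np.random.uniform(0.0, 1.0, size=n)  # Var = 1/12

C = np.cov(np.vstack((x1, x2, x3)))
print(C)  # diagonal close to [1, 4, 1/12], off-diagonal elements close to 0
###Markdown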
Covariance ###Code # Importing various packages from math import exp, sqrt from random import random, seed import numpy as np import matplotlib.pyplot as plt def covariance(x, y, n): sum = 0.0 mean_x = np.mean(x) mean_y = np.mean(y) for i in range(0, n): sum += (x[(i)]-mean_x)*(y[i]-mean_y) return sum/n n = 10 x=np.random.normal(size=n) y = 4+3*x+np.random.normal(size=n) covxy = covariance(x,y,n) print(covxy) z = np.vstack((x, y)) c = np.cov(z.T) print(c) ###Output 4.323291478597321 [[4.06126507e+00 5.22717936e+00 3.58228342e-01 1.09103481e+01 4.42847770e+00 5.02161783e+00 8.06412177e-03 6.67407338e+00 1.12366979e+01 6.04205220e+00] [5.22717936e+00 6.72780613e+00 4.61069091e-01 1.40425078e+01 5.69981195e+00 6.46323168e+00 1.03791824e-02 8.59007674e+00 1.44625466e+01 7.77661393e+00] [3.58228342e-01 4.61069091e-01 3.15979239e-02 9.62359224e-01 3.90618734e-01 4.42937310e-01 7.11304709e-04 5.88693966e-01 9.91145266e-01 5.32945844e-01] [1.09103481e+01 1.40425078e+01 9.62359224e-01 2.93100040e+01 1.18968431e+01 1.34902789e+01 2.16637855e-02 1.79295029e+01 3.01867234e+01 1.62316154e+01] [4.42847770e+00 5.69981195e+00 3.90618734e-01 1.18968431e+01 4.82889306e+00 5.47566390e+00 8.79326583e-03 7.27753165e+00 1.22527008e+01 6.58836420e+00] [5.02161783e+00 6.46323168e+00 4.42937310e-01 1.34902789e+01 5.47566390e+00 6.20906175e+00 9.97101567e-03 8.25226753e+00 1.38937995e+01 7.47079457e+00] [8.06412177e-03 1.03791824e-02 7.11304709e-04 2.16637855e-02 8.79326583e-03 9.97101567e-03 1.60122668e-05 1.32521615e-02 2.23117916e-02 1.19972087e-02] [6.67407338e+00 8.59007674e+00 5.88693966e-01 1.79295029e+01 7.27753165e+00 8.25226753e+00 1.32521615e-02 1.09678277e+01 1.84658093e+01 9.92919670e+00] [1.12366979e+01 1.44625466e+01 9.91145266e-01 3.01867234e+01 1.22527008e+01 1.38937995e+01 2.23117916e-02 1.84658093e+01 3.10896672e+01 1.67171347e+01] [6.04205220e+00 7.77661393e+00 5.32945844e-01 1.62316154e+01 6.58836420e+00 7.47079457e+00 1.19972087e-02 9.92919670e+00 1.67171347e+01 8.98892195e+00]] ###Markdown Meet the covariance, uncorrelated eventsConsider the stochastic variables $X_i$ and $X_j$, ($i\neq j$). We have $$\begin{align*}Cov(X_i,\,X_j) &= \langle (x_i-\langle x_i\rangle)(x_j-\langle x_j\rangle)\rangle\\&=\langle x_i x_j - x_i\langle x_j\rangle - \langle x_i\rangle x_j + \langle x_i\rangle\langle x_j\rangle\rangle\\&=\langle x_i x_j\rangle - \langle x_i\langle x_j\rangle\rangle - \langle \langle x_i\rangle x_j \rangle +\langle \langle x_i\rangle\langle x_j\rangle\rangle \\&=\langle x_i x_j\rangle - \langle x_i\rangle\langle x_j\rangle - \langle x_i\rangle\langle x_j\rangle +\langle x_i\rangle\langle x_j\rangle \\&=\langle x_i x_j\rangle - \langle x_i\rangle\langle x_j\rangle\end{align*}$$ If $X_i$ and $X_j$ are independent (assuming $i \neq j$), we have that $$\langle x_i x_j\rangle = \langle x_i\rangle\langle x_j\rangle,$$ leading to $$Cov(X_i, X_j) = 0 \hspace{0.1cm} (i\neq j).$$ Numerical experiments and the covarianceNow that we have constructed an idealized mathematical framework, letus try to apply it to empirical observations. Examples of relevantphysical phenomena may be spontaneous decays of nuclei, or a purelymathematical set of numbers produced by some deterministicmechanism. It is the latter we will deal with, using so-called pseudo-randomnumber generators. In general our observations will contain only a limited set ofobservables. 
We remind the reader thata *stochastic process* is a process that produces sequentially achain of values $$\{x_1, x_2,\dots\,x_k,\dots\}.$$ Numerical experiments and the covarianceWe will call thesevalues our *measurements* and the entire set as our measured*sample*. The action of measuring all the elements of a samplewe will call a stochastic *experiment* (since, operationally,they are often associated with results of empirical observation ofsome physical or mathematical phenomena; precisely an experiment). Weassume that these values are distributed according to some PDF $p_X^{\phantom X}(x)$, where $X$ is just the formal symbol for thestochastic variable whose PDF is $p_X^{\phantom X}(x)$. Instead oftrying to determine the full distribution $p$ we are often onlyinterested in finding the few lowest moments, like the mean$\mu_X^{\phantom X}$ and the variance $\sigma_X^{\phantom X}$. Numerical experiments and the covariance, actual situationsIn practical situations however, a sample is always of finite size. Let thatsize be $n$. The expectation value of a sample $\alpha$, the **sample mean**, is then defined as follows $$\langle x_{\alpha} \rangle \equiv \frac{1}{n}\sum_{k=1}^n x_{\alpha,k}.$$ The *sample variance* is: $$\mathrm{Var}(x) \equiv \frac{1}{n}\sum_{k=1}^n (x_{\alpha,k} - \langle x_{\alpha} \rangle)^2,$$ with its square root being the *standard deviation of the sample*. Numerical experiments and the covariance, our observablesYou can think of the above observables as a set of quantities which definea given experiment. This experiment is then repeated several times, say $m$ times.The total average is then $$\begin{equation}\langle X_m \rangle= \frac{1}{m}\sum_{\alpha=1}^mx_{\alpha}=\frac{1}{mn}\sum_{\alpha, k} x_{\alpha,k},\label{eq:exptmean} \tag{6}\end{equation}$$ where the last sums end at $m$ and $n$.The total variance is $$\sigma^2_m= \frac{1}{mn^2}\sum_{\alpha=1}^m(\langle x_{\alpha} \rangle-\langle X_m \rangle)^2,$$ which we rewrite as $$\begin{equation}\sigma^2_m=\frac{1}{m}\sum_{\alpha=1}^m\sum_{kl=1}^n (x_{\alpha,k}-\langle X_m \rangle)(x_{\alpha,l}-\langle X_m \rangle).\label{eq:exptvariance} \tag{7}\end{equation}$$ Numerical experiments and the covariance, the sample varianceWe define also the sample variance $\sigma^2$ of all $mn$ individual experiments as $$\begin{equation}\sigma^2=\frac{1}{mn}\sum_{\alpha=1}^m\sum_{k=1}^n (x_{\alpha,k}-\langle X_m \rangle)^2.\label{eq:sampleexptvariance} \tag{8}\end{equation}$$ These quantities, being known experimental values or the results from our calculations, may differ, in some casessignificantly, from the similarly namedexact values for the mean value $\mu_X$, the variance $\mathrm{Var}(X)$and the covariance $\mathrm{Cov}(X,Y)$. Numerical experiments and the covariance, central limit theoremThe central limit theorem states that the PDF $\tilde{p}(z)$ ofthe average of $m$ random values corresponding to a PDF $p(x)$ is a normal distribution whose mean is the mean value of the PDF $p(x)$ and whose variance is the varianceof the PDF $p(x)$ divided by $m$, the number of values used to compute $z$.The central limit theorem leads then to the well-known expression for thestandard deviation, given by $$\sigma_m=\frac{\sigma}{\sqrt{m}}.$$ In many cases the above estimate for the standard deviation, in particular if correlations are strong, may be too simplistic. We need therefore a more precise defintion of the error and the variance in our results. 
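For uncorrelated samples this scaling is easy to verify numerically; the sketch below (NumPy only, with arbitrary choices of $m$ and the number of repetitions) averages $m$ exponential deviates many times and compares the spread of those averages with $\sigma/\sqrt{m}$.
###Code
# Sketch: check the central limit theorem scaling sigma_m = sigma/sqrt(m)
# for averages of m independent exponential deviates (alpha = 1: mean 1, variance 1).
import numpy as np

np.random.seed(1)
m = 100           # number of values entering each average
n_repeat = 10000  # number of independent averages
samples = np.random.exponential(scale=1.0, size=(n_repeat, m))
z = samples.mean(axis=1)   # the averages
sigma = 1.0                # standard deviation of the exponential with alpha = 1
print("std of the averages:", z.std())
print("sigma/sqrt(m)      :", sigma/np.sqrt(m))
print("mean of the averages:", z.mean(), " expected 1.0")
###Markdown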
Definition of Correlation Functions and Standard DeviationOur estimate of the true average $\mu_{X}$ is the sample mean $\langle X_m \rangle$ $$\mu_{X}^{\phantom X} \approx X_m=\frac{1}{mn}\sum_{\alpha=1}^m\sum_{k=1}^n x_{\alpha,k}.$$ We can then use Eq. ([7](eq:exptvariance)) $$\sigma^2_m=\frac{1}{mn^2}\sum_{\alpha=1}^m\sum_{kl=1}^n (x_{\alpha,k}-\langle X_m \rangle)(x_{\alpha,l}-\langle X_m \rangle),$$ and rewrite it as $$\sigma^2_m=\frac{\sigma^2}{n}+\frac{2}{mn^2}\sum_{\alpha=1}^m\sum_{k<l}^n (x_{\alpha,k}-\langle X_m \rangle)(x_{\alpha,l}-\langle X_m \rangle),$$ where the first term is the sample variance of all $mn$ experiments divided by $n$and the last term is nothing but the covariance which arises when $k\ne l$. Definition of Correlation Functions and Standard DeviationOur estimate of the true average $\mu_{X}$ is the sample mean $\langle X_m \rangle$If the observables are uncorrelated, then the covariance is zero and we obtain a total variancewhich agrees with the central limit theorem. Correlations may often be present in our data set, resulting in a non-zero covariance. The first term is normally called the uncorrelated contribution.Computationally the uncorrelated first term is much easier to treatefficiently than the second.We just accumulate separately the values $x^2$ and $x$ for everymeasurement $x$ we receive. The correlation term, though, has to becalculated at the end of the experiment since we need all themeasurements to calculate the cross terms. Therefore, all measurementshave to be stored throughout the experiment. Definition of Correlation Functions and Standard DeviationLet us analyze the problem by splitting up the correlation term intopartial sums of the form $$f_d = \frac{1}{nm}\sum_{\alpha=1}^m\sum_{k=1}^{n-d}(x_{\alpha,k}-\langle X_m \rangle)(x_{\alpha,k+d}-\langle X_m \rangle),$$ The correlation term of the total variance can now be rewritten in terms of$f_d$ $$\frac{2}{mn^2}\sum_{\alpha=1}^m\sum_{k<l}^n (x_{\alpha,k}-\langle X_m \rangle)(x_{\alpha,l}-\langle X_m \rangle)=\frac{2}{n}\sum_{d=1}^{n-1} f_d$$ Definition of Correlation Functions and Standard DeviationThe value of $f_d$ reflects the correlation between measurementsseparated by the distance $d$ in the samples. Notice that for$d=0$, $f$ is just the sample variance, $\sigma^2$. If we divide $f_d$by $\sigma^2$, we arrive at the so called **autocorrelation function** $$\begin{equation}\kappa_d = \frac{f_d}{\sigma^2}\label{eq:autocorrelformal} \tag{9}\end{equation}$$ which gives us a useful measure of the correlation pair correlationstarting always at $1$ for $d=0$. Definition of Correlation Functions and Standard Deviation, sample varianceThe sample variance of the $mn$ experiments can now bewritten in terms of the autocorrelation function $$\begin{equation}\sigma_m^2=\frac{\sigma^2}{n}+\frac{2}{n}\cdot\sigma^2\sum_{d=1}^{n-1}\frac{f_d}{\sigma^2}=\left(1+2\sum_{d=1}^{n-1}\kappa_d\right)\frac{1}{n}\sigma^2=\frac{\tau}{n}\cdot\sigma^2\label{eq:error_estimate_corr_time} \tag{10}\end{equation}$$ and we see that $\sigma_m$ can be expressed in terms of theuncorrelated sample variance times a correction factor $\tau$ whichaccounts for the correlation between measurements. We call thiscorrection factor the *autocorrelation time* $$\begin{equation}\tau = 1+2\sum_{d=1}^{n-1}\kappa_d\label{eq:autocorrelation_time} \tag{11}\end{equation}$$ For a correlation free experiment, $\tau$equals 1. Definition of Correlation Functions and Standard DeviationFrom the point of view ofEq. 
([10](eq:error_estimate_corr_time)) we can interpret a sequentialcorrelation as an effective reduction of the number of measurements bya factor $\tau$. The effective number of measurements becomes $$n_\mathrm{eff} = \frac{n}{\tau}$$ To neglect the autocorrelation time $\tau$ will always cause oursimple uncorrelated estimate of $\sigma_m^2\approx \sigma^2/n$ tobe less than the true sample error. The estimate of the error will betoo "good". On the other hand, the calculation of the fullautocorrelation time poses an efficiency problem if the set ofmeasurements is very large. The solution to this problem is given by more practically oriented methods like the blocking technique. Code to compute the Covariance matrix and the Covariance ###Code # Importing various packages from math import exp, sqrt from random import random, seed import numpy as np import matplotlib.pyplot as plt # Sample covariance, note the factor 1/(n-1) def covariance(x, y, n): sum = 0.0 mean_x = np.mean(x) mean_y = np.mean(y) for i in range(0, n): sum += (x[(i)]-mean_x)*(y[i]-mean_y) return sum/(n-1.) n = 100 x = np.random.normal(size=n) print(np.mean(x)) y = 4+3*x+np.random.normal(size=n) print(np.mean(y)) z = x**3+np.random.normal(size=n) print(np.mean(z)) covxx = covariance(x,x,n) covyy = covariance(y,y,n) covzz = covariance(z,z,n) covxy = covariance(x,y,n) covxz = covariance(x,z,n) covyz = covariance(y,z,n) print(covxx,covyy, covzz) print(covxy,covxz, covyz) w = np.vstack((x, y, z)) #print(w) c = np.cov(w) print(c) #eigen = np.zeros(n) Eigvals, Eigvecs = np.linalg.eig(c) print(Eigvals) ###Output 0.02730126581656065 4.105137868830763 0.04829457939163212 0.8718475896381779 9.358869339268145 15.339535706819584 2.6818813252071303 2.6510573774179256 7.986091753050161 [[ 0.87184759 2.68188133 2.65105738] [ 2.68188133 9.35886934 7.98609175] [ 2.65105738 7.98609175 15.33953571]] [21.54237024 0.08251898 3.94536341] ###Markdown Random NumbersUniform deviates are just random numbers that lie within a specified range(typically 0 to 1), with any one number in the range just as likely as any other. Theyare, in other words, what you probably think random numbers are. However,we want to distinguish uniform deviates from other sorts of random numbers, forexample numbers drawn from a normal (Gaussian) distribution of specified meanand standard deviation. These other sorts of deviates are almost always generated byperforming appropriate operations on one or more uniform deviates, as we will seein subsequent sections. So, a reliable source of random uniform deviates, the subjectof this section, is an essential building block for any sort of stochastic modelingor Monte Carlo computer work. Random Numbers, better name: pseudo random numbersA disclaimer is however appropriate. It should be fairly obvious that something as deterministic as a computer cannot generate purely random numbers.Numbers generated by any of the standard algorithms are in reality pseudo randomnumbers, hopefully abiding to the following criteria: * they produce a uniform distribution in the interval [0,1]. * correlations between random numbers are negligible * the period before the same sequence of random numbers is repeated is as large as possible and finally * the algorithm should be fast. 
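The first two of these criteria can be probed quickly for Python's standard generator (the Mersenne Twister); the rough sketch below, with an arbitrary sample size, looks at the sample mean, the sample variance and the lag-1 correlation of a stream of uniform deviates.
###Code
# Sketch: quick probe of the uniformity and correlation criteria for Python's
# standard generator. For uniform deviates on [0,1] the mean should be close
# to 1/2, the variance close to 1/12, and the lag-1 correlation close to zero.
import random
import numpy as np

random.seed(12345)
n = 100000
u = np.array([random.random() for _ in range(n)])

print("mean    :", u.mean(), " expected 0.5")
print("variance:", u.var(), " expected", 1.0/12)
lag1 = np.corrcoef(u[:-1], u[1:])[0, 1]
print("lag-1 correlation:", lag1)
###Markdown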
Random number generator RNG The most common random number generators are based on so-calledLinear congruential relations of the type $$N_i=(aN_{i-1}+c) \mathrm{MOD} (M),$$ which yield a number in the interval [0,1] through $$x_i=N_i/M$$ The number $M$ is called the period and it should be as large as possible and $N_0$ is the starting value, or seed. The function $\mathrm{MOD}$ means the remainder,that is if we were to evaluate $(13)\mathrm{MOD}(9)$, the outcome is the remainderof the division $13/9$, namely $4$. Random number generator RNG and periodic outputsThe problem with such generators is that their outputs are periodic;they will start to repeat themselves with a period that is at most $M$. If howeverthe parameters $a$ and $c$ are badly chosen, the period may be even shorter.Consider the following example $$N_i=(6N_{i-1}+7) \mathrm{MOD} (5),$$ with a seed $N_0=2$. This generator produces the sequence$4,1,3,0,2,4,1,3,0,2,...\dots$, i.e., a sequence with period $5$.However, increasing $M$ may not guarantee a larger period as the followingexample shows $$N_i=(27N_{i-1}+11) \mathrm{MOD} (54),$$ which still, with $N_0=2$, results in $11,38,11,38,11,38,\dots$, a period ofjust $2$. Random number generator RNG and its periodTypical periods for the random generators provided in the program library are of the order of $\sim 10^9$ or larger. Other random number generators which havebecome increasingly popular are so-called shift-register generators.In these generators each successive number depends on many precedingvalues (rather than the last values as in the linear congruentialgenerator).For example, you could make a shift register generator whose $l$th number is the sum of the $l-i$th and $l-j$th values with modulo $M$, $$N_l=(aN_{l-i}+cN_{l-j})\mathrm{MOD}(M).$$ Random number generator RNG, other examplesSuch a generator again produces a sequence of pseudorandom numbersbut this time with a period much larger than $M$.It is also possible to construct more elaborate algorithms by includingmore than two past terms in the sum of each iteration.One example is the generator of [Marsaglia and Zaman](http://dl.acm.org/citation.cfm?id=187154)which consists of two congruential relations $$\begin{equation} N_l=(N_{l-3}-N_{l-1})\mathrm{MOD}(2^{31}-69),\label{eq:mz1} \tag{12}\end{equation}$$ followed by $$\begin{equation} N_l=(69069N_{l-1}+1013904243)\mathrm{MOD}(2^{32}),\label{eq:mz2} \tag{13}\end{equation}$$ which according to the authors has a period larger than $2^{94}$. Random number generator RNG, other examplesInstead of using modular addition, we could use the bitwiseexclusive-OR ($\oplus$) operation so that $$N_l=(N_{l-i})\oplus (N_{l-j})$$ where the bitwise action of $\oplus$ means that if $N_{l-i}=N_{l-j}$ the result is$0$ whereas if $N_{l-i}\ne N_{l-j}$ the result is$1$. As an example, consider the case where $N_{l-i}=6$ and $N_{l-j}=11$. The firstone has a bit representation (using 4 bits only) which reads $0110$ whereas the second number is $1011$. Employing the $\oplus$ operator yields $1101$, or $2^3+2^2+2^0=13$.In Fortran90, the bitwise $\oplus$ operation is coded through the intrinsicfunction $\mathrm{IEOR}(m,n)$ where $m$ and $n$ are the input numbers, while in $C$it is given by $m\wedge n$. 
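The short-period examples and the bitwise exclusive-OR example above are easy to reproduce; a minimal sketch in plain Python:
###Code
# Sketch: reproduce the linear congruential examples and the XOR example above.
def lcg_sequence(a, c, M, seed, length):
    """Generate `length` values of N_i = (a*N_{i-1} + c) MOD M."""
    values = []
    N = seed
    for _ in range(length):
        N = (a*N + c) % M
        values.append(N)
    return values

# a=6, c=7, M=5, seed 2: 4, 1, 3, 0, 2, 4, 1, 3, 0, 2, ...  (period 5)
print(lcg_sequence(6, 7, 5, 2, 10))
# a=27, c=11, M=54, seed 2: 11, 38, 11, 38, ...  (period 2, despite the larger M)
print(lcg_sequence(27, 11, 54, 2, 6))
# bitwise exclusive-OR: 6 = 0110, 11 = 1011, 6 XOR 11 = 1101 = 13
print(6 ^ 11)
###Markdown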
Random number generator RNG, RAN0We show here how the linear congruential algorithm can be implemented, namely $$N_i=(aN_{i-1}) \mathrm{MOD} (M).$$ However, since $a$ and $N_{i-1}$ are integers and their multiplication could become greater than the standard 32 bit integer, there is a trick via Schrage's algorithm which approximates the multiplicationof large integers through the factorization $$M=aq+r,$$ where we have defined $$q=[M/a],$$ and $$r = M\hspace{0.1cm}\mathrm{MOD} \hspace{0.1cm}a.$$ where the brackets denote integer division. In the code below the numbers $q$ and $r$ are chosen so that $r < q$. Random number generator RNG, RAN0To see how this works we note first that $$\begin{equation}(aN_{i-1}) \mathrm{MOD} (M)= (aN_{i-1}-[N_{i-1}/q]M)\mathrm{MOD} (M),\label{eq:rntrick1} \tag{14}\end{equation}$$ since we can add or subtract any integer multiple of $M$ from $aN_{i-1}$.The last term $[N_{i-1}/q]M\mathrm{MOD}(M)$ is zero since the integer division $[N_{i-1}/q]$ just yields a constant which is multiplied with $M$. Random number generator RNG, RAN0We can now rewrite Eq. ([14](eq:rntrick1)) as $$\begin{equation}(aN_{i-1}) \mathrm{MOD} (M)= (aN_{i-1}-[N_{i-1}/q](aq+r))\mathrm{MOD} (M),\label{eq:rntrick2} \tag{15}\end{equation}$$ which resultsin $$\begin{equation}(aN_{i-1}) \mathrm{MOD} (M)= \left(a(N_{i-1}-[N_{i-1}/q]q)-[N_{i-1}/q]r)\right)\mathrm{MOD} (M),\label{eq:rntrick3} \tag{16}\end{equation}$$ yielding $$\begin{equation}(aN_{i-1}) \mathrm{MOD} (M)= \left(a(N_{i-1}\mathrm{MOD} (q)) -[N_{i-1}/q]r)\right)\mathrm{MOD} (M).\label{eq:rntrick4} \tag{17}\end{equation}$$ Random number generator RNG, RAN0The term $[N_{i-1}/q]r$ is always smaller or equal $N_{i-1}(r/q)$ and with $r < q$ we obtain always a number smaller than $N_{i-1}$, which is smaller than $M$. And since the number $N_{i-1}\mathrm{MOD} (q)$ is between zero and $q-1$ then$a(N_{i-1}\mathrm{MOD} (q))< aq$. Combined with our definition of $q=[M/a]$ ensures that this term is also smaller than $M$ meaning that both terms fit into a32-bit signed integer. None of these two terms can be negative, but their difference could.The algorithm below adds $M$ if their difference is negative.Note that the program uses the bitwise $\oplus$ operator to generatethe starting point for each generation of a random number. The periodof $ran0$ is $\sim 2.1\times 10^{9}$. A special feature of thisalgorithm is that is should never be called with the initial seed set to $0$. Random number generator RNG, RAN0 code /* ** The function ** ran0() ** is an "Minimal" random number generator of Park and Miller ** Set or reset the input value ** idum to any integer value (except the unlikely value MASK) ** to initialize the sequence; idum must not be altered between ** calls for sucessive deviates in a sequence. ** The function returns a uniform deviate between 0.0 and 1.0. */ double ran0(long &idum) { const int a = 16807, m = 2147483647, q = 127773; const int r = 2836, MASK = 123459876; const double am = 1./m; long k; double ans; idum ^= MASK; k = (*idum)/q; idum = a*(idum - k*q) - r*k; // add m if negative difference if(idum < 0) idum += m; ans=am*(idum); idum ^= MASK; return ans; } // End: function ran0() Properties of Selected Random Number GeneratorsAs mentioned previously, the underlying PDF for the generation ofrandom numbers is the uniform distribution, meaning that the probability for finding a number $x$ in the interval [0,1] is $p(x)=1$.A random number generator should produce numbers which are uniformly distributedin this interval. 
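As a concrete cross-check of the Schrage factorization described above, here is a rough Python transcription of the ran0 routine (an illustrative port using the same constants, not the library implementation):
###Code
# Sketch: Python version of the Park-Miller "minimal" generator ran0, using
# Schrage's factorization M = a*q + r so that a*N never overflows a 32-bit integer.
A = 16807
M = 2147483647   # 2^31 - 1
Q = 127773       # M // A
R = 2836         # M % A
MASK = 123459876

def ran0(idum):
    """Return (uniform deviate in (0,1), updated seed); never call with seed 0."""
    idum ^= MASK                      # scramble the seed, as in the C version
    k = idum // Q
    idum = A*(idum - k*Q) - R*k       # Schrage's trick for (A*idum) MOD M
    if idum < 0:                      # add M if the difference is negative
        idum += M
    ans = idum / M
    idum ^= MASK                      # re-apply the mask before returning the seed
    return ans, idum

seed = 12345
for _ in range(3):
    x, seed = ran0(seed)
    print(x)
###Markdown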
The table shows the distribution of $N=10000$ randomnumbers generated by the functions in the program library.We note in this table that the number of points in the variousintervals $0.0-0.1$, $0.1-0.2$ etc are fairly close to $1000$, with some minordeviations. Two additional measures are the standard deviation $\sigma$ and the mean$\mu=\langle x\rangle$. Properties of Selected Random Number GeneratorsFor the uniform distribution, the mean value $\mu$ is then $$\mu=\langle x\rangle=\frac{1}{2}$$ while the standard deviation is $$\sigma=\sqrt{\langle x^2\rangle-\mu^2}=\frac{1}{\sqrt{12}}=0.2886.$$ Properties of Selected Random Number GeneratorsThe various random number generators produce results which agree rather well withthese limiting values. $x$-bin ran0 ran1 ran2 ran3 0.0-0.1 1013 991 938 1047 0.1-0.2 1002 1009 1040 1030 0.2-0.3 989 999 1030 993 0.3-0.4 939 960 1023 937 0.4-0.5 1038 1001 1002 992 0.5-0.6 1037 1047 1009 1009 0.6-0.7 1005 989 1003 989 0.7-0.8 986 962 985 954 0.8-0.9 1000 1027 1009 1023 0.9-1.0 991 1015 961 1026 $\mu$ 0.4997 0.5018 0.4992 0.4990 $\sigma$ 0.2882 0.2892 0.2861 0.2915 Simple demonstration of RNGs using pythonThe following simple Python code plots the distribution of the produced random numbers using the linear congruential RNG employed by Python. The trend displayed in the previous table is seen rather clearly. ###Code #!/usr/bin/env python import numpy as np import matplotlib.mlab as mlab import matplotlib.pyplot as plt import random # initialize the rng with a seed random.seed() counts = 10000 values = np.zeros(counts) for i in range (1, counts, 1): values[i] = random.random() # the histogram of the data n, bins, patches = plt.hist(values, 10, facecolor='green') plt.xlabel('$x$') plt.ylabel('Number of counts') plt.title(r'Test of uniform distribution') plt.axis([0, 1, 0, 1100]) plt.grid(True) plt.show() ###Output _____no_output_____ ###Markdown Properties of Selected Random Number GeneratorsSince our random numbers, which are typically generated via a linear congruential algorithm,are never fully independent, we can then define an important test which measures the degree of correlation, namely the so-called auto-correlation function defined previously, see again Eq. ([9](eq:autocorrelformal)).We rewrite it here as $$C_k=\frac{f_d} {\sigma^2},$$ with $C_0=1$. Recall that $\sigma^2=\langle x_i^2\rangle-\langle x_i\rangle^2$ and that $$f_d = \frac{1}{nm}\sum_{\alpha=1}^m\sum_{k=1}^{n-d}(x_{\alpha,k}-\langle X_m \rangle)(x_{\alpha,k+d}-\langle X_m \rangle),$$ The non-vanishing of $C_k$ for $k\ne 0$ means that the randomnumbers are not independent. The independence of the random numbers is crucial in the evaluation of other expectation values. If they are not independent, ourassumption for approximating $\sigma_N$ is no longer valid. Autocorrelation functionThis program computes the autocorrelation function as discussed in the equation on the previous slide for random numbers generated with the normal distribution $N(0,1)$. 
###Code # Importing various packages from math import exp, sqrt from random import random, seed import numpy as np import matplotlib.pyplot as plt def autocovariance(x, n, k, mean_x): sum = 0.0 for i in range(0, n-k): sum += (x[(i+k)]-mean_x)*(x[i]-mean_x) return sum/n n = 1000 x=np.random.normal(size=n) autocor = np.zeros(n) figaxis = np.zeros(n) mean_x=np.mean(x) var_x = np.var(x) print(mean_x, var_x) for i in range (0, n): figaxis[i] = i autocor[i]=(autocovariance(x, n, i, mean_x))/var_x plt.plot(figaxis, autocor, "r-") plt.axis([0,n,-0.1, 1.0]) plt.xlabel(r'$i$') plt.ylabel(r'$\gamma_i$') plt.title(r'Autocorrelation function') plt.show() ###Output -0.0032873138755776365 0.9279671770201344 ###Markdown As can be seen from the plot, the first point gives back the variance and a value of one. For the remaining values we notice that there are still non-zero values for the auto-correlation function. Correlation function and which random number generators should I useThe program here computes the correlation function for one of the standard functions included with the c++ compiler. // This function computes the autocorrelation function for // the standard c++ random number generator include include include include using namespace std; // output file as global variable ofstream ofile; // Main function begins here int main(int argc, char* argv[]) { int n; char *outfilename; cin >> n; double MCint = 0.; double MCintsqr2=0.; double invers_period = 1./RAND_MAX; // initialise the random number generator srand(time(NULL)); // This produces the so-called seed in MC jargon // Compute the variance and the mean value of the uniform distribution // Compute also the specific values x for each cycle in order to be able to // the covariance and the correlation function // Read in output file, abort if there are too few command-line arguments if( argc <= 2 ){ cout << "Bad Usage: " << argv[0] << " read also output file and number of cycles on same line" << endl; exit(1); } else{ outfilename=argv[1]; } ofile.open(outfilename); // Get the number of Monte-Carlo samples n = atoi(argv[2]); double *X; X = new double[n]; for (int i = 0; i < n; i++){ double x = double(rand())*invers_period; X[i] = x; MCint += x; MCintsqr2 += x*x; } double Mean = MCint/((double) n ); MCintsqr2 = MCintsqr2/((double) n ); double STDev = sqrt(MCintsqr2-Mean*Mean); double Variance = MCintsqr2-Mean*Mean; // Write mean value and standard deviation cout << " Standard deviation= " << STDev << " Integral = " << Mean << endl; // Now we compute the autocorrelation function double *autocor; autocor = new double[n]; for (int j = 0; j < n; j++){ double sum = 0.0; for (int k = 0; k < (n-j); k++){ sum += (X[k]-Mean)*(X[k+j]-Mean); } autocor[j] = sum/Variance/((double) n ); ofile << setiosflags(ios::showpoint | ios::uppercase); ofile << setw(15) << setprecision(8) << j; ofile << setw(15) << setprecision(8) << autocor[j] << endl; } ofile.close(); // close output file return 0; } // end of main program Which RNG should I use?* C++ has a class called **random**. The [random class](http://www.cplusplus.com/reference/random/) contains a large selection of RNGs and is highly recommended. Some of these RNGs have very large periods making it thereby very safe to use these RNGs in case one is performing large calculations. In particular, the [Mersenne twister random number engine](http://www.cplusplus.com/reference/random/mersenne_twister_engine/) has a period of $2^{19937}$. 
* Add RNGs in Python How to use the Mersenne generatorThe following part of a c++ code (from project 4) sets up the uniform distribution for $x\in [0,1]$. /* // You need this include // Initialize the seed and call the Mersienne algo std::random_device rd; std::mt19937_64 gen(rd()); // Set up the uniform distribution for x \in [[0, 1] std::uniform_real_distribution RandomNumberGenerator(0.0,1.0); // Now use the RNG int ix = (int) (RandomNumberGenerator(gen)*NSpins); Why blocking?**Statistical analysis.** * Monte Carlo simulations can be treated as *computer experiments* * The results can be analysed with the same statistical tools as we would use analysing experimental data. * As in all experiments, we are looking for expectation values and an estimate of how accurate they are, i.e., possible sources for errors.A very good article which explains blocking is H. Flyvbjerg and H. G. Petersen, *Error estimates on averages of correlated data*, [Journal of Chemical Physics 91, 461-466 (1989)](http://scitation.aip.org/content/aip/journal/jcp/91/1/10.1063/1.457480). Why blocking?**Statistical analysis.** * As in other experiments, Monte Carlo experiments have two classes of errors: * Statistical errors * Systematical errors * Statistical errors can be estimated using standard tools from statistics * Systematical errors are method specific and must be treated differently from case to case. (In VMC a common source is the step length or time step in importance sampling) Code to demonstrate the calculation of the autocorrelation functionThe following code computes the autocorrelation function, the covariance and the standard deviationfor standard RNG. The [following file](https://github.com/CompPhysics/ComputationalPhysics2/tree/gh-pages/doc/Programs/LecturePrograms/programs/Blocking/autocorrelation.cpp) gives the code. 
// This function computes the autocorrelation function for // the Mersenne random number generator with a uniform distribution include include include include include include include include using namespace std; using namespace arma; // output file ofstream ofile; // Main function begins here int main(int argc, char* argv[]) { int MonteCarloCycles; string filename; if (argc > 1) { filename=argv[1]; MonteCarloCycles = atoi(argv[2]); string fileout = filename; string argument = to_string(MonteCarloCycles); fileout.append(argument); ofile.open(fileout); } // Compute the variance and the mean value of the uniform distribution // Compute also the specific values x for each cycle in order to be able to // compute the covariance and the correlation function vec X = zeros(MonteCarloCycles); double MCint = 0.; double MCintsqr2=0.; std::random_device rd; std::mt19937_64 gen(rd()); // Set up the uniform distribution for x \in [[0, 1] std::uniform_real_distribution RandomNumberGenerator(0.0,1.0); for (int i = 0; i < MonteCarloCycles; i++){ double x = RandomNumberGenerator(gen); X(i) = x; MCint += x; MCintsqr2 += x*x; } double Mean = MCint/((double) MonteCarloCycles ); MCintsqr2 = MCintsqr2/((double) MonteCarloCycles ); double STDev = sqrt(MCintsqr2-Mean*Mean); double Variance = MCintsqr2-Mean*Mean; // Write mean value and variance cout << " Sample variance= " << Variance << " Mean value = " << Mean << endl; // Now we compute the autocorrelation function vec autocorrelation = zeros(MonteCarloCycles); for (int j = 0; j < MonteCarloCycles; j++){ double sum = 0.0; for (int k = 0; k < (MonteCarloCycles-j); k++){ sum += (X(k)-Mean)*(X(k+j)-Mean); } autocorrelation(j) = sum/Variance/((double) MonteCarloCycles ); ofile << setiosflags(ios::showpoint | ios::uppercase); ofile << setw(15) << setprecision(8) << j; ofile << setw(15) << setprecision(8) << autocorrelation(j) << endl; } // Now compute the exact covariance using the autocorrelation function double Covariance = 0.0; for (int j = 0; j < MonteCarloCycles; j++){ Covariance += autocorrelation(j); } Covariance *= 2.0/((double) MonteCarloCycles); // Compute now the total variance, including the covariance, and obtain the standard deviation double TotalVariance = (Variance/((double) MonteCarloCycles ))+Covariance; cout << "Covariance =" << Covariance << "Totalvariance= " << TotalVariance << "Sample Variance/n= " << (Variance/((double) MonteCarloCycles )) << endl; cout << " STD from sample variance= " << sqrt(Variance/((double) MonteCarloCycles )) << " STD with covariance = " << sqrt(TotalVariance) << endl; ofile.close(); // close output file return 0; } // end of main program What is blocking?**Blocking.** * Say that we have a set of samples from a Monte Carlo experiment * Assuming (wrongly) that our samples are uncorrelated our best estimate of the standard deviation of the mean $\langle \mathbf{M}\rangle$ is given by $$\sigma=\sqrt{\frac{1}{n}\left(\langle \mathbf{M}^2\rangle-\langle \mathbf{M}\rangle^2\right)}$$ * If the samples are correlated we can rewrite our results to show that $$\sigma=\sqrt{\frac{1+2\tau/\Delta t}{n}\left(\langle \mathbf{M}^2\rangle-\langle \mathbf{M}\rangle^2\right)}$$ where $\tau$ is the correlation time (the time between a sample and the next uncorrelated sample) and $\Delta t$ is time between each sample What is blocking?**Blocking.** * If $\Delta t\gg\tau$ our first estimate of $\sigma$ still holds * Much more common that $\Delta t<\tau$ * In the method of data blocking we divide the sequence of samples into blocks * We then 
take the mean $\langle \mathbf{M}_i\rangle$ of block $i=1\ldots n_{blocks}$ to calculate the total mean and variance * The size of each block must be so large that sample $j$ of block $i$ is not correlated with sample $j$ of block $i+1$ * The correlation time $\tau$ would be a good choice What is blocking?**Blocking.** * Problem: We don't know $\tau$ or it is too expensive to compute * Solution: Make a plot of std. dev. as a function of blocksize * The estimate of std. dev. of correlated data is too low $\to$ the error will increase with increasing block size until the blocks are uncorrelated, where we reach a plateau * When the std. dev. stops increasing the blocks are uncorrelated Implementation * Do a Monte Carlo simulation, storing all samples to file * Do the statistical analysis on this file, independently of your Monte Carlo program * Read the file into an array * Loop over various block sizes * For each block size $n_b$, loop over the array in steps of $n_b$ taking the mean of elements $i n_b,\ldots,(i+1) n_b$ * Take the mean and variance of the resulting array * Write the results for each block size to file for later analysis Actual implementation with code, main functionWhen the file gets large, it can be useful to write your data in binary mode instead of ascii characters.The [following python file](https://github.com/CompPhysics/MachineLearning/blob/master/doc/Programs/Sampling/analysis.py) reads data from file with the output from every Monte Carlo cycle. ###Code # Blocking @timeFunction def blocking(self, blockSizeMax = 500): blockSizeMin = 1 self.blockSizes = [] self.meanVec = [] self.varVec = [] for i in range(blockSizeMin, blockSizeMax): if(len(self.data) % i != 0): pass#continue blockSize = i meanTempVec = [] varTempVec = [] startPoint = 0 endPoint = blockSize while endPoint <= len(self.data): meanTempVec.append(np.average(self.data[startPoint:endPoint])) startPoint = endPoint endPoint += blockSize mean, var = np.average(meanTempVec), np.var(meanTempVec)/len(meanTempVec) self.meanVec.append(mean) self.varVec.append(var) self.blockSizes.append(blockSize) self.blockingAvg = np.average(self.meanVec[-200:]) self.blockingVar = (np.average(self.varVec[-200:])) self.blockingStd = np.sqrt(self.blockingVar) ###Output _____no_output_____
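Since the method above operates on the data attribute of the surrounding analysis class, a self-contained illustration may be useful. The sketch below generates an artificially correlated sequence (a simple first-order autoregressive chain, used here only as a stand-in for correlated Monte Carlo output) and prints the blocked error estimate, which should grow with the block size and then flatten out once the blocks are effectively uncorrelated.
###Code
# Self-contained sketch of blocking applied to synthetic, correlated data.
import numpy as np

np.random.seed(2)
n = 2**15
phi = 0.9                      # strength of the artificial correlation
data = np.zeros(n)
for i in range(1, n):
    data[i] = phi*data[i-1] + np.random.normal()

for block_size in [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]:
    n_blocks = n // block_size
    blocks = data[:n_blocks*block_size].reshape(n_blocks, block_size).mean(axis=1)
    # standard error of the mean estimated from the block means
    error = np.sqrt(np.var(blocks)/n_blocks)
    print(block_size, error)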
09-object_oriented_programming_part-1.ipynb
###Markdown Object Oriented Programming Oop allows user to create their own objects. .method_name() -- Syntax ###Code l=[2,3,4,5,6,7,8] l.append(1) l.sort()#This are methods which act as function #Here list is a object print(l) ###Output [1, 2, 3, 4, 5, 6, 7, 8] ###Markdown Syntax : ###Code # Hashtag are used to write comment. # class NameOfClass(): #def __init__(self,para1,para2): #self.para1=para1 #self.para2=para2 #def any_method(self): #print(self.para1) # lets create our own class class Cricket(): pass my_class=Cricket() #We created instance of class type(my_class) ###Output _____no_output_____ ###Markdown Attributes Attributes are nothing but characteristic of object ###Code class AdditionOp(): #Class object attribute input= 'int or float' def __init__(self,a,b): #Attributes: Characteristic of object # We take in parameter a and b # we assign it using 'self' keyword # self.attribute_name=parameter self.num1=a self.num2=b print(self.num1+self.num2) my_num=AdditionOp(b=3,a=4) type(my_num) my_num.num1 my_num.input ###Output _____no_output_____ ###Markdown Methods Methods are nothing but function that are defined in the body of the class(object). ###Code class MathOp: def __init__(self,a=1,b=1): self.a=a self.b=b def addition(self): print(self.a+self.b) def subtraction(self): print(self.a-self.b) def multiplication(self): print(self.a*self.b) def division(self,a,b): print(self.a/self.b) print(a/b) c=MathOp(2,4) c.multiplication() #c.a() 'a' is attribute not method so we cant use '()' c.a c.subtraction() c.division(50,10) help(MathOp) ###Output Help on class MathOp in module __main__: class MathOp(builtins.object) | MathOp(a, b) | | Methods defined here: | | __init__(self, a, b) | Initialize self. See help(type(self)) for accurate signature. | | addition(self) | | division(self, a, b) | | multiplication(self) | | subtraction(self) | | ---------------------------------------------------------------------- | Data descriptors defined here: | | __dict__ | dictionary for instance variables (if defined) | | __weakref__ | list of weak references to the object (if defined) ###Markdown Inheritance ###Code class Father(): def __init__(self): print('I am your Father') def height(self,ht): print('My height is {}'.format(ht)) def weight(self,weight): print('My weight is {}'.format(weight)) def age(self,age): print('My age is {}'.format(age)) class Son(Father): def __init__(self): Father.__init__(self) print('This is me!') def weight(self,wt): print('My weight is {}'.format()) me=Son() me.age(19) me.height('6.9') me.weight(65) ###Output My weight is 65
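Note that, as printed, the overriding weight method calls format() without an argument, which would raise an IndexError when executed. A slightly more idiomatic sketch of the same inheritance example, using super() and passing the value through to format(), could look like this:
###Code
# Sketch: the Father/Son example rewritten with super(); the overriding
# weight() method now formats the value it receives.
class Father:
    def __init__(self):
        print('I am your Father')

    def height(self, ht):
        print('My height is {}'.format(ht))

    def weight(self, wt):
        print('My weight is {}'.format(wt))

    def age(self, age):
        print('My age is {}'.format(age))


class Son(Father):
    def __init__(self):
        super().__init__()      # calls Father.__init__
        print('This is me!')

    def weight(self, wt):       # overrides Father.weight
        print('My weight is {} kg'.format(wt))


me = Son()
me.age(19)
me.height(6.9)
me.weight(65)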
training_data.ipynb
###Markdown Training DataThis notebook creates a simulation database from an ensemble of atmospheric states. Since the purpose of this data is to verify and evaluate the QRNN and BMCI methods, it is necessary that the prior distribution on the atmospheric states can be expressed analytically. To allow for this, the ensemble is generated from the distributions fitted to the ERA Interim data for northern hemisphere, mid-latititude data from 2016. The corresponding code can be found in the `era_interim_climatology.ipynb` notebook ###Code %env ARTS_INCLUDE_PATH=/home/simonpf/src/atms_retrievals:/home/simonpf/src/arts_clean/controlfiles/ %env ARTS_DATA_PATH=/home/simonpf/src/arts_xml/ %env ARTS_BUILD_PATH=/home/simonpf/build/arts/ %load_ext autoreload %autoreload 2 import scipy as sc import numpy as np %matplotlib inline import matplotlib_settings import matplotlib.pyplot as plt from typhon.arts.workspace import Workspace import atms ###Output env: ARTS_INCLUDE_PATH=/home/simonpf/src/atms_retrievals:/home/simonpf/src/arts_clean/controlfiles/ env: ARTS_DATA_PATH=/home/simonpf/src/arts_xml/ env: ARTS_BUILD_PATH=/home/simonpf/build/arts/ Loading ARTS API from: /home/simonpf/build/arts/src/libarts_api.so ###Markdown ARTS SetupFor the basic simulation, the following ATMS channels are used:| Channel Index | Frequency | Polarization ||---------------|--------------------|--------------|| 0 | $23$ GHz | H || 15 | $88$ GHz | H || 16 | $165$ GHz | H || 17 | $183 \pm 7$ GHz | H || 19 | $183 \pm 3$ GHz | H | ###Code dataset = "summer" suffix = "" if not dataset == "": suffix = "_" + dataset channels = [0, 15, 16, 17, 19] ws = Workspace() atms.setup_atmosphere(ws, dataset = dataset) atms.setup_sensor(ws, channels) atms.checks(ws) ws.jacobianOff() ###Output ARTS[53396448]: Executing /home/simonpf/src/arts_clean/controlfiles/general/general.arts ARTS[53396448]: { ARTS[53396448]: - verbosityInit ARTS[53396448]: - scat_speciesSet ARTS[53396448]: - MatrixSet ARTS[53396448]: - Tensor4SetConstant ARTS[53396448]: - ArrayOfStringSet ARTS[53396448]: - Touch ARTS[53396448]: - FlagOff ARTS[53396448]: - MatrixSet ARTS[53396448]: - NumericSet ARTS[53396448]: - ArrayOfStringSet ARTS[53396448]: - Tensor3SetConstant ARTS[53396448]: - Tensor3SetConstant ARTS[53396448]: - Tensor3SetConstant ARTS[53396448]: - Tensor3SetConstant ARTS[53396448]: - Tensor3SetConstant ARTS[53396448]: - Tensor3SetConstant ARTS[53396448]: - IndexSet ARTS[53396448]: - IndexSet ARTS[53396448]: - IndexSet ARTS[53396448]: - IndexSet ARTS[53396448]: - FlagOff ARTS[53396448]: - output_file_formatSetAscii ARTS[53396448]: - StringSet ARTS[53396448]: - IndexSet ARTS[53396448]: - abs_lineshapeDefine ARTS[53396448]: - NumericSet ARTS[53396448]: - NumericSet ARTS[53396448]: - AgendaSet ARTS[53396448]: - IndexSet ARTS[53396448]: - IndexSet ARTS[53396448]: - NumericSet ARTS[53396448]: - NumericSet ARTS[53396448]: - nlteOff ARTS[53396448]: - partition_functionsInitFromBuiltin ARTS[53396448]: - IndexSet ARTS[53396448]: } ARTS[53396448]: Executing /home/simonpf/src/arts_clean/controlfiles/general/continua.arts ARTS[53396448]: { ARTS[53396448]: - abs_cont_descriptionInit ARTS[53396448]: - abs_cont_descriptionAppend ARTS[53396448]: - abs_cont_descriptionAppend ARTS[53396448]: - abs_cont_descriptionAppend ARTS[53396448]: - abs_cont_descriptionAppend ARTS[53396448]: - abs_cont_descriptionAppend ARTS[53396448]: - abs_cont_descriptionAppend ARTS[53396448]: - abs_cont_descriptionAppend ARTS[53396448]: - abs_cont_descriptionAppend ARTS[53396448]: - 
abs_cont_descriptionAppend ARTS[53396448]: - abs_cont_descriptionAppend ARTS[53396448]: - abs_cont_descriptionAppend ARTS[53396448]: - abs_cont_descriptionAppend ARTS[53396448]: - abs_cont_descriptionAppend ARTS[53396448]: - abs_cont_descriptionAppend ARTS[53396448]: - abs_cont_descriptionAppend ARTS[53396448]: - abs_cont_descriptionAppend ARTS[53396448]: - abs_cont_descriptionAppend ARTS[53396448]: - abs_cont_descriptionAppend ARTS[53396448]: - abs_cont_descriptionAppend ARTS[53396448]: - abs_cont_descriptionAppend ARTS[53396448]: - abs_cont_descriptionAppend ARTS[53396448]: - abs_cont_descriptionAppend ARTS[53396448]: - abs_cont_descriptionAppend ARTS[53396448]: - abs_cont_descriptionAppend ARTS[53396448]: - abs_cont_descriptionAppend ARTS[53396448]: - abs_cont_descriptionAppend ARTS[53396448]: - abs_cont_descriptionAppend ARTS[53396448]: - abs_cont_descriptionAppend ARTS[53396448]: - abs_cont_descriptionAppend ARTS[53396448]: - abs_cont_descriptionAppend ARTS[53396448]: - abs_cont_descriptionAppend ARTS[53396448]: - abs_cont_descriptionAppend ARTS[53396448]: - abs_cont_descriptionAppend ARTS[53396448]: - abs_cont_descriptionAppend ARTS[53396448]: - abs_cont_descriptionAppend ARTS[53396448]: - abs_cont_descriptionAppend ARTS[53396448]: - abs_cont_descriptionAppend ARTS[53396448]: - abs_cont_descriptionAppend ARTS[53396448]: - abs_cont_descriptionAppend ARTS[53396448]: - abs_cont_descriptionAppend ARTS[53396448]: - abs_cont_descriptionAppend ARTS[53396448]: - abs_cont_descriptionAppend ARTS[53396448]: - abs_cont_descriptionAppend ARTS[53396448]: - abs_cont_descriptionAppend ARTS[53396448]: - abs_cont_descriptionAppend ARTS[53396448]: - abs_cont_descriptionAppend ARTS[53396448]: - abs_cont_descriptionAppend ARTS[53396448]: - abs_cont_descriptionAppend ARTS[53396448]: - abs_cont_descriptionAppend ARTS[53396448]: - abs_cont_descriptionAppend ARTS[53396448]: - abs_cont_descriptionAppend ARTS[53396448]: - abs_cont_descriptionAppend ARTS[53396448]: - abs_cont_descriptionAppend ARTS[53396448]: - abs_cont_descriptionAppend ARTS[53396448]: - abs_cont_descriptionAppend ARTS[53396448]: - abs_cont_descriptionAppend ARTS[53396448]: } ARTS[53396448]: Executing /home/simonpf/src/arts_clean/controlfiles/general/agendas.arts ARTS[53396448]: { ARTS[53396448]: - AgendaCreate ARTS[53396448]: - AgendaSet ARTS[53396448]: - AgendaCreate ARTS[53396448]: - AgendaSet ARTS[53396448]: - AgendaCreate ARTS[53396448]: - AgendaSet ARTS[53396448]: - AgendaCreate ARTS[53396448]: - AgendaSet ARTS[53396448]: - AgendaCreate ARTS[53396448]: - AgendaSet ARTS[53396448]: - AgendaCreate ARTS[53396448]: - AgendaSet ARTS[53396448]: - AgendaCreate ARTS[53396448]: - AgendaSet ARTS[53396448]: - AgendaCreate ARTS[53396448]: - AgendaSet ARTS[53396448]: - AgendaCreate ARTS[53396448]: - AgendaSet ARTS[53396448]: - AgendaCreate ARTS[53396448]: - AgendaSet ARTS[53396448]: - AgendaCreate ARTS[53396448]: - AgendaSet ARTS[53396448]: - AgendaCreate ARTS[53396448]: - AgendaSet ARTS[53396448]: - AgendaCreate ARTS[53396448]: - AgendaSet ARTS[53396448]: - AgendaCreate ARTS[53396448]: - AgendaSet ARTS[53396448]: - AgendaCreate ARTS[53396448]: - AgendaSet ARTS[53396448]: - AgendaCreate ARTS[53396448]: - AgendaSet ARTS[53396448]: - AgendaCreate ARTS[53396448]: - AgendaSet ARTS[53396448]: - AgendaCreate ARTS[53396448]: - AgendaSet ARTS[53396448]: - AgendaCreate ARTS[53396448]: - AgendaSet ARTS[53396448]: - AgendaCreate ARTS[53396448]: - AgendaSet ARTS[53396448]: - FlagOff ARTS[53396448]: - AgendaCreate ARTS[53396448]: - AgendaSet ARTS[53396448]: - 
AgendaCreate ARTS[53396448]: - AgendaSet ARTS[53396448]: - AgendaCreate ARTS[53396448]: - AgendaSet ARTS[53396448]: - AgendaCreate ARTS[53396448]: - AgendaSet ARTS[53396448]: - AgendaCreate ARTS[53396448]: - AgendaSet ARTS[53396448]: - AgendaCreate ARTS[53396448]: - AgendaSet ARTS[53396448]: - FlagOff ARTS[53396448]: - AgendaCreate ARTS[53396448]: - AgendaSet ARTS[53396448]: - AgendaCreate ARTS[53396448]: - AgendaSet ARTS[53396448]: - AgendaCreate ARTS[53396448]: - AgendaSet ARTS[53396448]: - AgendaCreate ARTS[53396448]: - AgendaSet ARTS[53396448]: - AgendaCreate ARTS[53396448]: - AgendaSet ARTS[53396448]: - AgendaCreate ARTS[53396448]: - AgendaSet ARTS[53396448]: - AgendaCreate ARTS[53396448]: - AgendaSet ARTS[53396448]: - AgendaCreate ARTS[53396448]: - AgendaSet ARTS[53396448]: - AgendaCreate ARTS[53396448]: - AgendaSet ARTS[53396448]: - AgendaCreate ARTS[53396448]: - AgendaSet ARTS[53396448]: - AgendaCreate ARTS[53396448]: - AgendaSet ARTS[53396448]: - AgendaCreate ARTS[53396448]: - AgendaSet ARTS[53396448]: - AgendaCreate ARTS[53396448]: - AgendaSet ARTS[53396448]: - AgendaCreate ARTS[53396448]: - AgendaSet ARTS[53396448]: - AgendaCreate ARTS[53396448]: - AgendaSet ARTS[53396448]: - AgendaCreate ARTS[53396448]: - AgendaSet ARTS[53396448]: - AgendaCreate ARTS[53396448]: - AgendaSet ARTS[53396448]: } ARTS[53396448]: Executing /home/simonpf/src/arts_clean/controlfiles/general/planet_earth.arts ARTS[53396448]: { ARTS[53396448]: - isotopologue_ratiosInitFromBuiltin ARTS[53396448]: - refellipsoidEarth ARTS[53396448]: - NumericSet ARTS[53396448]: - AgendaSet ARTS[53396448]: - NumericSet ARTS[53396448]: } ARTS[53396448]: Executing /home/simonpf/src/arts_clean/controlfiles/instruments/metmm/sensor_descriptions/prepare_metmm.arts ARTS[53396448]: { ARTS[53396448]: - ArrayOfArrayOfIndexCreate ARTS[53396448]: - ArrayOfIndexCreate ARTS[53396448]: - VectorCreate ARTS[53396448]: - ArrayOfIndexCreate ARTS[53396448]: - NumericCreate ARTS[53396448]: - VectorCreate ARTS[53396448]: - IndexCreate ARTS[53396448]: } ARTS[53396448]: Executing /home/simonpf/src/atms_simulations/sensor_atms.arts ARTS[53396448]: { ARTS[53396448]: - MatrixSet ARTS[53396448]: - MatrixSet ARTS[53396448]: - ArrayOfStringSet ARTS[53396448]: - VectorSet ARTS[53396448]: - ArrayOfIndexSet ARTS[53396448]: - VectorSet ARTS[53396448]: - Extract ARTS[53396448]: - nrowsGet ARTS[53396448]: - VectorSetConstant ARTS[53396448]: - Delete ARTS[53396448]: } ARTS[53396448]: Executing /home/simonpf/src/arts_clean/controlfiles/instruments/metmm/sensor_descriptions/apply_metmm.arts ARTS[53396448]: { ARTS[53396448]: - Select ARTS[53396448]: - Select ARTS[53396448]: - Select ARTS[53396448]: - Select ARTS[53396448]: - Select ARTS[53396448]: - f_gridMetMM ARTS[53396448]: - sensor_responseMetMM ARTS[53396448]: } ###Markdown Sampling the a priori ###Code dist = atms.StateDistribution(dataset = "summer") ###Output _____no_output_____ ###Markdown To generate the training data, we simply sample from the a priori distribution, computed the integrated column water vapor and then simulate the corresponding brightness temperatures. 
###Code def sample_a_priori(ws, n_samples): cwv = np.zeros(n_samples) y = np.zeros((n_samples, len(channels))) q_profiles = np.zeros((n_samples, ws.p_grid.value.shape[0])) t_profiles = np.zeros((n_samples, ws.p_grid.value.shape[0])) p = ws.p_grid.value for i in range(n_samples): dist.sample(ws) ws.yCalc() cwv[i] = atms.vmr2cd(ws) q_profiles[i,:] = np.copy(ws.vmr_field.value[0, :, 0, 0].ravel()) t_profiles[i, :] = np.copy(ws.t_field.value[:, 0, 0].ravel()) ws.sst = np.maximum(ws.t_field.value[0, 0, 0], 270.0) y[i] = np.copy(ws.y.value) if i % 1000 == 0: print("progress: " + str(i)) return y, cwv, q_profiles, t_profiles ###Output _____no_output_____ ###Markdown Training DataFor the training data $10^6$ samples from the joint a priori distribution of water vapor and temperature profiles and corresponding brightness temperatures are generated. ###Code y, cwv, q_profiles, t_profiles = sample_a_priori(ws, 1000000) np.save("data/x_train" + str(len(channels)) + suffix, y) np.save("data/y_train" + str(len(channels)) + suffix, cwv) ###Output progress: 0 progress: 1000 progress: 2000 progress: 3000 progress: 4000 progress: 5000 progress: 6000 progress: 7000 progress: 8000 progress: 9000 progress: 10000 progress: 11000 progress: 12000 progress: 13000 progress: 14000 progress: 15000 progress: 16000 progress: 17000 progress: 18000 progress: 19000 progress: 20000 progress: 21000 progress: 22000 progress: 23000 progress: 24000 progress: 25000 progress: 26000 progress: 27000 progress: 28000 progress: 29000 progress: 30000 progress: 31000 progress: 32000 progress: 33000 progress: 34000 progress: 35000 progress: 36000 progress: 37000 progress: 38000 progress: 39000 progress: 40000 progress: 41000 progress: 42000 progress: 43000 progress: 44000 progress: 45000 progress: 46000 progress: 47000 progress: 48000 progress: 49000 progress: 50000 progress: 51000 progress: 52000 progress: 53000 progress: 54000 progress: 55000 progress: 56000 progress: 57000 progress: 58000 progress: 59000 progress: 60000 progress: 61000 progress: 62000 progress: 63000 progress: 64000 progress: 65000 progress: 66000 progress: 67000 progress: 68000 progress: 69000 progress: 70000 progress: 71000 progress: 72000 progress: 73000 progress: 74000 progress: 75000 progress: 76000 progress: 77000 progress: 78000 progress: 79000 progress: 80000 progress: 81000 progress: 82000 progress: 83000 progress: 84000 progress: 85000 progress: 86000 progress: 87000 progress: 88000 progress: 89000 progress: 90000 progress: 91000 progress: 92000 progress: 93000 progress: 94000 progress: 95000 progress: 96000 progress: 97000 progress: 98000 progress: 99000 progress: 100000 progress: 101000 progress: 102000 progress: 103000 progress: 104000 progress: 105000 progress: 106000 progress: 107000 progress: 108000 progress: 109000 progress: 110000 progress: 111000 progress: 112000 progress: 113000 progress: 114000 progress: 115000 progress: 116000 progress: 117000 progress: 118000 progress: 119000 progress: 120000 progress: 121000 progress: 122000 progress: 123000 progress: 124000 progress: 125000 progress: 126000 progress: 127000 progress: 128000 progress: 129000 progress: 130000 progress: 131000 progress: 132000 progress: 133000 progress: 134000 progress: 135000 progress: 136000 progress: 137000 progress: 138000 progress: 139000 progress: 140000 progress: 141000 progress: 142000 progress: 143000 progress: 144000 progress: 145000 progress: 146000 progress: 147000 progress: 148000 progress: 149000 progress: 150000 progress: 151000 progress: 152000 progress: 153000 
progress: 154000 progress: 155000 progress: 156000 progress: 157000 progress: 158000 progress: 159000 progress: 160000 progress: 161000 progress: 162000 progress: 163000 progress: 164000 progress: 165000 progress: 166000 progress: 167000 progress: 168000 progress: 169000 progress: 170000 progress: 171000 progress: 172000 progress: 173000 progress: 174000 progress: 175000 progress: 176000 progress: 177000 progress: 178000 progress: 179000 progress: 180000 progress: 181000 progress: 182000 progress: 183000 progress: 184000 progress: 185000 progress: 186000 progress: 187000 progress: 188000 progress: 189000 progress: 190000 progress: 191000 progress: 192000 progress: 193000 progress: 194000 progress: 195000 progress: 196000 progress: 197000 progress: 198000 progress: 199000 progress: 200000 progress: 201000 progress: 202000 progress: 203000 progress: 204000 progress: 205000 progress: 206000 progress: 207000 progress: 208000 progress: 209000 progress: 210000 progress: 211000 progress: 212000 progress: 213000 progress: 214000 progress: 215000 progress: 216000 progress: 217000 progress: 218000 progress: 219000 progress: 220000 progress: 221000 progress: 222000 progress: 223000 progress: 224000 progress: 225000 progress: 226000 progress: 227000 progress: 228000 progress: 229000 progress: 230000 progress: 231000 progress: 232000 progress: 233000 progress: 234000 progress: 235000 progress: 236000 progress: 237000 progress: 238000 progress: 239000 progress: 240000 progress: 241000 progress: 242000 progress: 243000 progress: 244000 progress: 245000 progress: 246000 progress: 247000 progress: 248000 progress: 249000 progress: 250000 progress: 251000 progress: 252000 progress: 253000 progress: 254000 progress: 255000 progress: 256000 progress: 257000 progress: 258000 progress: 259000 progress: 260000 progress: 261000 progress: 262000 progress: 263000 progress: 264000 progress: 265000 progress: 266000 progress: 267000 progress: 268000 progress: 269000 progress: 270000 progress: 271000 progress: 272000 progress: 273000 progress: 274000 progress: 275000 progress: 276000 progress: 277000 progress: 278000 progress: 279000 progress: 280000 progress: 281000 progress: 282000 progress: 283000 progress: 284000 progress: 285000 progress: 286000 progress: 287000 progress: 288000 progress: 289000 progress: 290000 progress: 291000 progress: 292000 progress: 293000 progress: 294000 progress: 295000 progress: 296000 progress: 297000 progress: 298000 progress: 299000 progress: 300000 progress: 301000 progress: 302000 progress: 303000 progress: 304000 progress: 305000 progress: 306000 progress: 307000 progress: 308000 progress: 309000 progress: 310000 progress: 311000 progress: 312000 progress: 313000 progress: 314000 progress: 315000 progress: 316000 progress: 317000 progress: 318000 progress: 319000 progress: 320000 progress: 321000 progress: 322000 progress: 323000 progress: 324000 progress: 325000 progress: 326000 progress: 327000 progress: 328000 progress: 329000 progress: 330000 progress: 331000 progress: 332000 progress: 333000 progress: 334000 progress: 335000 progress: 336000 progress: 337000 progress: 338000 progress: 339000 progress: 340000 progress: 341000 progress: 342000 progress: 343000 progress: 344000 progress: 345000 progress: 346000 progress: 347000 progress: 348000 progress: 349000 progress: 350000 progress: 351000 progress: 352000 progress: 353000 progress: 354000 progress: 355000 progress: 356000 progress: 357000 progress: 358000 progress: 359000 progress: 360000 progress: 361000 progress: 362000 
progress: 363000 progress: 364000 progress: 365000 progress: 366000 progress: 367000 progress: 368000 progress: 369000 progress: 370000 progress: 371000 progress: 372000 progress: 373000 progress: 374000 progress: 375000 progress: 376000 progress: 377000 progress: 378000 progress: 379000 progress: 380000 progress: 381000 progress: 382000 progress: 383000 progress: 384000 progress: 385000 progress: 386000 progress: 387000 progress: 388000 progress: 389000 progress: 390000 progress: 391000 progress: 392000 progress: 393000 progress: 394000 progress: 395000 progress: 396000 progress: 397000 progress: 398000 progress: 399000 progress: 400000 progress: 401000 progress: 402000 progress: 403000 progress: 404000 progress: 405000 progress: 406000 progress: 407000 progress: 408000 progress: 409000 progress: 410000 progress: 411000 progress: 412000 progress: 413000 progress: 414000 progress: 415000 progress: 416000 progress: 417000 progress: 418000 progress: 419000 progress: 420000 progress: 421000 progress: 422000 progress: 423000 progress: 424000 progress: 425000 progress: 426000 progress: 427000 progress: 428000 progress: 429000 progress: 430000 progress: 431000 progress: 432000 progress: 433000 progress: 434000 progress: 435000 progress: 436000 progress: 437000 progress: 438000 progress: 439000 progress: 440000 progress: 441000 progress: 442000 progress: 443000 progress: 444000 progress: 445000 progress: 446000 progress: 447000 progress: 448000 progress: 449000 progress: 450000 progress: 451000 progress: 452000 progress: 453000 progress: 454000 progress: 455000 progress: 456000 progress: 457000 progress: 458000 progress: 459000 progress: 460000 progress: 461000 progress: 462000 progress: 463000 progress: 464000 progress: 465000 progress: 466000 progress: 467000 progress: 468000 progress: 469000 progress: 470000 progress: 471000 progress: 472000 progress: 473000 progress: 474000 progress: 475000 progress: 476000 progress: 477000 progress: 478000 progress: 479000 progress: 480000 progress: 481000 progress: 482000 progress: 483000 progress: 484000 progress: 485000 progress: 486000 progress: 487000 progress: 488000 ###Markdown Test DataFor the test data $10^5$ samples from the joint a priori distribution of water vapor and temperature profils and corresponding brightness temperatures are generated. 
###Code y, cwv, q_profiles, t_profiles = sample_a_priori(ws, 100000) np.save("data/x_test" + str(len(channels)) + suffix, y) np.save("data/y_test" + str(len(channels)) + suffix, cwv) ###Output _____no_output_____ ###Markdown Statistics ###Code q_mean = np.load("data/q_mean.npy") t_mean = np.load("data/t_mean.npy") f, axs = plt.subplots(1, 2) ps = np.arange(1, 28)[::-1] axs[0].set_ylabel("Pressure Level") axs[0].set_xlabel("q [kg / kg]") axs[0].set_title("Specific Humidity") axs[0].invert_yaxis() axs[0].plot(q_mean.ravel()[::-1], p, c = 'b') for i in range(1000): ind = np.random.randint(0, q_profiles.shape[0]) axs[0].plot(q_profiles[ind, :].ravel() / 28.0 * 18.0, p, c = 'b', alpha = 0.01) axs[0].set_xlim([-0.001, 0.03]) axs[1].set_ylabel("Pressure Level") axs[1].set_xlabel("t [K]") axs[1].set_title("Temperature") axs[1].invert_yaxis() axs[1].plot(t_mean.ravel()[::-1], p, c = 'r') for i in range(1000): ind = np.random.randint(0, t_profiles.shape[0]) axs[1].plot(t_profiles[ind, :].ravel(), p, c = 'r', alpha = 0.01) axs[1].set_xlim([180, 320]) plt.tight_layout() ###Output _____no_output_____ ###Markdown Distribution of Brightness Temperatures ###Code f, axs = plt.subplots(len(channels) // 2 + 1, 2) for i, ax in enumerate([ax for l in axs for ax in l]): if i >= len(channels): ax.set_visible(False) else: bins = np.linspace(y[:,i].min(), y[:,i].max(), 41) ax.hist(y[:,i], normed=True, bins=bins) ax.set_xlabel("$T_B$") f = ws.y_f.value[i] * 1e-9 ax.set_title("Channel " + str(i) + ", " + str(f) + " GHz") plt.tight_layout() ###Output _____no_output_____ ###Markdown CWV Distribution ###Code from netCDF4 import Dataset rootgrp = Dataset("era_interim_mid_latitudes_2016_sst_cwv.nc") cwv_grid = rootgrp.variables['tcwv'] bins = np.logspace(-1, 2.5, 51) plt.hist(np.asarray(cwv_grid).ravel(), bins = bins, label = "ERA Interim", alpha = 0.7, normed = True) plt.hist(cwv, bins = bins, alpha = 0.7, normed = True, label = "Fitted") plt.xscale("log") plt.xlabel("CWV [$kg / m^2$]") plt.title("CWV Distribution") ###Output _____no_output_____
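The histogram comparison above is qualitative; if one wanted a single number for how well the fitted CWV distribution matches ERA Interim, a two-sample Kolmogorov-Smirnov test is a quick way to get one. This check is illustrative only and simply reuses the `cwv` and `cwv_grid` variables from the cell above:

```python
import numpy as np
from scipy import stats

era_cwv = np.asarray(cwv_grid).ravel()
# A KS statistic close to 0 means the two samples look like draws from the same distribution
ks_stat, p_value = stats.ks_2samp(era_cwv, cwv)
print("KS statistic: %.3f, p-value: %.3g" % (ks_stat, p_value))
```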
Character_level_LSTM/Character_Level_RNN_Exercise.ipynb
###Markdown Character-Level LSTM in PyTorchIn this notebook, I'll construct a character-level LSTM with PyTorch. The network will train character by character on some text, then generate new text character by character. As an example, I will train on Anna Karenina. **This model will be able to generate new text based on the text from the book!**This network is based off of Andrej Karpathy's [post on RNNs](http://karpathy.github.io/2015/05/21/rnn-effectiveness/) and [implementation in Torch](https://github.com/karpathy/char-rnn). Below is the general architecture of the character-wise RNN. ###Code import numpy as np import torch from torch import nn import torch.nn.functional as F ###Output _____no_output_____ ###Markdown Load in DataThen, we'll load the Anna Karenina text file and convert it into integers for our network to use. ###Code # open text file and read in data as `text` with open('../input/data.txt', 'r') as f: text = f.read() ###Output _____no_output_____ ###Markdown Let's check out the first 100 characters, make sure everything is peachy. According to the [American Book Review](http://americanbookreview.org/100bestlines.asp), this is the 6th best first line of a book ever. ###Code text[:100] ###Output _____no_output_____ ###Markdown TokenizationIn the cells, below, I'm creating a couple **dictionaries** to convert the characters to and from integers. Encoding the characters as integers makes it easier to use as input in the network. ###Code # encode the text and map each character to an integer and vice versa # we create two dictionaries: # 1. int2char, which maps integers to characters # 2. char2int, which maps characters to unique integers chars = tuple(set(text)) int2char = dict(enumerate(chars)) char2int = {ch: ii for ii, ch in int2char.items()} # encode the text encoded = np.array([char2int[ch] for ch in text]) ###Output _____no_output_____ ###Markdown And we can see those same characters from above, encoded as integers. ###Code encoded[:100] ###Output _____no_output_____ ###Markdown Pre-processing the dataAs you can see in our char-RNN image above, our LSTM expects an input that is **one-hot encoded** meaning that each character is converted into an integer (via our created dictionary) and *then* converted into a column vector where only it's corresponding integer index will have the value of 1 and the rest of the vector will be filled with 0's. Since we're one-hot encoding the data, let's make a function to do that! ###Code def one_hot_encode(arr, n_labels): # Initialize the the encoded array one_hot = np.zeros((arr.size, n_labels), dtype=np.float32) # Fill the appropriate elements with ones one_hot[np.arange(one_hot.shape[0]), arr.flatten()] = 1. # Finally reshape it to get back to the original array one_hot = one_hot.reshape((*arr.shape, n_labels)) return one_hot # check that the function works as expected test_seq = np.array([[3, 5, 1, 2]]) one_hot = one_hot_encode(test_seq, 8) print(one_hot) def get_batches(arr, batch_size, seq_length): '''Create a generator that returns batches of size batch_size x seq_length from arr. 
Arguments --------- arr: Array you want to make batches from batch_size: Batch size, the number of sequences per batch seq_length: Number of encoded chars in a sequence ''' batch_size_total = batch_size*seq_length # Get the number of batches we can make n_batches = len(arr)//(batch_size_total) # Keep only enough characters to make full batches arr = arr[:batch_size_total*n_batches] #Reshape into batch_size rows arr = arr.reshape(batch_size,-1) ## Iterate over the batches using a window of size seq_length for n in range(0, arr.shape[1], seq_length): # The features x = arr[:,n:n+seq_length] # The targets, shifted by one y = np.zeros_like(x) try: y[:,:-1],y[:,-1] = x[:,1:], arr[:,n+seq_length] except IndexError: y[:,:-1],y[:,-1] = x[:,1:], arr[:,0] yield x, y ###Output _____no_output_____ ###Markdown Test Your ImplementationNow I'll make some data sets and we can check out what's going on as we batch data. Here, as an example, I'm going to use a batch size of 8 and 50 sequence steps. ###Code batches = get_batches(encoded, 8, 50) x, y = next(batches) # printing out the first 10 items in a sequence print('x\n', x[:10, :10]) print('\ny\n', y[:10, :10]) ###Output x [[ 20 94 67 96 29 98 72 13 13 27] [ 94 113 67 49 72 94 110 72 38 29] [ 72 37 94 25 15 94 113 81 72 74] [ 15 94 15 113 51 29 98 112 72 94] [ 99 32 15 72 38 12 52 29 51 29] [112 12 32 72 94 67 29 72 94 110] [110 38 12 25 72 94 81 97 72 105] [ 12 81 72 52 38 29 81 72 38 29]] y [[ 94 67 96 29 98 72 13 13 27 67] [113 67 49 72 94 110 72 38 29 72] [ 37 94 25 15 94 113 81 72 74 62] [ 94 15 113 51 29 98 112 72 94 81] [ 32 15 72 38 12 52 29 51 29 67] [ 12 32 72 94 67 29 72 94 110 16] [ 38 12 25 72 94 81 97 72 105 12] [ 81 72 52 38 29 81 72 38 29 72]] ###Markdown If you implemented `get_batches` correctly, the above output should look something like ```x [[25 8 60 11 45 27 28 73 1 2] [17 7 20 73 45 8 60 45 73 60] [27 20 80 73 7 28 73 60 73 65] [17 73 45 8 27 73 66 8 46 27] [73 17 60 12 73 8 27 28 73 45] [66 64 17 17 46 7 20 73 60 20] [73 76 20 20 60 73 8 60 80 73] [47 35 43 7 20 17 24 50 37 73]]y [[ 8 60 11 45 27 28 73 1 2 2] [ 7 20 73 45 8 60 45 73 60 45] [20 80 73 7 28 73 60 73 65 7] [73 45 8 27 73 66 8 46 27 65] [17 60 12 73 8 27 28 73 45 27] [64 17 17 46 7 20 73 60 20 80] [76 20 20 60 73 8 60 80 73 17] [35 43 7 20 17 24 50 37 73 36]] ``` although the exact numbers may be different. Check to make sure the data is shifted over one step for `y`. --- Defining the network with PyTorchBelow is where you'll define the network.Next, you'll use PyTorch to define the architecture of the network. We start by defining the layers and operations we want. Then, define a method for the forward pass. You've also been given a method for predicting characters. Model StructureIn `__init__` the suggested structure is as follows:* Create and store the necessary dictionaries (this has been done for you)* Define an LSTM layer that takes as params: an input size (the number of characters), a hidden layer size `n_hidden`, a number of layers `n_layers`, a dropout probability `drop_prob`, and a batch_first boolean (True, since we are batching)* Define a dropout layer with `drop_prob`* Define a fully-connected layer with params: input size `n_hidden` and output size (the number of characters)* Finally, initialize the weights (again, this has been given)Note that some parameters have been named and given in the `__init__` function, and we use them and store them by doing something like `self.drop_prob = drop_prob`. 
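Before the class definition below, it can help to see how the tensor shapes move through these layers. A small stand-alone shape check (the sizes here are illustrative and are not the notebook's actual hyperparameters):

```python
import torch
from torch import nn

n_chars, n_hidden, n_layers = 83, 256, 2   # example sizes, not the real vocabulary
batch_size, seq_length = 8, 50

lstm = nn.LSTM(n_chars, n_hidden, n_layers, dropout=0.5, batch_first=True)
fc = nn.Linear(n_hidden, n_chars)

x = torch.zeros(batch_size, seq_length, n_chars)   # one-hot encoded batch
out, hidden = lstm(x)                              # out: (batch, seq, n_hidden)
flat = out.contiguous().view(-1, n_hidden)         # (batch * seq, n_hidden)
scores = fc(flat)                                  # (batch * seq, n_chars)
print(out.shape, flat.shape, scores.shape)
```

That final `(batch * seq_length, n_chars)` shape is exactly what lets the training loop later compare the network output against targets flattened to `batch_size*seq_length`.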
###Code # check if GPU is available train_on_gpu = torch.cuda.is_available() if(train_on_gpu): print('Training on GPU!') else: print('No GPU available, training on CPU; consider making n_epochs very small.') class CharRNN(nn.Module): def __init__(self, tokens, n_hidden=256, n_layers=2, drop_prob=0.5, lr=0.001): super().__init__() self.drop_prob = drop_prob self.n_layers = n_layers self.n_hidden = n_hidden self.lr = lr # creating character dictionaries self.chars = tokens self.int2char = dict(enumerate(self.chars)) self.char2int = {ch: ii for ii, ch in self.int2char.items()} # defining the layers of the model self.lstm = nn.LSTM(len(self.chars),n_hidden,n_layers,dropout=drop_prob,batch_first=True) self.dropout = nn.Dropout(drop_prob) self.fc = nn.Linear(n_hidden,len(self.chars)) def forward(self, x, hidden): ''' Forward pass through the network. These inputs are x, and the hidden/cell state `hidden`. ''' ##Getting the outputs and the new hidden state from the lstm r_output, hidden = self.lstm(x, hidden) ## passing through the dropout layer out = self.dropout(r_output) # Stack up LSTM outputs using view # you may need to use contiguous to reshape the output out = out.contiguous().view(-1, self.n_hidden) out = self.fc(out) # return the final output and the hidden state return out, hidden def init_hidden(self, batch_size): ''' Initializes hidden state ''' # Create two new tensors with sizes n_layers x batch_size x n_hidden, # initialized to zero, for hidden state and cell state of LSTM weight = next(self.parameters()).data if (train_on_gpu): hidden = (weight.new(self.n_layers, batch_size, self.n_hidden).zero_().cuda(), weight.new(self.n_layers, batch_size, self.n_hidden).zero_().cuda()) else: hidden = (weight.new(self.n_layers, batch_size, self.n_hidden).zero_(), weight.new(self.n_layers, batch_size, self.n_hidden).zero_()) return hidden ###Output _____no_output_____ ###Markdown Time to trainA couple of details about training: >* Within the batch loop, we detach the hidden state from its history; this time setting it equal to a new *tuple* variable because an LSTM has a hidden state that is a tuple of the hidden and cell states.* We use [`clip_grad_norm_`](https://pytorch.org/docs/stable/_modules/torch/nn/utils/clip_grad.html) to help prevent exploding gradients. 
###Code def train(net, data, epochs=10, batch_size=10, seq_length=50, lr=0.001, clip=5, val_frac=0.1, print_every=10): ''' Training a network Arguments --------- net: CharRNN network data: text data to train the network epochs: Number of epochs to train batch_size: Number of mini-sequences per mini-batch, aka batch size seq_length: Number of character steps per mini-batch lr: learning rate clip: gradient clipping val_frac: Fraction of data to hold out for validation print_every: Number of steps for printing training and validation loss ''' net.train() opt = torch.optim.Adam(net.parameters(), lr=lr) criterion = nn.CrossEntropyLoss() # create training and validation data val_idx = int(len(data)*(1-val_frac)) data, val_data = data[:val_idx], data[val_idx:] if(train_on_gpu): net.cuda() counter = 0 n_chars = len(net.chars) for e in range(epochs): # initialize hidden state h = net.init_hidden(batch_size) for x, y in get_batches(data, batch_size, seq_length): counter += 1 # One-hot encode our data and make them Torch tensors x = one_hot_encode(x, n_chars) inputs, targets = torch.from_numpy(x), torch.from_numpy(y) if(train_on_gpu): inputs, targets = inputs.cuda(), targets.cuda() # Creating new variables for the hidden state, otherwise # we'd backprop through the entire training history h = tuple([each.data for each in h]) # zero accumulated gradients net.zero_grad() # get the output from the model output, h = net(inputs, h) # calculate the loss and perform backprop loss = criterion(output, targets.view(batch_size*seq_length).long()) loss.backward() # `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs. nn.utils.clip_grad_norm_(net.parameters(), clip) opt.step() # loss stats if counter % print_every == 0: # Get validation loss val_h = net.init_hidden(batch_size) val_losses = [] net.eval() for x, y in get_batches(val_data, batch_size, seq_length): # One-hot encode our data and make them Torch tensors x = one_hot_encode(x, n_chars) x, y = torch.from_numpy(x), torch.from_numpy(y) # Creating new variables for the hidden state, otherwise # we'd backprop through the entire training history val_h = tuple([each.data for each in val_h]) inputs, targets = x, y if(train_on_gpu): inputs, targets = inputs.cuda(), targets.cuda() output, val_h = net(inputs, val_h) val_loss = criterion(output, targets.view(batch_size*seq_length).long()) val_losses.append(val_loss.item()) net.train() # reset to train mode after iterationg through validation data print("Epoch: {}/{}...".format(e+1, epochs), "Step: {}...".format(counter), "Loss: {:.4f}...".format(loss.item()), "Val Loss: {:.4f}".format(np.mean(val_losses))) ###Output _____no_output_____ ###Markdown Instantiating the modelNow we can actually train the network. First we'll create the network itself, with some given hyperparameters. Then, define the mini-batches sizes, and start training! ###Code ## TODO: set your model hyperparameters # define and print the net n_hidden= 512 n_layers= 2 net = CharRNN(chars, n_hidden, n_layers) print(net) ###Output CharRNN( (lstm): LSTM(117, 512, num_layers=2, batch_first=True, dropout=0.5) (dropout): Dropout(p=0.5, inplace=False) (fc): Linear(in_features=512, out_features=117, bias=True) ) ###Markdown Set your training hyperparameters! ###Code batch_size = 256 seq_length = 120 n_epochs = 25 # train the model train(net, encoded, epochs=n_epochs, batch_size=batch_size, seq_length=seq_length, lr=0.001, print_every=10) ###Output Epoch: 1/25... Step: 10... Loss: 2.7027... Val Loss: 2.6824 Epoch: 1/25... Step: 20... 
Loss: 2.6177... Val Loss: 2.6227 Epoch: 1/25... Step: 30... Loss: 2.5845... Val Loss: 2.6054 Epoch: 1/25... Step: 40... Loss: 2.5659... Val Loss: 2.5750 Epoch: 1/25... Step: 50... Loss: 2.5402... Val Loss: 2.5535 Epoch: 1/25... Step: 60... Loss: 2.5080... Val Loss: 2.5313 Epoch: 1/25... Step: 70... Loss: 2.5019... Val Loss: 2.5111 Epoch: 1/25... Step: 80... Loss: 2.4924... Val Loss: 2.4885 Epoch: 1/25... Step: 90... Loss: 2.4727... Val Loss: 2.4664 Epoch: 1/25... Step: 100... Loss: 2.4444... Val Loss: 2.4445 Epoch: 1/25... Step: 110... Loss: 2.4233... Val Loss: 2.4268 Epoch: 1/25... Step: 120... Loss: 2.3961... Val Loss: 2.4005 Epoch: 1/25... Step: 130... Loss: 2.3715... Val Loss: 2.3815 Epoch: 1/25... Step: 140... Loss: 2.3392... Val Loss: 2.3610 Epoch: 1/25... Step: 150... Loss: 2.3326... Val Loss: 2.3391 Epoch: 1/25... Step: 160... Loss: 2.2963... Val Loss: 2.3131 Epoch: 2/25... Step: 170... Loss: 2.2897... Val Loss: 2.2997 Epoch: 2/25... Step: 180... Loss: 2.2875... Val Loss: 2.2722 Epoch: 2/25... Step: 190... Loss: 2.2474... Val Loss: 2.2482 Epoch: 2/25... Step: 200... Loss: 2.2288... Val Loss: 2.2272 Epoch: 2/25... Step: 210... Loss: 2.2274... Val Loss: 2.2057 Epoch: 2/25... Step: 220... Loss: 2.2042... Val Loss: 2.1907 Epoch: 2/25... Step: 230... Loss: 2.1786... Val Loss: 2.1692 Epoch: 2/25... Step: 240... Loss: 2.1658... Val Loss: 2.1568 Epoch: 2/25... Step: 250... Loss: 2.1488... Val Loss: 2.1422 Epoch: 2/25... Step: 260... Loss: 2.1354... Val Loss: 2.1216 Epoch: 2/25... Step: 270... Loss: 2.1022... Val Loss: 2.0986 Epoch: 2/25... Step: 280... Loss: 2.1048... Val Loss: 2.0813 Epoch: 2/25... Step: 290... Loss: 2.0978... Val Loss: 2.0613 Epoch: 2/25... Step: 300... Loss: 2.0543... Val Loss: 2.0453 Epoch: 2/25... Step: 310... Loss: 2.0531... Val Loss: 2.0322 Epoch: 2/25... Step: 320... Loss: 2.0295... Val Loss: 2.0179 Epoch: 2/25... Step: 330... Loss: 2.0371... Val Loss: 2.0021 Epoch: 3/25... Step: 340... Loss: 2.0115... Val Loss: 1.9830 Epoch: 3/25... Step: 350... Loss: 1.9992... Val Loss: 1.9713 Epoch: 3/25... Step: 360... Loss: 1.9732... Val Loss: 1.9597 Epoch: 3/25... Step: 370... Loss: 1.9483... Val Loss: 1.9503 Epoch: 3/25... Step: 380... Loss: 1.9475... Val Loss: 1.9410 Epoch: 3/25... Step: 390... Loss: 1.9256... Val Loss: 1.9272 Epoch: 3/25... Step: 400... Loss: 1.9217... Val Loss: 1.9188 Epoch: 3/25... Step: 410... Loss: 1.9241... Val Loss: 1.9094 Epoch: 3/25... Step: 420... Loss: 1.8889... Val Loss: 1.8912 Epoch: 3/25... Step: 430... Loss: 1.8968... Val Loss: 1.8813 Epoch: 3/25... Step: 440... Loss: 1.9008... Val Loss: 1.8706 Epoch: 3/25... Step: 450... Loss: 1.8894... Val Loss: 1.8594 Epoch: 3/25... Step: 460... Loss: 1.8675... Val Loss: 1.8482 Epoch: 3/25... Step: 470... Loss: 1.8425... Val Loss: 1.8400 Epoch: 3/25... Step: 480... Loss: 1.8424... Val Loss: 1.8291 Epoch: 3/25... Step: 490... Loss: 1.8323... Val Loss: 1.8236 Epoch: 4/25... Step: 500... Loss: 1.8096... Val Loss: 1.8092 Epoch: 4/25... Step: 510... Loss: 1.8081... Val Loss: 1.7995 Epoch: 4/25... Step: 520... Loss: 1.8032... Val Loss: 1.7935 Epoch: 4/25... Step: 530... Loss: 1.8034... Val Loss: 1.7838 Epoch: 4/25... Step: 540... Loss: 1.8199... Val Loss: 1.7794 Epoch: 4/25... Step: 550... Loss: 1.7959... Val Loss: 1.7711 Epoch: 4/25... Step: 560... Loss: 1.7604... Val Loss: 1.7649 Epoch: 4/25... Step: 570... Loss: 1.7840... Val Loss: 1.7575 Epoch: 4/25... Step: 580... Loss: 1.7640... Val Loss: 1.7480 Epoch: 4/25... Step: 590... Loss: 1.7518... Val Loss: 1.7380 Epoch: 4/25... Step: 600... Loss: 1.7396... 
Val Loss: 1.7302 Epoch: 4/25... Step: 610... Loss: 1.7432... Val Loss: 1.7241 Epoch: 4/25... Step: 620... Loss: 1.7486... Val Loss: 1.7134 Epoch: 4/25... Step: 630... Loss: 1.7208... Val Loss: 1.7080 Epoch: 4/25... Step: 640... Loss: 1.7269... Val Loss: 1.7046 Epoch: 4/25... Step: 650... Loss: 1.7149... Val Loss: 1.7026 Epoch: 4/25... Step: 660... Loss: 1.7428... Val Loss: 1.6929 Epoch: 5/25... Step: 670... Loss: 1.7061... Val Loss: 1.6801 Epoch: 5/25... Step: 680... Loss: 1.7054... Val Loss: 1.6741 Epoch: 5/25... Step: 690... Loss: 1.6786... Val Loss: 1.6696 Epoch: 5/25... Step: 700... Loss: 1.6813... Val Loss: 1.6688 Epoch: 5/25... Step: 710... Loss: 1.6827... Val Loss: 1.6620 Epoch: 5/25... Step: 720... Loss: 1.6694... Val Loss: 1.6584 Epoch: 5/25... Step: 730... Loss: 1.6741... Val Loss: 1.6535 Epoch: 5/25... Step: 740... Loss: 1.6656... Val Loss: 1.6439 Epoch: 5/25... Step: 750... Loss: 1.6308... Val Loss: 1.6358 Epoch: 5/25... Step: 760... Loss: 1.6405... Val Loss: 1.6241 Epoch: 5/25... Step: 770... Loss: 1.6570... Val Loss: 1.6246 Epoch: 5/25... Step: 780... Loss: 1.6685... Val Loss: 1.6164 Epoch: 5/25... Step: 790... Loss: 1.6337... Val Loss: 1.6096 Epoch: 5/25... Step: 800... Loss: 1.6238... Val Loss: 1.6058 Epoch: 5/25... Step: 810... Loss: 1.6263... Val Loss: 1.6020 Epoch: 5/25... Step: 820... Loss: 1.6188... Val Loss: 1.5990 Epoch: 6/25... Step: 830... Loss: 1.5977... Val Loss: 1.5912 Epoch: 6/25... Step: 840... Loss: 1.5994... Val Loss: 1.5853 Epoch: 6/25... Step: 850... Loss: 1.6057... Val Loss: 1.5810 Epoch: 6/25... Step: 860... Loss: 1.6177... Val Loss: 1.5761 Epoch: 6/25... Step: 870... Loss: 1.6297... Val Loss: 1.5729 Epoch: 6/25... Step: 880... Loss: 1.5974... Val Loss: 1.5833 Epoch: 6/25... Step: 890... Loss: 1.5752... Val Loss: 1.5733 Epoch: 6/25... Step: 900... Loss: 1.6096... Val Loss: 1.5598 Epoch: 6/25... Step: 910... Loss: 1.5782... Val Loss: 1.5509 Epoch: 6/25... Step: 920... Loss: 1.5703... Val Loss: 1.5467 Epoch: 6/25... Step: 930... Loss: 1.5620... Val Loss: 1.5478 Epoch: 6/25... Step: 940... Loss: 1.5694... Val Loss: 1.5377 Epoch: 6/25... Step: 950... Loss: 1.5695... Val Loss: 1.5356 Epoch: 6/25... Step: 960... Loss: 1.5567... Val Loss: 1.5313 Epoch: 6/25... Step: 970... Loss: 1.5632... Val Loss: 1.5305 Epoch: 6/25... Step: 980... Loss: 1.5533... Val Loss: 1.5306 Epoch: 6/25... Step: 990... Loss: 1.5906... Val Loss: 1.5285 Epoch: 7/25... Step: 1000... Loss: 1.5538... Val Loss: 1.5168 Epoch: 7/25... Step: 1010... Loss: 1.5559... Val Loss: 1.5157 Epoch: 7/25... Step: 1020... Loss: 1.5247... Val Loss: 1.5112 Epoch: 7/25... Step: 1030... Loss: 1.5299... Val Loss: 1.5093 Epoch: 7/25... Step: 1040... Loss: 1.5427... Val Loss: 1.5029 Epoch: 7/25... Step: 1050... Loss: 1.5288... Val Loss: 1.5059 Epoch: 7/25... Step: 1060... Loss: 1.5293... Val Loss: 1.5028 Epoch: 7/25... Step: 1070... Loss: 1.5208... Val Loss: 1.5046 Epoch: 7/25... Step: 1080... Loss: 1.4898... Val Loss: 1.4965 Epoch: 7/25... Step: 1090... Loss: 1.5087... Val Loss: 1.4880 Epoch: 7/25... Step: 1100... Loss: 1.5210... Val Loss: 1.4878 Epoch: 7/25... Step: 1110... Loss: 1.5333... Val Loss: 1.4853 Epoch: 7/25... Step: 1120... Loss: 1.5132... Val Loss: 1.4885 Epoch: 7/25... Step: 1130... Loss: 1.4978... Val Loss: 1.4914 Epoch: 7/25... Step: 1140... Loss: 1.5032... Val Loss: 1.4834 Epoch: 7/25... Step: 1150... Loss: 1.4936... Val Loss: 1.4751 Epoch: 8/25... Step: 1160... Loss: 1.4869... Val Loss: 1.4713 Epoch: 8/25... Step: 1170... Loss: 1.4907... Val Loss: 1.4749 Epoch: 8/25... Step: 1180... 
Loss: 1.4868... Val Loss: 1.4764 Epoch: 8/25... Step: 1190... Loss: 1.5036... Val Loss: 1.4658 Epoch: 8/25... Step: 1200... Loss: 1.5197... Val Loss: 1.4632 Epoch: 8/25... Step: 1210... Loss: 1.4871... Val Loss: 1.4630 Epoch: 8/25... Step: 1220... Loss: 1.4711... Val Loss: 1.4608 Epoch: 8/25... Step: 1230... Loss: 1.5064... Val Loss: 1.4628 Epoch: 8/25... Step: 1240... Loss: 1.4782... Val Loss: 1.4598 Epoch: 8/25... Step: 1250... Loss: 1.4665... Val Loss: 1.4607 Epoch: 8/25... Step: 1260... Loss: 1.4634... Val Loss: 1.4522 Epoch: 8/25... Step: 1270... Loss: 1.4735... Val Loss: 1.4459 Epoch: 8/25... Step: 1280... Loss: 1.4670... Val Loss: 1.4448 Epoch: 8/25... Step: 1290... Loss: 1.4569... Val Loss: 1.4417 Epoch: 8/25... Step: 1300... Loss: 1.4685... Val Loss: 1.4414 Epoch: 8/25... Step: 1310... Loss: 1.4601... Val Loss: 1.4382 Epoch: 8/25... Step: 1320... Loss: 1.5010... Val Loss: 1.4373 Epoch: 9/25... Step: 1330... Loss: 1.4629... Val Loss: 1.4317 Epoch: 9/25... Step: 1340... Loss: 1.4650... Val Loss: 1.4310 ###Markdown Getting the best modelTo set your hyperparameters to get the best performance, you'll want to watch the training and validation losses. If your training loss is much lower than the validation loss, you're overfitting. Increase regularization (more dropout) or use a smaller network. If the training and validation losses are close, you're underfitting so you can increase the size of the network. CheckpointAfter training, we'll save the model so we can load it again later if we need too. Here I'm saving the parameters needed to create the same architecture, the hidden layer hyperparameters and the text characters. ###Code # change the name, for saving multiple files model_name = 'rnn_x_epoch.net' checkpoint = {'n_hidden': net.n_hidden, 'n_layers': net.n_layers, 'state_dict': net.state_dict(), 'tokens': net.chars} with open(model_name, 'wb') as f: torch.save(checkpoint, f) def predict(net, char, h=None, top_k=None): ''' Given a character, predict the next character. Returns the predicted character and the hidden state. ''' # tensor inputs x = np.array([[net.char2int[char]]]) x = one_hot_encode(x, len(net.chars)) inputs = torch.from_numpy(x) if(train_on_gpu): inputs = inputs.cuda() # detach hidden state from history h = tuple([each.data for each in h]) # get the output of the model out, h = net(inputs, h) # get the character probabilities p = F.softmax(out, dim=1).data if(train_on_gpu): p = p.cpu() # move to cpu # get top characters if top_k is None: top_ch = np.arange(len(net.chars)) else: p, top_ch = p.topk(top_k) top_ch = top_ch.numpy().squeeze() # select the likely next character with some element of randomness p = p.numpy().squeeze() char = np.random.choice(top_ch, p=p/p.sum()) # return the encoded value of the predicted char and the hidden state return net.int2char[char], h ###Output _____no_output_____ ###Markdown Priming and generating text Typically you'll want to prime the network so you can build up a hidden state. Otherwise the network will start out generating characters at random. In general the first bunch of characters will be a little rough since it hasn't built up a long history of characters to predict from. 
###Code def sample(net, size, prime='The', top_k=None): if(train_on_gpu): net.cuda() else: net.cpu() net.eval() # eval mode # First off, run through the prime characters chars = [ch for ch in prime] h = net.init_hidden(1) for ch in prime: char, h = predict(net, ch, h, top_k=top_k) chars.append(char) # Now pass in the previous character and get a new one for ii in range(size): char, h = predict(net, chars[-1], h, top_k=top_k) chars.append(char) return ''.join(chars) print(sample(net, 1000, prime='I am ', top_k=5)) ###Output I am often as if his marked her activity of the distant mind, and see that she was the salary and that it was to bring the stop in the window. He did not have been those that is traced in the soldiers, the memery there seemed to him to that through his father. And as shoot on the cruelty, when her house stopped him with the sense of a certain serious and stoppings and watching her feet with a sentence where their hands and the position was now, and to them hardly. But the price of the son of harrows, at the son of whom he was continually forgetting there, and was in which he was stringed into the dream. "You can shall be already as able to mon horse, there's not," she said. "All you have been at a latter, but I won't be in the sense of the sound." "That's not always." "This most still have the children." "Why do you know, they have as an intention of society," he said angrily about her, and was an excitement so much stringling. "Oh, you'll be to do at her? You've been in the position o ###Markdown Loading a checkpoint ###Code # Here we have loaded in a model that trained over 20 epochs `rnn_20_epoch.net` with open('rnn_x_epoch.net', 'rb') as f: checkpoint = torch.load(f) loaded = CharRNN(checkpoint['tokens'], n_hidden=checkpoint['n_hidden'], n_layers=checkpoint['n_layers']) loaded.load_state_dict(checkpoint['state_dict']) # Sample using a loaded model print(sample(loaded, 400, top_k=5, prime="Once he ")) ###Output Once he came to the manner of her bedroom of her son. He was the same thing in the classing of the party, as
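The `predict` function above restricts sampling to the top-k most likely characters. A common variant, not used in this exercise, is to rescale the scores by a temperature before the softmax: low temperatures make the generated text more conservative, high temperatures make it more adventurous. A rough sketch of that idea (the `temperature` knob is my addition, not part of the notebook):

```python
import numpy as np
import torch.nn.functional as F

def sample_with_temperature(out, temperature=0.8):
    """Pick the next character index from raw network scores `out` (shape: 1 x n_chars)."""
    # divide the scores by the temperature, then turn them into probabilities
    p = F.softmax(out / temperature, dim=1).data.cpu().numpy().squeeze()
    return np.random.choice(len(p), p=p / p.sum())
```

The returned index can then be mapped back to a character with `net.int2char`, exactly as `predict` does.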
02_deep_learning/transfer-learning/transfer_learning_tutorial.ipynb
###Markdown Transfer Learning Tutorial==========================**Author**: `Sasank Chilamkurthy `_In this tutorial, you will learn how to train your network usingtransfer learning. You can read more about the transfer learning at `cs231nnotes `__Quoting these notes, In practice, very few people train an entire Convolutional Network from scratch (with random initialization), because it is relatively rare to have a dataset of sufficient size. Instead, it is common to pretrain a ConvNet on a very large dataset (e.g. ImageNet, which contains 1.2 million images with 1000 categories), and then use the ConvNet either as an initialization or a fixed feature extractor for the task of interest.These two major transfer learning scenarios look as follows:- **Finetuning the convnet**: Instead of random initializaion, we initialize the network with a pretrained network, like the one that is trained on imagenet 1000 dataset. Rest of the training looks as usual.- **ConvNet as fixed feature extractor**: Here, we will freeze the weights for all of the network except that of the final fully connected layer. This last fully connected layer is replaced with a new one with random weights and only this layer is trained. ###Code # License: BSD # Author: Sasank Chilamkurthy from __future__ import print_function, division import torch import torch.nn as nn import torch.optim as optim from torch.optim import lr_scheduler import numpy as np import torchvision from torchvision import datasets, models, transforms import matplotlib.pyplot as plt import time import os import copy plt.ion() # interactive mode ###Output _____no_output_____ ###Markdown Load Data---------We will use torchvision and torch.utils.data packages for loading thedata.The problem we're going to solve today is to train a model to classify**ants** and **bees**. We have about 120 training images each for ants and bees.There are 75 validation images for each class. Usually, this is a verysmall dataset to generalize upon, if trained from scratch. Since weare using transfer learning, we should be able to generalize reasonablywell.This dataset is a very small subset of imagenet... Note :: Download the data from `here `_ and extract it to the current directory. ###Code # Data augmentation and normalization for training # Just normalization for validation data_transforms = { 'train': transforms.Compose([ transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) ]), 'val': transforms.Compose([ transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) ]), } data_dir = 'hymenoptera_data' image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x]) for x in ['train', 'val']} dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=4, shuffle=True, num_workers=4) for x in ['train', 'val']} dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']} class_names = image_datasets['train'].classes device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") ###Output _____no_output_____ ###Markdown Visualize a few images^^^^^^^^^^^^^^^^^^^^^^Let's visualize a few training images so as to understand the dataaugmentations. 
###Code def imshow(inp, title=None): """Imshow for Tensor.""" inp = inp.numpy().transpose((1, 2, 0)) mean = np.array([0.485, 0.456, 0.406]) std = np.array([0.229, 0.224, 0.225]) inp = std * inp + mean inp = np.clip(inp, 0, 1) plt.imshow(inp) if title is not None: plt.title(title) plt.pause(0.001) # pause a bit so that plots are updated # Get a batch of training data inputs, classes = next(iter(dataloaders['train'])) # Make a grid from batch out = torchvision.utils.make_grid(inputs) imshow(out, title=[class_names[x] for x in classes]) ###Output _____no_output_____ ###Markdown Training the model------------------Now, let's write a general function to train a model. Here, we willillustrate:- Scheduling the learning rate- Saving the best modelIn the following, parameter ``scheduler`` is an LR scheduler object from``torch.optim.lr_scheduler``. ###Code def train_model(model, criterion, optimizer, scheduler, num_epochs=25): since = time.time() best_model_wts = copy.deepcopy(model.state_dict()) best_acc = 0.0 for epoch in range(num_epochs): print('Epoch {}/{}'.format(epoch, num_epochs - 1)) print('-' * 10) # Each epoch has a training and validation phase for phase in ['train', 'val']: if phase == 'train': model.train() # Set model to training mode else: model.eval() # Set model to evaluate mode running_loss = 0.0 running_corrects = 0 # Iterate over data. for inputs, labels in dataloaders[phase]: inputs = inputs.to(device) labels = labels.to(device) # zero the parameter gradients optimizer.zero_grad() # forward # track history if only in train with torch.set_grad_enabled(phase == 'train'): outputs = model(inputs) _, preds = torch.max(outputs, 1) loss = criterion(outputs, labels) # backward + optimize only if in training phase if phase == 'train': loss.backward() optimizer.step() scheduler.step() # statistics running_loss += loss.item() * inputs.size(0) running_corrects += torch.sum(preds == labels.data) epoch_loss = running_loss / dataset_sizes[phase] epoch_acc = running_corrects.double() / dataset_sizes[phase] print('{} Loss: {:.4f} Acc: {:.4f}'.format( phase, epoch_loss, epoch_acc)) # deep copy the model if phase == 'val' and epoch_acc > best_acc: best_acc = epoch_acc best_model_wts = copy.deepcopy(model.state_dict()) print() time_elapsed = time.time() - since print('Training complete in {:.0f}m {:.0f}s'.format( time_elapsed // 60, time_elapsed % 60)) print('Best val Acc: {:4f}'.format(best_acc)) # load best model weights model.load_state_dict(best_model_wts) return model ###Output _____no_output_____ ###Markdown Visualizing the model predictions^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^Generic function to display predictions for a few images ###Code def visualize_model(model, num_images=6): was_training = model.training model.eval() images_so_far = 0 fig = plt.figure() with torch.no_grad(): for i, (inputs, labels) in enumerate(dataloaders['val']): inputs = inputs.to(device) labels = labels.to(device) outputs = model(inputs) _, preds = torch.max(outputs, 1) for j in range(inputs.size()[0]): images_so_far += 1 ax = plt.subplot(num_images//2, 2, images_so_far) ax.axis('off') ax.set_title('predicted: {}'.format(class_names[preds[j]])) imshow(inputs.cpu().data[j]) if images_so_far == num_images: model.train(mode=was_training) return model.train(mode=was_training) ###Output _____no_output_____ ###Markdown Finetuning the convnet----------------------Load a pretrained model and reset final fully connected layer. 
###Code model_ft = models.resnet18(pretrained=True) num_ftrs = model_ft.fc.in_features model_ft.fc = nn.Linear(num_ftrs, 2) model_ft = model_ft.to(device) criterion = nn.CrossEntropyLoss() # Observe that all parameters are being optimized optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9) # Decay LR by a factor of 0.1 every 7 epochs exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1) ###Output Downloading: "https://download.pytorch.org/models/resnet18-5c106cde.pth" to C:\Users\tgoral/.torch\models\resnet18-5c106cde.pth 46827520it [00:42, 1095686.14it/s] ###Markdown Train and evaluate^^^^^^^^^^^^^^^^^^It should take around 15-25 min on CPU. On GPU though, it takes less than aminute. ###Code model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler, num_epochs=25) visualize_model(model_ft) ###Output _____no_output_____ ###Markdown ConvNet as fixed feature extractor----------------------------------Here, we need to freeze all the network except the final layer. We needto set ``requires_grad == False`` to freeze the parameters so that thegradients are not computed in ``backward()``.You can read more about this in the documentation`here `__. ###Code model_conv = torchvision.models.resnet18(pretrained=True) for param in model_conv.parameters(): param.requires_grad = False # Parameters of newly constructed modules have requires_grad=True by default num_ftrs = model_conv.fc.in_features model_conv.fc = nn.Linear(num_ftrs, 2) model_conv = model_conv.to(device) criterion = nn.CrossEntropyLoss() # Observe that only parameters of final layer are being optimized as # opposed to before. optimizer_conv = optim.SGD(model_conv.fc.parameters(), lr=0.001, momentum=0.9) # Decay LR by a factor of 0.1 every 7 epochs exp_lr_scheduler = lr_scheduler.StepLR(optimizer_conv, step_size=7, gamma=0.1) ###Output _____no_output_____ ###Markdown Train and evaluate^^^^^^^^^^^^^^^^^^On CPU this will take about half the time compared to previous scenario.This is expected as gradients don't need to be computed for most of thenetwork. However, forward does need to be computed. ###Code model_conv = train_model(model_conv, criterion, optimizer_conv, exp_lr_scheduler, num_epochs=25) visualize_model(model_conv) plt.ioff() plt.show() ###Output _____no_output_____
quant_finance_lectures/Lecture34-Factor-Risk-Exposure.ipynb
###Markdown © Copyright Quantopian Inc.© Modifications Copyright QuantRocket LLCLicensed under the [Creative Commons Attribution 4.0](https://creativecommons.org/licenses/by/4.0/legalcode).Disclaimer Factor Risk ExposureBy Evgenia "Jenny" Nitishinskaya, Delaney Granizo-Mackenzie, and Maxwell Margenot. DISCLAIMERAs always, this analysis is based on historical data, and risk exposures estimated on historical data may or may not affect the exposures going forward. As such, computing the risk exposure of a factor is not enough. You must put confidence bounds on that risk exposure, and determine whether the risk exposure can even be modeled reasonably. For more information on this, please see our other lectures, especially Instability of Parameter Estimates. Using Factor Models to Determine Risk ExposureWe can use factor models to analyze the sources of risks and returns in portfolios. Recall that a factor model expresses the returns as$$R_i = a_i + b_{i1} F_1 + b_{i2} F_2 + \ldots + b_{iK} F_K + \epsilon_i$$By modelling the historical returns, we can see how much of them is due to speculation on different factors and how much to asset-specific fluctuations ($\epsilon_p$). We can also examine what sources of risk the portfolio is exposed to. In risk analysis, we often model active returns (returns relative to a benchmark) and active risk (standard deviation of active returns, also known as tracking error or tracking risk).For instance, we can find a factor's marginal contribution to active risk squared (FMCAR). For factor $j$, this is$$ \text{FMCAR}_j = \frac{b_j^a \sum_{i=1}^K b_i^a Cov(F_j, F_i)}{(\text{Active risk})^2} $$where $b_i^a$ is the portfolio's active exposure to factor $i$. This tells us how much risk we incur by being exposed to factor $j$, given all the other factors we're already exposed to.Fundamental factor models are often used to evaluate portfolios because they correspond directly to investment choices (e.g. whether we invest in small-cap or large-cap stocks, etc.). Below, we construct a model to evaluate a single asset; for more information on the model construction, check out the fundamental factor models notebook.We'll use the canonical Fama-French factors for this example, which are the returns of portfolios constructred based on fundamental factors. How many factors do you want?In the Arbitrage Pricing Theory lecture we mention that for predictive models you want fewer parameters. However, this doesn't quite hold for risk exposure. Instead of trying to not overfit a predictive model, you are looking for any possible risk factor that could be influencing your returns. Therefore it's actually safer to estimate exposure to many many risk factors to see if any stick. Anything left over in our $\alpha$ is risk exposure that is currently unexplained by the selected factors. You want your strategy's return stream to be all alpha, and to be unexplained by as many parameters as possible. If you can show that your historical returns have little to no dependence on many factors, this is very positive. Certainly some unrelated risk factors might have spurious relationships over time in a large dataset, but those are not likely to be consistent. SetupThe first thing we do is compute a year's worth of factor returns. NOTEThe process for doing this is described in the Fundamental Factor Models lecture and uses pipeline. For more information please see that lecture. 
###Code import numpy as np import statsmodels.api as sm import scipy.stats as stats from statsmodels import regression import matplotlib.pyplot as plt import pandas as pd import numpy as np from zipline.pipeline import Pipeline from zipline.pipeline.data import sharadar, EquityPricing from zipline.pipeline.factors import CustomFactor, Returns def make_pipeline(): """ Create and return our pipeline. We break this piece of logic out into its own function to make it easier to test and modify in isolation. In particular, this function can be copy/pasted into research and run by itself. """ pipe = Pipeline() Fundamentals = sharadar.Fundamentals.slice(dimension='ARQ', period_offset=0) # Add our factors to the pipeline market_cap = Fundamentals.MARKETCAP.latest # Raw market cap and book to price data gets fed in here pipe.add(market_cap, "market_cap") book_to_price = 1/Fundamentals.PB.latest pipe.add(book_to_price, "book_to_price") # We also get daily returns returns = Returns(inputs=[EquityPricing.close], window_length=2) pipe.add(returns, "returns") # We compute a daily rank of both factors, this is used in the next step, # which is computing portfolio membership. market_cap_rank = market_cap.rank() pipe.add(market_cap_rank, 'market_cap_rank') book_to_price_rank = book_to_price.rank() pipe.add(book_to_price_rank, 'book_to_price_rank') # Build Filters representing the top and bottom 1000 stocks by our combined ranking system. biggest = market_cap_rank.top(1000) smallest = market_cap_rank.bottom(1000) highpb = book_to_price_rank.top(1000) lowpb = book_to_price_rank.bottom(1000) # Don't return anything not in this set, as we don't need it. pipe.set_screen(biggest | smallest | highpb | lowpb) # Add the boolean flags we computed to the output data pipe.add(biggest, 'biggest') pipe.add(smallest, 'smallest') pipe.add(highpb, 'highpb') pipe.add(lowpb, 'lowpb') return pipe pipe = make_pipeline() from zipline.research import run_pipeline start_date = '2014-01-01' end_date = '2015-01-01' results = run_pipeline(pipe, start_date=start_date, end_date=end_date, bundle='usstock-1d-bundle') R_biggest = results[results.biggest]['returns'].groupby(level=0).mean() R_smallest = results[results.smallest]['returns'].groupby(level=0).mean() R_highpb = results[results.highpb]['returns'].groupby(level=0).mean() R_lowpb = results[results.lowpb]['returns'].groupby(level=0).mean() SMB = R_smallest - R_biggest HML = R_highpb - R_lowpb ###Output _____no_output_____ ###Markdown How did each factor do over 2014? ###Code SMB_CUM = np.cumprod(SMB+1) HML_CUM = np.cumprod(HML+1) plt.plot(SMB_CUM.index, SMB_CUM.values) plt.plot(HML_CUM.index, HML_CUM.values) plt.ylabel('Cumulative Return') plt.legend(['SMB Portfolio Returns', 'HML Portfolio Returns']); ###Output _____no_output_____ ###Markdown Computing Risk ExposureNow we can determine how exposed another return stream is to each of these factors. We can do this by running static or rolling linear regressions between our return stream and the factor portfolio returns. First we'll compute the active returns (returns - benchmark) of some random asset and then model that asset as a linear combination of our two factors. The more a factor contributes to the active returns, the more exposed the active returns are to that factor. 
###Code from quantrocket.master import get_securities from quantrocket import get_prices securities = get_securities(symbols=['MSFT', 'AAPL', 'YHOO', 'FB', 'TSLA'], vendors='usstock') # Get returns data for our portfolio portfolio = get_prices( 'usstock-1d-bundle', data_frequency='daily', sids=securities.index.tolist(), fields='Close', start_date=start_date, end_date=end_date).loc['Close'].pct_change()[1:] R = np.mean(portfolio, axis=1) SPY = get_securities(symbols='SPY', vendors='usstock').index[0] bench = get_prices( 'usstock-1d-bundle', data_frequency='daily', sids=SPY, fields='Close', start_date=start_date, end_date=end_date).loc['Close'][SPY].pct_change()[1:] # The excess returns of our active management, in this case just holding a portfolio of our one asset active = R - bench # Define a constant to compute intercept constant = pd.Series(np.ones(len(active.index)), index=active.index) df = pd.DataFrame({'R': active, 'F1': SMB.tz_localize(None), 'F2': HML.tz_localize(None), 'Constant': constant}) df = df.dropna() # Perform linear regression to get the coefficients in the model b1, b2 = regression.linear_model.OLS(df['R'], df[['F1', 'F2']]).fit().params # Print the coefficients from the linear regression print('Sensitivities of active returns to factors:\nSMB: %f\nHML: %f' % (b1, b2)) ###Output Sensitivities of active returns to factors: SMB: -0.031226 HML: -0.068289 ###Markdown Using the formula from the start of the notebook, we can compute the factors' marginal contributions to active risk squared: ###Code F1 = df['F1'] F2 = df['F2'] cov = np.cov(F1, F2) ar_squared = (active.std())**2 fmcar1 = (b1*(b2*cov[0,1] + b1*cov[0,0]))/ar_squared fmcar2 = (b2*(b1*cov[0,1] + b2*cov[1,1]))/ar_squared print('SMB Risk Contribution:', fmcar1) print('HML Risk Contribution:', fmcar2) ###Output SMB Risk Contribution: 0.000310877026642 HML Risk Contribution: 0.00130904951642 ###Markdown © Copyright Quantopian Inc.© Modifications Copyright QuantRocket LLCLicensed under the [Creative Commons Attribution 4.0](https://creativecommons.org/licenses/by/4.0/legalcode).Disclaimer Factor Risk ExposureBy Evgenia "Jenny" Nitishinskaya, Delaney Granizo-Mackenzie, and Maxwell Margenot. DISCLAIMERAs always, this analysis is based on historical data, and risk exposures estimated on historical data may or may not affect the exposures going forward. As such, computing the risk exposure of a factor is not enough. You must put confidence bounds on that risk exposure, and determine whether the risk exposure can even be modeled reasonably. For more information on this, please see our other lectures, especially Instability of Parameter Estimates. Using Factor Models to Determine Risk ExposureWe can use factor models to analyze the sources of risks and returns in portfolios. Recall that a factor model expresses the returns as$$R_i = a_i + b_{i1} F_1 + b_{i2} F_2 + \ldots + b_{iK} F_K + \epsilon_i$$By modelling the historical returns, we can see how much of them is due to speculation on different factors and how much to asset-specific fluctuations ($\epsilon_p$). We can also examine what sources of risk the portfolio is exposed to. In risk analysis, we often model active returns (returns relative to a benchmark) and active risk (standard deviation of active returns, also known as tracking error or tracking risk).For instance, we can find a factor's marginal contribution to active risk squared (FMCAR). 
For factor $j$, this is$$ \text{FMCAR}_j = \frac{b_j^a \sum_{i=1}^K b_i^a Cov(F_j, F_i)}{(\text{Active risk})^2} $$where $b_i^a$ is the portfolio's active exposure to factor $i$. This tells us how much risk we incur by being exposed to factor $j$, given all the other factors we're already exposed to.Fundamental factor models are often used to evaluate portfolios because they correspond directly to investment choices (e.g. whether we invest in small-cap or large-cap stocks, etc.). Below, we construct a model to evaluate a single asset; for more information on the model construction, check out the fundamental factor models notebook.We'll use the canonical Fama-French factors for this example, which are the returns of portfolios constructred based on fundamental factors. How many factors do you want?In the Arbitrage Pricing Theory lecture we mention that for predictive models you want fewer parameters. However, this doesn't quite hold for risk exposure. Instead of trying to not overfit a predictive model, you are looking for any possible risk factor that could be influencing your returns. Therefore it's actually safer to estimate exposure to many many risk factors to see if any stick. Anything left over in our $\alpha$ is risk exposure that is currently unexplained by the selected factors. You want your strategy's return stream to be all alpha, and to be unexplained by as many parameters as possible. If you can show that your historical returns have little to no dependence on many factors, this is very positive. Certainly some unrelated risk factors might have spurious relationships over time in a large dataset, but those are not likely to be consistent. SetupThe first thing we do is compute a year's worth of factor returns. **NOTE**The process for doing this is described in the Fundamental Factor Models lecture and uses pipeline. For more information please see that lecture. ###Code import numpy as np import statsmodels.api as sm import scipy.stats as stats from statsmodels import regression import matplotlib.pyplot as plt import pandas as pd import numpy as np from zipline.pipeline import Pipeline from zipline.pipeline.data import sharadar, EquityPricing from zipline.pipeline.factors import CustomFactor, Returns def make_pipeline(): """ Create and return our pipeline. We break this piece of logic out into its own function to make it easier to test and modify in isolation. In particular, this function can be copy/pasted into research and run by itself. """ pipe = Pipeline() Fundamentals = sharadar.Fundamentals.slice(dimension='ARQ', period_offset=0) # Add our factors to the pipeline market_cap = Fundamentals.MARKETCAP.latest # Raw market cap and book to price data gets fed in here pipe.add(market_cap, "market_cap") book_to_price = 1/Fundamentals.PB.latest pipe.add(book_to_price, "book_to_price") # We also get daily returns returns = Returns(inputs=[EquityPricing.close], window_length=2) pipe.add(returns, "returns") # We compute a daily rank of both factors, this is used in the next step, # which is computing portfolio membership. market_cap_rank = market_cap.rank() pipe.add(market_cap_rank, 'market_cap_rank') book_to_price_rank = book_to_price.rank() pipe.add(book_to_price_rank, 'book_to_price_rank') # Build Filters representing the top and bottom 1000 stocks by our combined ranking system. 
biggest = market_cap_rank.top(1000) smallest = market_cap_rank.bottom(1000) highpb = book_to_price_rank.top(1000) lowpb = book_to_price_rank.bottom(1000) # Don't return anything not in this set, as we don't need it. pipe.set_screen(biggest | smallest | highpb | lowpb) # Add the boolean flags we computed to the output data pipe.add(biggest, 'biggest') pipe.add(smallest, 'smallest') pipe.add(highpb, 'highpb') pipe.add(lowpb, 'lowpb') return pipe pipe = make_pipeline() from zipline.research import run_pipeline start_date = '2014-01-01' end_date = '2015-01-01' results = run_pipeline(pipe, start_date=start_date, end_date=end_date, bundle='usstock-1d-bundle') R_biggest = results[results.biggest]['returns'].groupby(level=0).mean() R_smallest = results[results.smallest]['returns'].groupby(level=0).mean() R_highpb = results[results.highpb]['returns'].groupby(level=0).mean() R_lowpb = results[results.lowpb]['returns'].groupby(level=0).mean() SMB = R_smallest - R_biggest HML = R_highpb - R_lowpb ###Output _____no_output_____ ###Markdown How did each factor do over 2014? ###Code SMB_CUM = np.cumprod(SMB+1) HML_CUM = np.cumprod(HML+1) plt.plot(SMB_CUM.index, SMB_CUM.values) plt.plot(HML_CUM.index, HML_CUM.values) plt.ylabel('Cumulative Return') plt.legend(['SMB Portfolio Returns', 'HML Portfolio Returns']); ###Output _____no_output_____ ###Markdown Computing Risk ExposureNow we can determine how exposed another return stream is to each of these factors. We can do this by running static or rolling linear regressions between our return stream and the factor portfolio returns. First we'll compute the active returns (returns - benchmark) of some random asset and then model that asset as a linear combination of our two factors. The more a factor contributes to the active returns, the more exposed the active returns are to that factor. 
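The cell below runs the static version of this regression; the rolling variant mentioned above is not implemented in the lecture, but a minimal sketch with pandas and statsmodels might look like the following (illustrative only, reusing the `df` that gets built in the next cell):

```python
import pandas as pd
import statsmodels.api as sm

def rolling_exposures(df, window=60):
    """Rolling OLS of active returns on the two factor series (uses the df built below)."""
    betas = []
    for end in range(window, len(df) + 1):
        chunk = df.iloc[end - window:end]
        fit = sm.OLS(chunk['R'], chunk[['F1', 'F2']]).fit()
        betas.append(fit.params)
    return pd.DataFrame(betas, index=df.index[window - 1:])
```

Plotting these rolling betas is a quick way to see whether the exposures estimated below are stable or drifting over time, which is the instability concern raised in the disclaimer at the top.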
###Code from quantrocket.master import get_securities from quantrocket import get_prices securities = get_securities(symbols=['MSFT', 'AAPL', 'YHOO', 'FB', 'TSLA'], vendors='usstock') # Get returns data for our portfolio portfolio = get_prices( 'usstock-1d-bundle', data_frequency='daily', sids=securities.index.tolist(), fields='Close', start_date=start_date, end_date=end_date).loc['Close'].pct_change()[1:] R = np.mean(portfolio, axis=1) SPY = get_securities(symbols='SPY', vendors='usstock').index[0] bench = get_prices( 'usstock-1d-bundle', data_frequency='daily', sids=SPY, fields='Close', start_date=start_date, end_date=end_date).loc['Close'][SPY].pct_change()[1:] # The excess returns of our active management, in this case just holding a portfolio of our one asset active = R - bench # Define a constant to compute intercept constant = pd.Series(np.ones(len(active.index)), index=active.index) df = pd.DataFrame({'R': active, 'F1': SMB.tz_localize(None), 'F2': HML.tz_localize(None), 'Constant': constant}) df = df.dropna() # Perform linear regression to get the coefficients in the model b1, b2 = regression.linear_model.OLS(df['R'], df[['F1', 'F2']]).fit().params # Print the coefficients from the linear regression print('Sensitivities of active returns to factors:\nSMB: %f\nHML: %f' % (b1, b2)) ###Output Sensitivities of active returns to factors: SMB: -0.027129 HML: -0.047605 ###Markdown Using the formula from the start of the notebook, we can compute the factors' marginal contributions to active risk squared: ###Code F1 = df['F1'] F2 = df['F2'] cov = np.cov(F1, F2) ar_squared = (active.std())**2 fmcar1 = (b1*(b2*cov[0,1] + b1*cov[0,0]))/ar_squared fmcar2 = (b2*(b1*cov[0,1] + b2*cov[1,1]))/ar_squared print('SMB Risk Contribution:', fmcar1) print('HML Risk Contribution:', fmcar2) ###Output SMB Risk Contribution: 0.00022347420398894437 HML Risk Contribution: 0.000645267708587981
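###Markdown
The two-factor computation above can be written once for an arbitrary number of factors, which is convenient if you follow the earlier advice of estimating exposure to many risk factors. The cell below is an illustrative sketch rather than part of the original analysis: it assumes the `df`, `active`, `b1` and `b2` objects built above, and the 60-day rolling window is an arbitrary choice.
###Code
# Hedged sketch: generalized FMCAR for K factors, assuming `df` and `active` from above.
def fmcar(betas, factor_returns, active_returns):
    """Marginal contribution of each factor to active risk squared.

    betas          : sequence of factor sensitivities (b_1, ..., b_K)
    factor_returns : DataFrame with one column per factor
    active_returns : Series of active returns
    """
    betas = np.asarray(betas)
    cov = np.cov(factor_returns.values, rowvar=False)  # K x K factor covariance matrix
    ar_squared = active_returns.std()**2
    # FMCAR_j = b_j * sum_i b_i * Cov(F_j, F_i) / (active risk)^2
    return betas * (cov @ betas) / ar_squared

# Should reproduce fmcar1 and fmcar2 computed above
print(fmcar([b1, b2], df[['F1', 'F2']], active))

# A simplified rolling (univariate) beta to SMB, using an assumed 60-day window,
# to illustrate the "rolling linear regressions" mentioned earlier.
rolling_beta_smb = df['R'].rolling(60).cov(df['F1']) / df['F1'].rolling(60).var()
print(rolling_beta_smb.tail())
###Output
_____no_output_____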
notebooks/Function_Testing.ipynb
###Markdown Testing Notebook ###Code # test View import torch from torch import nn # Torch uses NCHW image_tensor = torch.randn(64, 3, 200, 200) # define some layers # In channel, out channel, kernel, stride conv2d_depth = nn.Conv2d(3, 3*10, 3, 1, 1) image_tensor.size() ###Output _____no_output_____ ###Markdown depthwise conv increases the num channels ###Code conv2d_depth(image_tensor).size() conv2d_2 = nn.Conv2d(3, 3*10, 7, 1, 1) ###Output _____no_output_____ ###Markdown due to the kernel / stride and padding the image dimensions can change ###Code conv2d_2(image_tensor).size() # Testing the view function image_tensor.view(image_tensor.size(0), -1).size() # Quick Resnet print import torchvision.models as models resnet18 = models.resnet18() print(resnet18) import torch import torchvision import torchprof model = torchvision.models.alexnet(pretrained=False).cuda() x = torch.rand([1, 3, 224, 224]).cuda() with torchprof.Profile(model, use_cuda=True) as prof: model(x) print(prof.display(show_events=False)) # equivalent to `print(prof)` and `print(prof.display())` ###Output Module | Self CPU total | CPU total | CUDA total | Occurrences ---------------|----------------|-----------|------------|------------ AlexNet | | | | ├── features | | | | │├── 0 | 359.165ms | 1.437s | 1.437s | 1 │├── 1 | 959.043us | 1.714ms | 1.875ms | 1 │├── 2 | 231.398us | 504.635us | 574.112us | 1 │├── 3 | 785.574us | 3.403ms | 3.620ms | 1 │├── 4 | 80.059us | 99.289us | 100.064us | 1 │├── 5 | 195.049us | 433.918us | 436.928us | 1 │├── 6 | 881.273us | 3.541ms | 3.526ms | 1 │├── 7 | 63.520us | 78.270us | 78.144us | 1 │├── 8 | 732.715us | 2.950ms | 3.202ms | 1 │├── 9 | 74.500us | 93.040us | 94.048us | 1 │├── 10 | 446.876us | 1.797ms | 1.834ms | 1 │├── 11 | 67.679us | 84.399us | 84.704us | 1 │└── 12 | 171.409us | 380.277us | 376.192us | 1 ├── avgpool | 165.409us | 368.668us | 367.328us | 1 └── classifier | | | | ├── 0 | 634.294us | 1.349ms | 780.512us | 1 ├── 1 | 1.960ms | 2.021ms | 2.289ms | 1 ├── 2 | 79.449us | 100.449us | 100.000us | 1 ├── 3 | 200.019us | 471.578us | 467.008us | 1 ├── 4 | 209.079us | 265.389us | 380.192us | 1 ├── 5 | 63.530us | 78.220us | 79.392us | 1 └── 6 | 200.908us | 255.868us | 278.592us | 1
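###Markdown
The size changes observed above follow the standard convolution output-size formula. The check below is illustrative only; it recomputes the expected spatial dimension for the two layers defined earlier in this notebook.
###Code
# Hedged sketch: expected spatial size of a square Conv2d output.
def conv_out_size(h_in, kernel, stride=1, padding=0, dilation=1):
    # floor((H_in + 2*padding - dilation*(kernel - 1) - 1) / stride) + 1
    return (h_in + 2 * padding - dilation * (kernel - 1) - 1) // stride + 1

print(conv_out_size(200, kernel=3, stride=1, padding=1))  # 200 -> matches conv2d_depth
print(conv_out_size(200, kernel=7, stride=1, padding=1))  # 196 -> matches conv2d_2
###Output
_____no_output_____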
plotting/MNIST_DBM.ipynb
###Markdown Load results Load or create evaluation results of initial model ###Code n_iter = 10 n_checkpoint = 2 perc = 10 res_path = os.path.join('..', 'models', 'MNIST') initial_path = os.path.join(res_path,'initial') assert os.path.exists(initial_path), "Model does not exist yet. Train initial DBM first by running pruning/MNIST_Baselines.py - requires GPU" dbm = get_initial_DBM(initial_path) # loads initial DBM. params = dbm.get_params() n_vis = params['n_visible_'] nh1 = initial_units_l1 = params['n_hiddens_'][0] nh2 = initial_units_l2 = params['n_hiddens_'][1] rec_fields = dbm.get_tf_params(scope='masks')['rf_mask'] initial_n_weights_l1 = len(rec_fields.flatten()[rec_fields.flatten()==1]) initial_n_weights_l2 = int(nh1 * nh2) del dbm try: initial_probs_win_digits = np.load(os.path.join(initial_path,'ProbsWinDig_Initial_Samples.npy'))[1,:] initial_count_digits = np.load(os.path.join(initial_path,'ProbsWinDig_Initial_Samples.npy'))[2,:] initial_logreg = np.load(os.path.join(initial_path, 'Accuracy_hidden_layer_reps.npy')) except IOError: evaluate_initial_DBM(dbm, initial_path) initial_probs_win_digits = np.load(os.path.join(initial_path,'ProbsWinDig_Initial_Samples.npy'))[1,:] initial_count_digits = np.load(os.path.join(initial_path,'ProbsWinDig_Initial_Samples.npy'))[2,:] initial_logreg = np.load(os.path.join(initial_path, 'Accuracy_hidden_layer_reps.npy')) ###Output Loading RBM #1 ... No pruning, array of ones is initialized Loading RBM #2 ... No pruning, array of ones is initialized Loading DBM ... INFO:tensorflow:Restoring parameters from ../models/MNIST/initial/MNIST_DBM_Layer1/model INFO:tensorflow:Restoring parameters from ../models/MNIST/initial/MNIST_DBM_Layer2/model INFO:tensorflow:Restoring parameters from ../models/MNIST/initial/MNIST_DBM_Layer1/model INFO:tensorflow:Restoring parameters from ../models/MNIST/initial/MNIST_DBM_Layer2/model INFO:tensorflow:Restoring parameters from ../models/MNIST/initial/MNIST_InitialDBM/model ###Markdown Load or create MNIST baselines ###Code if not os.path.exists(os.path.join(res_path, "logreg_MNIST.pkl")): _ = create_baseline_classifier(res_path) test_probs_digits = np.load(os.path.join(res_path,'ProbsWinDig_TestDigits.npy'))[1,:] test_count_digits = np.load(os.path.join(res_path,'ProbsWinDig_TestDigits.npy'))[2,:] mean_test_prob = np.mean(test_probs_digits) random_probs_digits = np.load(os.path.join(res_path,'ProbsWinDig_Random.npy'))[1,:] random_count_digits = np.load(os.path.join(res_path,'ProbsWinDig_Random.npy'))[2,:] mean_random_prob = np.mean(random_probs_digits) acc_rawdigits_logreg = np.load(os.path.join(res_path, 'Accuracy_TestDigits.npy')) ###Output _____no_output_____ ###Markdown Load results of pruned models ###Code exp_labels=['Variance FI', 'Heuristic FI', r'|w|', 'Random', 'Anti-FI'] exp_names = [f'varianceFI_{perc}perc_{n_iter}sessions', f'heuristicFI_{perc}perc_{n_iter}sessions', f'w_{perc}perc_{n_iter}sessions', f'random_{perc}perc_{n_iter}sessions', f'antiFI_{perc}perc_{n_iter}sessions'] exp_colors = GATHER for i, exp in enumerate(exp_names): assert os.path.exists(os.path.join(res_path, exp)), f"Pruning experiment does not exist yet. 
Run {exp_labels[i]} pruning script" ###Output _____no_output_____ ###Markdown Load encoding performance results of pruned models ###Code acc_logreg = [] n_act_weights_l1 = [] n_act_weights_l2 = [] n_hid_units_L1 = [] n_hid_units_L2 = [] for exp in range(len(exp_names)): temp_link = res_path+'/{}/res'.format(exp_names[exp]) acc_logreg.append(np.load(os.path.join(temp_link,'AccLogReg.npy')).flatten()) n_act_weights_l1.append(np.load(os.path.join(temp_link, 'n_active_weights_L1.npy')).flatten()) n_act_weights_l2.append(np.load(os.path.join(temp_link, 'n_active_weights_L2.npy')).flatten()) n_hid_units_L1.append(np.load(os.path.join(temp_link, 'n_hid_units_L1.npy')).flatten()) n_hid_units_L2.append(np.load(os.path.join(temp_link, 'n_hid_units_L2.npy')).flatten()) # add the initial performance acc_logreg[exp] = np.insert(acc_logreg[exp], 0, initial_logreg) n_hid_units_L1[exp] = np.insert(n_hid_units_L1[exp],0, initial_units_l1) n_hid_units_L2[exp] = np.insert(n_hid_units_L2[exp], 0, initial_units_l2) n_act_weights_l1[exp] = np.insert(n_act_weights_l1[exp], 0, initial_n_weights_l1) n_act_weights_l2[exp] = np.insert(n_act_weights_l2[exp], 0, initial_n_weights_l2) # convert lists to arrays acc_logreg = np.asarray(acc_logreg)[:,:n_iter*2+1] n_act_weights_l1 = np.asarray(n_act_weights_l1)[:,:n_iter*2+1] n_act_weights_l2 = np.asarray(n_act_weights_l2)[:,:n_iter*2+1] n_hid_units_L1 = np.asarray(n_hid_units_L1)[:,:n_iter*2+1] n_hid_units_L2 = np.asarray(n_hid_units_L2)[:,:n_iter*2+1] ###Output _____no_output_____ ###Markdown Load generative performance results of pruned models ###Code diversity_digits_all = [[] for i in range(len(exp_names))] mean_prob_all = [[] for i in range(len(exp_names))] min_prob_all = [[] for i in range(len(exp_names))] max_prob_all = [[] for i in range(len(exp_names))] entropy_all = [[] for i in range(len(exp_names))] probs_win_digits_all = [[] for i in range(len(exp_names))] count_digits_all = [[] for i in range(len(exp_names))] which_digit_all = [[] for i in range(len(exp_names))] for exp in range(len(exp_names)): temp_link = res_path+'/{}/res'.format(exp_names[exp]) probs_win_digits = [] count_digits = [] probs_win_digits.append(initial_probs_win_digits) count_digits.append(initial_count_digits) entropy_all[exp].append(entropy(initial_count_digits)) count_digits_all[exp].append(initial_count_digits) which_digit_all[exp].append(range(10)) probs_win_digits_all[exp].append(initial_probs_win_digits) for sess in np.arange(1,n_iter+1): for checkpoint in np.arange(1,n_checkpoint+1): which_digit_all[exp].append(np.load(os.path.join(temp_link, 'ProbsWinDig_sess{}_checkpoint{}.npy'.format(sess,checkpoint)))[0,:]) temp = np.load(os.path.join(temp_link, 'ProbsWinDig_sess{}_checkpoint{}.npy'.format(sess,checkpoint)))[1,:] count = np.load(os.path.join(temp_link, 'ProbsWinDig_sess{}_checkpoint{}.npy'.format(sess,checkpoint)))[2,:] entropy_all[exp].append(entropy(count)) probs_win_digits.append(temp) count_digits.append(count) count_digits_all[exp].append(count) probs_win_digits_all[exp].append(temp) probs_win_digits = np.asarray(probs_win_digits) count_digits = np.asarray(count_digits) mean_prob = np.zeros(len(probs_win_digits)) max_prob = np.zeros(len(probs_win_digits)) min_prob = np.zeros(len(probs_win_digits)) for i in range(len(probs_win_digits)): mean_prob[i] = np.mean(probs_win_digits[i], axis=0) max_prob[i] = probs_win_digits[i].max() min_prob[i] = probs_win_digits[i].min() # we only saved these two times diversity_digits = np.zeros(len(count_digits)) for i in 
range(len(count_digits)): if len(count_digits[i]) < 10: # if one category has a zero count min_counts = 0 else: min_counts = count_digits[i].min() max_counts = count_digits[i].max() diversity_digits[i] = min_counts/max_counts diversity_digits_all[exp].append(diversity_digits) mean_prob_all[exp].append(mean_prob) min_prob_all[exp].append(min_prob) max_prob_all[exp].append(max_prob) entropy_all = np.asarray(entropy_all) diversity_digits_all = np.asarray(diversity_digits_all)[:,0,:] mean_prob_all = np.asarray(mean_prob_all)[:,0,:] min_prob_all = np.asarray(min_prob_all)[:,0,:] max_prob_all = np.asarray(max_prob_all)[:,0,:] ###Output _____no_output_____ ###Markdown Figure: Final visible layers Retrieve final visible layers ###Code indices_unconnected_v = [] for exp in range(len(exp_names)): temp_path = os.path.join(res_path, f'{exp_names[exp]}', 'res') if not os.path.exists(os.path.join(temp_path, 'final_indices_of_lost_visibles.npy')): model_path = os.path.join(os.path.join(res_path, f'{exp_names[exp]}', f'MNIST_PrunedDBM_both_Sess{n_iter}')) dbm = DBM.load_model(model_path+'/') prune_mask = dbm.get_tf_params(scope='masks')['prune_mask'] out_synapses = np.sum(mask, axis=1) # sum of outgoing synapses from the visible layer current_unconnected_v = sum(out_synapses==0) ind = np.argwhere(out_synapses == 0) if len(ind)==0: indices_unconnected_v.append(None) else: indices_unconnected_v.append(ind) np.save(os.path.join(temp_path, 'final_indices_of_lost_visibles.npy'), ind) del dbm else: indices_unconnected_v.append(np.load(os.path.join(temp_path, 'final_indices_of_lost_visibles.npy'))) fig = plt.figure(figsize=(12,6))#constrained_layout=True gs = gridspec.GridSpec(1, len(exp_names)) for i in range(len(exp_names)): ax = fig.add_subplot(gs[0, i]) visible = np.ones(400) visible[indices_unconnected_v[i]]=0 im = ax.imshow(visible.reshape(20,20), cmap=plt.cm.binary_r) ax.set_xticks(np.arange(0.5,20), []) ax.set_yticks(np.arange(0.5,20), []) ax.xaxis.set_ticks_position('none') ax.yaxis.set_ticks_position('none') ax.set_yticklabels([]) ax.set_xticklabels([]) title= exp_labels[i] ax.set_title(title, fontsize=fs+2) ax.grid() plt.tight_layout() plt.show() ###Output _____no_output_____ ###Markdown Figure: Exemplary visible samples Retrieve final visible samples ###Code samples_v = [] n_samples = 25 for exp in range(len(exp_names)): temp_path = os.path.join(res_path, f'{exp_names[exp]}', 'res') if not os.path.exists(os.path.join(temp_path, f'final_visible_samples_n{n_samples}.npy')): model_path = os.path.join(os.path.join(res_path, f'{exp_names[exp]}', f'MNIST_PrunedDBM_both_Sess{n_iter}')) dbm = DBM.load_model(model_path+'/') sample = dbm.sample_gibbs(n_gibbs_steps=10, save_model=True, n_runs=np.max([n_samples, 1000])) sample_v = sample[:,:n_vis] # extract visible samples random_indices = random.sample(range(sample_v.shape[0]), n_samples) random_sample_v = sample_v[random_indices, :].astype('bool') # randomly select some visible samples samples_v.append(random_sample_v) np.save(os.path.join(temp_path, f'final_visible_samples_n{n_samples}.npy'), random_sample_v) del dbm else: samples_v.append(np.load(os.path.join(temp_path, f'final_visible_samples_n{n_samples}.npy'))) fig = plt.figure(figsize=(14, 3)) outer = gridspec.GridSpec(1, len(exp_names)) for j in range(len(exp_names)): inner = gridspec.GridSpecFromSubplotSpec(int(math.sqrt(n_samples)), int(math.sqrt(n_samples)), subplot_spec=outer[j], wspace=0.1, hspace=0.1) for i in range(int(math.sqrt(n_samples))*int(math.sqrt(n_samples))): if i < len(samples_v[j].T): 
img = samples_v[j][i].reshape((int(math.sqrt(n_vis)),int(math.sqrt(n_vis)))) ax = plt.Subplot(fig, inner[i]) for d in ('bottom', 'top', 'left', 'right'): ax.spines[d].set_linewidth(1.) if i == 2: ax.set_title(exp_labels[j], fontsize=16) ax.axis('off') ax.imshow(img, cmap =plt.cm.binary) fig.add_subplot(ax) ###Output _____no_output_____ ###Markdown Figure: Encoding performance ###Code PostPrune = False # whether to show performance immediately after pruning show_every = 2# set to 1 if you want to display pruned & retrained in the same plot fig = plt.figure(figsize=(12,6))#constrained_layout=True gs = gridspec.GridSpec(2, len(exp_names)) ax1 = fig.add_subplot(gs[0:2, 0:3]) ax1.axhline(1-acc_rawdigits_logreg, color='black', linestyle='--', label='Performance on raw digits') ax1.text(-0.1, 1.1, string.ascii_uppercase[0], transform=ax1.transAxes, size=20, weight='bold') for exp in range(len(exp_names)): ax1.plot(n_act_weights_l2[exp][0::show_every]+n_act_weights_l1[exp][0::show_every], 1-acc_logreg[exp][0::show_every], c=exp_colors[exp],marker=".") if PostPrune: ax1.plot(n_act_weights_l2[exp][1::show_every]+n_act_weights_l1[exp][1::show_every], 1-acc_logreg[exp][1::show_every], c=exp_colors[exp],marker="v", linestyle='None') ax1.set_xscale('log') ax1.set_yscale('log') ax1.yaxis.set_major_formatter(ScalarFormatter()) ax1.set_ylim(None,1) ax1.set_xlabel(r'$n_w$', fontsize=fs+2) ax1.invert_xaxis() ax1.set_ylabel('Classification Error', fontsize=fs+2) for exp in range(len(exp_names)): ax1.plot([-5], [0], marker='o', markersize=10, color=exp_colors[exp], label=exp_labels[exp], linestyle='None') ax1.legend(loc='best', fontsize=fs) ax1.grid(True, which='both') ax2 = fig.add_subplot(gs[0, 3:]) ax2.text(-0.1, 1.25, string.ascii_uppercase[1], transform=ax2.transAxes, size=20, weight='bold') for exp in range(len(exp_names)): ax2.plot(n_act_weights_l1[exp], n_hid_units_L1[exp], c=exp_colors[exp], marker=".") ax2.invert_xaxis() ax2.set_xscale('log') ax2.set_xlabel(r'$n_{w_{\mathbf{h}^1}}$', fontsize=fs+2) ax2.set_ylabel(r'$n_{\mathbf{h}^1}$', fontsize=fs+2) ax2.grid(True, which='both') ax3 = fig.add_subplot(gs[1, 3:]) for exp in reversed(range(len(exp_names))): ax3.plot(n_act_weights_l2[exp], n_hid_units_L2[exp], c=exp_colors[exp], marker=".") ax3.invert_xaxis() ax3.set_xscale('log') ax3.set_xlabel(r'$n_{w_{\mathbf{h}^2}}$', fontsize=fs+2) ax3.set_ylabel(r'$n_{\mathbf{h}^2}$', fontsize=fs+2) ax3.grid(True, which='both') ax3.set_yticks([0,250,500,676]) plt.tight_layout() plt.show() ###Output _____no_output_____ ###Markdown Figure: digit quality during pruning ###Code fig = plt.figure(figsize=(12,4))#constrained_layout=True plt.rcParams.update({'font.size': 14}) gs = gridspec.GridSpec(2, 3) ax1 = fig.add_subplot(gs[0:2, 0:2]) for exp in range(len(exp_names)): ax1.plot(n_act_weights_l2[exp][0::show_every]+n_act_weights_l1[exp][0::show_every], mean_prob_all[exp][0::show_every], c=exp_colors[exp], marker='.') if PostPrune: ax1.plot(n_act_weights_l2[exp][1::show_every]+n_act_weights_l1[exp][1::show_every], mean_prob_all[exp][1::show_every], c=exp_colors[exp], marker='v', linestyle='None') for exp in range(len(exp_names)): ax1.plot([-5], [0], marker='o', markersize=10, color=exp_colors[exp], label=exp_labels[exp], linestyle='None') ax1.set_xscale('log') ax1.text(-0.1, 1.1, string.ascii_uppercase[0], transform=ax1.transAxes, size=15, weight='bold') ax1.invert_xaxis() ax1.set_title('Digit quality during pruning') ax1.set_ylabel('Probability of winning class', fontsize=fs-2) ax1.axhline(mean_test_prob, label='Test 
digits', color='black', linestyle='--') ax1.axhline(mean_random_prob, label='Random patterns', color='grey', linestyle='--') ax1.set_ylim(0.45,1.) ax1.set_xlabel(r'$n_w$', fontsize=fs) ax1.set_yticks(np.arange(0.5, 1.01, step=0.1)) ax1.grid(True, which='both') ax1.legend(loc='upper right', fontsize=fs-2, bbox_to_anchor=(1.45, 1.05)) plt.tight_layout() plt.show() ###Output _____no_output_____ ###Markdown Figure: digit diversity during pruning ###Code fig = plt.figure(figsize=(5,8))#constrained_layout=True gs = gridspec.GridSpec(len(exp_names), 1) for exp in range(len(exp_names)): ax = fig.add_subplot(gs[exp, 0]) ax.set_ylabel('% of samples', fontsize=fs-2) for number in range(10): ax.plot(n_act_weights_l2[exp][0::show_every]+n_act_weights_l1[exp][0::show_every], count_digit_complete[exp,0::show_every,number].astype(float)/600, marker='${}$'.format(int(number)), c=plt.cm.Set3.colors[number]) if PostPrune: ax.plot(n_act_weights_l2[exp][1::show_every]+n_act_weights_l1[exp][1::show_every], count_digit_complete[exp,1::show_every,number].astype(float)/600, marker='${}$'.format(int(number)), linestyle='None', c=plt.cm.Set3.colors[number]) ax.axhline(10.0, color='black', linestyle='--') ax.invert_xaxis() ax.grid(which='both') ax.set_xscale('log') ax.set_yscale('symlog') ax.yaxis.set_major_formatter(ScalarFormatter()) ax.set_ylim(0,100) ax.set_title(exp_labels[exp], fontsize=fs-2) if exp==len(exp_names)-1: ax.set_xlabel(r'$n_w$', fontsize=fs-2) plt.tight_layout() plt.show() ###Output _____no_output_____
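###Markdown
As a rough sanity check on the x-axes in the figures above: each pruning session removes a fixed percentage of the remaining weights, so the surviving fraction shrinks geometrically. The sketch below is illustrative only; the `perc` and `n_iter` values mirror those set at the top of the notebook, and the per-digit counts are hypothetical.
###Code
# Hedged sketch: surviving weight fraction after pruning, plus the diversity measure used above.
import numpy as np
from scipy.stats import entropy

perc, n_iter = 10, 10                           # assumed, as set at the top of the notebook
remaining = (1 - perc / 100) ** n_iter          # 0.9**10 ~ 0.349 of the initial weights
print(f"~{remaining:.1%} of the weights remain after {n_iter} pruning sessions")

counts = np.array([60, 55, 70, 58, 62, 59, 61, 57, 63, 55])  # hypothetical per-digit sample counts
print("diversity (min/max):", counts.min() / counts.max())   # 1.0 would be perfectly balanced
print("entropy:", entropy(counts))                            # ln(10) ~ 2.30 is the maximum
###Output
_____no_output_____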
LibroDeClasesDigital_ValidarJSON_V02.ipynb
###Markdown Sección: CARGAR LIBRERÍAS DE TRABAJO--- ###Code !sudo apt install sqlcipher libsqlcipher0 libsqlcipher-dev -q -y !sudo -H pip3 install pysqlcipher3 !pip install pycryptodome !pip install validate_email !pip install pyDNS !apt-get install python3-dns import re from datetime import datetime import pytz from time import time #importamos la función time para capturar tiempos import os import pandas as pd from pandas.io.json import json_normalize import numpy as np import json from zipfile import ZipFile import requests, io from sqlalchemy import create_engine import csv import string, random import Crypto from Crypto.PublicKey import RSA from Crypto.Cipher import PKCS1_OAEP from Crypto import Random import ast #https://www.novixys.com/blog/using-aes-encryption-decryption-python-pycrypto/ #---------------------------------------------------------------------------- # Encripta la clave de la BD SQLCipher utilizando la llave pública de la # Superintendencia de Educación. #---------------------------------------------------------------------------- def encryptTextUsingSiePublicKey(txt): url_to_pem_file = "https://static.superintendencia-educacion.cl/KP/clave.pub.txt" r = requests.get(url_to_pem_file, verify=False, stream=True) path_to_public_pem_file = io.BytesIO(r.content).read() publickey = RSA.importKey(path_to_public_pem_file) encryptor = PKCS1_OAEP.new(publickey) encrypted = encryptor.encrypt(bytes(txt,"utf-8")) return encrypted encryptTextUsingSiePublicKey('El Mundo') from itertools import cycle def validarRut(rut): if(rut is not None): dv = ''.join([c for c in list(rut.upper()) if c.isalpha()]) aux = ''.join([c for c in list(rut) if c.isdigit()]) if(dv == ''): dv = aux[-1:] aux = aux[:-1] revertido = map(int, reversed(str(aux))) factors = cycle(range(2,8)) s = sum(d * f for d, f in zip(revertido,factors)) res = (-s)%11 if ((str(res) == dv) or (dv=="K" and res==10)): return True return False validarRut('22171685-K') import requests from lxml import html def nameFromRUN(r,d): root = 'https://zeus.sii.cl/cvc_cgi/nar/nar_consulta' url = f'{root}?ACEPTAR=consulta&RUT={r}&DV={d}' pageContent=requests.get(url) tree = html.fromstring(pageContent.content) return tree.xpath('//*/tr[1]/td/*/text()')[1] nameFromRUN('1','9') from validate_email import validate_email is_valid = validate_email('[email protected]') print(is_valid) test = """ def openConnection(DB_NAME,secPhrase): global dfLog,_sep,_encode _r = True try: params = 'cipher=aes-256-cfb&kdf_iter=256000&cipher_page_size=4096' engine = create_engine(f"sqlite+pysqlcipher://:{secPhrase}@/{DB_NAME}?{params}") engine.execute("PRAGMA cipher_compatibility = 4;") conn = engine.connect() rows = conn.execute("SELECT * FROM Person;") if(not rows.returns_rows): raise Exception("Error al leer los datos de la BD") else: print(rows) except Exception as e: _t = "ERROR COMMIT: "+str(e) print(_t) _r = False finally: conn.close() return _r openConnection('/content/ceds-nds-v7_1_encryptedD4.db','test') """ ###Output _____no_output_____ ###Markdown Sección: DEFINICIÓN DE FUNCIONES--- ###Code #---------------------------------------------------------------------------- #PASO N° 20 - Transformar el archivo JSON en archivos CSV's. Uno por tabla. #---------------------------------------------------------------------------- #---------------------------------------------------------------------------- # Transforma archivo JSON en un DataFrame de pandas con todas sus columnas. # Agrega las columnas que faltan. 
#---------------------------------------------------------------------------- def jsonToDataframe(elem, jsonData): global dfLog data=json_normalize(jsonData[elem['JSONGroupName']],elem['TableName']) df = pd.DataFrame(data, columns=elem['ColumnList']) df.drop_duplicates(inplace=True) if(elem['TableName']=='Person'): print(df) _t = f"Tabla: {elem['TableName']} cargada como DataFrame exitosamente" print(_t); dfLog = dfLog.append(pd.Series({'result': _t}), ignore_index=True) return df def validaLosTiposDeDatos(df, elem): global dfLog #Mapeo de tipos de datos SQL -> Pyhton _dTypes = { "bit": [pd.api.types.is_bool_dtype,np.int_,"bool"], "char": [pd.api.types.is_string_dtype,np.unicode_,"str"], "nchar": [pd.api.types.is_string_dtype,np.unicode_,"str"], "nvarchar": [pd.api.types.is_string_dtype,np.unicode_,"str"], "nvarcharmax": [pd.api.types.is_string_dtype,np.unicode_,"str"], "varchar": [pd.api.types.is_string_dtype,np.unicode_,"str"], "bigint": [pd.api.types.is_integer_dtype,np.int_,"int64"], "int": [pd.api.types.is_integer_dtype,np.int_,"int32"], "smallint": [pd.api.types.is_integer_dtype,np.int_,"int64"], "tinyint": [pd.api.types.is_integer_dtype,np.int_,"int64"], "float": [pd.api.types.is_float_dtype,np.float_,"float64"], "real": [pd.api.types.is_float_dtype,np.float_,"float64"], "decimal": [pd.api.types.is_float_dtype,np.float_,"float64"], "numeric": [pd.api.types.is_float_dtype,np.float_,"float64"], "varbinary": ['bytes'], "binary": ['raw'], "date": [pd.api.types.is_string_dtype,np.unicode_,"str"], "time": [pd.api.types.is_string_dtype,np.unicode_,"str"], "datetime": [pd.api.types.is_string_dtype,np.unicode_,"str"]} _columnNames = elem['ColumnList'] _dataTypes=elem['DataType'] for idx,dt in enumerate(_dataTypes): _tipo = ''.join([s for s in list(dt) if s.isalpha()]) field = _columnNames[idx] fn = _dTypes[_tipo][1] if(_tipo=='bit'): df[field] = df[field].astype(fn, errors='ignore') elif(_tipo=='date'): df[field].replace('0000-00-00','',inplace=True) _t = f"Tipos de datos de la tabla {elem['TableName']} verificados con éxito"; print(_t); dfLog = dfLog.append(pd.Series({'result': _t}), ignore_index=True) return df #---------------------------------------------------------------------------- # Convierte archivo JSON en varios archivos CSV. Uno por cada tabla del modelo. 
# Se genera un elemento por cada tabla del JSON # {'Column': ['OrganizationCalendarId','OrganizationId','CalendarCode','CalendarDescription','CalendarYear'], # 'DataType': ['int', 'int', 'nvarchar(30)', 'nvarchar(60)', 'nchar(4)'], # 'JSONGroup': '_Calendarios', # 'SIERequired': ['YES', 'YES', 'NO', 'YES', 'NO'], # 'Table': 'OrganizationCalendar' # } #---------------------------------------------------------------------------- def parseJsonToCSVs(path_to_zip_file,path_to_dir_csv_file): global dfLog, _encode, _sep xd = cargarPlanillaConDatosDelModelo() jsonData,jsonFileName = readJsonData(path_to_zip_file) for row in list(xd[xd["JSONGroup"].notnull()].groupby(["JSONGroup","Table"])): elem = { "JSONGroupName":row[0][0], "TableName":row[0][1], "ColumnList":list(row[1]["Column"]), "DataType": list(row[1]["Data Type"]), "SIERequired": list(row[1]["SIERequired"]) } df = jsonToDataframe(elem,jsonData) df = validaLosTiposDeDatos(df, elem) _fileName = path_to_dir_csv_file+elem['TableName']+'.csv' _c = str(df.count()[0]) print('Guardando : '+_fileName+' -> '+_c+' registros procesados.\n') df.to_csv(_fileName,sep=_sep,encoding=_encode,index=False) dfLog = dfLog.append(pd.Series({'json':jsonFileName, 'csv': elem['TableName']+'.csv', '#savingRows':_c, 'resultSaving':'OK'}), ignore_index=True) _t = 'Archivo JSON completamente transformado.' print(_t);dfLog = dfLog.append(pd.Series({'result': _t}), ignore_index=True) return True def eliminarDuplicados(mylist): seen = set() newlist = [] for item in mylist: t = tuple(item) if t not in seen: newlist.append(item) seen.add(t) return newlist def crearCSV(jsonFileName, fileName,TableName,columnList,unique_records): #https://pymotw.com/2/csv/ global dfLog,_sep try: csv.register_dialect('escaped', delimiter=_sep, lineterminator ='\n', skipinitialspace=0, escapechar=None, doublequote=True, quoting=csv.QUOTE_MINIMAL, quotechar='"') _c = len(unique_records) _f = open(fileName, 'w', encoding=_encode) dialect = csv.get_dialect("escaped") writer = csv.writer(_f, dialect=dialect) writer.writerow(columnList) writer.writerows(unique_records) _t = f"Table {TableName} -> {_c} registros procesados." except Exception as e: _t = f"ERROR:'{str(e)}'. Tabla:'{TableName}'. {_c} registros perdidos." 
finally: _f.close() print(_t); dfLog = dfLog.append(pd.Series({'json':jsonFileName, 'csv': fileName, '#savingRows':_c, 'resultSaving':_t}), ignore_index=True) return True def readJsonData(path_to_zip_file): global dfLog # Descomprime el contenido del archivo ZIP y lo carga en memoria if(path_to_zip_file): with ZipFile(path_to_zip_file, 'r') as zip_ref: zip_ref.extractall('./') _t=f'Archivo ZIP "{path_to_zip_file}" descomprimido con éxito'; print(_t) dfLog = dfLog.append(pd.Series({'result': _t}), ignore_index=True) for file in zip_ref.namelist(): _t=f"Trabajando sobre archivo: '{file}'"; print(_t) dfLog = dfLog.append(pd.Series({'result': _t}), ignore_index=True) with open(file, mode='r', encoding="utf-8") as jsonfile: jsonData = json.load(jsonfile) _t=f"Archivo '{jsonfile}' leído sin inconvenientes\n"; print(_t) dfLog = dfLog.append(pd.Series({'result': _t}), ignore_index=True) jsonfile.close() os.remove(file) return jsonData,file def cargarPlanillaConDatosDelModelo(): global dfLog #Carga planilla con todas las tablas y campos del modelo https://ceds.ed.gov idFile = '1R8iEWpa2-buQijoI9NzniCbyZm5-zZcN' url = f'http://drive.google.com/uc?export=download&id={idFile}' xd = pd.read_excel(url,'NDS Columns') _t=f'Planilla {url} cargada satisfactoriamente'; print(_t) dfLog = dfLog.append(pd.Series({'result': _t}), ignore_index=True) return xd; def leerTodosLosRegistrosDeLaTalaDesdeArchivoJson(jsonData,elem): #Mapeo de tipos de datos SQL -> Pyhton records = [] for grupo in jsonData[elem['JSONGroupName']]: for tbl in grupo[elem['TableName']]: record = [] for indice,col in enumerate(elem['ColumnList']): dt = elem['DataType'][indice] _tipo = ''.join([s for s in list(dt) if s.isalpha()]) value = tbl.get(col) if (tbl.get(col) is not None) else '' if(_tipo in {'bit', 'bigint', 'int', 'smallint', 'tinyint'} and value!=''): value = int(value) elif(_tipo=='date'): value = str(value).replace('0000-00-00','') record.append(value) records.append(record) return eliminarDuplicados(records) def readJsonSaveCSV(path_to_zip_file,path_to_dir_csv_file): global dfLog, _encode, _sep xd = cargarPlanillaConDatosDelModelo() jsonData,jsonFileName = readJsonData(path_to_zip_file) for row in list(xd[xd["JSONGroup"].notnull()].groupby(["JSONGroup","Table"])): elem = { "JSONGroupName":row[0][0], "TableName":row[0][1], "ColumnList":list(row[1]["Column"]), "DataType": list(row[1]["Data Type"]), "SIERequired": list(row[1]["SIERequired"]) } records = leerTodosLosRegistrosDeLaTalaDesdeArchivoJson(jsonData,elem) crearCSV(jsonFileName,path_to_dir_csv_file+elem['TableName']+'.csv', elem['TableName'], elem['ColumnList'], records) _t = 'Archivo JSON completamente transformado.' 
print(_t);dfLog = dfLog.append(pd.Series({'result': _t}), ignore_index=True) return True #---------------------------------------------------------------------------- #PASO N° 30 - Guarda elmentos en CSV en archivos CSV #---------------------------------------------------------------------------- #---------------------------------------------------------------------------- # Zip the files from given directory that matches the filter #---------------------------------------------------------------------------- def zipFilesInDir(dirName, zipFileName, filter): global dfLog # create a ZipFile object with ZipFile(zipFileName, 'w') as zipObj: # Iterate over all the files in directory for folderName, subfolders, filenames in os.walk(dirName): for filename in filenames: if filter(filename): # create complete filepath of file in directory filePath = os.path.join(folderName, filename) # Add file to zip zipObj.write(filePath) _t = "Archivo ZIP con todos los CSV's creado con éxito"; print(_t) dfLog = dfLog.append(pd.Series({'result': _t}), ignore_index=True) return True def transferCSVToSQL_withPandas(path_to_dir_csv_file,DB_NAME,timestamp): global dfLog,_sep,_encode _r = True secPhase = 'BD en blanco solo con parámetros definidos por Enlaces-Mineduc' engine = create_engine(f"sqlite+pysqlcipher://:{secPhase}@/{DB_NAME}?cipher=aes-256-cfb&kdf_iter=64000") conn = engine.connect() try: for root, dirs, files in os.walk(path_to_dir_csv_file, topdown=False): for name in files: _fileName = os.path.join(root, name) df = pd.read_csv(_fileName,sep=_sep,encoding=_encode) _c = str(df.count()[0]) tbl = name[:-4] print(f'\nLeyendo: {_fileName} -> {_c} registros procesados.'); try: df.to_sql(tbl, con = conn, index=False, if_exists='append') _result = "OK" except Exception as e: print(f'RollBack') _result='ERROR: '+str(e) _r = False pass finally: print("name:",name) dfLog.loc[dfLog['csv']==_fileName,dfLog.columns=='#readingRows']=_c dfLog.loc[dfLog['csv']==_fileName,dfLog.columns=='resultReading']=_result print("Table:",tbl,"#Rows:",len(df.index),_result) #trans.commit() #-------------------- REVISION DE REGLAS DEL NEGOCIO -------------------------- # VERIFICA LA INTEGRIDAD REFERENCIAL DE LOS DATOS print("# VERIFICA LA INTEGRIDAD REFERENCIAL DE LOS DATOS") rows = conn.execute("PRAGMA foreign_key_check;") if(rows.returns_rows): pd.DataFrame(rows ,columns=['Table', 'rowId', 'Parent', 'FkId'] ).to_csv('ForenKeyErrors.csv' ,sep=_sep ,encoding=_encode ,index=False) raise Exception("BD con errores de Integridad Referencial. 
Revise ForenKeyErrors.csv para más detalle") # VERIFICA QUE LA BD CONTENGA EL RBD DEL ESTABLECIMIENTO print("# VERIFICA QUE LA BD CONTENGA EL RBD DEL ESTABLECIMIENTO") RBD = conn.execute(""" SELECT i.Identifier as RBD ,Organization.Name as 'NombreEstablecimiento' ,i.OrganizationId FROM OrganizationIdentifier i INNER JOIN Organization USING(OrganizationId) INNER JOIN RefOrganizationIdentificationSystem rbd ON i.RefOrganizationIdentificationSystemId = rbd.RefOrganizationIdentificationSystemId AND i.RefOrganizationIdentificationSystemId = ( SELECT RefOrganizationIdentificationSystemId FROM RefOrganizationIdentificationSystem WHERE Code = 'RBD') INNER JOIN RefOrganizationIdentifierType Mineduc ON i.RefOrganizationIdentifierTypeId = Mineduc.RefOrganizationIdentifierTypeId AND i.RefOrganizationIdentifierTypeId = ( SELECT RefOrganizationIdentifierTypeId FROM RefOrganizationIdentifierType WHERE Code = 'Mineduc') """) if(not RBD.returns_rows): raise Exception("RBD del establecimiento no fue encontrado en la Base de datos") else: row = RBD.fetchall(); r = re.compile('^RBD[0-9]{5}$') print(row[0][0]) if r.match(row[0][0]) is not None: print('RBD con formato correcto',row[0][0]) else: raise Exception("RBD con formato incorrecto", row[0][0]) # VERIFICA QUE LA BD CONTENGA LA INFORMACIÓN DE SOLO UN AÑO print("# VERIFICA QUE LA BD CONTENGA LA INFORMACIÓN DE SOLO UN AÑO") Year = conn.execute(""" SELECT CalendarYear as 'AñoEscolar' FROM OrganizationCalendar GROUP BY CalendarYear """); if(Year.returns_rows): rows = Year.fetchall(); print("Año: ",rows[0]) if(len(rows)!=1): raise Exception("La BD contiene más de un año de referencia en los datos") else: raise Exception("La BD no contiene el año de referencia de los datos") # VERIFICA JERARQUIA DE LOS DATOS # la jerarquí es: # RBD -> Modalidad -> Jornada -> Niveles -> Rama -> # Sector Económico (shorName) + Especialidad (Name) # Tipo de Curso -> COD_ENSE (shortName) + Grado (Name) -> Curso -> Asignatura print("# VERIFICA JERARQUIA DE LOS DATOS") Jerarquias = conn.execute(""" SELECT ee.RBD , ee.nombreEstablecimiento , modalidad.Name as modalidad , jornada.Name as jornada , nivel.Name as nivel , rama.Name as rama , sector.Name as sector , especialidad.Name as especialidad , tipoCurso.Name as tipoCurso , codEnse.Name as codigoEnseñanza , grado.Name as grado , curso.Name as letraCurso , curso.OrganizationId as OrganizationIdDelCurso , profesorJefe.apellidoPaternoDocenteLiderCurso , profesorJefe.apellidoMaternoDocenteLiderCurso , profesorJefe.primerNombreDocenteLiderCurso , profesorJefe.otrosNombresDocenteLiderCurso , profesorJefe.runDocenteLiderCurso FROM Organization as curso INNER JOIN OrganizationRelationship as rsCurso on curso.OrganizationId=rsCurso.OrganizationId INNER JOIN Organization as grado on grado.OrganizationId=rsCurso.Parent_OrganizationId INNER JOIN OrganizationRelationship as rsGrado on grado.OrganizationId=rsGrado.OrganizationId INNER JOIN Organization as codEnse on codEnse.OrganizationId=rsGrado.Parent_OrganizationId INNER JOIN OrganizationRelationship as rsCodEnse on codEnse.OrganizationId=rsCodEnse.OrganizationId INNER JOIN Organization as tipoCurso on tipoCurso.OrganizationId=rsCodEnse.Parent_OrganizationId INNER JOIN OrganizationRelationship as rsTipoCurso on tipoCurso.OrganizationId=rsTipoCurso.OrganizationId INNER JOIN Organization as especialidad on especialidad.OrganizationId=rsTipoCurso.Parent_OrganizationId INNER JOIN OrganizationRelationship as rsEspecialidad on especialidad.OrganizationId=rsEspecialidad.OrganizationId INNER JOIN 
Organization as sector on sector.OrganizationId=rsEspecialidad.Parent_OrganizationId INNER JOIN OrganizationRelationship as rsSector on sector.OrganizationId=rsSector.OrganizationId INNER JOIN Organization as rama on rama.OrganizationId=rsSector.Parent_OrganizationId INNER JOIN OrganizationRelationship as rsRama on rama.OrganizationId=rsRama.OrganizationId INNER JOIN Organization as nivel on nivel.OrganizationId=rsRama.Parent_OrganizationId INNER JOIN OrganizationRelationship as rsNivel on nivel.OrganizationId=rsNivel.OrganizationId INNER JOIN Organization as jornada on jornada.OrganizationId=rsNivel.Parent_OrganizationId INNER JOIN OrganizationRelationship as rsJornada on jornada.OrganizationId=rsJornada.OrganizationId INNER JOIN Organization as modalidad on modalidad.OrganizationId=rsJornada.Parent_OrganizationId INNER JOIN OrganizationRelationship as rsModalidad on modalidad.OrganizationId=rsModalidad.OrganizationId INNER JOIN ( SELECT i.Identifier as RBD ,Organization.Name as 'nombreEstablecimiento' ,i.OrganizationId as OrganizationId FROM OrganizationIdentifier i INNER JOIN Organization USING(OrganizationId) INNER JOIN RefOrganizationIdentificationSystem rbd ON i.RefOrganizationIdentificationSystemId = rbd.RefOrganizationIdentificationSystemId AND i.RefOrganizationIdentificationSystemId = ( SELECT RefOrganizationIdentificationSystemId FROM RefOrganizationIdentificationSystem WHERE Code = 'RBD' ) INNER JOIN RefOrganizationIdentifierType Mineduc ON i.RefOrganizationIdentifierTypeId = Mineduc.RefOrganizationIdentifierTypeId AND i.RefOrganizationIdentifierTypeId = ( SELECT RefOrganizationIdentifierTypeId FROM RefOrganizationIdentifierType WHERE Code = 'Mineduc' )) as ee on ee.OrganizationId=rsModalidad.Parent_OrganizationId INNER JOIN ( SELECT OrganizationPersonRoleId , OrganizationId , PersonId , LastName as 'apellidoPaternoDocenteLiderCurso' , SecondLastName as 'apellidoMaternoDocenteLiderCurso' , FirstName as 'primerNombreDocenteLiderCurso' , MiddleName as 'otrosNombresDocenteLiderCurso' , runDocenteLiderCurso FROM K12StaffAssignment INNER JOIN OrganizationPersonRole USING(OrganizationPersonRoleId) INNER JOIN ( SELECT DISTINCT Person.PersonId ,Person.LastName ,Person.SecondLastName ,Person.FirstName ,Person.MiddleName ,rut.Identifier as RunDocenteLiderCurso FROM Person INNER JOIN PersonIdentifier rut ON rut.PersonId = Person.PersonId AND rut.RefPersonIdentificationSystemId = 51 ) USING(PersonId) WHERE RefTeachingAssignmentRoleId = 1 ) profesorJefe ON OrganizationIdDelCurso = profesorJefe.OrganizationId WHERE curso.RefOrganizationTypeId = 21 """); print("Jerarquias.returns_rows->",Jerarquias.returns_rows) if(Jerarquias.returns_rows): rows = Jerarquias.fetchall() if(len(rows)==0): raise Exception("No se encuentra ningún dato de jerarquía") else: raise Exception("No se encuentra ningún dato de jerarquía") modalidades = list(set([m[2] for m in rows])) madalidadesList = ['Regular','Especial','Adulto'] if(False in [m in madalidadesList for m in modalidades]): raise Exception("La modalidad de enseñanza no corresponde") jornadas = list(set([m[3] for m in rows])) jornadasList = ['Mañana','Tarde','Mañana y Tarde','Vespertina/Nocturna'] if(False in [m in jornadasList for m in jornadas]): raise Exception("La jornada de enseñanza no corresponde") nivel = list(set([m[4] for m in rows])) nivelList = [ '01:Educación Parvularia', '02:Enseñanza Básica Niños', '03:Educación Básica Adultos', '04:Educación Especial', '05:Enseñanza Media Humanístico Científica Jóvenes', '06:Educación Media Humanístico 
Científica Adultos', '07:Enseñanza Media Técnico Profesional y Artística, Jóvenes', '08:Educación Media Técnico Profesional y Artística, Adultos'] if(False in [m in nivelList for m in nivel]): raise Exception("El nivel de enseñanza agrupado no corresponde") rama = list(set([m[5] for m in rows])) ramaList = ['000:Ciclo General', '000:Sin Información', '400:Comercial', '500:Industrial', '600:Técnica', '700:Agrícola', '800:Marítima', '900:Artística',] if(False in [m in ramaList for m in rama]): raise Exception("La rama de enseñanza no corresponde") sector = list(set([m[6] for m in rows])) # Ciclo general corresponde a alumnos de 1° y 2° Medio #en Enseñanza Media T-P y Artística niños y jóvenes y #primer nivel en educación media T-P y Artística Adultos. #En todo otro caso colocar "Sin Información" sectorList = ['000:Ciclo General', '000:Sin Información', '410:Administración y Comercio', '510:Construcción', '520:Metalmecánico', '530:Electricidad', '540:Minero', '550:Gráfica', '560:Químico', '570:Confección', '580:Tecnología y Telecomunicaciones', '610:Alimentación', '620:Programas y Proyectos Sociales', '630:Hotelería y Turismo', '640:Salud y Educación', '710:Maderero', '720:Agropecuario', '810:Marítimo', '910:Artes Visuales', '920:Artes Escénicas Teatro', '930:Artes Escénicas Danza'] if(False in [m in sectorList for m in sector]): raise Exception("El sector de enseñanza no corresponde") especialidad = list(set([m[7] for m in rows])) especialidadList = ['000:Ciclo General', '000:Sin Información', '410.41001:Administración', '410.41002:Contabilidad', '410.41003:Secretariado', '410.41004:Ventas', '410.41005:Administración (con mención)', '510.51001:Edificación', '510.51002:Terminaciones de Construcción', '510.51003:Montaje Industrial', '510.51004:Obras viales y de infraestructura', '510.51005:Instalaciones sanitarias', '510.51006:Refrigeración y climatización', '510.51009:Construcción (con mención)', '520.52008:Mecánica Industrial', '520.52009:Construcciones Metálicas', '520.52010:Mecánica Automotriz', '520.52011:Matricería', '520.52012:Mecánica de mantención de aeronaves', '520.52013:Mecánica Industrial (con mención)', '530.53014:Electricidad', '530.53015:Electrónica', '530.53016:Telecomunicaciones hasta el año 2015', '540.54018:Explotación minera', '540.54019:Metalurgia Extractiva', '540.54020:Asistencia de geología', '550.55022:Gráfica', '550.55023:Dibujo Técnico', '560.56025:Operación de planta química', '560.56026:Laboratorio químico', '560.56027:Química Industrial (con mención)', '570.57028:Tejido', '570.57029:Textil', '570.57030:Vestuario y Confección Textil', '570.57031:Productos del cuero', '580.58033:Conectividad y Redes', '580.58034:Programación', '580.58035:Telecomunicaciones', '610.61001:Elaboración Industrial de Alimentos', '610.61002:Servicio de Alimentación Colectiva', '610.61003:Gastronomía (con mención)', '620.62004:Atención de párvulos hasta año 2015', '620.62005:Atención de adultos mayores', '620.62006:Atención de Enfermería', '620.62007:Atención Social y Recreativa', '620.62008:Atención de Enfermería (con mención) hasta año 2015', '630.63009:Servicio de turismo', '630.63010:Servicios Hoteleros', '630.63011:Servicio de hotelería', '640.64001:Atención de párvulos', '640.64008:Atención de Enfermería (con mención)', '710.71001:Forestal', '710.71002:Procesamiento de la madera', '710.71003:Productos de la madera', '710.71004:Celulosa y Papel', '710.71005:Muebles y Terminaciones de la madera', '720.72006:Agropecuaria', '720.72007:Agropecuaria (con mención)', '810.81001:Naves 
mercantes y especiales', '810.81002:Pesquería', '810.81003:Acuicultura', '810.81004:Operación portuaria', '810.81005:Tripulación naves mercantes y especiales', '910.91001:Artes Visuales', '910.91002:Artes Audiovisuales', '910.91003:Diseño', '920.92004:Interpretación Teatral', '920.92005:Diseño Escénico', '930.93006:Interpretación en Danza de Nivel Intermedio', '930.93007:Monitoría de Danza'] if(False in [m in especialidadList for m in especialidad]): raise Exception("La especialidad de enseñanza no corresponde") tipoCurso = list(set([m[8] for m in rows])) tipoCursoList = ['01:Simple','02:Combinado'] if(False in [m in tipoCursoList for m in tipoCurso]): raise Exception("El codigo de nivel agrupado no corresponde") codigoEnse = list(set([m[9] for m in rows])) codigoEnseList = ['010:Educación Parvularia', '110:Enseñanza Básica', '160:Educación Básica Común Adultos (Decreto 584/2007)', '161:Educación Básica Especial Adultos', '163:Escuelas Cárceles (Básica Adultos)', '165:Educación Básica Adultos Sin Oficios (Decreto 584/2007)' '167:Educación Básica Adultos Con Oficios (Decreto 584/2007 y 999/2009)', '211:Educación Especial Discapacidad Auditiva', '212:Educación Especial Discapacidad Intelectual', '213:Educación Especial Discapacidad Visual', '214:Educación Especial Trastornos Específicos del Lenguaje', '215:Educación Especial Trastornos Motores', '216:Educación Especial Autismo', '217:Educación Especial Discapacidad Graves Alteraciones en la Capacidad de Relación y Comunicación', '299:Opción 4 Programa Integración Escolar', '310:Enseñanza Media H-C niños y jóvenes', '360:Educación Media H-C adultos vespertino y nocturno (Decreto N° 190/1975)', '361:Educación Media H-C adultos (Decreto N° 12/1987)', '362:Escuelas Cárceles (Media Adultos)', '363:Educación Media H-C Adultos (Decreto N°1000/2009)', '410:Enseñanza Media T-P Comercial Niños y Jóvenes', '460:Educación Media T-P Comercial Adultos (Decreto N° 152/1989)', '461:Educación Media T-P Comercial Adultos (Decreto N° 152/1989)', '463:Educación Media T-P Comercial Adultos (Decreto N° 1000/2009)', '510:Enseñanza Media T-P Industrial Niños y Jóvenes', '560:Educación Media T-P Industrial Adultos (Decreto N° 152/1989)', '561:Educación Media T-P Industrial Adultos (Decreto N° 152/1989)', '563:Educación Media T-P Industrial Adultos (Decreto N° 1000/2009)', '610:Enseñanza Media T-P Técnica Niños y Jóvenes', '660:Educación Media T-P Técnica Adultos (Decreto N° 152/1989)', '661:Educación Media T-P Técnica Adultos (Decreto N° 152/1989)', '663:Educación Media T-P Técnica Adultos (Decreto N° 1000/2009)', '710:Enseñanza Media T-P Agrícola Niños y Jóvenes', '760:Educación Media T-P Agrícola Adultos (Decreto N° 152/1989)', '761:Educación Media T-P Agrícola Adultos (Decreto N° 152/1989)', '763:Educación Media T-P Agrícola Adultos (Decreto N° 1000/2009)', '810:Enseñanza Media T-P Marítima Niños y Jóvenes', '860:Enseñanza Media T-P Marítima Adultos (Decreto N° 152/1989)', '863:Enseñanza Media T-P Marítima Adultos (Decreto N° 1000/2009)', '910:Enseñanza Media Artística Niños y Jóvenes', '963:Enseñanza Media Artística Adultos',] if(False in [m in codigoEnseList for m in codigoEnse]): raise Exception("El código de enseñanza no corresponde") grado = list(set([m[10] for m in rows])) gradoList = ['010.01:Sala Cuna', '010.02:Nivel Medio Menor', '010.03:Nivel Medio Mayor', '010.04:Primer Nivel de Transición (Pre-kinder)', '010.05:Segundo Nivel de Transición (Kinder)', '110.01:1º Básico', '110.02:2º Básico', '110.03:3º Básico', '110.04:4º Básico', '110.05:5º Básico', 
'110.06:6º Básico', '110.07:7º Básico', '110.08:8º Básico', '165.01:Nivel Básico 1 (1º a 4º básico)', '165.02:Nivel Básico 2 (5º a 6º básico)', '165.03:Nivel Básico 3 (7º a 8º básico)', '167.02:Nivel Básico 2 (5º a 6º básico)', '167.03:Nivel Básico 3 (7º a 8º básico)', '211.01:Prebásico materno 1º', '211.02:Prebásico 1º - 1', '211.03:Prebásico 1º - 2', '211.04:Prebásico 1º - 3', '211.05:Prebásico 2º - 4', '211.06:Prebásico 2º - 5', '211.07:Básico 1º - 1', '211.08:Básico 1º - 2', '211.09:Básico 1º - 3', '211.10:Básico 1º - 4', '211.11:Básico 2º - 5', '211.12:Básico 2º - 6', '211.13:Básico 2º - 7', '211.14:Básico 2º - 8', '211.15:Laboral 1', '211.16:Laboral 2', '211.17:Laboral 3', '212.01:Prebásico 1º - 1', '212.02:Prebásico 1º - 2', '212.03:Prebásico 1º - 3', '212.04:Prebásico 2º - 4', '212.05:Básico 1º - 5', '212.06:Básico 1º - 6', '212.07:Básico 1º - 7', '212.08:Básico 2º - 8', '212.09:Básico 2º - 9', '212.10:Básico 2º - 10', '212.11:Laboral 1', '212.12:Laboral 2', '212.13:Laboral 3', '212.14:Prebásico Materno 1° (Estimulación Temprana)', '213.01:Estimulación temprana 1º - 1', '213.02:Estimulación temprana 1º - 2', '213.03:Prebásico 1º - 1', '213.04:Prebásico 1º - 2', '213.05:Prebásico 2º - 3', '213.06:Prebásico 2º - 4', '213.07:Básico 1º - 1', '213.08:Básico 1º - 2', '213.09:Básico 1º - 3', '213.10:Básico 1º - 4', '213.11:Básico 2º - 5', '213.12:Básico 2º - 6', '213.13:Básico 2º - 7', '213.14:Básico 2º - 8', '213.15:Laboral 1º - 1', '213.16:Laboral 1º - 2', '213.17:Laboral 2º - 3', '213.18:Laboral 2º - 4', '214.01:Medio Menor', '214.02:Medio Mayor', '214.03:Primer Nivel de Transición (Pre-kinder)', '214.04:Segundo Nivel de Transición (Kinder)', '215.01:Estimulación temprana 1º - 1', '215.02:Estimulación temprana 1º - 2', '215.03:Prebásico 1º - 1', '215.04:Prebásico 1º - 2', '215.05:Prebásico 1º - 3', '215.06:Prebásico 2º - 4', '215.07:Prebásico 2º - 5', '215.08:Básico 1º - 1', '215.09:Básico 1º - 2', '215.10:Básico 1º - 3', '215.11:Básico 1º - 4', '215.12:Básico 2º - 5', '215.13:Básico 2º - 6', '215.14:Básico 2º - 7', '215.15:Básico 2º - 8', '215.16:Laboral 1º - 1', '215.17:Laboral 1º - 2', '215.18:Laboral 2º - 3', '215.19:Laboral 2º - 4', '216.05:Básico 1° - 5', '216.06:Básico 1° - 6', '216.07:Básico 1° - 7', '216.08:Básico 2° - 8', '216.09:Básico 2° - 9', '216.10:Básico 2° - 10', '216.11:Laboral 1', '216.12:Laboral 2', '216.13:Laboral 3', '216.14:Prebásico Materno 1° (Estimulación Temprana)', '216.15:Prebásico 1° - 1', '216.16:Prebásico 1° - 2', '216.17:Prebásico 2° - 3', '216.18:Prebásico 2° - 4', '217.01:Prebásico 1º - 1', '217.02:Prebásico 1º - 2', '217.03:Prebásico 2º - 3', '217.04:Prebásico 2º - 4', '217.05:Básico 1º - 5', '217.06:Básico 1º - 6', '217.07:Básico 1º - 75', '217.08:Básico 2º - 8', '217.09:Básico 2º - 9', '217.10:Básico 2º - 10', '217.11:Laboral 1', '217.12:Laboral 2', '217.13:Laboral 3', '217.14:Prebásico Materno 1º (Estimulación temprana)', '299.01:Prebásico 1', '299.02:Prebásico 2', '299.03:Prebásico 3', '299.04:Prebásico 4', '299.05:Prebásico 5', '299.06:Básico 1', '299.07:Básico 2', '299.08:Básico 3', '299.09:Básico 4', '299.10:Básico 5', '299.11:Básico 6', '299.12:Básico 7', '299.13:Básico 8', '299.14:Básico 9', '299.15:Básico 10', '299.16:Laboral 1', '299.17:Laboral 2', '299.18:Laboral 3', '299.19:Laboral 4', '310.01:1º medio', '310.02:2º medio', '310.03:3º medio', '310.04:4º medio', '363.01:Primer nivel (1º y 2º medio)', '363.03:Segundo nivel (3º y 4º medio)', '410.01:1º medio', '410.02:2º medio', '410.03:3º medio', '410.04:4º medio', '463.01:Primer nivel (1º 
y 2º medio)', '463.03:Segundo nivel (3º medio)', '463.04:Tercero nivel (4º medio)', '510.01:1º medio', '510.02:2º medio', '510.03:3º medio', '510.04:4º medio', '563.01:Primer nivel (1º y 2º medio)', '563.03:Segundo nivel (3º medio)', '563.04:Tercero nivel (4º medio)', '610.01:1º medio', '610.02:2º medio', '610.03:3º medio', '610.04:4º medio', '663.01:Primer nivel (1º y 2º medio)', '663.03:Segundo nivel (3º medio)', '663.04:Tercero nivel (4º medio)', '710.01:1º medio', '710.02:2º medio', '710.03:3º medio', '710.04:4º medio', '763.01:Primer nivel (1º y 2º medio)', '763.03:Segundo nivel (3º medio)', '763.04:Tercero nivel (4º medio)', '810.01:1º medio', '810.02:2º medio', '810.03:3º medio', '810.04:4º medio', '863.01:Primer nivel (1º y 2º medio)', '863.03:Segundo nivel (3º medio)', '863.04:Tercero nivel (4º medio)', '910.01:1º medio', '910.02:2º medio', '910.03:3º medio', '910.04:4º medio', '963.01:Primer nivel (1º y 2º medio)', '963.03:Segundo nivel (3º medio)', '963.04:Tercero nivel (4º medio)'] if(False in [m in gradoList for m in grado]): raise Exception("El grado no corresponde") letraCurso = list(set([m[11] for m in rows])) r = re.compile('^[A-Z]{1,2}$') if(None in [r.match(letra) for letra in letraCurso]): raise Exception("La letra de curso inválida") runDocenteLider = list(set([m[17] for m in rows])) if(False in [validarRut(run) for run in runDocenteLider]): raise Exception("Existe RUN de docentes inválidos") # VERIFICA LISTA DE ESTUDIANTES print("# VERIFICA LISTA DE ESTUDIANTES") estudiantes = conn.execute(""" SELECT DISTINCT person.PersonId as 'personIdEstudiante' ,numLista.StudentListNumber as 'númeroListaEstudiante' ,mat.Identifier as 'númeroMatriculaEstudiante' ,Person.LastName as apellidoPaternoEstudiante, Person.SecondLastName as apellidoMaternoEstudiante, Person.FirstName as primerNombreEstudiante, Person.MiddleName as otrosNombresEstudiante ,rut.Identifier as runEstudiante ,CASE RefSex.Code WHEN 'Male' THEN 'M' WHEN 'Female' THEN 'F' ELSE 'Sin Registro' END as sexoEstudiante ,Birthdate as fechaNacimientoEstudiante ,address.StreetNumberAndName as DirecciónEstudiante ,address.comuna as ComunaEstudiante ,padre.ApellidoPaternoPadre, padre.ApellidoMaternoPadre, padre.PrimerNombrePadre, padre.OtrosNombresPadre, padre.RunPadre ,madre.ApellidoPaternoMadre, madre.ApellidoMaternoMadre, madre.PrimerNombreMadre, madre.OtrosNombresMadre, madre.RunMadre ,tutor.ApellidoPaternoTutor, tutor.ApellidoMaternoTutor, tutor.PrimerNombreTutor, tutor.OtrosNombresTutor, tutor.RunTutor, tutor.FonoTutor, tutor.EmailTutor ,curso.RBD ,curso.nombreEstablecimiento ,curso.modalidad ,curso.jornada ,curso.nivel ,curso.grado ,curso.letraCurso ,curso.OrganizationIdDelCurso ,curso.apellidoPaternoDocenteLiderCurso, curso.apellidoMaternoDocenteLiderCurso, curso.primerNombreDocenteLiderCurso, curso.otrosNombresDocenteLiderCurso, curso.runDocenteLiderCurso ,oc.AñoCalendario ,Opr.EntryDate as 'fechaIncorporaciónEstudiante' ,Opr.ExitDate as 'fechaRetiroEstudiante' FROM Person INNER JOIN PersonIdentifier mat ON mat.PersonId = Person.PersonId AND mat.RefPersonIdentificationSystemId = 6 LEFT OUTER JOIN RefSex USING(RefSexId) LEFT OUTER JOIN PersonIdentifier rut ON rut.PersonId = Person.PersonId AND rut.RefPersonIdentificationSystemId = 51 LEFT OUTER JOIN ( SELECT PersonId ,StreetNumberAndName ,RefCounty.Description as Comuna FROM PersonAddress INNER JOIN RefCounty ON PersonAddress.RefCountyId = RefCounty.RefCountyId ) address USING(PersonId) LEFT OUTER JOIN ( SELECT DISTINCT Person.PersonId ,Person.LastName as 
ApellidoPaternoPadre ,Person.SecondLastName as ApellidoMaternoPadre ,Person.FirstName as PrimerNombrePadre ,Person.MiddleName as OtrosNombresPadre ,rut.Identifier as RunPadre FROM Person INNER JOIN PersonIdentifier rut ON rut.PersonId = Person.PersonId AND rut.RefPersonIdentificationSystemId = 51 INNER JOIN PersonRelationship padre ON padre.PersonId = Person.PersonId AND padre.RefPersonRelationshipId IN (8,9,10,11) ) padre USING(PersonId) LEFT OUTER JOIN ( SELECT DISTINCT Person.PersonId ,Person.LastName as ApellidoPaternoMadre ,Person.SecondLastName as ApellidoMaternoMadre ,Person.FirstName as PrimerNombreMadre ,Person.MiddleName as OtrosNombresMadre ,rut.Identifier as RunMadre FROM Person INNER JOIN PersonIdentifier rut ON rut.PersonId = Person.PersonId AND rut.RefPersonIdentificationSystemId = 51 INNER JOIN PersonRelationship ON PersonRelationship.PersonId = Person.PersonId AND PersonRelationship.RefPersonRelationshipId IN (18,19,20) ) madre USING(PersonId) LEFT OUTER JOIN ( SELECT DISTINCT Person.PersonId ,Person.LastName as ApellidoPaternoTutor ,Person.SecondLastName as ApellidoMaternoTutor ,Person.FirstName as PrimerNombreTutor ,Person.MiddleName as OtrosNombresTutor ,rut.Identifier as RunTutor ,p.TelephoneNumber as FonoTutor ,PersonEmailAddress.EmailAddress as EmailTutor FROM Person INNER JOIN PersonIdentifier rut ON rut.PersonId = Person.PersonId AND rut.RefPersonIdentificationSystemId = 51 INNER JOIN PersonRelationship ON PersonRelationship.PersonId = Person.PersonId AND PersonRelationship.PrimaryContactIndicator = 1 LEFT OUTER JOIN ( SELECT * FROM PersonTelephone ORDER BY PrimaryTelephoneNumberIndicator DESC LIMIT 1 ) p USING(PersonId) LEFT OUTER JOIN PersonEmailAddress USING(PersonId) ) tutor USING(PersonId) LEFT OUTER JOIN ( SELECT DISTINCT StudentListNumber ,OrganizationPersonRole.PersonId FROM K12StudentEnrollment INNER JOIN OrganizationPersonRole USING(OrganizationPersonRoleId) WHERE StudentListNumber NOT NULL AND StudentListNumber != 0 ) numLista USING(PersonId) LEFT OUTER JOIN OrganizationPersonRole as Opr USING(PersonId) INNER JOIN ( SELECT ee.RBD , ee.nombreEstablecimiento , modalidad.Name as modalidad , jornada.Name as jornada , nivel.Name as nivel , rama.Name as rama , sector.Name as sector , especialidad.Name as especialidad , tipoCurso.Name as tipoCurso , codEnse.Name as codigoEnseñanza , grado.Name as grado , curso.Name as letraCurso , curso.OrganizationId as OrganizationIdDelCurso , profesorJefe.apellidoPaternoDocenteLiderCurso , profesorJefe.apellidoMaternoDocenteLiderCurso , profesorJefe.primerNombreDocenteLiderCurso , profesorJefe.otrosNombresDocenteLiderCurso , profesorJefe.runDocenteLiderCurso FROM Organization as curso INNER JOIN OrganizationRelationship as rsCurso on curso.OrganizationId=rsCurso.OrganizationId INNER JOIN Organization as grado on grado.OrganizationId=rsCurso.Parent_OrganizationId INNER JOIN OrganizationRelationship as rsGrado on grado.OrganizationId=rsGrado.OrganizationId INNER JOIN Organization as codEnse on codEnse.OrganizationId=rsGrado.Parent_OrganizationId INNER JOIN OrganizationRelationship as rsCodEnse on codEnse.OrganizationId=rsCodEnse.OrganizationId INNER JOIN Organization as tipoCurso on tipoCurso.OrganizationId=rsCodEnse.Parent_OrganizationId INNER JOIN OrganizationRelationship as rsTipoCurso on tipoCurso.OrganizationId=rsTipoCurso.OrganizationId INNER JOIN Organization as especialidad on especialidad.OrganizationId=rsTipoCurso.Parent_OrganizationId INNER JOIN OrganizationRelationship as rsEspecialidad on 
especialidad.OrganizationId=rsEspecialidad.OrganizationId INNER JOIN Organization as sector on sector.OrganizationId=rsEspecialidad.Parent_OrganizationId INNER JOIN OrganizationRelationship as rsSector on sector.OrganizationId=rsSector.OrganizationId INNER JOIN Organization as rama on rama.OrganizationId=rsSector.Parent_OrganizationId INNER JOIN OrganizationRelationship as rsRama on rama.OrganizationId=rsRama.OrganizationId INNER JOIN Organization as nivel on nivel.OrganizationId=rsRama.Parent_OrganizationId INNER JOIN OrganizationRelationship as rsNivel on nivel.OrganizationId=rsNivel.OrganizationId INNER JOIN Organization as jornada on jornada.OrganizationId=rsNivel.Parent_OrganizationId INNER JOIN OrganizationRelationship as rsJornada on jornada.OrganizationId=rsJornada.OrganizationId INNER JOIN Organization as modalidad on modalidad.OrganizationId=rsJornada.Parent_OrganizationId INNER JOIN OrganizationRelationship as rsModalidad on modalidad.OrganizationId=rsModalidad.OrganizationId INNER JOIN ( SELECT i.Identifier as RBD ,Organization.Name as 'nombreEstablecimiento' ,i.OrganizationId as OrganizationId FROM OrganizationIdentifier i INNER JOIN Organization USING(OrganizationId) INNER JOIN RefOrganizationIdentificationSystem rbd ON i.RefOrganizationIdentificationSystemId = rbd.RefOrganizationIdentificationSystemId AND i.RefOrganizationIdentificationSystemId = ( SELECT RefOrganizationIdentificationSystemId FROM RefOrganizationIdentificationSystem WHERE Code = 'RBD' ) INNER JOIN RefOrganizationIdentifierType Mineduc ON i.RefOrganizationIdentifierTypeId = Mineduc.RefOrganizationIdentifierTypeId AND i.RefOrganizationIdentifierTypeId = ( SELECT RefOrganizationIdentifierTypeId FROM RefOrganizationIdentifierType WHERE Code = 'Mineduc' )) as ee on ee.OrganizationId=rsModalidad.Parent_OrganizationId INNER JOIN ( SELECT OrganizationPersonRoleId , OrganizationId , PersonId , LastName as 'apellidoPaternoDocenteLiderCurso' , SecondLastName as 'apellidoMaternoDocenteLiderCurso' , FirstName as 'primerNombreDocenteLiderCurso' , MiddleName as 'otrosNombresDocenteLiderCurso' , runDocenteLiderCurso FROM K12StaffAssignment INNER JOIN OrganizationPersonRole USING(OrganizationPersonRoleId) INNER JOIN ( SELECT DISTINCT Person.PersonId ,Person.LastName ,Person.SecondLastName ,Person.FirstName ,Person.MiddleName ,rut.Identifier as RunDocenteLiderCurso FROM Person INNER JOIN PersonIdentifier rut ON rut.PersonId = Person.PersonId AND rut.RefPersonIdentificationSystemId = 51 ) USING(PersonId) WHERE RefTeachingAssignmentRoleId = 1 ) profesorJefe ON OrganizationIdDelCurso = profesorJefe.OrganizationId WHERE curso.RefOrganizationTypeId = 21 ) curso ON Opr.OrganizationId = curso.OrganizationIdDelCurso LEFT OUTER JOIN ( Select MAX(CalendarYear) as 'AñoCalendario', OrganizationId FROM OrganizationCalendar ) oc ON oc.OrganizationId=curso.OrganizationIdDelCurso ORDER BY nivel, grado, letraCurso, StudentListNumber """) print('Estudiantes.returns_rows->',estudiantes.returns_rows) if(estudiantes.returns_rows): rows = estudiantes.fetchall() if(len(rows)==0): raise Exception("No se encuentra ningún dato de los studiante") else: raise Exception("No se encuentra ningún dato de los estudiantes") runEstudiante = list(set([m[7] for m in rows])) if(False in [validarRut(run) for run in runEstudiante]): print([run for run in runEstudiante if not validarRut(run)]) raise Exception("Existe RUN inválidos (runEstudiante)") runPadres = list(set([m[16] for m in rows if m[16] is not None])) runPadresErroneos = [run for run in runPadres if not 
validarRut(run)] if(len(runPadresErroneos)>0): print(runPadresErroneos) raise Exception("Existen RUN inválidos (runPadres)") runMadres = list(set([m[21] for m in rows if m[21] is not None])) runMadresErroneos = [run for run in runMadres if not validarRut(run)] if(len(runMadresErroneos)>0): print(runMadresErroneos) raise Exception("Existen RUN inválidos (runMadres)") runTutores = list(set([m[26] for m in rows if m[26] is not None])) runTutoresErroneos = [run for run in runTutores if not validarRut(run)] if(len(runTutoresErroneos)>0): print(runTutoresErroneos) raise Exception("Existen RUN inválidos (runTutores)") #Teléfonos con formato E164 phoneTutor = list(set([m[27] for m in rows if m[27] is not None])) r = re.compile(r'^\+56\d{9,15}$') phoneTutorErroneos = [phone for phone in phoneTutor if not r.match(phone)] if(len(phoneTutorErroneos)>0): print(phoneTutorErroneos) raise Exception("El teléfono del tutor no tiene el formato correcto") emailTutor = list(set([m[28] for m in rows if m[28] is not None])) emailTutoresErroneos = [email for email in emailTutor if not valida_email(email)] if(len(emailTutoresErroneos)>0): print(emailTutoresErroneos) raise Exception("Existe un email inválido (emailTutor)") # CAMBIA CLAVE A LA BD Y CREA ARCHIVO CON CLAVE PARA LA SIE print("# CAMBIA CLAVE A LA BD Y CREA ARCHIVO CON CLAVE PARA LA SIE") conn.execute(f"PRAGMA key = '{secPhase}';") psw = ''.join(random.choice(string.ascii_uppercase + string.ascii_lowercase + string.digits) for _ in range(50)) tiempoPromedioDescifrado = pow(26+26+10,50)/4000000000 print(psw,tiempoPromedioDescifrado) text_file = open("key.txt", "w");text_file.write(psw);text_file.close() psw2 = encryptTextUsingSiePublicKey(psw) text_file = open("key.encrypted", "wb");text_file.write(psw2);text_file.close() conn.execute(f"PRAGMA rekey = '{psw}';") except Exception as e: _t = "ERROR COMMIT: "+str(e) print(_t);dfLog=dfLog.append(pd.Series({'result': _t}),ignore_index=True); _r = False finally: # closing database connection conn.close() return _r def cargarBaseDeDatos(): global dfLog idFile = '1hqAjAknc6dY720X5zO_ZU2FqI_mZa3nB' url_to_zipDB_file = f'http://drive.google.com/uc?export=download&id={idFile}' r = requests.get(url_to_zipDB_file, stream=True) fileName = 'ceds-nds-v7_1_encryptedD3.db' with open(fileName,'wb') as out: out.write(io.BytesIO(r.content).read()) ## Read bytes into file path_to_DB_file = os.path.join(os.path.dirname(fileName), fileName) _t=f"Base de datos: '{path_to_DB_file}' descargada exitosamente "; print(_t) dfLog = dfLog.append(pd.Series({'result': _t}), ignore_index=True) return path_to_DB_file #---------------------------------------------------------------------------- #PASO N° 10 - Preparar ambiente de trabajo #---------------------------------------------------------------------------- # Clean or create CSV directory #---------------------------------------------------------------------------- def cleanDirectory(d): global dfLog if(not os.path.exists(d)): os.mkdir(d) else: for root, dirs, files in os.walk(d, topdown=False): for name in files: os.remove(os.path.join(root, name)) for name in dirs: os.rmdir(os.path.join(root, name)) _t = f'Directorio : {d} limpio y preparado con éxito!!!'; print(_t); dfLog = dfLog.append(pd.Series({'result': _t}), ignore_index=True) return True ###Output _____no_output_____ ###Markdown Sección: CÓDIGO PRINCIPAL--- ###Code dfLog = pd.DataFrame(columns=['json', 'csv', '#savingRows', '#readingRows', 'resultSaving', 'resultReading', 'result']) _encode = 'utf8' #Opciones Windows:'cp1252', Google
Colab: 'utf8' _sep = ';' #Opciones Windows:';', Google Colab: ',' def main(): global dfLog, _encode, _sep tiempo_inicial = time() now = datetime.now(pytz.timezone('Chile/Continental')) t_stamp = datetime.timestamp(now) path_to_dir_csv_file = './csv/' path_to_DB_file = cargarBaseDeDatos() path_to_zip_file = '_tmp_json_librodigital_mineduc_8833_02enero2020.zip' #Ingresar solo nombre del archivo if(cleanDirectory(path_to_dir_csv_file)): if(readJsonSaveCSV(path_to_zip_file,path_to_dir_csv_file)): if(transferCSVToSQL_withPandas(path_to_dir_csv_file,path_to_DB_file,t_stamp)): #zipFilesInDir (path_to_dir_csv_file, './'+str(int(t_stamp))+'_Data.zip',lambda name : 'csv' in name); cleanDirectory(path_to_dir_csv_file) _nameErrorFile = str(int(t_stamp))+'_ERRORES.csv' dfLog.to_csv(_nameErrorFile, sep=_sep, encoding=_encode, index=False) zip = ZipFile('./'+str(int(t_stamp))+'_Data.zip','a') zip.write('./ceds-nds-v7_1_encryptedD3.db') zip.write('./key.txt');zip.write('./key.encrypted') zip.write('./'+str(int(t_stamp))+'_ERRORES.csv') zip.close() _t = "Proceso finalizado correctamente!!!" print(_t);dfLog = dfLog.append(pd.Series({'result': _t}), ignore_index=True) else: cleanDirectory(path_to_dir_csv_file) _nameErrorFile = str(int(t_stamp))+'_ERRORES.csv' dfLog.to_csv(_nameErrorFile, sep=_sep, encoding=_encode, index=False) _t = "Proceso finalizado con ERRORES!!!" print(_t);dfLog = dfLog.append(pd.Series({'result': _t}), ignore_index=True) if os.path.exists('./ceds-nds-v7_1_encryptedD3.db'): os.remove('./ceds-nds-v7_1_encryptedD3.db') if os.path.exists('./key.txt'): os.remove('./key.txt') if os.path.exists('./key.encrypted'): os.remove('./key.encrypted') _t = f'El tiempo de ejecucion fue: {str(time() - tiempo_inicial)}' print(_t);dfLog = dfLog.append(pd.Series({'result': _t}), ignore_index=True) del dfLog, tiempo_inicial, now, path_to_dir_csv_file del path_to_zip_file, _t if __name__== "__main__": main() ###Output _____no_output_____
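###Markdown
The RUN checks in this notebook rely on a `validarRut` helper defined earlier (not shown in this section). For orientation only, the sketch below shows the standard módulo 11 check-digit rule that this kind of helper typically implements; the function name `validar_rut_sketch` and the sample RUN are illustrative assumptions, not values taken from the notebook.
###Code
import re

def validar_rut_sketch(rut):
    """Minimal módulo 11 check for a RUN/RUT written as '12345678-5' (illustrative sketch only)."""
    m = re.match(r'^(\d{1,8})-([\dkK])$', str(rut).strip())
    if not m:
        return False
    cuerpo, dv = m.group(1), m.group(2).upper()
    suma, factor = 0, 2
    for digito in reversed(cuerpo):  # weights cycle 2, 3, ..., 7 from right to left
        suma += int(digito) * factor
        factor = 2 if factor == 7 else factor + 1
    resto = 11 - (suma % 11)
    esperado = '0' if resto == 11 else 'K' if resto == 10 else str(resto)
    return dv == esperado

print(validar_rut_sketch('12345678-5'))  # True: the check digit 5 satisfies the módulo 11 rule
###Output
_____no_output_____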
resnet50rs_cifar100.ipynb
###Markdown ###Code import numpy as np import torch import torch.nn as nn import torch.optim as optimizers import torch.nn.functional as F from torch.utils.data import Dataset, DataLoader import torchvision import torchvision.transforms as transforms from sklearn.metrics import accuracy_score ###Output _____no_output_____ ###Markdown **Network model** ###Code class ResNet50rs(nn.Module): def __init__(self, output_dim): super(ResNet50rs, self).__init__() self.stem = StemBlock(channel_in=3, channel_out=64) # Block 1 self.id1 = Block(channel_in=64, channel_out=256, stride=2, identity=True) self.block1 = nn.ModuleList([Block(channel_in=256, channel_out=256, stride=1, identity=False) for _ in range(2)]) # Block 2 self.id2 = Block(channel_in=256, channel_out=512, stride=2, identity=True) self.block2 = nn.ModuleList([Block(channel_in=512, channel_out=512, stride=1, identity=False) for _ in range(3)]) # Block 3 self.id3 = Block(channel_in=512, channel_out=1024, stride=2, identity=True) self.block3 = nn.ModuleList([Block(channel_in=1024, channel_out=1024, stride=1, identity=False) for _ in range(5)]) # Block 4 self.id4 = Block(channel_in=1024, channel_out=2048, stride=2, identity=True) self.block4 = nn.ModuleList([Block(channel_in=2048, channel_out=2048, stride=1, identity=False) for _ in range(2)]) self.avg_pool = GlobalAvgPool2d() self.dropout = nn.Dropout(p=0.25) self.fc = nn.Linear(2048, output_dim, bias=False) def forward(self, x): h = self.stem(x) h = self.id1(h) for block in self.block1: h = block(h) h = self.id2(h) for block in self.block2: h = block(h) h = self.id3(h) for block in self.block3: h = block(h) h = self.id4(h) for block in self.block4: h = block(h) h = self.avg_pool(h) h = self.dropout(h) h = torch.relu(h) h = self.fc(h) y = torch.log_softmax(h, dim=-1) return y class StemBlock(nn.Module): def __init__(self, channel_in, channel_out): super(StemBlock, self).__init__() channel =int(channel_out / 2) self.stem = nn.Sequential(nn.Conv2d(channel_in, channel, kernel_size=(3, 3), stride=2, padding=1, bias=False), nn.BatchNorm2d(channel), nn.ReLU(inplace=True), nn.Conv2d(channel, channel, kernel_size=(3, 3), stride=1, padding=1, bias=False), nn.BatchNorm2d(channel), nn.ReLU(inplace=True), nn.Conv2d(channel, channel_out, kernel_size=(3, 3), stride=1, padding=1, bias=False)) self.init_weights() def init_weights(self): for _, module in self.named_modules(): if isinstance(module, nn.Conv2d): nn.init.kaiming_normal_(module.weight, mode='fan_in', nonlinearity='relu') if isinstance(module, nn.BatchNorm2d): nn.init.ones_(module.weight) nn.init.zeros_(module.bias) def forward(self, x): return self.stem(x) class Block(nn.Module): def __init__(self, channel_in, channel_out, stride, identity): super(Block, self).__init__() channel = int(channel_out / 4) self.se = SEBlock(channel_in) # 1x1 conv self.bn1 = nn.BatchNorm2d(channel_in) self.relu1 = nn.ReLU(inplace=True) self.conv1 = nn.Conv2d(channel_in, channel, kernel_size=(1, 1), bias=False) # 3x3 conv self.bn2 = nn.BatchNorm2d(channel) self.relu2 = nn.ReLU(inplace=True) self.conv2 = nn.Conv2d(channel, channel, kernel_size=(3, 3), stride=stride, padding=1, bias=False) # 1x1 conv self.bn3 = nn.BatchNorm2d(channel) self.drop_out = DropPath(drop_prob=0.) 
self.conv3 = nn.Conv2d(channel, channel_out, kernel_size=(1, 1), bias=False) # skip connection self.identity = identity self.downsample = DownSample(channel_in, channel_out, stride) self.init_weights() def init_weights(self): for _, module in self.named_modules(): if isinstance(module, nn.Conv2d): nn.init.kaiming_normal_(module.weight, mode='fan_in', nonlinearity='relu') if isinstance(module, nn.BatchNorm2d): nn.init.ones_(module.weight) nn.init.zeros_(module.bias) def forward(self, x): h = self.se(x) h = self.bn1(h) h = self.relu1(h) h = self.conv1(h) h = self.bn2(h) h = self.relu2(h) h = self.conv2(h) h = self.bn3(h) h = self.drop_out(h) h = self.conv3(h) shortcut = self.downsample(x) if self.identity else x y = h + shortcut return y class SEBlock(nn.Module): def __init__(self, channel, ratio=0.25): super(SEBlock, self).__init__() reduced_channel = int(channel * ratio) self.avg_pool = nn.AdaptiveAvgPool2d(1) self.fc = nn.Sequential(nn.Linear(channel, reduced_channel, bias=False), nn.ReLU(inplace=True), nn.Linear(reduced_channel, channel, bias=False), nn.Sigmoid()) def forward(self, x): b, c, _, _ = x.size() y = self.avg_pool(x).view(b, c) y = self.fc(y).view(b, c, 1, 1) return x * y.expand_as(x) class DropPath(nn.Module): def __init__(self, drop_prob=None): super(DropPath, self).__init__() self.drop_prob = drop_prob def forward(self, x): if self.drop_prob is None or self.drop_prob == 0 or not self.training: return x keep_prob = 1 - self.drop_prob shape = (x.shape[0], ) + (1, ) * (x.ndim - 1) rand_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device) rand_tensor = rand_tensor.floor_() out = x.div(keep_prob) * rand_tensor return out class DownSample(nn.Module): def __init__(self, channel_in, channel_out, stride): super(DownSample, self).__init__() if stride == 1: avg_pool = nn.Identity() else: avg_pool = nn.AvgPool2d(kernel_size=2, stride=stride) self.downsample = nn.Sequential(avg_pool, nn.Conv2d(channel_in, channel_out, kernel_size=(1, 1), bias=False)) def forward(self, x): return self.downsample(x) class GlobalAvgPool2d(nn.Module): def __init__(self, device='cuda'): super(GlobalAvgPool2d, self).__init__() def forward(self, x): return F.avg_pool2d(x, kernel_size=x.size()[2:]).view(-1, x.size(1)) ###Output _____no_output_____ ###Markdown **Tensor size** Convolutional networks adapt to the spatial size of their input, so it helps to track tensor shapes explicitly. In PyTorch, an intermediate activation $h$ follows the layout $$h = \begin{bmatrix} \text{batch} & \text{channels} & \text{height} & \text{width} \end{bmatrix}$$ where $\text{batch}$ is the minibatch size, $\text{channels}$ is the number of feature channels, and $\text{height}$ and $\text{width}$ are the spatial dimensions of the feature map.
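###Markdown
To make those dimensions concrete, the short check below (an added sketch, assuming the cells above defining `ResNet50rs` and its sub-blocks have been run) pushes a dummy batch of two 3×160×160 images through the stem and the full network and prints the resulting shapes; 160×160 matches the Resize used in the training cell that follows.
###Code
# Shape sanity check with a dummy batch; CPU is fine for this.
dummy = torch.randn(2, 3, 160, 160)   # [batch, channels, height, width]
net = ResNet50rs(output_dim=100)
net.eval()                            # Dropout and DropPath are inactive in eval mode
with torch.no_grad():
    stem_out = net.stem(dummy)        # stride-2 stem: 160x160 -> 80x80 with 64 channels
    log_probs = net(dummy)            # log-probabilities over the 100 classes
print(stem_out.shape)                 # torch.Size([2, 64, 80, 80])
print(log_probs.shape)                # torch.Size([2, 100])
###Output
_____no_output_____
###Markdown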
**CIFAR-100** ###Code if __name__ == '__main__': np.random.seed(1234) torch.manual_seed(1234) device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') scaler = torch.cuda.amp.GradScaler() cifar_classes = ['apple', 'aquarium_fish', 'baby', 'bear', 'beaver', 'bed', 'bee', 'beetle', 'bicycle', 'bottle', 'bowl', 'boy', 'bridge', 'bus', 'butterfly', 'camel', 'can', 'castle', 'caterpillar', 'cattle', 'chair', 'chimpanzee', 'clock', 'cloud', 'cockroach', 'couch', 'crab', 'crocodile', 'cup', 'dinosaur', 'dolphin', 'elephant', 'flatfish', 'forest', 'fox', 'girl', 'hamster', 'house', 'kangaroo', 'keyboard', 'lamp', 'lawn_mower', 'leopard', 'lion', 'lizard', 'lobster', 'man', 'maple_tree', 'motorcycle', 'mountain', 'mouse', 'mushroom', 'oak_tree', 'orange', 'orchid', 'otter', 'palm_tree', 'pear', 'pickup_truck', 'pine_tree', 'plain', 'plate', 'poppy', 'porcupine', 'possum', 'rabbit', 'raccoon', 'ray', 'road', 'rocket', 'rose', 'sea', 'seal', 'shark', 'shrew', 'skunk', 'skyscraper', 'snail', 'snake', 'spider', 'squirrel', 'streetcar', 'sunflower', 'sweet_pepper', 'table', 'tank', 'telephone', 'television', 'tiger', 'tractor', 'train', 'trout', 'tulip', 'turtle', 'wardrobe', 'whale', 'willow_tree', 'wolf', 'woman', 'worm'] transform_train = transforms.Compose([transforms.Resize((160, 160)), transforms.RandAugment(magnitude=10), transforms.ToTensor()]) transform_val = transforms.Compose([transforms.Resize((160, 160)), transforms.ToTensor()]) cifar_train = torchvision.datasets.CIFAR100(root="CIFAR100", download=True, train=True, transform=transform_train) cifar_val = torchvision.datasets.CIFAR100(root="CIFAR100", download=True, train=False, transform=transform_val) train_dataloader = DataLoader(cifar_train, batch_size=128, shuffle=True, num_workers=2, pin_memory=True, drop_last=True) val_dataloader = DataLoader(cifar_val, batch_size=128, shuffle=False, num_workers=2, pin_memory=True, drop_last=True) model = ResNet50rs(len(cifar_classes)).to(device) def compute_loss(label, pred): return criterion(pred, label) def train_step(x, t): model.train() with torch.cuda.amp.autocast(): preds = model(x) loss = compute_loss(t, preds) del preds, x optimizer.zero_grad() scaler.scale(loss).backward() scaler.step(optimizer) scaler.update() return loss def val_step(x, t): model.eval() with torch.cuda.amp.autocast(): preds = model(x) loss = compute_loss(t, preds) del x return loss, preds criterion = nn.NLLLoss() log_prob = nn.LogSoftmax(dim=1) optimizer = optimizers.SGD(model.parameters(),lr=0.00078125, momentum=0.9, weight_decay=4e-5) epochs = 50 torch.backends.cudnn.benchmark = True result_list = np.zeros(4) for epoch in range(epochs): train_loss = 0. val_loss = 0. val_acc = 0. 
for(x, t) in train_dataloader: x, t = x.to(device), t.to(device) loss = train_step(x, t) train_loss += loss.item() del t, loss torch.cuda.empty_cache() train_loss /= len(train_dataloader) print("Epoch: {}, Train Cost: {:.3f}".format(epoch+1, train_loss)) with torch.inference_mode(): for (x, t) in val_dataloader: x, t = x.to(device), t.to(device) loss, preds = val_step(x, t) val_loss += loss.item() val_acc += accuracy_score(t.tolist(), preds.argmax(dim=-1).tolist()) del t, loss, preds torch.cuda.empty_cache() val_loss /= len(val_dataloader) val_acc /= len(val_dataloader) result_list = np.vstack((result_list, np.array([epoch+1, train_loss, val_loss, val_acc]))) print("Epoch: {}, Valid Cost: {:.3f}, Acc: {:.3f}".format(epoch+1, val_loss, val_acc)) ###Output Downloading https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz to CIFAR100/cifar-100-python.tar.gz ###Markdown **Plot** ###Code import matplotlib.pyplot as plt %matplotlib inline result = result_list.T epoch = result[0] train_loss = result[1] val_loss = result[2] accuracy = result[3] fig, ax1 = plt.subplots(figsize=(10,7)) ax1.set_xlim(1, 50) ax1.set_ylim(0, 4) ax1.tick_params(labelsize=16) ax1.set_xlabel("Epoch", fontsize=22) ax1.set_ylabel("Cost", fontsize=22) ax1.plot(epoch, train_loss, color="#ce1021", label="Train") ax1.plot(epoch, val_loss, color="#5ab639", label="Val") ax2 = ax1.twinx() ax2.set_ylim(0, 1) ax2.tick_params(labelsize=16) ax2.set_ylabel("Accuracy", fontsize=22) ax2.plot(epoch, accuracy, color="#0086ce", label="Accuracy") h1, l1 = ax1.get_legend_handles_labels() h2, l2 = ax2.get_legend_handles_labels() ax1.legend(h1+h2, l1+l2, fontsize=18, loc='upper right') plt.show() ###Output _____no_output_____ ###Markdown **Image classification on validation data** ###Code from PIL import Image device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') cifar_val = torchvision.datasets.CIFAR100(root="CIFAR100", download=False, train=False, transform=None) images, labels=[], [] for sample in cifar_val: image, label = sample images.append(image) labels.append(label) idx = 51 img_src = images[idx] plt.imshow(img_src) label = labels[idx] trans = transforms.Compose([transforms.Resize((160, 160)), transforms.ToTensor()]) img = trans(img_src) img_batch = img[None] model.eval() torch.backends.cudnn.benchmark = True with torch.cuda.amp.autocast(): prediction = model(img_batch.to(device)) idx = torch.argmax(prediction[0]) print("Pred: ", cifar_classes[idx]) print("Correct: ", cifar_classes[label]) ###Output Pred: turtle Correct: bee
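###Markdown
CIFAR-100 contains many visually similar fine-grained classes, so the single top-1 prediction above (a bee misread as a turtle) only tells part of the story. As an added sketch, assuming `model`, `img_batch`, `device`, and `cifar_classes` from the cell above are still in scope, the snippet below lists the five most probable classes for the same image.
###Code
# Top-5 classes for the same image; the network outputs log-probabilities (log_softmax).
model.eval()
with torch.inference_mode(), torch.cuda.amp.autocast():
    log_probs = model(img_batch.to(device))
probs = log_probs[0].float().exp()   # convert log-probabilities back to probabilities
top_p, top_i = probs.topk(5)
for p, i in zip(top_p.tolist(), top_i.tolist()):
    print(f"{cifar_classes[i]:>15s}  {p:.3f}")
###Output
_____no_output_____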