# dMRI Data Reconstruction In this notebook, we will reconstruct MRI images from raw diffusion data using Python. This includes: 1. data preprocessing; 2. DTI reconstruction; and 3. DKI reconstruction. ## Data Preprocessing Data preprocessing is quite important for dMRI reconstruction. Different preprocessing choices can lead to different reconstructed image quality, which would make the comparison of different reconstruction methods unreliable. Thus, here we first preprocess all dMRI data with the same steps: denoising, topup (susceptibility-induced distortion correction) and eddy current-induced distortion and motion correction. ### __Import python libraries__ ``` import os # to control directories import numpy as np import nibabel as nib # read and save medical images from dipy.denoise.localpca import mppca # denoising import nipype.interfaces.fsl as fsl # topup from nipype.interfaces.fsl import TOPUP from nipype.testing import anatfile import timeit # compute time, usage: timeit.timeit() import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D %matplotlib inline ``` ### Set data path ``` data_path = "/home/erjun/githubEZ/dMRI_BHS/dMRI_data/dwi" ap_file = 'sub-032213_ses-001_dir-AP_run-1_dwi.nii.gz' # dMRI data pa_file = 'sub-032213_ses-001_dir-PA_run-1_dwi.nii.gz' bvals_file = 'sub-032216_ses-001_dir-AP_run-1_dwi.bvals' # bval file bvecs_file = 'sub-032216_ses-001_dir-AP_run-1_dwi.bvecs' # bvec file denoised_file = 'sub-032216_ses-001_dir-AP_run-1_dwi_denoised.nii.gz' # output file after denoising cwdir = os.getcwd() os.chdir(data_path) # directory setting ``` ### __Denoising__ ``` # Load data file img = nib.load(os.path.join(data_path,ap_file)) data = img.get_data() # Use dipy to denoise denoised = mppca(data, patch_radius=50) # Save the denoised data nib.save(nib.Nifti1Image(denoised, img.affine), os.path.join(data_path,denoised_file)) print('DONE') ``` ### TOPUP ``` # Set default output type and test the ExtractROI tool fsl.FSLCommand.set_default_output_type('NIFTI_GZ') fslroi = fsl.ExtractROI(in_file=anatfile, roi_file='bar.nii.gz', t_min=0,t_size=1) fslroi.cmdline == 'fslroi %s bar.nii.gz 0 1' % anatfile # Define b_0 image extraction function def extract_b0(inImage, outImage): "To run this, please first make sure you have installed FSL and can run it" "One method is to run FSL and this pre-processing code in the same terminal" fslroi = fsl.ExtractROI(in_file=inImage,roi_file=outImage,t_min=0, t_size=1) fslroi.run() # Test #extract_b0(ap_file, 'bar.nii.gz') #fslroi.cmdline == 'fslroi %s bar.nii.gz 0 1' % ap_file # Extract b0 images extract_b0(ap_file, 'epi_b0.nii.gz') extract_b0(pa_file, 'epi_rev_b0.nii.gz') # Use fslmerge to concatenate images #merger = fsl.Merge() #merger.inputs.in_files = ['epi_b0.nii.gz','epi_rev_b0.nii.gz'] #merger.inputs.dimension = 't' #merger.inputs.output_type = 'NIFTI_GZ' merger = fsl.Merge(in_files=['epi_b0.nii.gz','epi_rev_b0.nii.gz'],dimension = 't',output_type='NIFTI_GZ') merger.run() file = open('topup_encoding.txt','w') file.write('0 1 0 0.05\n0 -1 0 0.05') file.close() topup = TOPUP() topup.inputs.in_file = 'epi_b0_merged.nii.gz' topup.inputs.encoding_file = 'topup_encoding.txt' topup.inputs.output_type = 'NIFTI_GZ' #topup.run() #------------------------------------------------ # FSL ApplyTOPUP #------------------------------------------------ applytopup = fsl.ApplyTOPUP(in_files = ['epi_b0.nii.gz', 'epi_rev_b0.nii.gz'], encoding_file = 'topup_encoding.txt', in_topup_fieldcoef = 'epi_b0_merged_base_fieldcoef.nii.gz', in_topup_movpar = 
'epi_b0_merged_base_movpar.txt', output_type = "NIFTI_GZ") #applytopup.run() print('DONE') ``` ### EDDY ``` btr = fsl.BET(in_file= 'epi_b0.nii.gz',#'epi_b0_corrected.nii.gz', frac=0.2, out_file='brain.nii.gz', mask=True) btr.run() # total number of volumes in the dwi data img = nib.load(denoised_file).get_data() nvolumes = img.shape[-1] file = open('index.txt','w') for i in range(0, nvolumes): file.write('1 ') file.close() #eddy = fsl.Eddy(in_file = denoised_file, #in_mask = 'brain_mask.nii.gz', #in_index = 'index.txt', #in_acqp = 'topup_encoding.txt', #in_topup_fieldcoef = "epi_b0_merged_base_fieldcoef.nii.gz", #in_topup_movpar = 'epi_b0_merged_base_movpar.txt', #in_bvec = bvecs_file, #in_bval = bvals_file, #use_cuda = False, #is_shelled=True) #) #eddy.run() print('DONE') ``` ## DTI and DKI Reconstruction ``` import math from skimage import io # read, save, or display images and videos import time from dipy.io import read_bvals_bvecs from dipy.core.gradients import gradient_table from dipy.reconst.dti import TensorModel from dipy.reconst.dti import fractional_anisotropy from dipy.reconst.dti import color_fa import dipy.reconst.dki as dki # Set new data path for DTI reconstruction data_path = "/home/erjun/Documents/dHCP/dhcp_dmri_pipeline/sub-CC00060XX03/ses-12501/dwi" dwi_file = 'sub-CC00060XX03_ses-12501_desc-preproc_dwi.nii.gz' brainmask_file = 'sub-CC00060XX03_ses-12501_desc-preproc_space-dwi_brainmask.nii.gz' bval = 'sub-CC00060XX03_ses-12501_desc-preproc_dwi.bval' bvec = 'sub-CC00060XX03_ses-12501_desc-preproc_dwi.bvec' os.chdir(data_path) # Load data files img1 = nib.load(os.path.join(data_path,dwi_file)) data = img1.get_data() img2 = nib.load(os.path.join(data_path,brainmask_file)) brainmask = img2.get_data() bvals, bvecs = read_bvals_bvecs(os.path.join(data_path,bval), os.path.join(data_path,bvec)) gtab = gradient_table(bvals, bvecs) # DTI model ten_model = TensorModel(gtab) ten_fit = ten_model.fit(data, brainmask) # Save DTI parametric maps if not os.path.exists(data_path+'/DTI/'): os.mkdir(data_path+'/DTI') output_path = data_path+'/DTI/' DTI_FA = ten_fit.fa DTI_AD = ten_fit.ad DTI_RD = ten_fit.rd DTI_MD = ten_fit.md nib.save(nib.Nifti1Image(DTI_FA, img1.affine), os.path.join(output_path,'FA.nii.gz')) nib.save(nib.Nifti1Image(DTI_MD, img1.affine), os.path.join(output_path,'MD.nii.gz')) nib.save(nib.Nifti1Image(DTI_RD, img1.affine), os.path.join(output_path,'RD.nii.gz')) nib.save(nib.Nifti1Image(DTI_AD, img1.affine), os.path.join(output_path,'AD.nii.gz')) # Save FA RGB map fa = fractional_anisotropy(ten_fit.evals) cfa = color_fa(fa, ten_fit.evecs) DTI_FA = np.clip(fa, 0, 1) DTI_RGB = color_fa(fa, ten_fit.evecs) nib.save(nib.Nifti1Image(np.array(255 * cfa, 'uint8'), img1.affine), os.path.join(output_path,'FA_RGB.nii.gz')) print('Done!') # DKI MODEL dkimodel = dki.DiffusionKurtosisModel(gtab) dkifit = dkimodel.fit(data, brainmask) # Save DKI parametric maps if not os.path.exists(data_path+'/DKI/'): os.mkdir(data_path+'/DKI') data_path_saveImage = data_path+'/DKI/' DKI_FA = dkifit.fa DKI_MD = dkifit.md DKI_RD = dkifit.rd DKI_AD = dkifit.ad DKI_MK = dkifit.mk(0, 3) DKI_AK = dkifit.ak(0, 3) DKI_RK = dkifit.rk(0, 3) nib.save(nib.Nifti1Image(DKI_FA, img1.affine), os.path.join(data_path_saveImage,'dki_FA.nii.gz')) nib.save(nib.Nifti1Image(DKI_MD, img1.affine), os.path.join(data_path_saveImage,'dki_MD.nii.gz')) nib.save(nib.Nifti1Image(DKI_RD, img1.affine), os.path.join(data_path_saveImage,'dki_RD.nii.gz')) nib.save(nib.Nifti1Image(DKI_AD, img1.affine), os.path.join(data_path_saveImage,'dki_AD.nii.gz')) nib.save(nib.Nifti1Image(DKI_AK, 
img1.affine), os.path.join(data_path_saveImage,'AK.nii.gz')) nib.save(nib.Nifti1Image(DKI_RK, img1.affine), os.path.join(data_path_saveImage,'RK.nii.gz')) nib.save(nib.Nifti1Image(DKI_MK, img1.affine), os.path.join(data_path_saveImage,'MK.nii.gz')) print('DONE!') ``` ### Show basic output maps ``` # set plot background plt.style.use('seaborn-dark') # plot DTI parameter maps fig, [ax0, ax2, ax3, ax4] = plt.subplots(1,4,figsize=(10,8),subplot_kw={'xticks': [], 'yticks': []}) ax0.imshow(DTI_RGB[:,:,30,:]); ax0.set_title('Color coded FA',fontweight='bold',size=10) #ax1.imshow(DTI_FA[:,30,:]); ax1.set_title('Fractional anisotropy',fontweight='bold',size=10) ax2.imshow(DTI_MD[:,:,30]); ax2.set_title('Mean diffusivity',fontweight='bold',size=10) ax3.imshow(DTI_RD[:,:,30]); ax3.set_title('Radial diffusivity',fontweight='bold',size=10) ax4.imshow(DTI_AD[:,:,30]); ax4.set_title('Axial diffusivity',fontweight='bold',size=10) # plot DKI parameter maps fig, ([ax0, ax1, ax2],[ax3, ax4, ax5]) = plt.subplots(2,3,figsize=(10,8),subplot_kw={'xticks': [], 'yticks': []}) ax0.imshow(DKI_AD[:,:,30]); ax0.set_title('Axial diffusivity',fontweight='bold',size=10) ax1.imshow(DKI_RD[:,:,30]); ax1.set_title('Radial diffusivity',fontweight='bold',size=10) ax2.imshow(DKI_MD[:,:,30]); ax2.set_title('Mean diffusivity',fontweight='bold',size=10) ax3.imshow(DKI_AK[:,:,30]); ax3.set_title('Axial kurtosis',fontweight='bold',size=10) ax4.imshow(DKI_RK[:,:,30]); ax4.set_title('Radial kurtosis',fontweight='bold',size=10) ax5.imshow(DKI_MK[:,:,30]); ax5.set_title('Mean kurtosis',fontweight='bold',size=10) # Define a function named vol_plot # Visualization of MRI volume slices def vol_plot(x): "to create a 3D MRI figure with a slider" vol = x colormax = vol.max() # maximum value of the array, used later as cmax volume = vol.T len(volume) r, c = volume[math.floor(len(volume)/2)].shape # Define frames import plotly.graph_objects as go nb_frames = len(volume)-1 fig = go.Figure(frames=[go.Frame( data=go.Surface( z=(len(volume)-1 - k ) * np.ones((r, c)), surfacecolor=volume[len(volume)-1 - k], cmin=0, cmax=colormax ), name=str(k) # name the frame for the animation to behave properly ) for k in range(nb_frames)]) # Add data to be displayed before animation starts fig.add_trace(go.Surface( z=(len(volume)-1) * np.ones((r, c)), surfacecolor=volume[len(volume)-1],#np.flipud(volume[30]), colorscale='gray', cmin=0, cmax=colormax, colorbar=dict(thickness=20, ticklen=4) )) def frame_args(duration): return { "frame": {"duration": duration}, # duration controls the animation speed "mode": "immediate", "fromcurrent": True, "transition": {"duration": duration, "easing": "linear"}, } sliders = [ { "pad": {"b": 10, "t": 60}, "len": 0.9, "x": 0.1, "y": 0, "steps": [ { "args": [[f.name], frame_args(0)], "label": str(k), "method": "animate", } for k, f in enumerate(fig.frames) ], } ] # Layout fig.update_layout( title='Slices in volumetric data', width=600, height=600, scene=dict( zaxis=dict(range=[-1, len(volume)-1], autorange=False), aspectratio=dict(x=1, y=1, z=1), ), updatemenus = [ { "buttons": [ { "args": [None, frame_args(50)], "label": "▶", # play symbol "method": "animate", }, { "args": [[None], frame_args(0)], "label": "◼", # pause symbol "method": "animate", }, ], "direction": "left", "pad": {"r": 10, "t": 70}, "type": "buttons", "x": 0.1, "y": 0, } ], sliders=sliders ) fig.show() vol_plot(DTI_MD[:,:,:]) ```
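As a rough numerical check to complement the maps above, the DTI and DKI estimates of the same parameters can be compared voxel-wise inside the brain mask. This is a hedged sketch that simply reuses the arrays already computed in this notebook; the comparison itself is not part of the original pipeline:

```
import numpy as np

# Compare DTI and DKI parameter estimates within the brain mask
mask = brainmask > 0
for name, dti_map, dki_map in [('FA', DTI_FA, DKI_FA),
                               ('MD', DTI_MD, DKI_MD),
                               ('RD', DTI_RD, DKI_RD),
                               ('AD', DTI_AD, DKI_AD)]:
    diff = np.abs(dti_map[mask] - dki_map[mask])
    print('%s: mean |DTI - DKI| = %.5f' % (name, np.nanmean(diff)))
```

Large differences in the diffusivity maps would suggest that the kurtosis terms matter for this data, or that one of the fits is unstable.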
github_jupyter
<font size=6 color='violet'>Introduction</font> ![](http://www.gpb.org/sites/www.gpb.org/files/styles/hero_image/public/blogs/images/2018/08/07/maxresdefault.jpg?itok=gN6ErLyU) In this dataset, we are provided with game analytics for the PBS KIDS Measure Up! app. In this app, children navigate a map and complete various levels, which may be activities, video clips, games, or assessments. Each assessment is designed to test a child's comprehension of a certain set of measurement-related skills. There are five assessments: Bird Measurer, Cart Balancer, Cauldron Filler, Chest Sorter, and Mushroom Sorter. The intent of the competition is to use the gameplay data to forecast how many attempts a child will take to pass a given assessment. Each application install is represented by an installation_id. This will typically correspond to one child, but you should expect noise from issues such as shared devices. In the training set, you are provided the full history of gameplay data. In the test set, we have truncated the history after the start event of a single assessment, chosen randomly, for which you must predict the number of attempts. Note that the training set contains many installation_ids which never took assessments, whereas every installation_id in the test set made an attempt on at least one assessment. The outcomes in this competition are grouped into 4 groups (labeled accuracy_group in the data): 3: the assessment was solved on the first attempt 2: the assessment was solved on the second attempt 1: the assessment was solved after 3 or more attempts 0: the assessment was never solved <font color='blue' size=4>If you think this kernel was helpful, please don't forget to click on the upvote button, that helps a lot.</font> ## <font size=5 color='violet'> Importing required libraries</font> ``` import pandas as pd import os import numpy as np import matplotlib.pyplot as plt import seaborn as sns from sklearn.preprocessing import LabelEncoder from sklearn.model_selection import StratifiedKFold from sklearn.metrics import cohen_kappa_score from scipy.stats import mode from sklearn.feature_selection import SelectFromModel from sklearn.model_selection import train_test_split import xgboost as xgb from xgboost import XGBClassifier from xgboost import plot_importance from matplotlib import pyplot import shap # used in the model-interpretation section below ``` ### <font size=4 color='violet'> Reading and understanding our data</font> ``` os.listdir('../input/data-science-bowl-2019') %%time keep_cols = ['event_id', 'game_session', 'installation_id', 'event_count', 'event_code','title' ,'game_time', 'type', 'world','timestamp'] train=pd.read_csv('train.csv',usecols=keep_cols) train_labels=pd.read_csv('train_labels.csv', usecols=['installation_id','game_session','accuracy_group']) test=pd.read_csv('test.csv',usecols=keep_cols) submission=pd.read_csv('sample_submission.csv') ``` We can see that this data contains the full history of each installation, i.e. each time a child plays the game a unique game_session identifier is generated and the attributes related to that session are stored. The attributes provided in these files are as follows: - `event_id` - Randomly generated unique identifier for the event type. Maps to event_id column in specs table. - `game_session` - Randomly generated unique identifier grouping events within a single game or video play session. - `timestamp` - Client-generated datetime - `event_data` - Semi-structured JSON formatted string containing the events parameters. 
Default fields are: event_count, event_code, and game_time; otherwise, fields are determined by the event type. - `installation_id` - Randomly generated unique identifier grouping game sessions within a single installed application instance. - `event_count` - Incremental counter of events within a game session (offset at 1). Extracted from event_data. - `event_code` - Identifier of the event 'class'. Unique per game, but may be duplicated across games. E.g. event code '2000' always identifies the 'Start Game' event for all games. Extracted from event_data. - `game_time` - Time in milliseconds since the start of the game session. Extracted from event_data. - `title` - Title of the game or video. - `type` - Media type of the game or video. Possible values are: 'Game', 'Assessment', 'Activity', 'Clip'. - `world` - The section of the application the game or video belongs to. Helpful to identify the educational curriculum goals of the media. We will not consider `specs.csv`, since it only contains natural-language descriptions of the events. ``` train.shape,train_labels.shape x=train_labels['accuracy_group'].value_counts() sns.barplot(x.index,x) ``` ## <font size=5 color='violet'> Data Preparation</font> In this section we will prepare the data and put it into a trainable form. For that we will do the following steps: - First, we will find the installation ids which are in `train.csv` but not in `train_labels.csv`. These installations won't be of much use to us because `train_labels.csv` contains the target label, i.e. `accuracy_group`. We will identify them and remove those rows. ``` not_req=(set(train.installation_id.unique()) - set(train_labels.installation_id.unique())) train_new=~train['installation_id'].isin(not_req) train.where(train_new,inplace=True) train.dropna(inplace=True) train['event_code']=train.event_code.astype(int) ``` <font size=3 color='violet'>Extracting time features</font> ``` def extract_time_features(df): df['timestamp'] = pd.to_datetime(df['timestamp']) df['month'] = df['timestamp'].dt.month df['hour'] = df['timestamp'].dt.hour df['year'] = df['timestamp'].dt.year df['dayofweek'] = df['timestamp'].dt.dayofweek df['weekofyear'] = df['timestamp'].dt.weekofyear return df ``` Next, we will define a `prepare_data` function to prepare our train and test data. For that we will do the following steps: - Extract the time features (month, hour, year, day of week, week of year) from `timestamp` and drop the timestamp column; the hour, for example, indicates the time of day at which the child plays the game. - We will do a one-hot encoding on `event_code` and group the dataframe by installation_id and game_session. - We will define an `agg` dictionary to specify the aggregate functions to be performed after grouping the dataframe. - For the variables 'type', 'world' and 'title' we will take the first value, as it is unique for every installation_id, game_session pair. - At last, we will join all these together and return the dataframe; a toy sketch of the one-hot/groupby step is shown below, before the full implementation. 
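Before the full implementation, here is a toy sketch of the one-hot-encode-and-group step on made-up data (the ids and event codes here are invented purely for illustration and are not from the real dataset):

```
import pandas as pd

toy = pd.DataFrame({'installation_id': ['a', 'a', 'b'],
                    'game_session':    ['s1', 's1', 's2'],
                    'event_code':      [2000, 3010, 2000]})

# One-hot encode event_code, then count how often each code occurs
# per (installation_id, game_session) pair -- the same idea as join_one below.
toy_counts = (pd.get_dummies(toy, columns=['event_code'])
                .groupby(['installation_id', 'game_session'],
                         as_index=False, sort=False)
                .agg(sum))
print(toy_counts)
```

The real `prepare_data` function applies this same pattern to `event_code`, plus the aggregations and joins described above.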
``` time_features=['month','hour','year','dayofweek','weekofyear'] def prepare_data(df): df=extract_time_features(df) df=df.drop('timestamp',axis=1) #df['timestamp']=pd.to_datetime(df['timestamp']) #df['hour_of_day']=df['timestamp'].map(lambda x : int(x.hour)) join_one=pd.get_dummies(df[['event_code','installation_id','game_session']], columns=['event_code']).groupby(['installation_id','game_session'], as_index=False,sort=False).agg(sum) agg={'event_count':sum,'game_time':['sum','mean'],'event_id':'count'} join_two=df.drop(time_features,axis=1).groupby(['installation_id','game_session'] ,as_index=False,sort=False).agg(agg) join_two.columns= [' '.join(col).strip() for col in join_two.columns.values] join_three=df[['installation_id','game_session','type','world','title']].groupby( ['installation_id','game_session'],as_index=False,sort=False).first() join_four=df[time_features+['installation_id','game_session']].groupby(['installation_id', 'game_session'],as_index=False,sort=False).agg(mode)[time_features].applymap(lambda x: x.mode[0]) join_one=join_one.join(join_four) join_five=(join_one.join(join_two.drop(['installation_id','game_session'],axis=1))). \ join(join_three.drop(['installation_id','game_session'],axis=1)) return join_five join_train=prepare_data(train) cols=join_train.columns.to_list()[2:-3] join_train[cols]=join_train[cols].astype('int16') join_test=prepare_data(test) cols=join_test.columns.to_list()[2:-3] join_test[cols]=join_test[cols].astype('int16') ``` In this step,we will - prepare train by merging our train to train_labels.This will be our `final_train`. - prepare the test by selecting last row of each installation_id ,game_session as we have only 1000 rows in `sample_submission`.The last accuracy group for each installation id is taken as the accuracy group of the child. ``` cols=join_test.columns[2:-12].to_list() cols.append('event_id count') cols.append('installation_id') ``` - It seems that we have to group dafaframe by `installation_id` to form a proper trainable dataframe. - We will apply the same to form out test set. ``` df=join_test[['event_count sum','game_time mean','game_time sum', 'installation_id']].groupby('installation_id',as_index=False,sort=False).agg('mean') df_two=join_test[cols].groupby('installation_id',as_index=False, sort=False).agg('sum').drop('installation_id',axis=1) df_three=join_test[['title','type','world','installation_id']].groupby('installation_id', as_index=False,sort=False).last().drop('installation_id',axis=1) df_four=join_test[time_features+['installation_id']].groupby('installation_id',as_index=False,sort=False). \ agg(mode)[time_features].applymap(lambda x : x.mode[0]) final_train=pd.merge(train_labels,join_train,on=['installation_id','game_session'], how='left').drop(['game_session'],axis=1) #final_test=join_test.groupby('installation_id',as_index=False,sort=False).last().drop(['game_session','installation_id'],axis=1) final_test=(df.join(df_two)).join(df_three.join(df_four)).drop('installation_id',axis=1) df_two df=final_train[['event_count sum','game_time mean','game_time sum','installation_id']]. \ groupby('installation_id',as_index=False,sort=False).agg('mean') df_two=final_train[cols].groupby('installation_id',as_index=False, sort=False).agg('sum').drop('installation_id',axis=1) df_three=final_train[['accuracy_group','title','type','world','installation_id']]. \ groupby('installation_id',as_index=False,sort=False). 
\ last().drop('installation_id',axis=1) df_four=join_train[time_features+['installation_id']].groupby('installation_id',as_index=False,sort=False). \ agg(mode)[time_features].applymap(lambda x : x.mode[0]) final_train=(df.join(df_two)).join(df_three.join(df_four)).drop('installation_id',axis=1) final_train.shape,final_test.shape ``` Just making sure that all the columns in our `final_train` and `final_test` is the same,except accuracy_group.The instersection should return `54`. ``` len(set(final_train.columns) & set(final_test.columns)) ``` YES ! It's done.. ## <font size=4 color='violet'> Label Encoding</font> - We will concat out final_train and final_test to form `final`. - We will label encode the categorical variables. - We will split them back to final_train and final_test. ``` final=pd.concat([final_train,final_test]) encoding=['type','world','title'] for col in encoding: lb=LabelEncoder() lb.fit(final[col]) final[col]=lb.transform(final[col]) final_train=final[:len(final_train)] final_test=final[len(final_train):] X_train=final_train.drop('accuracy_group',axis=1) y_train=final_train['accuracy_group'] ``` <font size=5 color='violet'>Evaluation</font> Submissions are scored based on the quadratic weighted kappa, which measures the agreement between two outcomes. This metric typically varies from 0 (random agreement) to 1 (complete agreement). In the event that there is less agreement than expected by chance, the metric may go below 0. $$w_{i,j} = \frac{\left(i-j\right)^2}{\left(N-1\right)^2}$$ We will use `cohen_kappa_score` which is available in `sklearn.metrics` to calculate the score. ## <font size=5 color='violet'> XGBoost with StratifiedKFold</font> Here we will use `StratifiedKFold` and `xgboost` model to train and make prediction. ``` def model(X_train,y_train,final_test,n_splits=3): scores=[] pars = { 'colsample_bytree': 0.8, 'learning_rate': 0.08, 'max_depth': 10, 'subsample': 1, 'objective':'multi:softprob', 'num_class':4, 'eval_metric':'mlogloss', 'min_child_weight':3, 'gamma':0.25, 'n_estimators':500 } kf = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=42) y_pre=np.zeros((len(final_test),4),dtype=float) final_test=xgb.DMatrix(final_test.drop('accuracy_group',axis=1)) for train_index, val_index in kf.split(X_train, y_train): train_X = X_train.iloc[train_index] val_X = X_train.iloc[val_index] train_y = y_train[train_index] val_y = y_train[val_index] xgb_train = xgb.DMatrix(train_X, train_y) xgb_eval = xgb.DMatrix(val_X, val_y) xgb_model = xgb.train(pars, xgb_train, num_boost_round=1000, evals=[(xgb_train, 'train'), (xgb_eval, 'val')], verbose_eval=False, early_stopping_rounds=20 ) val_X=xgb.DMatrix(val_X) pred_val=[np.argmax(x) for x in xgb_model.predict(val_X)] score=cohen_kappa_score(pred_val,val_y,weights='quadratic') scores.append(score) print('choen_kappa_score :',score) pred=xgb_model.predict(final_test) y_pre+=pred pred = np.asarray([np.argmax(line) for line in y_pre]) print('Mean score:',np.mean(scores)) return xgb_model,pred X_train = X_train.drop('installation_id',axis=1) X_train xgb_model,pred=model(X_train,y_train,final_test,5) final_test = final_test.drop('installation_id', axis=1) final_test ``` ## <font size=5 color='violet'> Making our submission</font> After making our prediction we will make our submission to `submission.csv`. 
``` sub=pd.DataFrame({'installation_id':submission.installation_id,'accuracy_group':pred}) sub.to_csv('submission.csv',index=False) ``` ## <font size=5 color='violet'>Feature Selection</font> We will use xgboost's plotting module to plot the feature importances and see which features our model thinks are important for making predictions. ``` fig, ax = plt.subplots(figsize=(10,10)) xgb.plot_importance(xgb_model, max_num_features=50, height=0.5, ax=ax,importance_type='gain') plt.show() ``` There are three methods to measure feature importance in xgboost. They are: - `weight`: The total number of times this feature was used to split the data across all trees. - `cover`: The number of times a feature is used to split the data across all trees, weighted by the number of training data points that go through those splits. - `gain`: The average loss reduction gained when using this feature for splitting in trees. We used `gain` in the above example, and the model says that when it used `event_code_2030` the loss was on average reduced by 8%. ``` fig, ax = plt.subplots(figsize=(10,10)) xgb.plot_importance(xgb_model, max_num_features=50, height=0.5, ax=ax,importance_type='weight') plt.show() ``` When we considered `weight`, the model says that it used `game_time mean` 1035 times to split the data across the trees. Hmmm... so what can we conclude from the above figures? We will find out below. ### <font size=4 color='violet'>Interpreting our model with Confidence</font> `SHAP` is a powerful tool for interpreting our model with more confidence; it makes the process simple and understandable. We will try SHAP in this section to interpret our model. ``` shap_values = shap.TreeExplainer(xgb_model).shap_values(X_train) shap.summary_plot(shap_values, X_train, plot_type="bar") ``` For example, this figure explains the feature importance and its influence on the different classes. - For feature `event_code_3020`, the SHAP value is highest for `class 3`, which means it influences the prediction of class 3 more than that of any other class. Summary plot for `class 3`: ``` shap.summary_plot(shap_values[3], X_train) ``` Here we can see that the variables are ranked in descending order of importance. - The most important variable is `event_code_3020`. - A lower value of `event_code_3020` has a strong positive impact on the model predicting `class 3`. - In other words, for lower values of `event_code_3020` the model tends to classify the sample as `class 3`. Similarly for `class 0`: ``` shap.summary_plot(shap_values[0], X_train) ``` Hopefully you can see the difference between them. ### <font size=4 color='violet'>Select from Features</font> We will use sklearn's `SelectFromModel` to select relevant features. ``` X_train,X_test,y_train,y_test=train_test_split(X_train,y_train,test_size=.1) model = XGBClassifier() model.fit(X_train, y_train) threshold = np.sort(model.feature_importances_)[40:] for thresh in threshold: # select features using threshold selection = SelectFromModel(model, threshold=thresh, prefit=True) select_X_train = selection.transform(X_train) # train model selection_model = XGBClassifier() selection_model.fit(select_X_train, y_train) # eval model select_X_test = selection.transform(X_test) y_pred = selection_model.predict(select_X_test) predictions = [round(value) for value in y_pred] accuracy = cohen_kappa_score(y_test, predictions) print("Thresh=%.3f, n=%d, cohen kappa score: %.2f%%" % (thresh, select_X_train.shape[1], accuracy*100.0)) ``` We need to look into it further and evaluate... ### Under Construction!!! 
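One further evaluation step for this under-construction section (a hedged sketch, not part of the original loop): since the competition metric is the quadratic weighted kappa rather than the unweighted kappa printed above, the same held-out predictions can be re-scored with `weights='quadratic'`:

```
from sklearn.metrics import cohen_kappa_score

# y_test and predictions come from the SelectFromModel loop above
qwk = cohen_kappa_score(y_test, predictions, weights='quadratic')
print('Quadratic weighted kappa: %.4f' % qwk)
```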
<font color='red' size=4>If you think this kernel was helpful, please don't forget to click on the upvote button, that helps a lot.</font>
github_jupyter
``` # Import needed packages in PEP 8 order (no unused imports listed) (4 points total) # Import required libraries here import os import matplotlib.pyplot as plt import seaborn as sns import requests import urllib import folium import numpy as np import pandas as pd from pandas.io.json import json_normalize import geopandas as gpd from shapely.geometry import shape from shapely.geometry import Polygon from shapely.geometry import box from shapely.geometry import Point import rasterio as rio from rasterio.mask import mask from rasterio.plot import plotting_extent import rasterstats as rs import earthpy as et import earthpy.plot as ep import spectral.io.envi as envi import spectral as spy import tarfile import zipfile os.chdir(os.path.join(et.io.HOME,'earth-analytics')) def open_ecosystem_structure(site, date): '''Uses API call to retrieve NEON ecosystem structure (CHM) data at a given site and date. Returns list of all rasters within data product. For more information on NEON ecosystem structure data and a full list of available dates see https://data.neonscience.org/data-products/DP3.30015.001 Parameters ---------- site : str 4 Letter site name. See https://www.neonscience.org/field-sites/field-sites-map/list for a full list of NEON sites date : str Date of data collection in yyyy-mm format Returns ------- CHM_raster_tiles : .tif All raster .tif tiles associated with the site and date specified ''' data_product_url = ['https://data.neonscience.org/api/v0/data/DP3.30015.001/' + site+'/'+date] call_response = requests.get(data_product_url[0]) call_response.json() CHM_raster_tiles = [] for i in call_response.json()['data']['files']: data_file_url = i['url'] file_format = data_file_url.find('.tif') if not file_format == -1: CHM_raster_tiles.append(data_file_url) return CHM_raster_tiles def high_res_imagery(site, date): '''Uses API call to retrieve NEON ecosystem structure (CHM) data at a given site and date. Returns list of all rasters within data product. For more information on NEON ecosystem structure data and a full list of available dates see https://data.neonscience.org/data-products/DP3.30010.001 Parameters ---------- site : str 4 Letter site name. See https://www.neonscience.org/field-sites/field-sites-map/list for a full list of NEON sites date : str Date of data collection in yyyy-mm format Returns ------- CHM_raster_tiles : .tif All raster .tif tiles associated with the site and date specified ''' data_product_url = ['https://data.neonscience.org/api/v0/data/DP3.30010.001/' + site+'/'+date] call_response = requests.get(data_product_url[0]) call_response.json() high_res_raster_tiles = [] for i in call_response.json()['data']['files']: data_file_url = i['url'] file_format = data_file_url.find('.tif') if not file_format == -1: high_res_raster_tiles.append(data_file_url) return high_res_raster_tiles def open_woody_veg_structure(site, date): '''Uses API call to retrieve NEON product data for woody vegetation structure. Returns pandas of merged apparent individual, mapping and tagging, and per plot per year documents, eg one dataframe with locational, species, and height data. Also returns a pandas dataframe of filtered plot data to facilitate geospatial merges and calculation of raster stats. For more information on NEON woody vegetation structure data products and available dates, see https://data.neonscience.org/data-products/DP1.10098.001 Parameters ---------- site : str 4 Letter site name. 
See https://www.neonscience.org/field-sites/field-sites-map/list for a full list of NEON sites date : str Date of data collection in yyyy-mm format Returns ------- all_merged_df : pandas.core.frame.DataFrame Pandas dataframe of merged measurement, plot, and mapping tabular files from data product plot_df : pandas.core.frame.DataFrame Pandas dataframe of perplotperyear.csv locational data ''' data_product_url = ['https://data.neonscience.org/api/v0/data/DP1.10098.001/' + site+'/'+date] call_response = requests.get(data_product_url[0]) all_urls = [] for i in call_response.json()['data']['files']: data_file_url = i['url'] height_find = data_file_url.find('individual') plot_find = data_file_url.find('perplot') map_find = data_file_url.find('mapping') if not height_find == -1: apparent_df = pd.read_csv(data_file_url) elif not plot_find == -1: plot_df = pd.read_csv(data_file_url) elif not map_find == -1: map_df = pd.read_csv(data_file_url) apparent_df = apparent_df[[ 'plotID', 'individualID', 'height']] plot_df = plot_df[['plotID', 'plotType', 'decimalLatitude', 'decimalLongitude', 'easting', 'northing']] map_df = map_df[['plotID', 'individualID', 'scientificName']] measurement_map_merge = pd.merge( apparent_df, map_df, on=['plotID', 'individualID']) all_merged_df = pd.merge(plot_df, measurement_map_merge, on='plotID') return all_merged_df, plot_df def NEON_site_extent(path_to_NEON_boundaries, site): '''Extracts a NEON site extent from an individual site as long as the original NEON site extent shape file contains a column named 'siteID'. Parameters ---------- path_to_NEON_boundaries : str The path to a shape file that contains the list of all NEON site extents, also known as field sampling boundaries (can be found at NEON and ESRI sites) site : str One siteID contains 4 capital letters, e.g. CPER, HARV, ONAQ or SJER. Returns ------- site_boundary : geopandas.geodataframe.GeoDataFrame A vector containing a single polygon per the site specified. ''' NEON_boundaries = gpd.read_file(path_to_NEON_boundaries) boundaries_indexed = NEON_boundaries.set_index(['siteID']) site_boundary = boundaries_indexed.loc[[site]] site_boundary.reset_index(inplace=True) return site_boundary def buffer_point_plots(df, crs, buffer): '''Creates geodataframe from plot points within a designated coordinate reference system. Buffers plot points to a given radius. Compatible with most NEON tabular plot data files including northing and easting locational columns. Final product can be used to visualize plot locations or combined with other spatial data products. Parameters ---------- df : pandas.core.frame.DataFrame df including Northing and Easting plot locations crs : str or rasterio.crs.CRS String of desired coordinate reference system buffer : int Desired radius for final plot polygons Returns ------- buffered_gdf : geopandas.geodataframe.GeoDataFrame Dataframe with point plots buffered to polgyons ''' buffered_gdf = gpd.GeoDataFrame( df, geometry=gpd.points_from_xy( x=df.easting, y=df.northing), crs=crs) buffered_gdf['geometry'] = buffered_gdf.geometry.buffer(buffer) return(buffered_gdf) def tiles_over_insitu_plots(tiles, plots): '''Takes a list of raster images and geodataframe of plot polygons within the same CRS. Cross references overlap between raster extent polygon and plot point polygons. Returns list of .tiff file locations that overlap completely with plot polygons. 
---------- tiles : list List of rasters plots : geopandas.geodataframe.GeoDataFrame Geodataframe with polygons of AOI plots Returns ------- target_rasters : list List of strings with to raster locations ''' target_rasters = [] insitu_plot_size = plots.loc[0, 'geometry'].area for tile in tiles: with rio.open(tile) as src: extent = plotting_extent(src) raster_polygon = Polygon([ [extent[0], extent[2]], [extent[0], extent[3]], [extent[1], extent[3]], [extent[1], extent[2]]]) raster_polygon_gdf = gpd.GeoDataFrame(crs=src.crs, geometry=[raster_polygon]) raster_plot_intersection = gpd.overlay( raster_polygon_gdf, plots, how='intersection') if raster_plot_intersection['geometry'].empty: pass elif int( raster_plot_intersection.loc[0, 'geometry'].area) == int( insitu_plot_size): target_rasters.append(tile) return target_rasters def calculate_rasterstats_dataframe(tiles, plot_polygons): '''Creates a geodataframe object with lidar summary statistics using rasterstats zonal. Requires a pandas dataframe with plot polygons to cross reference with lidar calculations. Outputs a single dataframe with uniquely named summary statistics. Parameters ---------- df : pandas.core.frame.DataFrame df including Northing and Easting plot locations crs : str or rasterio.crs.CRS String of desired coordinate reference system or rasterio CRS object Returns ------- CHM_stats : list of geopandas.geodataframe.GeoDataFrame Returns a list of geodataframes with lidar max, lidar mean, lidar median and lidar min calculated in new columns. calculations. ''' CHM_stats = [] for tile in tiles: with rio.open(tile) as chm_src: site_chm = chm_src.read(1, masked=True) site_chm_meta = chm_src.meta site_tree_heights = rs.zonal_stats( plot_polygons, site_chm, affine=site_chm_meta["transform"], geojson_out=True, copy_properties=True, nodata=0, stats=["mean", "median", "max", "min"]) site_tree_heights_gdf = gpd.GeoDataFrame.from_features( site_tree_heights) rename_dict_lidar = {"mean": "lidar_mean", "median": "lidar_median", "max": "lidar_max", "min": "lidar_min"} site_tree_heights_gdf.rename(columns=rename_dict_lidar, inplace=True) CHM_stats.append(site_tree_heights_gdf) return CHM_stats high_res_list=high_res_imagery('CPER','2017-05') # Download shapefile of all NEON site boundaries url = 'https://www.neonscience.org/neon-terrestrial-field-site-boundaries-shapefile' et.data.get_data(url=url, replace=True) # Create path to shapefile terrestrial_sites = os.path.join( 'data', 'earthpy-downloads', 'fieldSamplingBoundaries (1)', 'terrestrialSamplingBoundaries.shp') # Import insitu plot data for CPER and ONAQ sites CPER_insitu_df, CPER_plots = open_woody_veg_structure( site='CPER', date='2017-09') # Import CHM data and identify crs CPER_tif_files = open_ecosystem_structure( site='CPER', date='2017-05') with rio.open(high_res_list[4]) as CPER_src: arr_dig=CPER_src.read(masked=True) CPER_crs = CPER_src.crs # Create geodataframes with buffered plot points CPER_insitu_gdf = buffer_point_plots( df=CPER_insitu_df, crs=CPER_crs, buffer=40) ep.plot_rgb(arr_dig,rgb=(0,1,2)) CPER_AOI_tifs = tiles_over_insitu_plots( tiles=high_res_list, plots=CPER_insitu_gdf) with rio.open(CPER_AOI_tifs[1]) as src: arr=src.read(1,masked=True) extent=plotting_extent(src) fig, ax = plt.subplots() ep.plot_bands(arr,ax=ax,extent=extent) CPER_insitu_gdf.plot(ax=ax, color='pink') tf_AVIRIS = tarfile.open(os.path.join('data','AVIRIS','f190821t01p00r12.tar.gz')) tf_AVIRIS.extractall(path=os.path.join('AVIRIS')) # tf_LANDSAT = tarfile.open(os.path.join( # 
ONAQ_directory,'LC08_L1TP_038032_20171010_20171024_01_T1.tar.gz')) # tf_LANDSAT.extractall(path=LANDSAT_path) # Download shapefile of all NEON site boundaries url = 'https://www.neonscience.org/neon-terrestrial-field-site-boundaries-shapefile' et.data.get_data(url=url, replace=True) # Create path to shapefile terrestrial_sites = os.path.join( 'data', 'earthpy-downloads', 'fieldSamplingBoundaries (1)', 'terrestrialSamplingBoundaries.shp') CPER_site_outline = NEON_site_extent( path_to_NEON_boundaries=terrestrial_sites, site='CPER') CPER_site_outline.crs=AVIRIS_crs CPER_site_outline.crs AVIRIS_path = os.path.join('AVIRIS', 'f170509t01p00r05rdn_e','f170509t01p00r05rdn_e_sc01_ort_img') with rio.open (AVIRIS_path) as src: arr= src.read(masked=True) extent=plotting_extent(src) arr.shape # fig, ax = plt.subplots() ep.plot_rgb(arr, rgb=(40, 10, 20)) # CPER_insitu_gdf.plot(ax=ax, # color='pink') # CPER_AOI_tifs = tiles_over_insitu_plots( # tiles=AVIRIS_path, plots=CPER_insitu_gdf) with rio.open(AVIRIS_path) as chm_src: site_chm = chm_src.read(1, masked=True) site_chm_meta = chm_src.meta site_tree_heights = rs.zonal_stats( CPER_insitu_gdf, site_chm, affine=site_chm_meta["transform"], geojson_out=True, copy_properties=True, nodata=0, stats=["mean", "median", "max", "min"]) site_tree_heights_gdf = gpd.GeoDataFrame.from_features( site_tree_heights) site_tree_heights_gdf ```
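A caveat about the CRS handling above: assigning `CPER_site_outline.crs = AVIRIS_crs` only relabels the coordinates, it does not transform them, and `AVIRIS_crs` is not defined before that cell. Below is a hedged sketch of doing the reprojection explicitly with GeoPandas, reusing `AVIRIS_path`, `CPER_insitu_gdf`, and `CPER_site_outline` from the cells above (the new `*_aviris` variable names are introduced here only for illustration):

```
import rasterio as rio

# Read the CRS from the AVIRIS ortho image itself
with rio.open(AVIRIS_path) as src:
    AVIRIS_crs = src.crs

# Reproject (rather than just relabel) the vector layers into that CRS
CPER_insitu_gdf_aviris = CPER_insitu_gdf.to_crs(AVIRIS_crs)
CPER_site_outline_aviris = CPER_site_outline.to_crs(AVIRIS_crs)
```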
github_jupyter
# Batch Normalization – Practice Batch normalization is most useful when building deep neural networks. To demonstrate this, we'll create a convolutional neural network with 20 convolutional layers, followed by a fully connected layer. We'll use it to classify handwritten digits in the MNIST dataset, which should be familiar to you by now. This is **not** a good network for classfying MNIST digits. You could create a _much_ simpler network and get _better_ results. However, to give you hands-on experience with batch normalization, we had to make an example that was: 1. Complicated enough that training would benefit from batch normalization. 2. Simple enough that it would train quickly, since this is meant to be a short exercise just to give you some practice adding batch normalization. 3. Simple enough that the architecture would be easy to understand without additional resources. This notebook includes two versions of the network that you can edit. The first uses higher level functions from the `tf.layers` package. The second is the same network, but uses only lower level functions in the `tf.nn` package. 1. [Batch Normalization with `tf.layers.batch_normalization`](#example_1) 2. [Batch Normalization with `tf.nn.batch_normalization`](#example_2) The following cell loads TensorFlow, downloads the MNIST dataset if necessary, and loads it into an object named `mnist`. You'll need to run this cell before running anything else in the notebook. ``` import tensorflow as tf from tensorflow.examples.tutorials.mnist import input_data mnist = input_data.read_data_sets("MNIST_data/", one_hot=True, reshape=False) ``` # Batch Normalization using `tf.layers.batch_normalization`<a id="example_1"></a> This version of the network uses `tf.layers` for almost everything, and expects you to implement batch normalization using [`tf.layers.batch_normalization`](https://www.tensorflow.org/api_docs/python/tf/layers/batch_normalization) We'll use the following function to create fully connected layers in our network. We'll create them with the specified number of neurons and a ReLU activation function. This version of the function does not include batch normalization. ``` """ DO NOT MODIFY THIS CELL """ def fully_connected(prev_layer, num_units): """ Create a fully connectd layer with the given layer as input and the given number of neurons. :param prev_layer: Tensor The Tensor that acts as input into this layer :param num_units: int The size of the layer. That is, the number of units, nodes, or neurons. :returns Tensor A new fully connected layer """ layer = tf.layers.dense(prev_layer, num_units, activation=tf.nn.relu) return layer ``` We'll use the following function to create convolutional layers in our network. They are very basic: we're always using a 3x3 kernel, ReLU activation functions, strides of 1x1 on layers with odd depths, and strides of 2x2 on layers with even depths. We aren't bothering with pooling layers at all in this network. This version of the function does not include batch normalization. ``` """ DO NOT MODIFY THIS CELL """ def conv_layer(prev_layer, layer_depth): """ Create a convolutional layer with the given layer as input. :param prev_layer: Tensor The Tensor that acts as input into this layer :param layer_depth: int We'll set the strides and number of feature maps based on the layer's depth in the network. This is *not* a good way to make a CNN, but it helps us create this example with very little code. 
:returns Tensor A new convolutional layer """ strides = 2 if layer_depth % 3 == 0 else 1 conv_layer = tf.layers.conv2d(prev_layer, layer_depth*4, 3, strides, 'same', activation=tf.nn.relu) return conv_layer ``` **Run the following cell**, along with the earlier cells (to load the dataset and define the necessary functions). This cell builds the network **without** batch normalization, then trains it on the MNIST dataset. It displays loss and accuracy data periodically while training. ``` """ DO NOT MODIFY THIS CELL """ def train(num_batches, batch_size, learning_rate): # Build placeholders for the input samples and labels inputs = tf.placeholder(tf.float32, [None, 28, 28, 1]) labels = tf.placeholder(tf.float32, [None, 10]) # Feed the inputs into a series of 20 convolutional layers layer = inputs for layer_i in range(1, 20): layer = conv_layer(layer, layer_i) # Flatten the output from the convolutional layers orig_shape = layer.get_shape().as_list() layer = tf.reshape(layer, shape=[-1, orig_shape[1] * orig_shape[2] * orig_shape[3]]) # Add one fully connected layer layer = fully_connected(layer, 100) # Create the output layer with 1 node for each logits = tf.layers.dense(layer, 10) # Define loss and training operations model_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels)) train_opt = tf.train.AdamOptimizer(learning_rate).minimize(model_loss) # Create operations to test accuracy correct_prediction = tf.equal(tf.argmax(logits,1), tf.argmax(labels,1)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) # Train and test the network with tf.Session() as sess: sess.run(tf.global_variables_initializer()) for batch_i in range(num_batches): batch_xs, batch_ys = mnist.train.next_batch(batch_size) # train this batch sess.run(train_opt, {inputs: batch_xs, labels: batch_ys}) # Periodically check the validation or training loss and accuracy if batch_i % 100 == 0: loss, acc = sess.run([model_loss, accuracy], {inputs: mnist.validation.images, labels: mnist.validation.labels}) print('Batch: {:>2}: Validation loss: {:>3.5f}, Validation accuracy: {:>3.5f}'.format(batch_i, loss, acc)) elif batch_i % 25 == 0: loss, acc = sess.run([model_loss, accuracy], {inputs: batch_xs, labels: batch_ys}) print('Batch: {:>2}: Training loss: {:>3.5f}, Training accuracy: {:>3.5f}'.format(batch_i, loss, acc)) # At the end, score the final accuracy for both the validation and test sets acc = sess.run(accuracy, {inputs: mnist.validation.images, labels: mnist.validation.labels}) print('Final validation accuracy: {:>3.5f}'.format(acc)) acc = sess.run(accuracy, {inputs: mnist.test.images, labels: mnist.test.labels}) print('Final test accuracy: {:>3.5f}'.format(acc)) # Score the first 100 test images individually. This won't work if batch normalization isn't implemented correctly. correct = 0 for i in range(100): correct += sess.run(accuracy,feed_dict={inputs: [mnist.test.images[i]], labels: [mnist.test.labels[i]]}) print("Accuracy on 100 samples:", correct/100) num_batches = 800 batch_size = 64 learning_rate = 0.002 tf.reset_default_graph() with tf.Graph().as_default(): train(num_batches, batch_size, learning_rate) ``` With this many layers, it's going to take a lot of iterations for this network to learn. By the time you're done training these 800 batches, your final test and validation accuracies probably won't be much better than 10%. (It will be different each time, but will most likely be less than 15%.) 
Using batch normalization, you'll be able to train this same network to over 90% in that same number of batches. # Add batch normalization We've copied the previous three cells to get you started. **Edit these cells** to add batch normalization to the network. For this exercise, you should use [`tf.layers.batch_normalization`](https://www.tensorflow.org/api_docs/python/tf/layers/batch_normalization) to handle most of the math, but you'll need to make a few other changes to your network to integrate batch normalization. You may want to refer back to the lesson notebook to remind yourself of important things, like how your graph operations need to know whether or not you are performing training or inference. If you get stuck, you can check out the `Batch_Normalization_Solutions` notebook to see how we did things. **TODO:** Modify `fully_connected` to add batch normalization to the fully connected layers it creates. Feel free to change the function's parameters if it helps. ``` def fully_connected(prev_layer, num_units, is_training): """ Create a fully connectd layer with the given layer as input and the given number of neurons. :param prev_layer: Tensor The Tensor that acts as input into this layer :param num_units: int The size of the layer. That is, the number of units, nodes, or neurons. :returns Tensor A new fully connected layer """ layer = tf.layers.dense(prev_layer, num_units, activation=None) layer = tf.layers.batch_normalization(layer, training=is_training) layer = tf.nn.relu(layer) return layer ``` **TODO:** Modify `conv_layer` to add batch normalization to the convolutional layers it creates. Feel free to change the function's parameters if it helps. ``` def conv_layer(prev_layer, layer_depth, is_training): """ Create a convolutional layer with the given layer as input. :param prev_layer: Tensor The Tensor that acts as input into this layer :param layer_depth: int We'll set the strides and number of feature maps based on the layer's depth in the network. This is *not* a good way to make a CNN, but it helps us create this example with very little code. :returns Tensor A new convolutional layer """ strides = 2 if layer_depth % 3 == 0 else 1 conv_layer = tf.layers.conv2d(prev_layer, layer_depth*4, 3, strides, 'same', activation=None) conv_layer = tf.layers.batch_normalization(conv_layer, training=is_training) conv_layer = tf.nn.relu(conv_layer) return conv_layer ``` **TODO:** Edit the `train` function to support batch normalization. You'll need to make sure the network knows whether or not it is training, and you'll need to make sure it updates and uses its population statistics correctly. 
``` def train(num_batches, batch_size, learning_rate): # Build placeholders for the input samples and labels inputs = tf.placeholder(tf.float32, [None, 28, 28, 1]) labels = tf.placeholder(tf.float32, [None, 10]) is_training = tf.placeholder(tf.bool) # Feed the inputs into a series of 20 convolutional layers layer = inputs for layer_i in range(1, 20): layer = conv_layer(layer, layer_i, is_training) # Flatten the output from the convolutional layers orig_shape = layer.get_shape().as_list() layer = tf.reshape(layer, shape=[-1, orig_shape[1] * orig_shape[2] * orig_shape[3]]) # Add one fully connected layer layer = fully_connected(layer, 100, is_training) # Create the output layer with 1 node for each logits = tf.layers.dense(layer, 10) # Define loss and training operations model_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels)) extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) with tf.control_dependencies(extra_update_ops): train_opt = tf.train.AdamOptimizer(learning_rate).minimize(model_loss) # Create operations to test accuracy correct_prediction = tf.equal(tf.argmax(logits,1), tf.argmax(labels,1)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) # Train and test the network with tf.Session() as sess: sess.run(tf.global_variables_initializer()) for batch_i in range(num_batches): batch_xs, batch_ys = mnist.train.next_batch(batch_size) # train this batch sess.run(train_opt, {inputs: batch_xs, labels: batch_ys, is_training: True}) # Periodically check the validation or training loss and accuracy if batch_i % 100 == 0: loss, acc = sess.run([model_loss, accuracy], {inputs: mnist.validation.images, labels: mnist.validation.labels, is_training: False}) print('Batch: {:>2}: Validation loss: {:>3.5f}, Validation accuracy: {:>3.5f}'.format(batch_i, loss, acc)) elif batch_i % 25 == 0: loss, acc = sess.run([model_loss, accuracy], {inputs: batch_xs, labels: batch_ys, is_training: False}) print('Batch: {:>2}: Training loss: {:>3.5f}, Training accuracy: {:>3.5f}'.format(batch_i, loss, acc)) # At the end, score the final accuracy for both the validation and test sets acc = sess.run(accuracy, {inputs: mnist.validation.images, labels: mnist.validation.labels, is_training: False}) print('Final validation accuracy: {:>3.5f}'.format(acc)) acc = sess.run(accuracy, {inputs: mnist.test.images, labels: mnist.test.labels, is_training: False}) print('Final test accuracy: {:>3.5f}'.format(acc)) # Score the first 100 test images individually. This won't work if batch normalization isn't implemented correctly. correct = 0 for i in range(100): correct += sess.run(accuracy,feed_dict={inputs: [mnist.test.images[i]], labels: [mnist.test.labels[i]], is_training: False}) print("Accuracy on 100 samples:", correct/100) num_batches = 800 batch_size = 64 learning_rate = 0.002 tf.reset_default_graph() with tf.Graph().as_default(): train(num_batches, batch_size, learning_rate) ``` With batch normalization, you should now get an accuracy over 90%. Notice also the last line of the output: `Accuracy on 100 samples`. If this value is low while everything else looks good, that means you did not implement batch normalization correctly. Specifically, it means you either did not calculate the population mean and variance while training, or you are not using those values during inference. 
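If that last check fails, one way to debug is to confirm that the population statistics are actually being maintained. This is a sketch under the assumption that `tf.layers.batch_normalization` created variables whose names contain 'moving_mean' and 'moving_variance' (its default behavior); it would be run inside the training session after a number of batches:

```
# Sketch: inspect the batch-norm population statistics during training.
bn_stats = [v for v in tf.global_variables()
            if 'moving_mean' in v.name or 'moving_variance' in v.name]
print(len(bn_stats), 'moving-statistic variables found')
# The moving means should drift away from their initial zeros as training proceeds.
print(sess.run(bn_stats[0])[:5])
```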
# Batch Normalization using `tf.nn.batch_normalization`<a id="example_2"></a> Most of the time you will be able to use higher level functions exclusively, but sometimes you may want to work at a lower level. For example, if you ever want to implement a new feature – something new enough that TensorFlow does not already include a high-level implementation of it, like batch normalization in an LSTM – then you may need to know these sorts of things. This version of the network uses `tf.nn` for almost everything, and expects you to implement batch normalization using [`tf.nn.batch_normalization`](https://www.tensorflow.org/api_docs/python/tf/nn/batch_normalization). **Optional TODO:** You can run the next three cells before you edit them just to see how the network performs without batch normalization. However, the results should be pretty much the same as you saw with the previous example before you added batch normalization. **TODO:** Modify `fully_connected` to add batch normalization to the fully connected layers it creates. Feel free to change the function's parameters if it helps. **Note:** For convenience, we continue to use `tf.layers.dense` for the `fully_connected` layer. By this point in the class, you should have no problem replacing that with matrix operations between the `prev_layer` and explicit weights and biases variables. ``` def fully_connected(prev_layer, num_units): """ Create a fully connectd layer with the given layer as input and the given number of neurons. :param prev_layer: Tensor The Tensor that acts as input into this layer :param num_units: int The size of the layer. That is, the number of units, nodes, or neurons. :returns Tensor A new fully connected layer """ weights = tf.Variable(tf.random_normal([prev_layer.shape[0], num_units])) layer = tf.matmul(prev_layer, weights) # missing: ReLU return layer ``` **TODO:** Modify `conv_layer` to add batch normalization to the fully connected layers it creates. Feel free to change the function's parameters if it helps. **Note:** Unlike in the previous example that used `tf.layers`, adding batch normalization to these convolutional layers _does_ require some slight differences to what you did in `fully_connected`. ``` def conv_layer(prev_layer, layer_depth): """ Create a convolutional layer with the given layer as input. :param prev_layer: Tensor The Tensor that acts as input into this layer :param layer_depth: int We'll set the strides and number of feature maps based on the layer's depth in the network. This is *not* a good way to make a CNN, but it helps us create this example with very little code. :returns Tensor A new convolutional layer """ strides = 2 if layer_depth % 3 == 0 else 1 in_channels = prev_layer.get_shape().as_list()[3] out_channels = layer_depth*4 weights = tf.Variable( tf.truncated_normal([3, 3, in_channels, out_channels], stddev=0.05)) bias = tf.Variable(tf.zeros(out_channels)) conv_layer = tf.nn.conv2d(prev_layer, weights, strides=[1,strides, strides, 1], padding='SAME') conv_layer = tf.nn.bias_add(conv_layer, bias) conv_layer = tf.nn.relu(conv_layer) return conv_layer ``` **TODO:** Edit the `train` function to support batch normalization. You'll need to make sure the network knows whether or not it is training. 
``` def train(num_batches, batch_size, learning_rate): # Build placeholders for the input samples and labels inputs = tf.placeholder(tf.float32, [None, 28, 28, 1]) labels = tf.placeholder(tf.float32, [None, 10]) # Feed the inputs into a series of 20 convolutional layers layer = inputs for layer_i in range(1, 20): layer = conv_layer(layer, layer_i) # Flatten the output from the convolutional layers orig_shape = layer.get_shape().as_list() layer = tf.reshape(layer, shape=[-1, orig_shape[1] * orig_shape[2] * orig_shape[3]]) # Add one fully connected layer layer = fully_connected(layer, 100) # Create the output layer with 1 node for each logits = tf.layers.dense(layer, 10) # Define loss and training operations model_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels)) train_opt = tf.train.AdamOptimizer(learning_rate).minimize(model_loss) # Create operations to test accuracy correct_prediction = tf.equal(tf.argmax(logits,1), tf.argmax(labels,1)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) # Train and test the network with tf.Session() as sess: sess.run(tf.global_variables_initializer()) for batch_i in range(num_batches): batch_xs, batch_ys = mnist.train.next_batch(batch_size) # train this batch sess.run(train_opt, {inputs: batch_xs, labels: batch_ys}) # Periodically check the validation or training loss and accuracy if batch_i % 100 == 0: loss, acc = sess.run([model_loss, accuracy], {inputs: mnist.validation.images, labels: mnist.validation.labels}) print('Batch: {:>2}: Validation loss: {:>3.5f}, Validation accuracy: {:>3.5f}'.format(batch_i, loss, acc)) elif batch_i % 25 == 0: loss, acc = sess.run([model_loss, accuracy], {inputs: batch_xs, labels: batch_ys}) print('Batch: {:>2}: Training loss: {:>3.5f}, Training accuracy: {:>3.5f}'.format(batch_i, loss, acc)) # At the end, score the final accuracy for both the validation and test sets acc = sess.run(accuracy, {inputs: mnist.validation.images, labels: mnist.validation.labels}) print('Final validation accuracy: {:>3.5f}'.format(acc)) acc = sess.run(accuracy, {inputs: mnist.test.images, labels: mnist.test.labels}) print('Final test accuracy: {:>3.5f}'.format(acc)) # Score the first 100 test images individually. This won't work if batch normalization isn't implemented correctly. correct = 0 for i in range(100): correct += sess.run(accuracy,feed_dict={inputs: [mnist.test.images[i]], labels: [mnist.test.labels[i]]}) print("Accuracy on 100 samples:", correct/100) num_batches = 800 batch_size = 64 learning_rate = 0.002 tf.reset_default_graph() with tf.Graph().as_default(): train(num_batches, batch_size, learning_rate) ``` Once again, the model with batch normalization should reach an accuracy over 90%. There are plenty of details that can go wrong when implementing at this low level, so if you got it working - great job! If not, do not worry, just look at the `Batch_Normalization_Solutions` notebook to see what went wrong.
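For reference, here is one possible way to wire `tf.nn.batch_normalization` into `fully_connected`. This is a sketch, not the solutions notebook: it computes batch statistics with `tf.nn.moments` during training, maintains population statistics with a manual moving average, and assumes an `is_training` boolean placeholder like the one used in the `tf.layers` example above:

```
def fully_connected(prev_layer, num_units, is_training):
    """Fully connected layer with batch normalization via tf.nn (sketch)."""
    in_units = int(prev_layer.shape[1])
    weights = tf.Variable(tf.truncated_normal([in_units, num_units], stddev=0.05))
    linear_output = tf.matmul(prev_layer, weights)

    # Learnable scale/offset plus non-trainable population statistics
    gamma = tf.Variable(tf.ones([num_units]))
    beta = tf.Variable(tf.zeros([num_units]))
    pop_mean = tf.Variable(tf.zeros([num_units]), trainable=False)
    pop_variance = tf.Variable(tf.ones([num_units]), trainable=False)
    epsilon = 1e-3

    def batch_norm_training():
        # Normalize with the current batch statistics and update the
        # population statistics with an exponential moving average.
        batch_mean, batch_variance = tf.nn.moments(linear_output, [0])
        decay = 0.99
        train_mean = tf.assign(pop_mean,
                               pop_mean * decay + batch_mean * (1 - decay))
        train_variance = tf.assign(pop_variance,
                                   pop_variance * decay + batch_variance * (1 - decay))
        with tf.control_dependencies([train_mean, train_variance]):
            return tf.nn.batch_normalization(linear_output, batch_mean,
                                             batch_variance, beta, gamma, epsilon)

    def batch_norm_inference():
        # Use the population statistics accumulated during training.
        return tf.nn.batch_normalization(linear_output, pop_mean, pop_variance,
                                         beta, gamma, epsilon)

    output = tf.cond(is_training, batch_norm_training, batch_norm_inference)
    return tf.nn.relu(output)
```

The convolutional version follows the same pattern, except that `tf.nn.moments` is taken over the batch and both spatial axes (`[0, 1, 2]`) so that each feature map gets a single mean and variance.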
``` from imports import * import pickle # device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu') device = torch.device("cuda:0") 2048*6*10 def get_encoder(model_name): if model_name == 'mobile_net': md = torchvision.models.mobilenet_v2(pretrained=True) encoder = nn.Sequential(*list(md.children())[:-1]) elif model_name == 'resnet': md = torchvision.models.resnet50(pretrained=True) encoder = nn.Sequential(*list(md.children())[:-2]) return encoder class DecisionGenerator_no_attention(nn.Module): def __init__(self, encoder, encoder_dims, device, action_num=4, explanation_num=21): super().__init__() """ encoder_dims = (F,H,W) F:Feature shape (1280 for mobile net, 2048 for resnet) H,W = image feature height, width """ self.encoder = encoder assert len(encoder_dims) == 3, "encoder_dims has to be a triplet with shape (F,H,W)" F,H,W = encoder_dims ind_dim = H*W*F self.action_branch = nn.Sequential( nn.Linear(ind_dim,12), nn.ReLU(), # nn.Dropout(), nn.Linear(12,action_num)) self.explanation_branch = nn.Sequential( nn.Linear(ind_dim,12), nn.ReLU(), # nn.Dropout(), nn.Linear(12, explanation_num)) self.action_loss_fn, self.reason_loss_fn = self.loss_fn(device) def loss_fn(self,device): class_weights = [1, 1, 2, 2] w = torch.FloatTensor(class_weights).to(device) action_loss = nn.BCEWithLogitsLoss(pos_weight=w).to(device) explanation_loss = nn.BCEWithLogitsLoss().to(device) return action_loss,explanation_loss def forward(self,images,targets=None): images = torch.stack(images) if self.training: assert targets is not None target_reasons = torch.stack([t['reason'] for t in targets]) target_actions = torch.stack([t['action'] for t in targets]) # print(images.shape) features = self.encoder(images) # # print(features.shape) B,F,H,W = features.shape # print(features.view(B,F,H*W).transpose(1,2).shape) # print(transformed_feature.shape) feature_polled = torch.flatten(features,start_dim=1) # print(feature_polled.shape) actions = self.action_branch(feature_polled) reasons = self.explanation_branch(feature_polled) if self.training: action_loss = self.action_loss_fn(actions, target_actions) reason_loss = self.reason_loss_fn(reasons, target_reasons) loss_dic = {"action_loss":action_loss, "reason_loss":reason_loss} return loss_dic else: return {"action":torch.sigmoid(actions),"reasons":torch.sigmoid(reasons)} encoder = get_encoder('resnet') dg = DecisionGenerator_no_attention(encoder,encoder_dims=(2048,6,10), device='cpu' ) # params = sum([np.prod(p.size()) for p in model_parameters]) # print("len of params: ",params) def count_parameters(model): return sum(p.numel() for p in model.parameters() if p.requires_grad) count_parameters(dg) class MHSA2(nn.Module): def __init__(self, emb_dim, kqv_dim, output_dim=10, num_heads=8): super(MHSA2, self).__init__() self.emb_dim = emb_dim self.kqv_dim = kqv_dim self.num_heads = num_heads self.w_k = nn.Linear(emb_dim, kqv_dim * num_heads, bias=False) self.w_q = nn.Linear(emb_dim, kqv_dim * num_heads, bias=False) self.w_v = nn.Linear(emb_dim, kqv_dim * num_heads, bias=False) self.w_out = nn.Linear(kqv_dim * num_heads, output_dim) def forward(self, x): b, t, _ = x.shape e = self.kqv_dim h = self.num_heads keys = self.w_k(x).view(b, t, h, e) values = self.w_v(x).view(b, t, h, e) queries = self.w_q(x).view(b, t, h, e) keys = keys.transpose(2, 1) queries = queries.transpose(2, 1) values = values.transpose(2, 1) dot = queries @ keys.transpose(3, 2) dot = dot / np.sqrt(e) dot = nn.functional.softmax(dot, dim=3) out = dot @ values out = 
out.transpose(1,2).contiguous().view(b, t, h * e) out = self.w_out(out) return out class DecisionGenerator_whole_attention(nn.Module): def __init__(self, encoder, encoder_dims, device, num_heads=8, \ attention_out_dim=10, action_num=4, explanation_num=21): super().__init__() """ encoder_dims = (F,H,W) F:Feature shape (1280 for mobile net, 2048 for resnet) H,W = image feature height, width """ self.encoder = encoder assert len(encoder_dims) == 3, "encoder_dims has to be a triplet with shape (F,H,W)" F,H,W = encoder_dims self.MHSA = MHSA2(emb_dim=F,kqv_dim=10,output_dim=attention_out_dim,num_heads=num_heads) T = H*W self.action_branch = nn.Sequential( nn.Linear(attention_out_dim*T,64), nn.ReLU(), # nn.Dropout(), nn.Linear(64,action_num)) self.explanation_branch = nn.Sequential( nn.Linear(attention_out_dim*T,64), nn.ReLU(), # nn.Dropout(), nn.Linear(64, explanation_num)) self.action_loss_fn, self.reason_loss_fn = self.loss_fn(device) def loss_fn(self,device): class_weights = [1, 1, 2, 2] w = torch.FloatTensor(class_weights).to(device) action_loss = nn.BCEWithLogitsLoss(pos_weight=w).to(device) explanation_loss = nn.BCEWithLogitsLoss().to(device) return action_loss,explanation_loss def forward(self,images,targets=None): images = torch.stack(images) if self.training: assert targets is not None target_reasons = torch.stack([t['reason'] for t in targets]) target_actions = torch.stack([t['action'] for t in targets]) # print(images.shape) features = self.encoder(images) # # print(features.shape) B,F,H,W = features.shape # print(features.view(B,F,H*W).transpose(1,2).shape) transformed_feature = self.MHSA(features.view(B,F,H*W).transpose(1,2)) #(B, H, T, 10) # print(transformed_feature.shape) feature_polled = torch.flatten(transformed_feature,start_dim=1) # print(feature_polled.shape) actions = self.action_branch(feature_polled) reasons = self.explanation_branch(feature_polled) if self.training: action_loss = self.action_loss_fn(actions, target_actions) reason_loss = self.reason_loss_fn(reasons, target_reasons) loss_dic = {"action_loss":action_loss, "reason_loss":reason_loss} return loss_dic else: return {"action":torch.sigmoid(actions),"reasons":torch.sigmoid(reasons)} dga = DecisionGenerator_whole_attention(encoder, encoder_dims=(2048,6,10), device='cpu') count_parameters(dga) 24078915 classes = { "bus": 0, "traffic light": 1, "traffic sign": 2, "person": 3, "bike": 4, "truck": 5, "motor": 6, "car": 7, "train": 8, "rider": 9, } class_2_name = dict([(value, key) for key, value in classes.items()]) num_classes = len(classes) ``` ## 1. Load model ``` def get_model(num_classes): model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True) in_features = model.roi_heads.box_predictor.cls_score.in_features #model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes) # replace the pre-trained head with a new one model.roi_heads.box_predictor = torchvision.models.detection.faster_rcnn.FastRCNNPredictor(in_features,num_classes) return model.cpu() model = get_model(num_classes) checkpoint = torch.load('saved_models/bdd100k_24.pth') model.load_state_dict(checkpoint['model']) #optimizer.load_state_dict(checkpoint['optimizer_state_dict']) #epoch = checkpoint['epoch'] model.eval() ``` ## 2. 
Show sample plot ``` def get_preds(idx,img_datalist,threshold): im0 = Image.open(img_datalist[idx]) im0_tensor = torchvision.transforms.ToTensor()(im0) pred = model([im0_tensor]) total_preds = [] for n,confidence in enumerate(pred[0]['scores']): if confidence>threshold: pred_update = {} pred_update['boxes'] = pred[0]['boxes'][n] pred_update['labels'] = pred[0]['labels'][n] pred_update['scores'] = pred[0]['scores'][n] total_preds.append(pred_update) return im0,total_preds def plot_from_image_preds(img,total_preds): fig,ax = plt.subplots(1,figsize=(20,10)) for i in range(len(total_preds)): xy = total_preds[i]['boxes'][0],total_preds[i]['boxes'][1] width = total_preds[i]['boxes'][2]-total_preds[i]['boxes'][0] height = total_preds[i]['boxes'][3]-total_preds[i]['boxes'][1] rect = patches.Rectangle(xy,width,height,linewidth=1,edgecolor='r',facecolor='none') ax.text(xy[0],xy[1],class_2_name[total_preds[i]['labels'].item()]) ax.add_patch(rect) ax.imshow(img) with open("datalists/bdd100k_val_images_path.txt", "rb") as fp: val_img_paths = pickle.load(fp) im, total_preds = get_preds(751,val_img_paths,0.6) plot_from_image_preds(im,total_preds) ``` ## 3. Test ``` im0 = Image.open(val_img_paths[100]) im0_tensor = torchvision.transforms.ToTensor()(im0) model.backbone.out_channels images, targets = model.transform([im0_tensor,im0_tensor]) images.tensors.shape features = model.backbone(images.tensors) proposals, _ = model.rpn(images, features, targets) box_features1 = model.roi_heads.box_roi_pool(features,proposals,images.image_sizes) box_features2 = model.roi_heads.box_head(box_features1) class_logits, box_regression = model.roi_heads.box_predictor(box_features2) ``` ## Test Multihead attention in pytorch ``` box_features2 = box_features2.view(2,1000,1024) box_features2.shape class MHSA(nn.Module): def __init__(self, emb_dim, kqv_dim, num_heads=1): super(MHSA, self).__init__() self.emb_dim = emb_dim self.kqv_dim = kqv_dim self.num_heads = num_heads self.w_k = nn.Linear(emb_dim, kqv_dim * num_heads, bias=False) self.w_q = nn.Linear(emb_dim, kqv_dim * num_heads, bias=False) self.w_v = nn.Linear(emb_dim, kqv_dim * num_heads, bias=False) self.w_out = nn.Linear(kqv_dim * num_heads, emb_dim) def forward(self, x): b, t, _ = x.shape e = self.kqv_dim h = self.num_heads keys = self.w_k(x).view(b, t, h, e) values = self.w_v(x).view(b, t, h, e) queries = self.w_q(x).view(b, t, h, e) keys = keys.transpose(2, 1) print("keys",keys.shape) queries = queries.transpose(2, 1) # b, h, t, e print("queries",queries.shape) values = values.transpose(2, 1) # b, h, t, e print("values",values.shape) dot = queries @ keys.transpose(3, 2) dot = dot / np.sqrt(e) print("dot",dot.shape) weights = nn.functional.softmax(dot, dim=3) print(values.shape) out = weights @ values print(out.shape) out = out.transpose(1,2).contiguous().view(b, t, h * e) out = self.w_out(out) return out, weights attention = MHSA(1024,10,num_heads=8) val, score = attention(box_features2) score.shape attention.parameters model_parameters = filter(lambda p: p.requires_grad, attention.parameters()) params = sum([np.prod(p.size()) for p in model_parameters]) 1024*4*80+1024 nn.Linear() nn.MultiheadAttention() ``` ## Test hard attention ``` box_features2.shape box_features2 = box_features2.view(2,1000,1024) box_features2.shape attention = nn.Sequential(nn.Linear(1024,1),nn.Softmax(dim=1)) attention(box_features2) score = attention(box_features2) score.shape box_features2.shape _,ind = torch.topk(score,k=10,dim=1) torch.index_select(box_features2,) ind 
torch.gather(box_features2,1,ind.expand(ind.size(0),ind.size(1),box_features2.size(2))) box_features2[1,399,:] box_features2[ind,:] (box_features2*attention(box_features2)).shape ind.squeeze(-1).shape proposals[0].shape boxes, scores, labels = model.roi_heads.postprocess_detections(class_logits, box_regression, proposals, images.image_sizes) len(boxes) box_features2_reshaped = box_features2.view(2,1000,1024) box_features2_reshaped.shape,box_features1.shape detections, detector_losses = model.roi_heads(features, proposals, images.image_sizes) detections[0]['boxes'].shape class MHSA(nn.Module): def __init__(self, emb_dim, kqv_dim, num_heads=1): super(MHSA, self).__init__() self.emb_dim = emb_dim self.kqv_dim = kqv_dim self.num_heads = num_heads self.w_k = nn.Linear(emb_dim, kqv_dim * num_heads, bias=False) self.w_q = nn.Linear(emb_dim, kqv_dim * num_heads, bias=False) self.w_v = nn.Linear(emb_dim, kqv_dim * num_heads, bias=False) self.w_out = nn.Linear(kqv_dim * num_heads, emb_dim) def forward(self, x): b, t, _ = x.shape e = self.kqv_dim h = self.num_heads keys = self.w_k(x).view(b, t, h, e) values = self.w_v(x).view(b, t, h, e) queries = self.w_q(x).view(b, t, h, e) keys = keys.transpose(2, 1) queries = queries.transpose(2, 1) values = values.transpose(2, 1) dot = queries @ keys.transpose(3, 2) dot = dot / np.sqrt(e) dot = nn.functional.softmax(dot, dim=3) out = dot @ values out = out.transpose(1,2).contiguous().view(b, t, h * e) out = self.w_out(out) return out attention = MHSA(1024,10,num_heads=8) attention_result = attention(box_features2_reshaped) attention_result.shape torch.max(attention_result,1)[0] class DecisionGenerator(nn.Module): def __init__(self,faster_rcnn_model,batch_size=2,action_num=4,explanation_num=21,freeze_rcnn=True): super().__init__() self.rcnn = faster_rcnn_model self.batch_size = batch_size if freeze_rcnn: self.rcnn.params.requires_grad = False self.object_attention = MHSA(1024, kqv_dim=10, num_heads=8) self.action_branch = nn.Linear(1024,action_num) self.explanation_branch = nn.Linear(1024, explanation_num) def forward(images): images,_ = rcnn.transform(images) features = rcnn.backbone(images.tensors) proposals, _ = rcnn.rpn(images, features) box_features = rcnn.roi_heads.box_roi_pool(features,proposals,images.image_sizes) box_features = rcnn.roi_heads.box_head(box_features).view(self.batch_size, -1, 1024) #(B, num_proposal, 1024) box_features = self.object_attention(box_features) #(B, num_proposal, 1024) feature_polled,_ = torch.max(box_features,1) actions = self.action_branch(feature_polled) explanations = self.explanation_branch(feature_polled) return actions,explanations class Self_Attn(nn.Module): """ Self attention Layer""" def __init__(self,in_dim,activation): super(Self_Attn,self).__init__() self.chanel_in = in_dim self.activation = activation self.query_conv = nn.Conv2d(in_channels = in_dim , out_channels = in_dim//8 , kernel_size= 1) self.key_conv = nn.Conv2d(in_channels = in_dim , out_channels = in_dim//8 , kernel_size= 1) self.value_conv = nn.Conv2d(in_channels = in_dim , out_channels = in_dim , kernel_size= 1) self.gamma = nn.Parameter(torch.zeros(1)) self.softmax = nn.Softmax(dim=-1) # def forward(self,x): """ inputs : x : input feature maps( B X C X W X H) returns : out : self attention value + input feature attention: B X N X N (N is Width*Height) """ m_batchsize,C,width ,height = x.size() proj_query = self.query_conv(x).view(m_batchsize,-1,width*height).permute(0,2,1) # B X CX(N) proj_key = self.key_conv(x).view(m_batchsize,-1,width*height) # B X 
C x (*W*H) energy = torch.bmm(proj_query,proj_key) # transpose check attention = self.softmax(energy) # BX (N) X (N) proj_value = self.value_conv(x).view(m_batchsize,-1,width*height) # B X C X N out = torch.bmm(proj_value,attention.permute(0,2,1) ) out = out.view(m_batchsize,C,width,height) out = self.gamma*out + x return out,attention ```
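As a quick sanity check of the `Self_Attn` layer defined above, here is a small hedged usage sketch; the dummy tensor sizes mirror the (2048, 6, 10) ResNet feature maps used earlier and are purely illustrative.

```python
import torch

# Fake batch of 2 ResNet-style feature maps: (B, C, W, H) = (2, 2048, 6, 10)
dummy = torch.randn(2, 2048, 6, 10)

self_attn = Self_Attn(in_dim=2048, activation='relu')
out, attn = self_attn(dummy)

print(out.shape)   # torch.Size([2, 2048, 6, 10]) - residual output, same shape as input
print(attn.shape)  # torch.Size([2, 60, 60])      - N x N attention map with N = 6*10
```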
# Write custom inference script and requirements to local folder ``` ! mkdir inference_code %%writefile inference_code/inference.py # This is the script that will be used in the inference container import os import json import torch from transformers import AutoModelForSeq2SeqLM, AutoTokenizer def model_fn(model_dir): """ Load the model and tokenizer for inference """ device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") tokenizer = AutoTokenizer.from_pretrained(model_dir) model = AutoModelForSeq2SeqLM.from_pretrained(model_dir).to(device) model_dict = {'model':model, 'tokenizer':tokenizer} return model_dict def predict_fn(input_data, model): """ Make a prediction with the model """ text = input_data.pop('inputs') parameters = input_data.pop('parameters', None) tokenizer = model['tokenizer'] model = model['model'] # Parameters may or may not be passed input_ids = tokenizer(text, truncation=True, padding='longest', return_tensors="pt").input_ids output = model.generate(input_ids, **parameters) if parameters is not None else model.generate(input_ids) return tokenizer.batch_decode(output, skip_special_tokens=True)[0] def input_fn(request_body, request_content_type): """ Transform the input request to a dictionary """ request = json.loads(request_body) return request def output_fn(prediction, response_content_type): """ Return model's prediction """ return {'generated_text':prediction} %%writefile inference_code/requirements.txt transformers sentencepiece protobuf ``` # Deploy an endpoint with PyTorchModel Once you .deploy(), this will upload your model package to S3, create a model in SageMaker, create an endpoint configuration, and deploy an endpoint from that configuration. ``` ! pip install -U sagemaker import sagemaker session = sagemaker.Session() session_bucket = session.default_bucket() role = sagemaker.get_execution_role() pytorch_version = '1.7.1' python_version = 'py36' from sagemaker.huggingface import HuggingFaceModel model_name = 'summarization-model' endpoint_name = 'summarization-endpoint' model_for_deployment = HuggingFaceModel(entry_point='inference.py', source_dir='inference_code', model_data=huggingface_estimator.model_data, # model_data=f'{session_bucket}/{<insert_model_location_key>}/model.tar.gz', in case you don't run this notebook using the initialized huggingface_estimator from 2_finetune.ipynb role=role, pytorch_version=pytorch_version, py_version=python_version, transformers_version='4.6.1', name=model_name) from sagemaker.serializers import JSONSerializer from sagemaker.deserializers import BytesDeserializer # Deploy the model predictor = model_for_deployment.deploy(initial_instance_count=1, instance_type='ml.m5.xlarge', endpoint_name=endpoint_name ) text = ('PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions.' ' The aim is to reduce the risk of wildfires.' 'Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.' 
) summary_short = predictor.predict({ 'inputs':text, 'parameters':{ 'length_penalty':0.6 } }) print(summary_short) summary_long = predictor.predict({ 'inputs':text, 'parameters':{ 'length_penalty':1.5 } }) print(summary_long) ``` # (Optional) If you haven't fine-tuned a model, but want to deploy directly from HuggingFace Hub to experiment ``` # We will pass these as env variables, defining the model and task we want hub = { 'HF_MODEL_ID':'google/pegasus-xsum', 'HF_TASK':'summarization' } hub_model = HuggingFaceModel(env=hub, role=role, pytorch_version='1.7', py_version='py36', transformers_version='4.6', name='hub-model') hub_predictor = hub_model.deploy(initial_instance_count=1, instance_type='ml.m5.xlarge', endpoint_name='hub-endpoint') # You can also pass in a 'parameters' key with valid parameters, just like we did before summary = hub_predictor.predict({'inputs':text}) print(summary) ``` # Clean up Use this code to delete the resources created in SageMaker Inference (endpoint configuration, endpoint and model). ``` predictor.delete_endpoint() predictor.delete_model() ```
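One small addition to the clean-up step: if you also ran the optional Hub deployment above, it created its own endpoint, endpoint configuration, and model, so those should be removed too. Assuming `hub_predictor` is still in scope, the same calls apply:

```python
# Remove the resources created by the optional Hub deployment as well
hub_predictor.delete_endpoint()
hub_predictor.delete_model()
```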
**[Pandas Home Page](https://www.kaggle.com/learn/pandas)** --- # Introduction In these exercises we'll apply groupwise analysis to our dataset. Run the code cell below to load the data before running the exercises. ``` import pandas as pd reviews = pd.read_csv("../input/wine-reviews/winemag-data-130k-v2.csv", index_col=0) #pd.set_option("display.max_rows", 5) from learntools.core import binder; binder.bind(globals()) from learntools.pandas.grouping_and_sorting import * print("Setup complete.") ``` # Exercises ## 1. Who are the most common wine reviewers in the dataset? Create a `Series` whose index is the `taster_twitter_handle` category from the dataset, and whose values count how many reviews each person wrote. ``` # Your code here reviews_written = reviews.groupby('taster_twitter_handle').size() # Check your answer q1.check() #q1.hint() #q1.solution() ``` ## 2. What is the best wine I can buy for a given amount of money? Create a `Series` whose index is wine prices and whose values is the maximum number of points a wine costing that much was given in a review. Sort the values by price, ascending (so that `4.0` dollars is at the top and `3300.0` dollars is at the bottom). ``` best_rating_per_price = reviews.groupby('price')['points'].max().sort_index() # Check your answer q2.check() #q2.hint() #q2.solution() ``` ## 3. What are the minimum and maximum prices for each `variety` of wine? Create a `DataFrame` whose index is the `variety` category from the dataset and whose values are the `min` and `max` values thereof. ``` price_extremes = reviews.groupby('variety').price.agg([min, max]) # Check your answer q3.check() #q3.hint() #q3.solution() ``` ## 4. What are the most expensive wine varieties? Create a variable `sorted_varieties` containing a copy of the dataframe from the previous question where varieties are sorted in descending order based on minimum price, then on maximum price (to break ties). ``` sorted_varieties = reviews.groupby('variety').price.agg([min, max]).sort_values(by=['min', 'max'], ascending=False) # Check your answer q4.check() #q4.hint() #q4.solution() ``` ## 5. Create a `Series` whose index is reviewers and whose values is the average review score given out by that reviewer. Hint: you will need the `taster_name` and `points` columns. ``` reviewer_mean_ratings = reviews.groupby('taster_name').points.mean() # Check your answer q5.check() #q5.hint() #q5.solution() ``` Are there significant differences in the average scores assigned by the various reviewers? Run the cell below to use the `describe()` method to see a summary of the range of values. ``` reviewer_mean_ratings.describe() ``` ## 6. What combination of countries and varieties are most common? Create a `Series` whose index is a `MultiIndex`of `{country, variety}` pairs. For example, a pinot noir produced in the US should map to `{"US", "Pinot Noir"}`. Sort the values in the `Series` in descending order based on wine count. ``` country_variety_counts = reviews.groupby(['country', 'variety']).size().sort_values(ascending=False) # Check your answer q6.check() #q6.hint() #q6.solution() ``` # Keep going Move on to the [**data types and missing data**](https://www.kaggle.com/residentmario/data-types-and-missing-values). --- **[Pandas Home Page](https://www.kaggle.com/learn/pandas)** *Have questions or comments? Visit the [Learn Discussion forum](https://www.kaggle.com/learn-forum) to chat with other Learners.*
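Before moving on, one small aside on exercise 1 above: `groupby(...).size()` and `value_counts()` produce the same counts (up to sort order), which makes for a handy cross-check. A minimal sketch:

```python
# Two equivalent ways to count reviews per Twitter handle;
# sort both by index so the comparison does not depend on ordering.
by_group = reviews.groupby('taster_twitter_handle').size().sort_index()
by_value_counts = reviews['taster_twitter_handle'].value_counts().sort_index()
print(by_group.equals(by_value_counts))  # expected: True
```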
# An Introduction to $\LaTeX$ (LaTeX) Latex is a typesetting language used for formatting equations (and much more) in the scientific communities. LaTeX is used very commonly in higher mathematical and computer science classes and academia. In Stat 140, we'll be using LaTeX to "pretty print" equations and answers for homeworks and labs ## How To Follow This Introduction? If you're viewing through Jupyter, then you're all set! Otherwise, go ahead and download this notebook [here](#). This way, you'll be able to follow and modify the examples that we give, as well as test your own. To see the latex behind the math expressions, go ahead and edit that particular cell ## How Do We Use LaTeX? For the purposes of this introduction, and for Prob 140, we shall use the "math mode" in Latex through markdown cells in Jupyter Notebook. Remember that in order to switch a cell to "markdown mode", simply change the option from "code" $\to$ "Markdown" on the above toolbar. There are two main ways to enter math mode $ You're in Inline Math Mode $ $$ You're in Centered Math Mode $$ ### Inline Math Mode We use inline math when we want to put in equations and/or variables in sentences / the natural flow of text. Here's an example of inline math: Say you have $x$ cows and $y$ chickens on a farm, and you've counted $100$ legs amongst all the animals. What are $x$ and $y$? This is what that'll look like: Say you have $x$ cows and $y$ chickens on a farm, and you've counted $100$ legs amongst all the animals. What are $x$ and $y$? *Note*: Notice that text while inside math mode are somewhat italicized; don't use math mode to write all your text, leave it just for variables ### Centered Math Mode We use centered math mode, when there are particular equations that you'd like to highlight, or have stand out. Here's an example. $$ 1 + 2 + \dots + n = 0.5n(n+1)$$ This is what that'll look like: $$ 1 + 2 + \ldots + n = 0.5n(n+1)$$ *Note*: Don't worry about `ldots` yet ## Superscripts and Subscripts ### Superscripts In order to write superscripts, of the form $x^y$, simply write `$x^y$` `$ 2^3 = 8$`: $ 2^3 = 8$ `$ (x+y)^2 = x^2 + 2xy + y^2$`: $ (x+y)^2 = x^2 + 2xy + y^2$ ** Warning: ** Generally, the power can only be one character long; to write more, you must wrap it with braces { } . So for example: If you forget braces, it'll look like this: `$2^10 = 1024$`: $2^10 = 1024$ To fix, simply add braces `$2^{10} = 1024$`: $2^{10} = 1024$ ### Subscripts In order to write subscripts, of the form $a_1$, simply use the *underscore character* `_` and write `$a_1$` `$ 2^3 = 8$`: $ 2^3 = 8$ `$ (x+y)^2 = x^2 + 2xy + y^2$`: $ (x+y)^2 = x^2 + 2xy + y^2$ ** Warning: ** Just as with the superscript, the subscript can only be one character long; to write more, you must wrap it with braces { } ## Expressions LaTeX provides commands for writing symbols in your mathematical expressions. In the last example you saw the `\ldots` command; as you saw, this drew the dots like this: $\ldots$. Here are some examples of such commands 1. `\sum` : $\sum$ 2. `\prod` : $\prod$ 3. `\infty` : $\infty$ 4. `\log` : $\log$ 5. `\int` : $\int$ 6. `\alpha` : $\alpha$ 7. `\cup` : $\cup$ 8. `\cap` : $\cap$ 9. `\ldots` : $\ldots$ 10. `\pm` : $\pm$ 11. `n \choose k` : $n \choose k$ We can use the subscripting and superscripting from the last section with any of these commands as well; it all integrates well. Here's an example from the *Math Prerequisites* worksheet **1.** Consider the sequence defined by $c_i = i$ for $i = 1, 2, \ldots 10$. 
What is $\sum_{i=1}^{10} c_i$? Essentially, all the greek letters have such commands (simply go `\yourgreekletterhere`): These we use a lot; here are some of the common ones we use 1. `\alpha` : $\alpha$ 1. `\beta` : $\beta$ 1. `\lambda` : $\lambda$ 1. `\mu` : $\mu$ 1. `\sigma` : $\sigma$ 1. `\pi` : $\pi$ Some commands can take arguments (just like in Python). The way you pass in arguments into a command is by using the braces `{}` syntax \command{Arg 1}{Arg 2}... The most common such command you'll use is the `\frac` command, which creates fractions. `\frac` takes in two arguments, the first the numerator, and the second the denominator (`\frac{Numerator}{Denominator}`). Here are some examples 1. \frac{10}{4} = \frac{5}{2} $\frac{10}{4} = \frac{5}{2}$ 2. \frac{x^2}{x} = x $\frac{x^2}{x} = x$ Another common command you'll use is the `\sqrt` command, which takes in one argument- the operand. Here's an example 1. \sqrt{9} = 3 $\sqrt{9} = 3$ ### Example Combining Everything Together **Question:** Consider a polynomial $ax^2 + bx + c$. For what values of $x$ does this polynomial equal $0$? **Answer** `$$x = \frac{-b \pm \sqrt{b^2 - 4ac}}{2a}$$` $$x = \frac{-b \pm \sqrt{b^2 - 4ac}}{2a}$$ ### Bounds for summations, products, and integrals For summations, products, and integrals, we often want to define the bounds or limits. In LaTeX, the syntax for the bounds are the same symbols as subscript and superscript. \sum_{lower}^{upper} \prod_{lower}^{upper} \int_{lower}^{upper} Note that the bounds will appear next to the symbol in inline math mode and around the symbol in centered math mode \sum_{i=1}^\infty a_i Inline: $\sum_{i=1}^\infty a_i$ Centered: $$\sum_{i=1}^\infty a_i$$ \prod_{i=a}^{b} f(i) $$\prod_{i=1}^{\infty} f(i)$$ \int_{-\infty}^{100}xdx $$\int_{-\infty}^{100}xdx$$ \int_{-\infty}^{\infty}\int_{-\infty}^{\infty}(x+y)dxdy $$\int_{-\infty}^{\infty}\int_{-\infty}^{\infty}(x+y)dxdy$$ \lim_{x\to\infty} f(x) $$\lim_{x\to\infty} f(x)$$ ### Align When manipulating long equations, it is often useful to show the steps in between. The ``align*`` environment provides easy vertical alignment of equations.Let's see an example \begin{align*} 2x + 10 &= -4\\ 2x &= -14\\ x &= -7 \end{align*} \begin{align*} 2x + 10 &= -4\\ 2x &= -14\\ x &= -7 \end{align*} \begin{align*} f(x) &= \int_0^1 g(x)dx = \int_0^1 x^2dx\\ &= \left[x^3\right]_0^1\\ &= 1^3 - 0^3\\ &= \boxed{1} \end{align*} $$ \begin{align*} f(x) &= \int_0^1 g(x)dx = \int_0^1 x^2dx\\ &= \left[x^3\right]_0^1\\ &= 1^3 - 0^3\\ &= \boxed{1} \end{align*} $$ Note that the block starts with `\begin{align*}` and ends with `\end{align*}`. Each line ends with `\\` for new line. The character that we want to align is denoted with `&` \begin{align*} x&=y & w &=z & a&=b+c\\ 2x&=-y & 3w&=\frac{1}{2}z & a&=b\\ -4 + 5x&=2+y & w+2&=-1+w & ab&=cb \end{align*} $$\begin{align*} x&=y & w &=z & a&=b+c\\ 2x&=-y & 3w&=\frac{1}{2}z & a&=b\\ -4 + 5x&=2+y & w+2&=-1+w & ab&=cb \end{align*}$$ ### Example (3c) Let $\{c\}$ and $\{d\}$ be sequences of real numbers such that $$\sum_{i=1}^{100} c_i = 10$$ $$\sum_{i=1}^{100} d_i = 20$$ What is $\sum_{i=1}^{100} (4c_i - d_i + 5)$? 
### Solution \begin{align*} \sum_{i=1}^{100} (4c_i - d_i + 5) &= \sum_{i=1}^{100} 4c_i + \sum_{i=1}^{100} - d_i + \sum_{i=1}^{100} 5\\ &= 4 \sum_{i=1}^{100} c_i - \sum_{i=1}^{100} d_i + \sum_{i=1}^{100} 5\\ &= 4 (10) - (20) + 5(100)\\ &= 520 \end{align*} \begin{align*} \sum_{i=1}^{100} (4c_i - d_i + 5) &= \sum_{i=1}^{100} 4c_i + \sum_{i=1}^{100} - d_i + \sum_{i=1}^{100} 5\\ &= 4 \sum_{i=1}^{100} c_i - \sum_{i=1}^{100} d_i + \sum_{i=1}^{100} 5\\ &= 4 (10) - (20) + 5(100)\\ &= 520 \end{align*} ## Further Resources You've made it! These steps outlined above should cover all the LaTeX you need to write solutions. Sometime in your pursuit, you may need some commands or symbols that haven't been listed here. When so, the following resources are here to help - [DeTexify](http://detexify.kirelabs.org/classify.html): This site lets you draw a symbol, and will immediately find the corresponding Latex command. Similar apps for iPhone and Android also exist - [ShareLatex Tutorial](https://www.sharelatex.com/learn/Mathematical_expressions) ShareLaTeX is one of the main online work environments; they have a very simple yet comprehensive tutorial about LaTex. If you'd like to learn more about LaTeX, this is your gateway ## Exploration The best way to learn and absorb LaTeX is to look at examples and code it yourself. Once you have the above guiding principles down, the rest of LaTeX distills into simply finding the symbol that you were trying to find. In that spirit of exploration, we've given you the source for the *Mathematical Prerequisites* worksheet. Try to understand what each of the symbols are doing, and try writing your solutions and explanations in LaTeX below. Don't worry about the `underline` command in the later questions: we only use it to print the blank spaces #### Question 1: Consider the sequence defined by $c_i =i$, for $i=1, 2, \ldots , 10$. 1. Find $\sum_{i=1}^{10} c_i$. `\sum_{i=1}^{10} c_i` 2. If possible, find $\sum_{k=1}^{10} c_k$. If this is not possible,explain why not. `\sum_{k=1}^{10} c_k` #### Solution *Your Answer Here* #### Question 2 Does the expression $$ \sum_{n=1}^{10} 2 $$ `\sum_{n=1}^{10} 2` make sense? If it does, what is its value? #### Solution *Your Answer Here* #### Question 3 Let $\{c\}$ and $\{d\}$ be sequences of real numbers so that $$ \sum_{i=1}^{100} c_i ~=~ 10$$ $$\sum_{j=1}^{100} d_j ~=~ 20 $$ In parts 1-3 find the value of the expression. 1) $\sum_{i=1}^{100} (4c_i + 5)$ 2) $\sum_{i=1}^{100} 4c_i ~+~ 5$ 3) $\sum_{i=1}^{100} (4c_i - d_i + 5)$ `\sum_{i=1}^{100} (4c_i - d_i + 5)` 4) True or false: $$ \sum_{i=1}^{100} \sum_{j=1}^{100} (c_i + d_j) ~~=~~ \sum_{i=1}^{100} (c_i + d_i) $$ `\sum_{i=1}^{100} \sum_{j=1}^{100} (c_i + d_j) = \sum_{i=1}^{100} (c_i + d_i)` If you think the identity is true, find the common value of the two sides. If the identity is false, can you find the value of either of the sides? #### Solution *Your Answer Here* #### Question 4 Let $0 < p < 1$. Find simple expressions for 1. $ \sum_{i=0}^{100} p^i $ 2. $ \sum_{i=0}^{\infty} p^i $ 3. $ \sum_{i=100}^{\infty} p^i $ #### Solution *Your Answer Here* #### Question 5 The sum $ \sum_{n=0}^{\infty} \frac{1}{n!} $ can be expressed very simply. Find that simple expression and a numerical value. `\sum_{n=0}^{\infty} \frac{1}{n!}` #### Solution *Your Answer Here* #### Question 6 Repeat the previous exercise for each of the sums 1. $$ \sum_{i=0}^{\infty} \frac{2^i}{i!} $$ ` \sum_{i=0}^{\infty} \frac{2^i}{i!} ` 2. $$ \sum_{i=0}^{\infty} \frac{2^{3i}}{i!} $$. 
`\sum_{i=0}^{\infty} \frac{2^{3i}}{i!}` If you had trouble with the previous exercise, this one might help. #### Solution *Your Answer Here* #### Question 7 You know that $e^0 = 1$. What we're going to need, quite often, is an approximation to $e^x$ for a small non-zero number $x$. A crude approximation is 1 because $x$ is tiny. But you can get a finer approximation by writing the first two terms in the expansion for $e^x$ and remembering that Taylor says the rest is small compared to $x$. 1. Explain why $e^{0.01}$ is roughly $1.01$ and $e^{-0.01}$ is roughly $0.99$. 2. Use your reasoning in part (a) to explain why $\log (1+x)$ is roughly $x$ for small $x$. In this class, as in much of math, $\log$ is taken to the base $e$. #### Solution *Your Answer Here* #### Question 8 How many different ways are there to arrange six people in a row? #### Solution *Your Answer Here* #### Question 9 A committee consists of 6 women and 4 men. How many different choices can be made if you want to select - a Chairperson and an Assistant Chairperson? - a subcommittee of two people? - a committee of two men and two women? #### Solution *Your Answer Here* #### Question 10 Let $a$ and $b$ be any two real numbers. You know that $(a + b)^2 = a^2 + 2ab + b^2$. 1. Analogously, write the following as a sum of four terms: $(a + b)^3$ 2. Let $n$ be a non-negative integer. Fill in the blanks: $$ (a + b)^n ~=~ \sum_{k=\underline{~~}}^{\underline{~~}} \underline{~~~~~} a^k b^{n-k} $$ `(a + b)^n ~=~ \sum_{k=\underline{~~}}^{\underline{~~}} \underline{~~~~~} a^k b^{n-k}` #### Solution *Your Answer Here* #### Question 11 Calculate the following. 1. $\frac{d}{dx} \log (x^2)$ 2. $\frac{d}{dx} xe^{-cx}$ where $c > 0$ is a constant 3. $\int xe^{-cx} dx$ where $c > 0$ is a constant (use part (b) or methods of integration) 4. $\int_0^{\infty} ce^{-cx} dx$ where $c > 0$ is a constant #### Solution *Your Answer Here* #### Question 12 Let $c > 0$ be a constant. $\int_0^x ce^{-cx} dx$ doesn't make sense. Why not? `\int_0^x ce^{-cx} dx` #### Solution *Your Answer Here* #### Question 13 Calculate $ \int_0^1 \int_0^1 (x+xy+y) dx dy $. `\int_0^1 \int_0^1 (x+xy+y) dx dy ` #### Solution *Your Answer Here* #### Question 14 Fill in the blanks (it really helps to draw the region of integration): `\int_0^1 \int_y^1 (x+xy+y) dx dy ~=~ \int_0^1 \int_{\underline{~~}}^{\underline{~~}} (x+xy+y) dy dx` $$ \int_0^1 \int_y^1 (x+xy+y) dx dy ~=~ \int_0^1 \int_{\underline{~~}}^{\underline{~~}} (x+xy+y) dy dx $$ #### Solution *Your Answer Here*
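As one last `align*` illustration (a worked example of the syntax, which happens to evaluate the double integral from Question 13; treat it as a check on your own answer rather than an official solution):

$$
\begin{align*}
\int_0^1 \int_0^1 (x+xy+y) dx dy &= \int_0^1 \left[ \frac{x^2}{2} + \frac{x^2 y}{2} + xy \right]_0^1 dy\\
&= \int_0^1 \left( \frac{1}{2} + \frac{3y}{2} \right) dy\\
&= \left[ \frac{y}{2} + \frac{3y^2}{4} \right]_0^1\\
&= \boxed{\frac{5}{4}}
\end{align*}
$$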
## **Thermodynamics: an Engineering Approach, 7th Ed** Cengel & Boles # Chapter 1: Introduction and Basic Concepts ##Example 1-1, Page.8 ``` #Diketahui: El_USD = 0.09 # Harga Listrik adalah 0.09 $/kWh P_wt = 30 # Wind Turbine power rate, kW t_wt = 2200 # Durasi kerja Wind Turbine dalam satu tahun, hours #Dicari: pengiritan per tahun (?) #Jawab: E_tot = P_wt * t_wt print ('Total energi adalah %f kWh' %round(E_tot,0)) #Uang yang diirit: Saved_USD = E_tot * El_USD print ('Total pengiritan adalah %f USD' %round(Saved_USD,0)) ``` ## Example 1-2, Page No.9 ``` #Diketahui: p=850;# densitas [kg/m^3] V=2; # volume tangki [m^3] #Dicari: Massa (?) m=p*V;# rumus massa #Hasil: print ('Massa minyak di dalam tangki adalah %i kg' %round(m,0)) ``` ## Example 1-3, Page No.10 ``` #Diketahui m=1; # massa seberat 1 lbm #Konstanta yang dipakai g=32.174;# konstanta gravitasi, ft/s^2 #Dicari: Berat (w) lbf = 32.174 # Konversi 1 lbf = 32.174 lbm.ft/s^2 w=(m*g)*(1/lbf); # berat = massa * gravitasi # konversi lbf ke lbm diperlukan #Result print ('Berat benda tersebut di bumi adalah %i lbf' %w) ``` ## Example 1-4, Page No.21 ``` # Diketahui Tc=10; #deltaT karena proses pemberian kalor, C # Calculations Tk=Tc; Tr=1.8*Tk;#Konversi dari R ke K Tf=Tr; # Dihitung dengan rumus di atas #Hasil print ('perubahan suhu tersebut adalah %i K' %Tk) print ('perubahan suhu tersebut adalah %i R' %Tr) print ('perubahan suhu tersebut adalah %i F' %Tf) ``` ## Example 1-5, Page No.23 ``` #Diketahui Patm=14.5; #tekanan atmosfir, psi Pvac=5.8; #pembacaan vacuum gage, psi #Proses perhitungan Pabs=Patm-Pvac;#vacuum selalu dihitung sbg tekanan negatif #Hasil print('P_absolut dari ruangan tersebut adalah %f psi'%round(Pabs,1)) ``` ## Example 1-6, Page No.26 ``` #Diketahui: pw=1000; # Densitas air, kg/m^3; g=9.81; # Gravitasi, m/s^2; SG=0.85;# Specific Gravity/Dens. 
Relatif fluida di manometer meter = 100 # 1 m = 100 cm, cm h=55/meter;# tinggi dalam satuan, cm Patm=96;# Tekanan Atmosfir, kPa # Jawab # Menghitung P menggunakan likuid pada ketinggian yang sama p=SG*pw; Ptank_abs=Patm+(p*g*h/1000); #Results print ('absolute pressure in tank %f kPa' %round(Ptank_abs,1)) ``` ## Example 1-7, Page No.28 ``` #Constants used g=9.81;#acceleration due to gravity in m/s^2; #Given values h1=0.1;# distance b/w point 1 at air-water interface and point 2 at mercury-air interface in m h2=0.2;# distance b/w oil-water interface and mercury-oil interface in m h3=0.35;# distance b/w air-mercury interface and mercury-oil interface in m pw=1000;# density of water in kg/m^3 pHg=13600;# density of mercury in kg/m^3 poil=800;# density of oil in kg/m^3 Patm=85.6;# atmospheric pressure in kPa #Calculation P1=Patm-(pw*g*h1+poil*g*h2-pHg*g*h3)/1000;#calculating pressure using liquid at same height have same pressure #Results print ('the air pressure in tank %i kPa' %round(P1)) ``` ## Example 1-8, Page No.31 ``` #Constants used g=9.81;# acceleration due to gravity in m/s^2; #Given values pHg=13570;# density of mercury at 10 C in kg/m^3 h=0.74;# converting barometric reading into m from mm #Calculationa Patm=pHg*g*h/1000;# standard pressure formula #Results print ('the atmospheric pressure %f kPa' %round(Patm,1)) ``` ## Example 1-9, Page No.31 ``` #constants used g=9.81;#acceleration due to gravity in m/s^2; #given values m=60;# mass of piston in kg Patm=0.97;# atmospheric pressure in kPa A=0.04;# cross-sectional area in m^2 #calculation P=Patm+(m*g/A)/100000;# standard pressure formula print ('The pressure inside the cylinder %f bar' %round(P,2)) #The volume change will have no effect on the free-body diagram drawn in part (a), and therefore the pressure inside the cylinder will remain the same print('If some heat is transferred to the gas and its volume is doubled, there is no change in pressure'); ``` ## Example 1-10, Page No.32 ``` import math from scipy.integrate import quad from pylab import * #Constants used g=9.81;#acceleration due to gravity in m/s^2; #Given values p=1040;# density on the water surface in kg/m^3 h1=0.8;# thickness of surface zone H=4;# thickness of gradient zone x0=0.0;# lower limit of integration x1=4.0;# upper limit of integration #Calculations P1=p*g*h1/1000;#standard pressure determination formula #P2 = integration of the exp. p*g*(math.sqrt(1+(math.tan(math.pi*z/4/H)^2))) b/w 0-4 def intgrnd1(z): return (p*g*(math.sqrt(1+(math.tan(math.pi*(z)/4/H)**2))) )#integrant P2, err = quad(intgrnd1, x0, x1) P2=P2/1000;#converting into kPa P=P1+P2; #Results print ('the gage pressure at the bottom of gradient zone %f kPa' %round(P,0)) ```
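All of the manometer and gauge examples above boil down to the same hydrostatic relation, $P = P_{atm} + \rho g h$. As a hedged sketch (the helper name and its unit conventions are my own, not from the textbook), that pattern can be wrapped once and reused; the call below reproduces Example 1-6:

```python
def absolute_pressure(p_atm_kpa, density, height, g=9.81):
    """Absolute pressure in kPa at depth `height` (m) in a fluid column of the
    given density (kg/m^3), with atmospheric pressure p_atm_kpa (kPa) on top."""
    return p_atm_kpa + density * g * height / 1000.0  # divide by 1000: Pa -> kPa

# Example 1-6 again: SG = 0.85 fluid (850 kg/m^3), 55 cm column, 96 kPa atmosphere
print(round(absolute_pressure(96, 0.85 * 1000, 0.55), 1))  # ~100.6 kPa
```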
# Assignment 1 We are given a 2-dimensional grid with points $(i, j)$, $i, j = 0, \dots, N+1$. In this assignment we want to simulate a discrete diffusion process on the grid. We are starting with a distribution $u_0(i, j)$ of function values on the grid points. The distribution process follows the following recurrence relation: $$ u_{n+1}(i, j) = \frac{1}{4}\left[u_n(i+1, j) + u_n(i-1, j) + u_n(i, j+1) + u_n(i, j-1)\right],~i,j=1,\dots, N $$ In other words, we are simply taking the average of the neighbouring grid points. We still need to fix the boundary values. Here, we just use the condition that the boundary values should remain constant, that is $$ u_n(0, j) = u_0(0, j),\quad u_n(i, 0) = u_0(i, 0), \quad u_n(N + 1, j) = u_0(N + 1, j), \quad u_n(i, N + 1) = u_0(i, N + 1) $$ ## Part 1 (basic Python) We define the following skeleton of a Python function: ``` def diffusion_iteration(un): """ Perform one diffusion step for all given grid points. Parameters ---------- un : numpy.ndarray Numpy array of type `float64` and dimension (N + 2, N + 2) that stores the function values at step n. This function returns a Numpy array of dimension (N + 2, N + 2) of type `float64` that contains the function values after performing one step of the above diffusion iteration. """ pass ``` Implement the function `diffusion_iteration` using pure Python without Numba acceleration. Benchmark the runtime of this function for growing dimensions N and plot the runtime against N. What overall complexity of the runtime with respect to the parameter N do you expect? ## Part 2 (Numba acceleration and parallelisation) Now optimise the function `diffusion_iteration` using Numba. In the first step develop a serial Numba implementation that does not use parallelisation. Repeat the benchmarking from the first part and compare the Numba compiled function against the pure Python version. What speed-up do you achieve with Numba? Once you have done this parallelise the function using `numba.prange`. Explain your parallelisation strategy and benchmark the resulting function. The function should parallelise almost perfectly. The optimal speed-up is roughly given by the number of physical CPU cores that you have. What is the actual speed-up that you measure compared to the theoretical speed-up? ## Part 3 (Visualisation) Assume we have some kind of material distribution $u_0$. Furthermore, we assume that all boundary values are $0$. We now want to visualize the diffusion process by generating a nice animation. We assume the grid size parameter $N$ to be large enough such that by the discrete time $n$ when diffusion process arrives at the boundary, the function values $u_n$ are negligibly small. Think about a nice initial distribution $u_0$ of values. Create a nice animation of 5 to 10 seconds in length that plots the iterates $u_n$ one after another. In order to do this you can use the matplotlib function `imshow` to draw individual frames and the `FuncAnimation` class in Matplotlib to generate the animation. Some details about creating such a matplotlib animation is discussed in a [Stackoverflow thread](https://stackoverflow.com/questions/17212722/matplotlib-imshow-how-to-animate). ## Advanced Problem We could make the diffusion process more complicated by defining an index set $S = \{(i_0, j_0), (i_1, j_1), \dots \}$ of interior indices at which we are keeping the interior iteration values constant, that is we set $u_n(i, j) = u_0(i, j)$ for $(i, j)\in S$. 
Implement a parallel Numba accelerated diffusion step that implements this additional condition and again create a nice visualisation. You need to change the interface of your `diffusion_iteration` function to take an additional parameter `constant_indices` where you can pass the information about which indices should be kept constant. Explain the data structure you choose for this condition and how you implement it. ## Assessment of the coursework * The submission deadline for the coursework is **Monday 19 October, 10am**. * Up to 80% of the coursework can be achieved by a perfect solution for Parts 1 to 3. The Advanced Problem is worth an additional 20%. * The assignment does not require much code writing. But a strong emphasis is put on good explanations. Putting a few comment lines in your code is not sufficient as explanation. Use the Jupyter notebook capabilities and write good explanations about what you are doing and why you are doing it as markdown cells. * Your code must be executable without any errors from scratch in Jupyter by choosing "Restart kernel and run all cells." If this produces any errors, any code and explanations after the error occurs will be ignored for the marking. It is not the task of the markers to debug your code. * The code should not run for much longer than 2 minutes on a typical laptop/desktop. This is a soft limit. If your notebook runs too long we reserve the right to reject it. * In addition to core Python packages you are allowed to use Numpy, Numba, and matplotlib. No other packages are allowed and any such request will be rejected. * Any matplotlib output must appear inside your notebook. For more information on how to do this see here: https://medium.com/@1522933668924/using-matplotlib-in-jupyter-notebooks-comparing-methods-and-some-tips-python-c38e85b40ba1. A tutorial on embedding animations is shown here: http://louistiao.me/posts/notebooks/embedding-matplotlib-animations-in-jupyter-as-interactive-javascript-widgets/ * You must submit your solution as a single Jupyter Notebook file with ending `*.ipynb`. **Any other submission will lead to 0 marks automatically. Make sure you submit a correct Notebook file with the right ending.**
# Ejercicio: Spectral clustering para documentos El clustering espectral es una técnica de agrupamiento basada en la topología de gráficas. Es especialmente útil cuando los datos no son convexos o cuando se trabaja, directamente, con estructuras de grafos. ##Preparación d elos documentos Trabajaremos con documentos textuales. Estos se limpiarán y se convertirán en vectores. Posteriormente, podremos aplicar el método de spectral clustering. ``` #Se importan las librerías necesarias import numpy as np import matplotlib.pyplot as plt from sklearn.decomposition import PCA from gensim.models.doc2vec import Doc2Vec, TaggedDocument ``` La librería de Natural Language Toolkit (nltk) proporciona algunos corpus con los que se puede trabajar. Por ejemplo, el cropus Gutenberg (https://web.eecs.umich.edu/~lahiri/gutenberg_dataset.html) del que usaremos algunos datos. Asimismo, obtendremos de esta librería herramientas de preprocesamiento: stemmer y lista de stopwords. ``` import nltk #Descarga del corpus nltk.download('gutenberg') #Descarga de la lista de stopwords nltk.download('stopwords') from nltk.corpus import gutenberg from nltk.corpus import stopwords from nltk.stem import PorterStemmer ``` Definimos los nombres de los archivos (ids) y la lista de paro ``` #Obtiene ids de los archivos del corpus gutenberg doc_labels = gutenberg.fileids() #Lista de stopwords para inglés lista_paro = stopwords.words('english') ``` Definiremos una función que se encargará de preprocesar los textos. Se eliminan símbolos, se quitan elementos de la lista de stopwords y se pasa todo a minúsculas. ``` def preprocess(document): #Lista que guarda archivos limpios text = [] for word in document: #Minúsculas word = word.lower() #Elimina stopwords y símbolos if word not in lista_paro and word.isalpha() == True: #Se aplica stemming text.append(PorterStemmer().stem(word)) return text ``` Por cada documento, obtenemos la lista de sus palabras (stems) aplicando un preprocesado. Cada documento, entonces, es de la forma $d_i = \{w_1, w_2, ..., w_{N_i}\}$, donde $w_k$ son los stems del documento. ``` docs = [] for doc in doc_labels: #Lista de palabras del documentos arx = gutenberg.words(doc) #Aplica la función de preprocesado arx_prep = preprocess(arx) docs.append(arx_prep) #Imprime el nombre del documento, su longitud original y su longitud con preproceso print(doc,len(arx), len(arx_prep)) ``` Posteriormente, convertiremos cada documento en un vector en $\mathbb{R}^d$. Para esto, utilizaremos el algoritmo Doc2Vec. ``` #Dimensión de los vectores dim = 300 #tamaño de la ventana de contexto windows_siz = 15 #Indexa los documentos con valores enteros documents = [TaggedDocument(doc_i, [i]) for i, doc_i in enumerate(docs)] #Aplica el modelo de Doc2Vec model = Doc2Vec(documents, vector_size=dim, window=windows_siz, min_count=1) #Matriz de datos X = np.zeros((len(doc_labels),dim)) for j in range(0,len(doc_labels)): #Crea la matriz con los vectores de Doc2Vec X[j] = model.docvecs[j] print(X) ``` ###Visualización ``` #Función para plotear def plot_words(Z,ids,color='blue'): #Reduce a dos dimensiones con PCA Z = PCA(n_components=2).fit_transform(Z) r=0 #Plotea las dimensiones plt.scatter(Z[:,0],Z[:,1], marker='o', c=color) for label,x,y in zip(ids, Z[:,0], Z[:,1]): #Agrega las etiquetas plt.annotate(label, xy=(x,y), xytext=(-1,1), textcoords='offset points', ha='center', va='bottom') r+=1 plot_words(X, doc_labels) plt.show() ``` ##Aplicación de spectral clustering Ahora se debe aplicar el algoritmo de spectral clustering a estos datos. 
As we have seen, several criteria have to be taken into account: * the graph kernel function to be used; * the neighbour-selection method (fully connected, k-nn); * the number of dimensions we want to obtain; * the number of clusters for k-means. Experiment with these parameters to obtain a good grouping of the chosen documents, starting for example from the sketch below.
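A minimal hedged starting point using scikit-learn's `SpectralClustering`; every parameter value below is a placeholder to experiment with, not a recommended setting.

```python
from sklearn.cluster import SpectralClustering

sc = SpectralClustering(
    n_clusters=4,                  # number of groups for the final k-means step
    n_components=4,                # dimensionality of the spectral embedding
    affinity='nearest_neighbors',  # k-nn graph; use 'rbf' for a fully connected graph
    n_neighbors=10,                # neighbours used when affinity='nearest_neighbors'
    assign_labels='kmeans',
    random_state=0,
)
labels = sc.fit_predict(X)

# Reuse the plotting helper defined above to colour the documents by cluster
plot_words(X, doc_labels, color=labels)
plt.show()
```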
``` # Copyright 2020 IITK EE604A Image Processing. All Rights Reserved. # # Licensed under the MIT License. Use and/or modification of this code outside of EE604 must reference: # # © IITK EE604A Image Processing # https://github.com/ee604/ee604_assignments # # Author: Shashi Kant Gupta, Chiranjeev Prachand and Prof K. S. Venkatesh, Department of Electrical Engineering, IIT Kanpur ``` # Task 2: Image Enhancement II: Spatial Smoothing In this task, we will implement average, gaussian, and median spatial filter. ``` %%bash pip install git+https://github.com/ee604/ee604_plugins # Importing required libraries import cv2 import numpy as np import matplotlib.pyplot as plt from ee604_plugins import download_dataset, cv2_imshow download_dataset(assignment_no=2, task_no=2) # download data for this assignment def avgFilter(img, kernel_size=7): ''' Write a program to implement average filter. You have to assume square kernels. Inputs: + img - grayscaled image of size N x N - values between [0, 255] - 'uint8' + kernel_size - size of the kernel window which should be used for averaging. Ouputs: + out_img - smoothed grayscaled image of size N x N - values between [0, 255] - 'uint8' Allowed modules: + Basic numpy operations + cv2.filter2D() to perform 2D convolution Hint: + Not needed. ''' ############################# # Start your code from here # ############################# # Replace with your code... ############################# # End your code here ######## ############################# return out_img def gaussianFilter(img, kernel_size=7, sigma=3): ''' Write a program to implement gaussian filter. You have to assume square kernels. Inputs: + img - grayscaled image of size N x N - values between [0, 255] - 'uint8' + kernel_size - size of the kernel window which should be used for smoothing. + sigma - sigma parameter for gaussian kernel Ouputs: + out_img - smoothed grayscaled image of size N x N - values between [0, 255] - 'uint8' Allowed modules: + Basic numpy operations + cv2.filter2D() to perform 2D convolution + cv2.getGaussianKernel(). Note that this will give you 1D gaussian. Hint: + Not needed. ''' ############################# # Start your code from here # ############################# # Replace with your code... ############################# # End your code here ######## ############################# return out_img def medianFilter(img, kernel_size=7): ''' Write a program to implement median filter. You have to assume square kernels. Inputs: + img - grayscaled image of size N x N - values between [0, 255] - 'uint8' + kernel_size - size of the kernel window which should be used for smoothing. Ouputs: + out_img - smoothed grayscaled image of size N x N - values between [0, 255] - 'uint8' Allowed modules: + Basic numpy operations + np.median() Hint: + Not needed. ''' ############################# # Start your code from here # ############################# # Replace with your code... ############################# # End your code here ######## ############################# return out_img ``` ### Test --- Your observation should compare the different methods for different images. Must include a sentence on which method + kernel size worked best in each case. 
``` # Do not change codes inside this cell # Add your observations in next to next cell # Your observation should compare the different methods for different images lena_orig = cv2.imread('data/lena_gray.jpg', 0) lena_noisy_1 = cv2.imread('data/lena_noisy_1.jpg', 0) lena_noisy_2 = cv2.imread('data/lena_noisy_2.jpg', 0) lena_noisy_3 = cv2.imread('data/lena_noisy_3.jpg', 0) def plot_frame(gridx, gridy, subplot_id, img, name): plt.subplot(gridx, gridy, 1 + int(subplot_id)) plt.imshow(np.uint8(img), cmap="gray", vmin=0, vmax=255) plt.axis("off") plt.title(name) # Do not change codes inside this cell # Add your observations in next cell img_arr = [lena_noisy_1, lena_noisy_2, lena_noisy_3] img_caption = ["Noisy 1", "Noisy 2", "Noisy 3"] for i in range(3): for kernel_size in [5, 7, 9]: print("\n-------------------------------------") print("# Lena", img_caption[i], "| kernel:", kernel_size, "x", kernel_size) print("-------------------------------------") plt.figure(figsize=(20, 13)) plot_frame(1, 5, 0, lena_orig, "Original") plot_frame(1, 5, 1, img_arr[i], "Noisy") tmp_img = avgFilter(np.copy(img_arr[i]), kernel_size=kernel_size) plot_frame(1, 5, 2, tmp_img, "Avg.") tmp_img = gaussianFilter(np.copy(img_arr[i]), kernel_size=kernel_size, sigma=int(kernel_size/5)) plot_frame(1, 5, 3, tmp_img, "Gaussian.") tmp_img = medianFilter(np.copy(img_arr[i]), kernel_size=kernel_size) plot_frame(1, 5, 4, tmp_img, "Median.") plt.show() your_observation = """ Replace this with your observations. """ print(your_observation) # Submission >>>>>>>>>>>>>>>>>>>>> # Do not change codes inside this cell. gen_imgs = [] img_arr = [lena_noisy_1, lena_noisy_2, lena_noisy_3] for i in range(3): for kernel_size in [5, 7, 9]: tmp_img = avgFilter(np.copy(img_arr[i]), kernel_size=kernel_size) gen_imgs.append(tmp_img) tmp_img = gaussianFilter(np.copy(img_arr[i]), kernel_size=kernel_size, sigma=int(kernel_size/5)) gen_imgs.append(tmp_img) tmp_img = medianFilter(np.copy(img_arr[i]), kernel_size=kernel_size) gen_imgs.append(tmp_img) task2_submission = np.array(gen_imgs) ```
## Machine Learning Model Building Pipeline: Wrapping up for Deployment In the previous lectures, we worked through the typical Machine Learning pipeline to build a regression model that allows us to predict house prices. Briefly, we transformed variables in the dataset to make them suitable for use in a Regression model, then we selected the most predictive variables and finally we built our model. Now, we want to deploy our model. We want to create an API, that we can call with new data, with new characteristics about houses, to get an estimate of the SalePrice. In order to do so, we need to write code in a very specific way. We will show you how to write production code in the coming lectures. Here, we will summarise, the key pieces of code, that we need to take forward, for this particular project, to put our model in production. Let's go ahead and get started. ### Setting the seed It is important to note, that we are engineering variables and pre-processing data with the idea of deploying the model if we find business value in it. Therefore, from now on, for each step that includes some element of randomness, it is extremely important that we **set the seed**. This way, we can obtain reproducibility between our research and our development code. This is perhaps one of the most important lessons that you need to take away from this course: **Always set the seeds**. Let's go ahead and load the dataset. ``` # to handle datasets import pandas as pd import numpy as np # to divide train and test set from sklearn.model_selection import train_test_split # feature scaling from sklearn.preprocessing import MinMaxScaler # to build the models from sklearn.linear_model import Lasso # to evaluate the models from sklearn.metrics import mean_squared_error from math import sqrt # to persist the model and the scaler from sklearn.externals import joblib import os # to visualise al the columns in the dataframe pd.pandas.set_option('display.max_columns', None) ``` ## Load data We need the training data to train our model in the production environment. ``` data_folder = '/Users/michaelpadilla/projects/udemy_deployment/data/house-prices-advanced-regression-techniques/' # load dataset data = pd.read_csv(os.path.join(data_folder, 'houseprice.csv')) print(data.shape) data.head() ``` ## Separate dataset into train and test Before beginning to engineer our features, it is important to separate our data intro training and testing set. This is to avoid over-fitting. There is an element of randomness in dividing the dataset, so remember to set the seed. ``` # Let's separate into train and test set # Remember to seet the seed (random_state for this sklearn function) X_train, X_test, y_train, y_test = train_test_split(data, data.SalePrice, test_size=0.1, random_state=0) # we are setting the seed here X_train.shape, X_test.shape ``` ## Selected features Remember that we will deploy our model utilising only a subset of features, the most predictive ones. This is to make simpler models, so that we build simpler code for deployment. We will tell you more about this in coming lectures. 
``` # load selected features features = pd.read_csv(os.path.join(data_folder, 'selected_features.csv'), header=None) # Remember that I added the extra feature, to show you how to put # an additional feature engineering step into production features = [x for x in features[0]] + ['LotFrontage'] print('Number of features: ', len(features)) features ``` ### Missing values For categorical variables, we will fill missing information by adding an additional category: "missing" ``` # make a list of the categorical variables that contain missing values vars_with_na = [var for var in features if X_train[var].isnull().sum()>1 and X_train[var].dtypes=='O'] # print the variable name and the percentage of missing values for var in vars_with_na: print(var, np.round(X_train[var].isnull().mean()*100, 2), ' % missing values') vars_with_na ``` Note that we have much less categorical variables with missing values than in our original dataset. But we still use categorical variables with NA for the final model, so we need to include this piece of feature engineering logic in the deployment pipeline. ``` # I bring forward the functions used in the feature engineering notebook: # function to replace NA in categorical variables def fill_categorical_na(df, var_list): X = df.copy() X[var_list] = df[var_list].fillna('Missing') return X # replace missing values with new label: "Missing" X_train = fill_categorical_na(X_train, vars_with_na) X_test = fill_categorical_na(X_test, vars_with_na) # check that we have no missing information in the engineered variables X_train[vars_with_na].isnull().sum() ``` For numerical variables, we are going to add an additional variable capturing the missing information, and then replace the missing information in the original variable by the mode, or most frequent value: ``` # make a list of the numerical variables that contain missing values vars_with_na = [var for var in features if X_train[var].isnull().sum()>1 and X_train[var].dtypes!='O'] # print the variable name and the percentage of missing values for var in vars_with_na: print(var, np.round(X_train[var].isnull().mean()*100, 2), ' % missing values') ``` #### Important: persisting the mean value for NA imputation As you will see in future sections, one of the key pieces of deploying the model is "Model Validation". Model validation refers to corroborating that the deployed model and the model built during research, are identical. The entire pipeline needs to produce identical results. Therefore, in order to check at the end of the process that the feature engineering pipelines are identical, we will save -we will persist-, the mean value of the variable, so that we can use it at the end, to corroborate our models. 
``` X_train['LotFrontage'].mode()[0] # replace the missing values mean_var_dict = {} for var in vars_with_na: # calculate the mode mode_val = X_train[var].mode()[0] # we persist the mean in the dictionary mean_var_dict[var] = mode_val # train # note that the additional binary variable was not selected, so we don't need this step any more #X_train[var+'_na'] = np.where(X_train[var].isnull(), 1, 0) X_train[var].fillna(mode_val, inplace=True) # test # note that the additional binary variable was not selected, so we don't need this step any more #X_test[var+'_na'] = np.where(X_test[var].isnull(), 1, 0) X_test[var].fillna(mode_val, inplace=True) # we save the dictionary for later np.save(os.path.join(data_folder, 'mean_var_dict.npy'), mean_var_dict) # check that we have no more missing values in the engineered variables X_train[vars_with_na].isnull().sum() X_test[vars_with_na].isnull().sum() ``` ### Temporal variables One of our temporal variables was selected to be used in the final model: 'YearRemodAdd' So we need to deploy the bit of code that creates it. ``` # create the temporal var "elapsed years" def elapsed_years(df, var): # capture difference between year variable and year the house was sold df[var] = df['YrSold'] - df[var] return df X_train = elapsed_years(X_train, 'YearRemodAdd') X_test = elapsed_years(X_test, 'YearRemodAdd') for var in ['LotFrontage', '1stFlrSF', 'GrLivArea', 'SalePrice']: X_train[var] = np.log(X_train[var]) X_test[var]= np.log(X_test[var]) ``` ### Categorical variables We do have categorical variables in our final model. First, we will remove those categories within variables that are present in less than 1% of the observations: ``` # let's capture the categorical variables first cat_vars = [var for var in features if X_train[var].dtype == 'O'] cat_vars ``` #### Important: persisting the frequent labels As you will see in future sections, one of the key pieces of deploying the model is "Model Validation". Model validation refers to corroborating that the deployed model and the model built during research, are identical. The entire pipeline needs to produce identical results. Therefore, in order to check at the end of the process, that the feature engineering pipelines are identical, we will save -we will persist-, the list of frequent labels per variable, so that we can use it at the end, to corroborate our models. ``` tmp = X_train.groupby(cat_vars[0])['SalePrice'].count() / len(X_train) tmp > 0.01 def find_frequent_labels(df, var, rare_perc): # finds the labels that are shared by more than a certain % of the houses in the dataset df = df.copy() tmp = df.groupby(var)['SalePrice'].count() / len(df) return tmp[tmp>rare_perc].index frequent_labels_dict = {} for var in cat_vars: frequent_ls = find_frequent_labels(X_train, var, 0.01) # we save the list in a dictionary frequent_labels_dict[var] = frequent_ls X_train[var] = np.where(X_train[var].isin(frequent_ls), X_train[var], 'Rare') X_test[var] = np.where(X_test[var].isin(frequent_ls), X_test[var], 'Rare') # now we save the dictionary np.save(os.path.join(data_folder, 'FrequentLabels.npy'), frequent_labels_dict) frequent_labels_dict ``` Next, we need to transform the strings of these variables into numbers. 
We will do it so that we capture the monotonic relationship between the label and the target: ``` # this function will assign discrete values to the strings of the variables, # so that the smaller value corresponds to the smaller mean of target def replace_categories(train, test, var, target): train = train.copy() test = test.copy() ordered_labels = train.groupby([var])[target].mean().sort_values().index ordinal_label = {k:i for i, k in enumerate(ordered_labels, 0)} train[var] = train[var].map(ordinal_label) test[var] = test[var].map(ordinal_label) return ordinal_label, train, test X_train.groupby([cat_vars[0]])['SalePrice'].mean().sort_values().index ordinal_label_dict = {} for var in cat_vars: ordinal_label, X_train, X_test = replace_categories(X_train, X_test, var, 'SalePrice') ordinal_label_dict[var] = ordinal_label # now we save the dictionary np.save(os.path.join(data_folder, 'OrdinalLabels.npy'), ordinal_label_dict) ordinal_label_dict # check absence of na [var for var in features if X_train[var].isnull().sum()>0] # check absence of na [var for var in features if X_test[var].isnull().sum()>0] ``` ### Feature Scaling For use in linear models, features need to be either scaled or normalised. In the next section, I will scale features between the min and max values: ``` # capture the target y_train = X_train['SalePrice'] y_test = X_test['SalePrice'] # fit scaler scaler = MinMaxScaler() # create an instance scaler.fit(X_train[features]) # fit the scaler to the train set for later use # we persist the model for future use joblib.dump(scaler, os.path.join(data_folder, 'scaler.pkl')) # transform the train and test set, and add on the Id and SalePrice variables X_train = pd.DataFrame(scaler.transform(X_train[features]), columns=features) X_test = pd.DataFrame(scaler.transform(X_test[features]), columns=features) # train the model lin_model = Lasso(alpha=0.005, random_state=0) # remember to set the random_state / seed lin_model.fit(X_train, y_train) # we persist the model for future use joblib.dump(lin_model, os.path.join(data_folder, 'lasso_regression.pkl')) # evaluate the model: # remember that we log transformed the output (SalePrice) in our feature engineering notebook / lecture. # In order to get the true performance of the Lasso # we need to transform both the target and the predictions # back to the original house prices values. # We will evaluate performance using the mean squared error and the # root of the mean squared error pred = lin_model.predict(X_train) print('linear train mse: {}'.format(mean_squared_error(np.exp(y_train), np.exp(pred)))) print('linear train rmse: {}'.format(sqrt(mean_squared_error(np.exp(y_train), np.exp(pred))))) print() pred = lin_model.predict(X_test) print('linear test mse: {}'.format(mean_squared_error(np.exp(y_test), np.exp(pred)))) print('linear test rmse: {}'.format(sqrt(mean_squared_error(np.exp(y_test), np.exp(pred))))) print() print('Average house price: ', np.exp(y_train).median()) ``` That is all for this notebook. And that is all for this section too. **In the next section, we will show you how to productionise this code for model deployment**.
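Since the whole point of persisting the scaler, the Lasso model and the imputation/label dictionaries above is to reuse them at scoring time, here is a hedged sketch (not part of the original lectures) of how the saved artefacts could be loaded back. The paths simply mirror the `data_folder` used above, and `new_df` is a hypothetical dataframe of new houses that has already been through the same feature engineering steps.

```
import os
import numpy as np
from sklearn.externals import joblib  # in newer scikit-learn versions: import joblib

# load the persisted artefacts created in this notebook
scaler = joblib.load(os.path.join(data_folder, 'scaler.pkl'))
lin_model = joblib.load(os.path.join(data_folder, 'lasso_regression.pkl'))

# dictionaries saved with np.save come back as 0-d object arrays,
# hence the .item() call to recover the original dict
mean_var_dict = np.load(os.path.join(data_folder, 'mean_var_dict.npy'), allow_pickle=True).item()
frequent_labels_dict = np.load(os.path.join(data_folder, 'FrequentLabels.npy'), allow_pickle=True).item()
ordinal_label_dict = np.load(os.path.join(data_folder, 'OrdinalLabels.npy'), allow_pickle=True).item()

# after applying the same feature engineering to new data (new_df is hypothetical):
# X_new = scaler.transform(new_df[features])
# predicted_price = np.exp(lin_model.predict(X_new))  # undo the log transform
```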
github_jupyter
``` # super comms script import serial from time import sleep import math from tqdm import * import json def set_target(motor, location, ser, output=True): if ser.is_open: if motor =='A': ser.write(b'A') else: ser.write(b'B') target_bytes = location.to_bytes(4, byteorder='big') #print(target_bytes) ser.write(target_bytes) sleep(0.02) while(ser.in_waiting > 0): b = ser.read() if output: print(b.decode('ascii'), end='') else: raise Exception("Serial is not open!") def get_debug(ser): if ser.is_open: ser.write(b'D') sleep(0.02) while(ser.in_waiting > 0): b = ser.read() print(b.decode('ascii'), end='') print("---") else: raise Exception("Serial is not open!") def gogogo(ser, wait=False, output=True): if ser.is_open: ser.write(b'G') sleep(0.02) if output: print("--- Making a move ---") if wait: end_found = False while not end_found: sleep(0.002) while(ser.in_waiting > 0): b = ser.readline().decode('ascii') if output: print(b) if "move-end" in b: end_found = True else: while(ser.in_waiting > 0): b = ser.read() print(b.decode('ascii'), end='') else: raise Exception("Serial is not open!") def stop(ser): if ser.is_open: ser.write(b'S') sleep(0.1) while(ser.in_waiting > 0): b = ser.read() print(b.decode('ascii'), end='') print("---") else: raise Exception("Serial is not open!") def penup(ser): if ser.is_open: ser.write(b'C') sleep(0.1) while(ser.in_waiting > 0): b = ser.read() #print(b.decode('ascii'), end='') #print("---") else: raise Exception("Serial is not open!") def pendown(ser): if ser.is_open: ser.write(b'X') sleep(0.1) while(ser.in_waiting > 0): b = ser.read() #print(b.decode('ascii'), end='') #print("---") else: raise Exception("Serial is not open!") def reset(ser, output=True): if ser.is_open: ser.write(b'R') sleep(0.5) while(ser.in_waiting > 0): b = ser.read() if output: print(b.decode('ascii'), end='') else: raise Exception("Serial is not open!") ser = serial.Serial('/dev/cu.usbserial-141240', baudrate=115200) # open serial port print(ser.name) # check which port was really used get_debug(ser) # Start with thing at home position! #reset(ser) target_coord = (300,200) reset_point = (800,800) target_lengths = translate_xy_to_ab(target_coord) travel_lengths = (reset_point[0] - target_lengths[0], reset_point[1] - target_lengths[1]) a_step_mm = 10000/125 b_step_mm = 10000/125 travel_steps = (int(travel_lengths[0] * a_step_mm), int(travel_lengths[1] * b_step_mm)) set_target("A", travel_steps[0], ser, output=True) set_target("B", travel_steps[1], ser, output=True) gogogo(ser, wait=True) set_target("A", 0, ser, output=True) set_target("B", 0, ser, output=True) #set_target("A", 132, ser, output=True) #set_target("B", 9121, ser, output=True) gogogo(ser, wait=True) gogogo(ser, wait=True) abpath = [ (5000,5000), (10000,10000), (0, 10000), (1000, 0) ] counter = 0 for coord in abpath: counter += 1 print("Step %s of %s (%s)" % (counter, len(abpath), 100*counter/len(abpath))) set_target('A', coord[0], ser, output=False) set_target('B', coord[1], ser, output=False) gogogo(ser, wait=True, output=False) ser.close() reset(ser) def translate_xy_to_ab(coord): x = coord[0] y = coord[1] a_len = math.sqrt(x**2 + y**2) b_len = math.sqrt((MAX_WIDTH-x)**2 + y**2) return [a_len, b_len] def translate_ab_to_xy(lengths): a = lengths[0] b = lengths[1] # Cosine rule! #cos(left) = (a**2 + MAX_WIDTH**2 - b**2) / (2 * a * MAX_WIDTH) try: left_angle = math.acos((a**2 + MAX_WIDTH**2 - b**2) / (2 * a * MAX_WIDTH)) except Exception as e: # This specifically happens if the values just arn't a triangle! # i.e. 
consider maxwidth = 100, left length = 10, right = 10... one of # the wires must have broken! print("Not a triangle!") print((a**2 + MAX_WIDTH**2 - b**2) / (2 * a * MAX_WIDTH)) raise e #print(left_angle) # in radians, remember. # sin(left) = opp / hyp # cos(right) = adj / hyp # hyp is 'a' # Lack of precision here - chop to mm. Rounding 'down' y = int(math.sin(left_angle) * a) x = int(math.cos(left_angle) * a) return [x,y] # Math time MAX_WIDTH = 970 a_scale = 10000/130 b_scale = 10000/125 # 0,0 is furthest, then up is less (?) real_start_mm = (800,800) orig_length = (real_start_mm[0] * a_scale, real_start_mm[1] * b_scale) print(orig_length) xy_path = [ (500, 390), #(500,500), #(600,400), 'HOME' ] ab_path = [] for coord in xy_path: if coord=='HOME': movement = (0,0) else: short_ab_mm = translate_xy_to_ab(coord) #print(short_ab_mm) short_ab_steps = (short_ab_mm[0] * a_scale, short_ab_mm[1] * b_scale) #print(short_ab_steps) movement = (int(orig_length[0] - short_ab_steps[0]),int( orig_length[1] - short_ab_steps[1])) print("Going -> %s" % (movement,)) ab_path.append(movement) ser = serial.Serial('/dev/cu.usbserial-141210', baudrate=115200) # open serial port print(ser.name) reset(ser) get_debug(ser) counter = 0 for coord in ab_path: counter += 1 print("Step %s of %s (%s)" % (counter, len(ab_path), 100*counter/len(ab_path))) set_target('A', coord[0], ser, output=False) set_target('B', coord[1], ser, output=False) gogogo(ser, wait=True, output=False) ser.close() with open("spiro.json") as fp: paths = json.load(fp) MAX_WIDTH = 970 offset_x = 300 offset_y = 50 scale_x = 1.5 scale_y = 2 path_counter = 0 a_scale = 10000/130 b_scale = 10000/130 paths.append(('HOME',)) # 0,0 is furthest, then up is less (?) real_start_mm = (800,800) orig_length = (real_start_mm[0] * a_scale, real_start_mm[1] * b_scale) reset(ser) penup(ser) for xy_path in tqdm(paths): if len(xy_path) == 0: continue print("path %s (%s)" % (path_counter, 100*path_counter/len(paths))) path_counter += 1 ab_path = [] for coord in tqdm(xy_path): if coord=='HOME': movement = (0,0) else: coord = (offset_x + coord[0]*scale_x, offset_y + coord[1]*scale_y) short_ab_mm = translate_xy_to_ab(coord) #print(short_ab_mm) short_ab_steps = (short_ab_mm[0] * a_scale, short_ab_mm[1] * b_scale) #print(short_ab_steps) movement = (int(orig_length[0] - short_ab_steps[0]),int( orig_length[1] - short_ab_steps[1])) if movement[0] < 0 or movement[1] < 0: print("%s -> %s" % (coord, movement)) raise Exception("out of bounds") #print("Going -> %s" % (movement,)) ab_path.append(movement) #input("> PENUP !\r\n") penup(ser) set_target('A', ab_path[0][0], ser, output=False) set_target('B', ab_path[0][1], ser, output=False) gogogo(ser, wait=True, output=False) pendown(ser) counter = 0 for coord in tqdm(ab_path[1:]): counter += 1 #print("Step %s of %s (%s)" % (counter, len(ab_path), 100*counter/len(ab_path))) set_target('A', coord[0], ser, output=False) set_target('B', coord[1], ser, output=False) gogogo(ser, wait=True, output=False) penup(ser) #print(len(ab_path)) #print(int(offset_x + xy_path[0][0]*scale_x), int(offset_y + xy_path[0][1]*scale_y)) penup(ser) set_target('A',1000, ser, output=False) set_target('B',1000, ser, output=False) gogogo(ser) ser.close() reset(ser) def go_to_xy(target_coord,ser): target_lengths = translate_xy_to_ab(target_coord) travel_lengths = (reset_point[0] - target_lengths[0], reset_point[1] - target_lengths[1]) a_step_mm = 10000/125 b_step_mm = 10000/125 travel_steps = (int(travel_lengths[0] * a_step_mm), int(travel_lengths[1] * b_step_mm)) 
set_target("A", travel_steps[0], ser, output=True) set_target("B", travel_steps[1], ser, output=True) gogogo(ser, wait=True) reset(ser) path = [ (650, 400), (300, 400), (300, 150), (650, 150), (650, 400) ] for point in path: go_to_xy(point,ser) pendown(ser) ```
github_jupyter
Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License. ![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/classification-credit-card-fraud/auto-ml-classification-credit-card-fraud.png) # Automated Machine Learning _**Classification of credit card fraudulent transactions on remote compute **_ ## Contents 1. [Introduction](#Introduction) 1. [Setup](#Setup) 1. [Train](#Train) 1. [Results](#Results) 1. [Test](#Test) 1. [Acknowledgements](#Acknowledgements) ## Introduction In this example we use the associated credit card dataset to showcase how you can use AutoML for a simple classification problem. The goal is to predict if a credit card transaction is considered a fraudulent charge. This notebook is using remote compute to train the model. If you are using an Azure Machine Learning Compute Instance, you are all set. Otherwise, go through the [configuration](../../../configuration.ipynb) notebook first if you haven't already to establish your connection to the AzureML Workspace. In this notebook you will learn how to: 1. Create an experiment using an existing workspace. 2. Configure AutoML using `AutoMLConfig`. 3. Train the model using remote compute. 4. Explore the results. 5. Test the fitted model. ## Setup As part of the setup you have already created an Azure ML `Workspace` object. For Automated ML you will need to create an `Experiment` object, which is a named object in a `Workspace` used to run experiments. ``` import logging from matplotlib import pyplot as plt import pandas as pd import os import azureml.core from azureml.core.experiment import Experiment from azureml.core.workspace import Workspace from azureml.core.dataset import Dataset from azureml.train.automl import AutoMLConfig ``` This sample notebook may use features that are not available in previous versions of the Azure ML SDK. ``` print("This notebook was created using version 1.22.0 of the Azure ML SDK") print("You are currently using version", azureml.core.VERSION, "of the Azure ML SDK") ws = Workspace.from_config() # choose a name for experiment experiment_name = 'automl-classification-ccard-remote' experiment=Experiment(ws, experiment_name) output = {} output['Subscription ID'] = ws.subscription_id output['Workspace'] = ws.name output['Resource Group'] = ws.resource_group output['Location'] = ws.location output['Experiment Name'] = experiment.name pd.set_option('display.max_colwidth', -1) outputDf = pd.DataFrame(data = output, index = ['']) outputDf.T ``` ## Create or Attach existing AmlCompute A compute target is required to execute the Automated ML run. In this tutorial, you create AmlCompute as your training compute resource. #### Creation of AmlCompute takes approximately 5 minutes. If the AmlCompute with that name is already in your workspace this code will skip the creation process. As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota. 
``` from azureml.core.compute import ComputeTarget, AmlCompute from azureml.core.compute_target import ComputeTargetException # Choose a name for your CPU cluster cpu_cluster_name = "cpu-cluster-1" # Verify that cluster does not exist already try: compute_target = ComputeTarget(workspace=ws, name=cpu_cluster_name) print('Found existing cluster, use it.') except ComputeTargetException: compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_DS12_V2', max_nodes=6) compute_target = ComputeTarget.create(ws, cpu_cluster_name, compute_config) compute_target.wait_for_completion(show_output=True) ``` # Data ### Load Data Load the credit card dataset from a csv file containing both training features and labels. The features are inputs to the model, while the training labels represent the expected output of the model. Next, we'll split the data using random_split and extract the training data for the model. ``` data = "https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/creditcard.csv" dataset = Dataset.Tabular.from_delimited_files(data) training_data, validation_data = dataset.random_split(percentage=0.8, seed=223) label_column_name = 'Class' ``` ## Train Instantiate a AutoMLConfig object. This defines the settings and data used to run the experiment. |Property|Description| |-|-| |**task**|classification or regression| |**primary_metric**|This is the metric that you want to optimize. Classification supports the following primary metrics: <br><i>accuracy</i><br><i>AUC_weighted</i><br><i>average_precision_score_weighted</i><br><i>norm_macro_recall</i><br><i>precision_score_weighted</i>| |**enable_early_stopping**|Stop the run if the metric score is not showing improvement.| |**n_cross_validations**|Number of cross validation splits.| |**training_data**|Input dataset, containing both features and label column.| |**label_column_name**|The name of the label column.| **_You can find more information about primary metrics_** [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-auto-train#primary-metric) ``` automl_settings = { "n_cross_validations": 3, "primary_metric": 'average_precision_score_weighted', "enable_early_stopping": True, "max_concurrent_iterations": 2, # This is a limit for testing purpose, please increase it as per cluster size "experiment_timeout_hours": 0.25, # This is a time limit for testing purposes, remove it for real use cases, this will drastically limit ablity to find the best model possible "verbosity": logging.INFO, } automl_config = AutoMLConfig(task = 'classification', debug_log = 'automl_errors.log', compute_target = compute_target, training_data = training_data, label_column_name = label_column_name, **automl_settings ) ``` Call the `submit` method on the experiment object and pass the run configuration. Depending on the data and the number of iterations this can run for a while. Validation errors and current status will be shown when setting `show_output=True` and the execution will be synchronous. ``` remote_run = experiment.submit(automl_config, show_output = False) # If you need to retrieve a run that already started, use the following code #from azureml.train.automl.run import AutoMLRun #remote_run = AutoMLRun(experiment = experiment, run_id = '<replace with your run id>') remote_run ``` ## Results #### Widget for Monitoring Runs The widget will first report a "loading" status while running the first iteration. 
After completing the first iteration, an auto-updating graph and table will be shown. The widget will refresh once per minute, so you should see the graph update as child runs complete. **Note:** The widget displays a link at the bottom. Use this link to open a web interface to explore the individual run details ``` from azureml.widgets import RunDetails RunDetails(remote_run).show() remote_run.wait_for_completion(show_output=False) ``` #### Explain model Automated ML models can be explained and visualized using the SDK Explainability library. ## Analyze results ### Retrieve the Best Model Below we select the best pipeline from our iterations. The `get_output` method returns the best run and the fitted model. Overloads on `get_output` allow you to retrieve the best run and fitted model for *any* logged metric or for a particular *iteration*. ``` best_run, fitted_model = remote_run.get_output() fitted_model ``` #### Print the properties of the model The fitted_model is a python object and you can read the different properties of the object. ## Test the fitted model Now that the model is trained, split the data in the same way the data was split for training (The difference here is the data is being split locally) and then run the test data through the trained model to get the predicted values. ``` # convert the test data to dataframe X_test_df = validation_data.drop_columns(columns=[label_column_name]).to_pandas_dataframe() y_test_df = validation_data.keep_columns(columns=[label_column_name], validate=True).to_pandas_dataframe() # call the predict functions on the model y_pred = fitted_model.predict(X_test_df) y_pred ``` ### Calculate metrics for the prediction Now visualize the data on a scatter plot to show what our truth (actual) values are compared to the predicted values from the trained model that was returned. ``` from sklearn.metrics import confusion_matrix import numpy as np import itertools cf =confusion_matrix(y_test_df.values,y_pred) plt.imshow(cf,cmap=plt.cm.Blues,interpolation='nearest') plt.colorbar() plt.title('Confusion Matrix') plt.xlabel('Predicted') plt.ylabel('Actual') class_labels = ['False','True'] tick_marks = np.arange(len(class_labels)) plt.xticks(tick_marks,class_labels) plt.yticks([-0.5,0,1,1.5],['','False','True','']) # plotting text value inside cells thresh = cf.max() / 2. for i,j in itertools.product(range(cf.shape[0]),range(cf.shape[1])): plt.text(j,i,format(cf[i,j],'d'),horizontalalignment='center',color='white' if cf[i,j] >thresh else 'black') plt.show() ``` ## Acknowledgements This Credit Card fraud Detection dataset is made available under the Open Database License: http://opendatacommons.org/licenses/odbl/1.0/. Any rights in individual contents of the database are licensed under the Database Contents License: http://opendatacommons.org/licenses/dbcl/1.0/ and is available at: https://www.kaggle.com/mlg-ulb/creditcardfraud The dataset has been collected and analysed during a research collaboration of Worldline and the Machine Learning Group (http://mlg.ulb.ac.be) of ULB (Université Libre de Bruxelles) on big data mining and fraud detection. More details on current and past projects on related topics are available on https://www.researchgate.net/project/Fraud-detection-5 and the page of the DefeatFraud project Please cite the following works: Andrea Dal Pozzolo, Olivier Caelen, Reid A. Johnson and Gianluca Bontempi. Calibrating Probability with Undersampling for Unbalanced Classification. 
In Symposium on Computational Intelligence and Data Mining (CIDM), IEEE, 2015 Dal Pozzolo, Andrea; Caelen, Olivier; Le Borgne, Yann-Ael; Waterschoot, Serge; Bontempi, Gianluca. Learned lessons in credit card fraud detection from a practitioner perspective, Expert systems with applications,41,10,4915-4928,2014, Pergamon Dal Pozzolo, Andrea; Boracchi, Giacomo; Caelen, Olivier; Alippi, Cesare; Bontempi, Gianluca. Credit card fraud detection: a realistic modeling and a novel learning strategy, IEEE transactions on neural networks and learning systems,29,8,3784-3797,2018,IEEE Dal Pozzolo, Andrea Adaptive Machine learning for credit card fraud detection ULB MLG PhD thesis (supervised by G. Bontempi) Carcillo, Fabrizio; Dal Pozzolo, Andrea; Le Borgne, Yann-Aël; Caelen, Olivier; Mazzer, Yannis; Bontempi, Gianluca. Scarff: a scalable framework for streaming credit card fraud detection with Spark, Information fusion,41, 182-194,2018,Elsevier Carcillo, Fabrizio; Le Borgne, Yann-Aël; Caelen, Olivier; Bontempi, Gianluca. Streaming active learning strategies for real-life credit card fraud detection: assessment and visualization, International Journal of Data Science and Analytics, 5,4,285-300,2018,Springer International Publishing Bertrand Lebichot, Yann-Aël Le Borgne, Liyun He, Frederic Oblé, Gianluca Bontempi Deep-Learning Domain Adaptation Techniques for Credit Cards Fraud Detection, INNSBDDL 2019: Recent Advances in Big Data and Deep Learning, pp 78-88, 2019 Fabrizio Carcillo, Yann-Aël Le Borgne, Olivier Caelen, Frederic Oblé, Gianluca Bontempi Combining Unsupervised and Supervised Learning in Credit Card Fraud Detection Information Sciences, 2019
github_jupyter
``` import json import os from pprint import * from tqdm import * from utils.definitions import ROOT_DIR path_load = "mpd.v1/data/" #json folder path_save = ROOT_DIR + "/data/original/" #where to save csv playlist_fields = ['pid','name', 'collaborative', 'modified_at', 'num_albums', 'num_tracks', 'num_followers', 'num_tracks', 'num_edits', 'duration_ms', 'num_artists','description'] ### care, the description field is optional track_fields = ['tid', 'arid' , 'alid', 'track_uri', 'track_name', 'duration_ms'] album_fields = ['alid','album_uri','album_name'] artist_fields = ['arid','artist_uri','artist_name'] interaction_fields = ['pid','tid','pos'] interactions = [] playlists = [] tracks = [] artists = [] albums = [] count_files = 0 count_playlists = 0 count_interactions = 0 count_tracks = 0 count_artists = 0 count_albums = 0 dict_tracks = {} dict_artists = {} dict_albums = {} def process_mpd(path): global count_playlists global count_files filenames = os.listdir(path) for filename in tqdm(sorted(filenames)): if filename.startswith("mpd.slice.") and filename.endswith(".json"): fullpath = os.sep.join((path, filename)) f = open(fullpath) js = f.read() f.close() mpd_slice = json.loads(js) process_info(mpd_slice['info']) for playlist in mpd_slice['playlists']: process_playlist(playlist) pid = playlist['pid'] for track in playlist['tracks']: track['pid']=pid new = add_id_artist(track) if new: process_artist(track) new = add_id_album(track) if new: process_album(track) new = add_id_track(track) if new: process_track(track) process_interaction(track) count_playlists += 1 count_files +=1 show_summary() def process_info(value): #print (json.dumps(value, indent=3, sort_keys=False)) pass def add_id_track(track): global count_tracks if track['track_uri'] not in dict_tracks: dict_tracks[track['track_uri']] = count_tracks track['tid'] = count_tracks count_tracks += 1 return True else: track['tid'] = dict_tracks[track['track_uri']] return False def add_id_artist(track): global count_artists if track['artist_uri'] not in dict_artists: dict_artists[track['artist_uri']] = count_artists track['arid'] = count_artists count_artists += 1 return True else: track['arid'] = dict_artists[track['artist_uri']] return False def add_id_album(track): global count_albums if track['album_uri'] not in dict_albums: dict_albums[track['album_uri']] = count_albums track['alid'] = count_albums count_albums += 1 return True else: track['alid'] = dict_albums[track['album_uri']] return False def process_track(track): global track_fields info = [] for field in track_fields: info.append(track[field]) tracks.append(info) def process_album(track): global album_fields info = [] for field in album_fields: info.append(track[field]) albums.append(info) def process_artist(track): global artist_fields info = [] for field in artist_fields: info.append(track[field]) artists.append(info) def process_interaction(track): global interaction_fields global count_interactions info = [] for field in interaction_fields: info.append(track[field]) interactions.append(info) count_interactions +=1 def process_playlist(playlist): global playlist_fields if not 'description' in playlist: playlist['description'] = None info = [] for field in playlist_fields: info.append(playlist[field]) playlists.append(info) def show_summary(): print (count_files) print (count_playlists) print (count_tracks) print (count_artists) print (count_albums) print (count_interactions) process_mpd(path_load) import csv with open(path_save+"artists.csv", "w") as f: writer = 
csv.writer(f,delimiter = "\t",) writer.writerow(artist_fields) writer.writerows(artists) print ("artists.csv done") with open(path_save+"albums.csv", "w") as f: writer = csv.writer(f,delimiter = "\t",) writer.writerow(album_fields) writer.writerows(albums) print ("albums.csv done") with open(path_save+"interactions.csv", "w") as f: writer = csv.writer(f,delimiter = "\t",) writer.writerow(interaction_fields) writer.writerows(interactions) print ("interactions.csv done") with open(path_save+"tracks.csv", "w") as f: writer = csv.writer(f,delimiter = "\t",) writer.writerow(track_fields) writer.writerows(tracks) print ("tracks.csv done") with open(path_save+"playlists.csv", "w") as f: writer = csv.writer(f,delimiter = "\t",) writer.writerow(playlist_fields) writer.writerows(playlists) print ("playlists.csv done") ```
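As a small optional check (not in the original notebook), the exported CSVs can be read back with pandas to confirm that the row counts match the counters printed by `show_summary()`; the tab separator matches the delimiter used when writing.

```
import pandas as pd

# read each exported file back and print its shape as a sanity check
for name in ['playlists', 'tracks', 'artists', 'albums', 'interactions']:
    df = pd.read_csv(path_save + name + '.csv', sep='\t')
    print(name, df.shape)
```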
github_jupyter
Submitted by Shailender Joseph and Ashish Kumar Sinha __Importing Libraries__ ``` import pandas as pd import numpy as np from sklearn.preprocessing import LabelEncoder from sklearn.tree import DecisionTreeClassifier from sklearn.naive_bayes import GaussianNB from sklearn.svm import SVC from sklearn.metrics import accuracy_score, recall_score, roc_curve, auc from sklearn import model_selection from sklearn.base import TransformerMixin from sklearn.model_selection import KFold import matplotlib.pylab as plt import matplotlib.patches as patches from numpy import interp from joblib import dump import os import joblib import timeit from datetime import datetime cwd = os.getcwd() print(cwd) DT = 'DT' DT_long = 'Decision Tree' GNB = 'GNB' GNB_long = 'Naive Bayes' SVM = 'SVM' SVM_long = 'Support Vector Machine' ``` ### Data Preprocessing ``` # Replacing the 'unknown' values wit the most frequent values class DataFrameImputer(TransformerMixin): def __init__(self): """Impute missing values. Columns of dtype object are imputed with the most frequent value in column. Columns of other types are imputed with mean of column. """ def fit(self, X, y=None): self.fill = pd.Series([X[c].value_counts().index[0] if X[c].dtype == np.dtype('O') else X[c].mean() for c in X], index=X.columns) return self def transform(self, X, y=None): return X.fillna(self.fill) # Function to separate the object and non-object data and Impute the 'unknown' values def preprocess(df): df[['age', 'job', 'marital', 'education', 'default', 'housing', 'loan', 'contact', 'month', 'day_of_week', 'duration', 'campaign', 'pdays', 'previous', 'poutcome', 'emp.var.rate', 'cons.price.idx', 'cons.conf.idx', 'euribor3m', 'nr.employed', 'y']] = df[['age', 'job', 'marital', 'education', 'default', 'housing', 'loan', 'contact', 'month', 'day_of_week', 'duration', 'campaign', 'pdays', 'previous', 'poutcome', 'emp.var.rate', 'cons.price.idx', 'cons.conf.idx', 'euribor3m', 'nr.employed', 'y']].replace('unknown', np.NaN) bank_object_data = df.select_dtypes(include="object") bank_non_object_data = df.select_dtypes(exclude="object") bank_object_data = DataFrameImputer().fit_transform(bank_object_data) label = LabelEncoder() bank_object_data = bank_object_data.apply(label.fit_transform) bank_final = pd.concat([bank_object_data, bank_non_object_data], axis = 1) return(bank_final) df = pd.read_csv('bank-data/bank-additional-full.csv', sep = ';') bank_final = preprocess(df) ``` ## Function to output k-fold estimates of accuracy, recall, precision, f1, and roc_auc ``` def kfold_output(model, X, Y): #function for kfold output start = timeit.default_timer() begin = datetime.now() scoring = ['accuracy', 'recall', 'precision', 'f1', 'roc_auc'] kfold = KFold(n_splits=10, random_state=100, shuffle = True) results_kfold = model_selection.cross_validate(model, X, Y, scoring=scoring, cv=kfold) print("Recall: %0.2f (+/- %0.2f)" % (results_kfold['test_recall'].mean(), results_kfold['test_recall'].std())) print("Precision: %0.2f (+/- %0.2f)" % (results_kfold['test_precision'].mean(), results_kfold['test_precision'].std())) print("F1 Score: %0.2f (+/- %0.2f)" % (results_kfold['test_f1'].mean(), results_kfold['test_f1'].std())) print("Accuracy: %0.2f (+/- %0.2f)" % (results_kfold['test_accuracy'].mean(), results_kfold['test_accuracy'].std())) print("ROC_AUC: %0.2f (+/- %0.2f)" % (results_kfold['test_roc_auc'].mean(), results_kfold['test_roc_auc'].std())) print(results_kfold) stop = timeit.default_timer() end = datetime.now() print('Start Time: ', begin, 'Stop Time: ', end, 
'Time Taken: ', stop - start) ``` ## Function to print roc curves for k-fold validation sets alongwith their AUC ``` def classifier_roc(classifier, X_train_res, y_train_res): start = timeit.default_timer() begin = datetime.now() cv = KFold(n_splits=10, random_state=100, shuffle = True) cv_split_filenames = [] tprs = [] aucs = [] mean_fpr = np.linspace(0, 1, 100) plt.figure(figsize=(10,10)) i = 1 file_name = '' if type(classifier) == type(DecisionTreeClassifier()): file_name = DT plot_title = DT_long elif type(classifier) == type(GaussianNB()): file_name = GNB plot_title = GNB_long else: file_name = SVM plot_title = SVM_long if not os.path.exists(plot_title): os.mkdir(cwd+"/"+plot_title) for train, test in cv.split(X_train_res, y_train_res): probas_ = classifier.fit(X_train_res.iloc[train], y_train_res.iloc[train]).predict_proba(X_train_res.iloc[test]) cv_split_filenames = cwd+"/"+plot_title + "/" + file_name + str(i) dump(probas_,cv_split_filenames) # Compute ROC curve and area the curve fpr, tpr, thresholds = roc_curve(y_train_res[test], probas_[:, 1]) tprs.append(interp(mean_fpr, fpr, tpr)) tprs[-1][0] = 0.0 roc_auc = auc(fpr, tpr) aucs.append(roc_auc) plt.plot(fpr, tpr, lw=1, alpha=0.3, label='ROC fold %d (AUC = %0.2f)' % (i, roc_auc)) i += 1 plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r', label='Chance', alpha=.8) mean_tpr = np.mean(tprs, axis=0) mean_tpr[-1] = 1.0 mean_auc = auc(mean_fpr, mean_tpr) std_auc = np.std(aucs) plt.plot(mean_fpr, mean_tpr, color='b', label=r'Mean ROC (AUC = %0.2f $\pm$ %0.2f)' % (mean_auc, std_auc), lw=2, alpha=.8) std_tpr = np.std(tprs, axis=0) tprs_upper = np.minimum(mean_tpr + std_tpr, 1) tprs_lower = np.maximum(mean_tpr - std_tpr, 0) plt.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=.2, label=r'$\pm$ 1 std. dev.') plt.xlim([-0.01, 1.01]) plt.ylim([-0.01, 1.01]) plt.xlabel('False Positive Rate',fontsize=18) plt.ylabel('True Positive Rate',fontsize=18) plt.title('Cross-Validation ROC of ' + plot_title,fontsize=14) plt.legend(loc="lower right", prop={'size': 10}) plt.show() stop = timeit.default_timer() end = datetime.now() print('Start Time: ', begin, 'Stop Time: ', end, 'Time Taken: ', stop - start) ``` ## Function to plot roc curves using saved parameters ``` def plot_saved(directory,file_name, X_train_res,y_train_res): start = timeit.default_timer() begin = datetime.now() cv = KFold(n_splits=10, random_state=100, shuffle = True) tprs = [] aucs = [] mean_fpr = np.linspace(0, 1, 100) plt.figure(figsize=(10,10)) i = 1 for train, test in cv.split(X_train_res, y_train_res): probas_ = joblib.load(cwd +"/" + directory + "/" + file_name + str(i),mmap_mode = 'c') # Compute ROC curve and area the curve fpr, tpr, thresholds = roc_curve(y_train_res[test], probas_[:, 1]) tprs.append(interp(mean_fpr, fpr, tpr)) tprs[-1][0] = 0.0 roc_auc = auc(fpr, tpr) aucs.append(roc_auc) plt.plot(fpr, tpr, lw=1, alpha=0.3, label='ROC fold %d (AUC = %0.2f)' % (i, roc_auc)) i += 1 plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r',label='Chance', alpha=.8) mean_tpr = np.mean(tprs, axis=0) mean_tpr[-1] = 1.0 mean_auc = auc(mean_fpr, mean_tpr) std_auc = np.std(aucs) plt.plot(mean_fpr, mean_tpr, color='b', label=r'Mean ROC (AUC = %0.2f $\pm$ %0.2f)' % (mean_auc, std_auc), lw=2, alpha=.8) std_tpr = np.std(tprs, axis=0) tprs_upper = np.minimum(mean_tpr + std_tpr, 1) tprs_lower = np.maximum(mean_tpr - std_tpr, 0) plt.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=.2,label=r'$\pm$ 1 std. 
dev.') plt.xlim([-0.01, 1.01]) plt.ylim([-0.01, 1.01]) plt.xlabel('False Positive Rate',fontsize=18) plt.ylabel('True Positive Rate',fontsize=18) plt.title('Cross-Validation ROC of '+ directory,fontsize=14) plt.legend(loc="lower right", prop={'size': 10}) plt.show() stop = timeit.default_timer() end = datetime.now() print('Start Time: ', begin, 'Stop Time: ', end, 'Time Taken: ', stop - start) ``` ## Building dataset for training ``` X_train_res = bank_final.drop(['y'], axis = 1) y_train_res = bank_final['y'] ``` ## K-fold ouputs and ROC curve for Decision Tree classifier ``` dt_classifier = DecisionTreeClassifier(min_samples_split=60, min_samples_leaf=60, class_weight = 'balanced', random_state=20) kfold_output(dt_classifier,X_train_res,y_train_res) classifier_roc(dt_classifier, X_train_res, y_train_res) ``` ## ROC curve for Decision Tree classifier using saved parameters ``` plot_saved(DT_long, DT, X_train_res,y_train_res) ``` ## K-fold ouputs and ROC curve for Naive bayes classifier ``` nb_classifier = GaussianNB(priors = [0.11, 0.89]) kfold_output(nb_classifier,X_train_res,y_train_res) classifier_roc(nb_classifier, X_train_res, y_train_res) ``` ## ROC curve for Naive Bayes classifier using saved parameters ``` plot_saved(GNB_long, GNB, X_train_res,y_train_res) ``` ## K-fold ouputs and ROC curve for SVM with plynomial kernel ``` svm_classifier = SVC(kernel = 'poly', random_state = 0, class_weight = 'balanced') kfold_output(svm_classifier,X_train_res,y_train_res) roc_svm_classifier = SVC(kernel = 'poly', probability = True, random_state = 0, class_weight = 'balanced') classifier_roc(roc_svm_classifier, X_train_res, y_train_res) ``` ## ROC curve for SVM classifier using saved parameters ``` plot_saved(SVM_long, SVM, X_train_res,y_train_res) ``` __Among the models we observed that:__ __SVM with 'poly' kernal and 'balanced' class_weights gave:__ #Recall: 0.86 (+/- 0.02); Precision: 0.40 (+/- 0.01); #F1 Score: 0.54 (+/- 0.01); Accuracy: 0.84 (+/- 0.00); ROC_AUC: 0.92 (+/- 0.00) __Gaussian Naive Bayes with priors priors = [0.11, 0.89] gave:__ #Recall: 0.83 (+/- 0.02); Precision: 0.26 (+/- 0.01) #F1 Score: 0.40 (+/- 0.02); Accuracy: 0.72 (+/- 0.01); ROC_AUC: 0.86 (+/- 0.01) __The decision tree classifier with min_samples_split=60, min_samples_leaf=60, class_weight = 'balanced' gave:__ #Recall: 0.92 (+/- 0.01); Precision: 0.42 (+/- 0.01); F1 Score: 0.57 (+/- 0.01) #Accuracy: 0.85 (+/- 0.00); ROC_AUC: 0.94 (+/- 0.00) __Among the models the decision tree gave the best results as it had te highest recall, F1-score and ROC_AUC but SVM was a close second to decision tree.__
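Given that the decision tree comes out on top, a natural next step is to refit it on the full dataset and persist it for reuse. The sketch below is an addition for illustration only (the output filename is arbitrary) and relies on the `dump` helper and `cwd` already imported and defined above.

```
# refit the best-performing model on the full dataset and persist it
final_model = DecisionTreeClassifier(min_samples_split=60, min_samples_leaf=60,
                                     class_weight='balanced', random_state=20)
final_model.fit(X_train_res, y_train_res)

# the filename here is just an example
dump(final_model, os.path.join(cwd, 'final_decision_tree.joblib'))
```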
github_jupyter
# This Jupyter Notebook contains the full code needed to write the ColumnTransformer blog ## Import Necessary Packages ``` import pandas as pd import numpy as np from sklearn.compose import ColumnTransformer from sklearn.preprocessing import StandardScaler, OneHotEncoder from sklearn.model_selection import train_test_split from sklearn.impute import SimpleImputer from sklearn.linear_model import LinearRegression from sklearn.pipeline import Pipeline from pytz import timezone ``` ## Import Data and some pre-transformation data prep ``` # read the csvs with waits and weather df = pd.read_csv('./data/dec2019.csv') weather_df = pd.read_csv('./data/dec2019weather.csv') # rename the columns df.columns = ['date_hour', 'wait_hrs'] # cut the date_hours to the hour (no minutes/seconds) and convert to string for merging df['date_hour'] = pd.to_datetime(df['date_hour'], utc=True).values.astype('datetime64[h]') df['date_hour'] = df['date_hour'].astype('str') # create dataframe of all possible departure hours in the month (as string for merging) # note that I chose to include non-ferry service hours at this stage dts = pd.DataFrame(columns=['date_hour']) dts['date_hour'] = pd.date_range(start='2019-12-01 00:00', end='2019-12-31 23:30', freq='H', ).astype('str') # merge/join the waits to the dataframe of all departures df_expanded = dts.merge(df, how='left', on='date_hour') # cast as datetime with timezone UTC df_expanded['date_hour'] = pd.to_datetime(df_expanded['date_hour'], utc=True) # adjust time to PST df_expanded['date_hour'] = [dt.astimezone(timezone('US/Pacific')) for dt in df_expanded['date_hour']] # remove non-sailing times (1 to 4 am for Edmonds (1-3 for Kingston)) df_expanded = df_expanded.set_index('date_hour') df_expanded = df_expanded.between_time('5:00', '00:59') # reset index for modeling df_expanded = df_expanded.reset_index() weather_df.columns = ['date', 'max_temp', 'avg_temp', 'min_temp'] weather_df['date'] = pd.to_datetime(weather_df['date']) df_expanded['date'] = pd.to_datetime(df_expanded['date_hour']).values.astype('datetime64[D]') df_expanded = df_expanded.merge(weather_df, how='left', on='date') df_expanded.head() ``` ## Simple Column Transformer Example ``` # a little cheating to extract the day of the week # and hour of the day w/out using a transformer # (see below for the "real" version) df_simple = df_expanded.copy() df_simple['weekday'] = [dt.weekday() for dt in df_simple['date_hour']] df_simple['hour'] = [dt.hour for dt in df_simple['date_hour']] df_simple.head() X = df_simple.drop(columns='wait_hrs') y = df_simple['wait_hrs'].fillna(value=0) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=111) # define column transformer and set n_jobs to have it run on all cores col_transformer = ColumnTransformer( transformers=[ ('ss', StandardScaler(), ['max_temp', 'avg_temp', 'min_temp']), ('ohe', OneHotEncoder(), ['weekday', 'hour'])], remainder='drop', n_jobs=-1 ) X_train_transformed = col_transformer.fit_transform(X_train) X_train_transformed lr = LinearRegression() pipe = Pipeline([ ("preprocessing", col_transformer), ("lr", lr) ]) pipe.fit(X_train, y_train) preds_train = pipe.predict(X_train) preds_test = pipe.predict(X_test) preds_train[0:5] preds_test[0:5] col_transformer.get_feature_names col_transformer.named_transformers_['ohe'].get_feature_names() for transformer in col_transformer.named_transformers_.values(): try: transformer.get_feature_names() except: print('SS col') else: print(transformer.get_feature_names()) ``` ## More complex column 
transformer example: imputing THEN standard scale/ohe ``` # define transformers si_0 = SimpleImputer(strategy='constant', fill_value=0) ss = StandardScaler() ohe = OneHotEncoder() # define column groups with same processing cat_vars = ['weekday', 'hour'] num_vars = ['max_temp', 'avg_temp', 'min_temp'] # set up pipelines for each column group categorical_pipe = Pipeline([ ('si_0', si_0), ('ohe', ohe) ]) numeric_pipe = Pipeline([ ('si_0', si_0), ('ss', ss) ]) # set up columnTransformer col_transformer = ColumnTransformer( transformers=[ ('nums', numeric_pipe, num_vars), ('cats', categorical_pipe, cat_vars) ], remainder='drop', n_jobs=-1 ) pipe = Pipeline([ ("preprocessing", col_transformer), ("lr", lr) ]) pipe.fit(X_train, y_train) preds_train = pipe.predict(X_train) preds_test = pipe.predict(X_test) preds_train[0:10] preds_test[0:10] col_transformer.named_transformers_['cats'].named_steps['ohe'].get_feature_names() ``` ## Create your own custom transformer ``` from sklearn.base import TransformerMixin, BaseEstimator class DateTransformer(TransformerMixin, BaseEstimator): """Extracts features from datetime column Returns: hour: hour day: Between 1 and the number of days in the given month of the given year. month: Between 1 and 12 inclusive. year: four-digit year weekday:day of the week as an integer, where Monday is 0 and Sunday is 6 """ def fit(self, x, y=None): return self def transform(self, x, y=None): result = pd.DataFrame(x, columns=['date_hour']) result['hour'] = [dt.hour for dt in result['date_hour']] result['day'] = [dt.day for dt in result['date_hour']] result['month'] = [dt.month for dt in result['date_hour']] result['year'] = [dt.year for dt in result['date_hour']] result['weekday'] = [dt.weekday() for dt in result['date_hour']] return result[['hour', 'day', 'month', 'year', 'weekday']] def get_feature_names(self): return ['hour','day', 'month', 'year', 'weekday'] X = df_expanded.drop(columns='wait_hrs') y = df_simple['wait_hrs'].fillna(value=0) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=111) X.head() time_preprocessing = Pipeline([ ('date', DateTransformer()), ('ohe', OneHotEncoder(categories='auto')) ]) ct = ColumnTransformer( transformers=[ ('ss', StandardScaler(), ['max_temp', 'avg_temp', 'min_temp']), ('date_exp', time_preprocessing, ['date_hour'])], remainder='drop', ) pipe = Pipeline([('preprocessor', ct), ('lr', lr)]) pipe.fit(X_train, y_train) preds_train = pipe.predict(X_train) preds_test = pipe.predict(X_test) lr.coef_ ct.named_transformers_['date_exp'].named_steps['ohe'].get_feature_names() ct.named_transformers_['date_exp'].named_steps['date'].get_feature_names() ``` ## Rare features with ColumnTransformer ``` df = pd.DataFrame() df['cat1'] = [0, 1, 1, 1, 1, 1, 1, 1, 1, 1] df['cat2'] = [0, 0, 0, 0, 0, 2, 2, 2, 2, 2] df['num1'] = [np.nan, 1, 1.1, .9, .8, np.nan, 2, 2.2, 1.5, np.nan] df['num2'] = [1.1, 1.1, 1.1, 1.1, 1.1, 1.2, 1.2, 1.2, 1.2, 1.2] target = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] X_train, X_test, y_train, y_test = train_test_split(df, target, random_state=111) num_pipe = Pipeline([ ('si', SimpleImputer(add_indicator=True)), ('ss', StandardScaler()) ]) ct = ColumnTransformer( transformers=[('ohe', OneHotEncoder(categories=[[0,1], [0,2]]), ['cat1', 'cat2']), ('numeric', num_pipe, ['num1', 'num2'])]) pipe = Pipeline([ ('preprocessor', ct), ('lr', lr) ]) pipe.fit(X_train, y_train) preds_train = pipe.predict(X_train) preds_test = pipe.predict(X_test) ct.fit_transform(X_train) ct.fit_transform(X_test) ```
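To tie this last example back to the feature-name inspection used earlier in the notebook, the fitted `ColumnTransformer` can be queried for the one-hot columns it produced. The snippet below is a sketch (the expected output shown in the comment is indicative) and assumes the cells above have been run; note that `SimpleImputer(add_indicator=True)` also appends a missing-value indicator column for `num1` whenever the training split contains missing values in that column.

```
# inspect the columns produced by the last ColumnTransformer
ohe_names = ct.named_transformers_['ohe'].get_feature_names(['cat1', 'cat2'])
print(list(ohe_names))  # expected: ['cat1_0', 'cat1_1', 'cat2_0', 'cat2_2']

# the numeric pipeline contributes 'num1' and 'num2' (scaled), plus the
# optional missing-value indicator added by SimpleImputer(add_indicator=True)
print(ct.fit_transform(X_train).shape)
```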
github_jupyter
<a href="https://colab.research.google.com/github/athenian-ct-projects/Concert-Prep-Day-JL/blob/master/Concert_Prep_artist_monopoly_JL.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> Artist-themed Monopoly for Concert Prep Day Jack L. '23 ``` #art fact function def fact(): import random x = random.randint(1,10) if x == 1: print("Bob Ross's 'Joy of Painting' TV series lasted for 31 seasons") if x == 2: print("In 1911 when the Mona Lisa was stolen from the Louvre, Pablo Picasso was one of the two primary suspects in the investigation \nbefore it was found out that an employee did it") if x == 3: print("Salvador Dalí thinks that he is a re-incarnation of his brother that died before he was born") if x == 4: print("Vincent van Gogh only ever sold one painting in his life") if x == 5: print("'The Last Supper' by Leonardo da Vinci originally featured Jesus's feet, but in 1652 when installing a door in the refectory where the painting is, \nthe feet were cut off") if x == 6: print("Vincent van Gogh's painting 'The Starry Night' is the view from a psychiatric hospital in France where van Gogh was staying when he painted it") if x == 7: print("The marble that was used for Michelangelo's 'David' was used by two other sculptors before Michelangelo") if x == 8: print("There are five versions of Edvard Munch’s 'The Scream'") if x == 9: print("Auguste Rodin’s 'The Thinker' originally was only 70cm until he later made an enlarged version") if x == 10: print("Andy Warhol's Campbell's Soup cans came in a set of thirty-two cans") #Rainbow paint bucket function def paint(): import random y = random.randint(1,10) if y == 1: print("HOORAY, advance to go collect $200") if y == 2: print("You commited tax fraud - go to jail. If you pass go do not collect $200.") if y == 3: print("You are a guest star on a game show. Collect $100 from the bank.") if y == 4: print("You drink a Sprite cranbery. Suddenly your door falls down and Lebron James walks in and hands you a fat stack of cash. Collect $500 from the bank.") if y == 5: print("Some guy blows up your house with a grenade launcher like in John Wick 2. Pay the bank $200.") if y == 6: print("The Great Depression happens again and your bank fails. Pay the bank all of your money (you can mortgage your artists to avoid going bankrupt).") if y == 7: print("You get in a car crash while wearng a VR headset and playing a flight simulator in the car, saying 'it will be like I am flying in a plane'. \nPay the bank $200 in medical fees") if y == 8: print("Your grandfather dies and he leaves you an inheritance. You assume his massive debt and pay the bank $500.") if y == 9: print("Your favorite NFL team wins the Super Bowl! Pay the bank $50 for the jersey you bought.") if y == 10: print("You win the lottery but spend it all on worthless stuff. Roll the dice again") #Instructions print("Welcome to Artist Monopoly!") print("This is just like regular monopoly but with some IMPORTANT twists:") print("To roll dice just ask siri to roll a pair of dice") print("*there are more spaces, and railroads have been replaced with more modern airlines") print("*there are auction spaces now. If you land on one you can buy any artist on the board but you have to pay double (only one artist each time you land on the spot).") print("*trading artists for money and other artists are encoureged but you can only propose a trade on your turn.") print("*chance spaces have been replaced by artist facts. 
If you land on that space, type the word 'fact' into the computer.") print("*community chests have been replaced by rainbow paint buckets. If you land on that space, type the word 'paint' into the computer.\n") print("IMPORTANT: When someone goes bankrupt, type the word 'player' into the computer.\n") player = int(input("How many people are playing today? ")) print("Alright you're ready to play. Everyone starts with $1500") #Gameplay while loop while player > 1: tip = input() if tip == "fact": fact() elif tip == "paint": paint() elif tip == "player": player = player - 1 else: print("You must have spelled something wrong. Try again.") #Final score calculating print("Looks like we have a winner! Now let's calculate your final score.") pig = int(input("How much money did the winner have in the end? ")) fig = 0 for z in range(1,pig): fig = fig + z print("The final score of the winner is:") print(fig) print("Thanks for playing!") ``` https://www.bocadolobo.com/blog/art/famous-artists-time/ https://www.mentalfloss.com/article/5838715-things-you-didnt-know-about-famous-art https://medium.com/@inna_13021/taxes-youll-have-to-pay-when-purchasing-or-selling-art-a418b958c457
github_jupyter
<img src="http://hilpisch.com/tpq_logo.png" alt="The Python Quants" width="35%" align="right" border="0"><br> # Python for Finance **Analyze Big Financial Data** O'Reilly (2014) Yves Hilpisch <img style="border:0px solid grey;" src="http://hilpisch.com/python_for_finance.png" alt="Python for Finance" width="30%" align="left" border="0"> **Buy the book ** | <a href='http://shop.oreilly.com/product/0636920032441.do' target='_blank'>O'Reilly</a> | <a href='http://www.amazon.com/Yves-Hilpisch/e/B00JCYHHJM' target='_blank'>Amazon</a> **All book codes & IPYNBs** | <a href="http://oreilly.quant-platform.com">http://oreilly.quant-platform.com</a> **The Python Quants GmbH** | <a href='http://tpq.io' target='_blank'>http://tpq.io</a> **Contact us** | <a href='mailto:[email protected]'>[email protected]</a> # Input-Output Operations ``` from pylab import plt plt.style.use('ggplot') import matplotlib as mpl mpl.rcParams['font.family'] = 'serif' ``` ## Basic I/O with Python ### Writing Objects to Disk ``` path = './data/' import numpy as np from random import gauss a = [gauss(1.5, 2) for i in range(1000000)] # generation of normally distributed randoms import pickle pkl_file = open(path + 'data.pkl', 'wb') # open file for writing # Note: existing file might be overwritten %time pickle.dump(a, pkl_file) pkl_file pkl_file.close() ll $path* pkl_file = open(path + 'data.pkl', 'rb') # open file for reading %time b = pickle.load(pkl_file) b[:5] a[:5] np.allclose(np.array(a), np.array(b)) np.sum(np.array(a) - np.array(b)) pkl_file = open(path + 'data.pkl', 'wb') # open file for writing %time pickle.dump(np.array(a), pkl_file) %time pickle.dump(np.array(a) ** 2, pkl_file) pkl_file.close() ll $path* pkl_file = open(path + 'data.pkl', 'rb') # open file for reading x = pickle.load(pkl_file) x y = pickle.load(pkl_file) y pkl_file.close() pkl_file = open(path + 'data.pkl', 'wb') # open file for writing pickle.dump({'x' : x, 'y' : y}, pkl_file) pkl_file.close() pkl_file = open(path + 'data.pkl', 'rb') # open file for writing data = pickle.load(pkl_file) pkl_file.close() for key in data.keys(): print(key, data[key][:4]) !rm -f $path* ``` ### Reading and Writing Text Files ``` rows = 5000 a = np.random.standard_normal((rows, 5)) # dummy data a.round(4) import pandas as pd t = pd.date_range(start='2014/1/1', periods=rows, freq='H') # set of hourly datetime objects t csv_file = open(path + 'data.csv', 'w') # open file for writing header = 'date,no1,no2,no3,no4,no5\n' csv_file.write(header) for t_, (no1, no2, no3, no4, no5) in zip(t, a): s = '%s,%f,%f,%f,%f,%f\n' % (t_, no1, no2, no3, no4, no5) csv_file.write(s) csv_file.close() ll $path* csv_file = open(path + 'data.csv', 'r') # open file for reading for i in range(5): print(csv_file.readline(), end='') csv_file = open(path + 'data.csv', 'r') content = csv_file.readlines() for line in content[:5]: print(line, end='') csv_file.close() !rm -f $path* ``` ### SQL Databases ``` import sqlite3 as sq3 query = 'CREATE TABLE numbs (Date date, No1 real, No2 real)' con = sq3.connect(path + 'numbs.db') con.execute(query) con.commit() import datetime as dt con.execute('INSERT INTO numbs VALUES(?, ?, ?)', (dt.datetime.now(), 0.12, 7.3)) data = np.random.standard_normal((10000, 2)).round(5) for row in data: con.execute('INSERT INTO numbs VALUES(?, ?, ?)', (dt.datetime.now(), row[0], row[1])) con.commit() con.execute('SELECT * FROM numbs').fetchmany(10) pointer = con.execute('SELECT * FROM numbs') for i in range(3): print(pointer.fetchone()) con.close() !rm -f $path* ``` ### Writing 
and Reading Numpy Arrays ``` import numpy as np dtimes = np.arange('2015-01-01 10:00:00', '2021-12-31 22:00:00', dtype='datetime64[m]') # minute intervals len(dtimes) dty = np.dtype([('Date', 'datetime64[m]'), ('No1', 'f'), ('No2', 'f')]) data = np.zeros(len(dtimes), dtype=dty) data['Date'] = dtimes a = np.random.standard_normal((len(dtimes), 2)).round(5) data['No1'] = a[:, 0] data['No2'] = a[:, 1] %time np.save(path + 'array', data) # suffix .npy is added ll $path* %time np.load(path + 'array.npy') data = np.random.standard_normal((10000, 6000)) %time np.save(path + 'array', data) ll $path* %time np.load(path + 'array.npy') data = 0.0 !rm -f $path* ``` ## I/O with pandas ``` import numpy as np import pandas as pd data = np.random.standard_normal((1000000, 5)).round(5) # sample data set filename = path + 'numbs' ``` ### SQL Database ``` import sqlite3 as sq3 query = 'CREATE TABLE numbers (No1 real, No2 real,\ No3 real, No4 real, No5 real)' con = sq3.Connection(filename + '.db') con.execute(query) %%time con.executemany('INSERT INTO numbers VALUES (?, ?, ?, ?, ?)', data) con.commit() ll $path* %%time temp = con.execute('SELECT * FROM numbers').fetchall() print(temp[:2]) temp = 0.0 %%time query = 'SELECT * FROM numbers WHERE No1 > 0 AND No2 < 0' res = np.array(con.execute(query).fetchall()).round(3) res = res[::100] # every 100th result import matplotlib.pyplot as plt %matplotlib inline plt.plot(res[:, 0], res[:, 1], 'ro') plt.grid(True); plt.xlim(-0.5, 4.5); plt.ylim(-4.5, 0.5) # tag: scatter_query # title: Plot of the query result # size: 60 ``` ### From SQL to pandas ``` import pandas.io.sql as pds %time data = pds.read_sql('SELECT * FROM numbers', con) data.head() %time data[(data['No1'] > 0) & (data['No2'] < 0)].head() %%time res = data[['No1', 'No2']][((data['No1'] > 0.5) | (data['No1'] < -0.5)) & ((data['No2'] < -1) | (data['No2'] > 1))] plt.plot(res.No1, res.No2, 'ro') plt.grid(True); plt.axis('tight') # tag: data_scatter_1 # title: Scatter plot of complex query results # size: 55 h5s = pd.HDFStore(filename + '.h5s', 'w') %time h5s['data'] = data h5s h5s.close() %%time h5s = pd.HDFStore(filename + '.h5s', 'r') temp = h5s['data'] h5s.close() np.allclose(np.array(temp), np.array(data)) temp = 0.0 ll $path* ``` ### Data as CSV File ``` %time data.to_csv(filename + '.csv') ls data/ %%time pd.read_csv(filename + '.csv')[['No1', 'No2', 'No3', 'No4']].hist(bins=20) # tag: data_hist_3 # title: Histogram of 4 data sets # size: 60 ``` ### Data as Excel File ``` %time data[:100000].to_excel(filename + '.xlsx') %time pd.read_excel(filename + '.xlsx', 'Sheet1').cumsum().plot() # tag: data_paths # title: Paths of random data from Excel file # size: 60 ll $path* rm -f $path* ``` ## Fast I/O with PyTables ``` import numpy as np import tables as tb import datetime as dt import matplotlib.pyplot as plt %matplotlib inline ``` ### Working with Tables ``` filename = path + 'tab.h5' h5 = tb.open_file(filename, 'w') rows = 2000000 row_des = { 'Date': tb.StringCol(26, pos=1), 'No1': tb.IntCol(pos=2), 'No2': tb.IntCol(pos=3), 'No3': tb.Float64Col(pos=4), 'No4': tb.Float64Col(pos=5) } filters = tb.Filters(complevel=0) # no compression tab = h5.create_table('/', 'ints_floats', row_des, title='Integers and Floats', expectedrows=rows, filters=filters) tab pointer = tab.row ran_int = np.random.randint(0, 10000, size=(rows, 2)) ran_flo = np.random.standard_normal((rows, 2)).round(5) %%time for i in range(rows): pointer['Date'] = dt.datetime.now() pointer['No1'] = ran_int[i, 0] pointer['No2'] = ran_int[i, 1] 
pointer['No3'] = ran_flo[i, 0] pointer['No4'] = ran_flo[i, 1] pointer.append() # this appends the data and # moves the pointer one row forward tab.flush() tab ll $path* dty = np.dtype([('Date', 'S26'), ('No1', '<i4'), ('No2', '<i4'), ('No3', '<f8'), ('No4', '<f8')]) sarray = np.zeros(len(ran_int), dtype=dty) sarray %%time sarray['Date'] = dt.datetime.now() sarray['No1'] = ran_int[:, 0] sarray['No2'] = ran_int[:, 1] sarray['No3'] = ran_flo[:, 0] sarray['No4'] = ran_flo[:, 1] %%time h5.create_table('/', 'ints_floats_from_array', sarray, title='Integers and Floats', expectedrows=rows, filters=filters) h5 h5.remove_node('/', 'ints_floats_from_array') tab[:3] tab[:4]['No4'] %time np.sum(tab[:]['No3']) %time np.sum(np.sqrt(tab[:]['No1'])) %%time plt.hist(tab[:]['No3'], bins=30) plt.grid(True) print(len(tab[:]['No3'])) # tag: data_hist # title: Histogram of data # size: 60 %%time res = np.array([(row['No3'], row['No4']) for row in tab.where('((No3 < -0.5) | (No3 > 0.5)) \ & ((No4 < -1) | (No4 > 1))')])[::100] plt.plot(res.T[0], res.T[1], 'ro') plt.grid(True) # tag: scatter_data # title: Scatter plot of query result # size: 70 %%time values = tab.cols.No3[:] print("Max %18.3f" % values.max()) print("Ave %18.3f" % values.mean()) print("Min %18.3f" % values.min()) print("Std %18.3f" % values.std()) %%time results = [(row['No1'], row['No2']) for row in tab.where('((No1 > 9800) | (No1 < 200)) \ & ((No2 > 4500) & (No2 < 5500))')] for res in results[:4]: print(res) %%time results = [(row['No1'], row['No2']) for row in tab.where('(No1 == 1234) & (No2 > 9776)')] for res in results: print(res) ``` ### Working with Compressed Tables ``` filename = path + 'tab.h5c' h5c = tb.open_file(filename, 'w') filters = tb.Filters(complevel=4, complib='blosc') tabc = h5c.create_table('/', 'ints_floats', sarray, title='Integers and Floats', expectedrows=rows, filters=filters) %%time res = np.array([(row['No3'], row['No4']) for row in tabc.where('((No3 < -0.5) | (No3 > 0.5)) \ & ((No4 < -1) | (No4 > 1))')])[::100] %time arr_non = tab.read() %time arr_com = tabc.read() ll $path* h5c.close() ``` ### Working with Arrays ``` %%time arr_int = h5.create_array('/', 'integers', ran_int) arr_flo = h5.create_array('/', 'floats', ran_flo) h5 ll $path* h5.close() !rm -f $path* ``` ### Out-of-Memory Computations ``` filename = path + 'array.h5' h5 = tb.open_file(filename, 'w') n = 100 ear = h5.create_earray(h5.root, 'ear', atom=tb.Float64Atom(), shape=(0, n)) %%time rand = np.random.standard_normal((n, n)) for i in range(750): ear.append(rand) ear.flush() ear ear.size_on_disk out = h5.create_earray(h5.root, 'out', atom=tb.Float64Atom(), shape=(0, n)) expr = tb.Expr('3 * sin(ear) + sqrt(abs(ear))') # the numerical expression as a string object expr.set_output(out, append_mode=True) # target to store results is disk-based array %time expr.eval() # evaluation of the numerical expression # and storage of results in disk-based array out[0, :10] %time imarray = ear.read() # read whole array into memory import numexpr as ne expr = '3 * sin(imarray) + sqrt(abs(imarray))' ne.set_num_threads(16) %time ne.evaluate(expr)[0, :10] h5.close() !rm -f $path* ``` ## Conclusions ## Further Reading <img src="http://hilpisch.com/tpq_logo.png" alt="The Python Quants" width="35%" align="right" border="0"><br> <a href="http://tpq.io" target="_blank">http://tpq.io</a> | <a href="http://twitter.com/dyjh" target="_blank">@dyjh</a> | <a href="mailto:[email protected]">[email protected]</a> **Quant Platform** | <a 
href="http://quant-platform.com">http://quant-platform.com</a> **Python for Finance** | <a href="http://python-for-finance.com" target="_blank">Python for Finance @ O'Reilly</a> **Derivatives Analytics with Python** | <a href="http://derivatives-analytics-with-python.com" target="_blank">Derivatives Analytics @ Wiley Finance</a> **Listed Volatility and Variance Derivatives** | <a href="http://lvvd.tpq.io" target="_blank">Listed VV Derivatives @ Wiley Finance</a> **Python Training** | <a href="http://training.tpq.io" target="_blank">Python for Finance University Certificate</a>
github_jupyter
<a href="https://colab.research.google.com/github/tensorflow/tpu/blob/master/tools/colab/keras_mnist_tpu.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> ##### Copyright 2018 The TensorFlow Hub Authors. Licensed under the Apache License, Version 2.0 (the "License"); ``` # Copyright 2018 The TensorFlow Hub Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== ``` ## MNIST on TPU (Tensor Processing Unit)<br>or GPU using tf.Keras and tf.data.Dataset <table><tr><td><img valign="middle" src="https://raw.githubusercontent.com/GoogleCloudPlatform/tensorflow-without-a-phd/master/tensorflow-rl-pong/images/keras-tensorflow-tpu300px.png" width="300" alt="Keras+Tensorflow+Cloud TPU"></td></tr></table> ## Overview This sample trains an "MNIST" handwritten digit recognition model on a GPU or TPU backend using a Keras model. Data are handled using the tf.data.Datset API. This is a very simple sample provided for educational purposes. Do not expect outstanding TPU performance on a dataset as small as MNIST. This notebook is hosted on GitHub. To view it in its original repository, after opening the notebook, select **File > View** on GitHub. ## Learning objectives In this notebook, you will learn how to: * Authenticate in Colab to access Google Cloud Storage (GSC) * Format and prepare a dataset using tf.data.Dataset * Create convolutional and dense layers using tf.keras.Sequential * Build a Keras classifier with softmax, cross-entropy, and the adam optimizer * Run training and validation in Keras using Cloud TPU * Export a model for serving from ML Engine * Deploy a trained model to ML Engine * Test predictions on a deployed model ## Instructions <h3><a href="https://cloud.google.com/gpu/"><img valign="middle" src="https://raw.githubusercontent.com/GoogleCloudPlatform/tensorflow-without-a-phd/master/tensorflow-rl-pong/images/gpu-hexagon.png" width="50"></a> &nbsp;&nbsp;Train on GPU or TPU&nbsp;&nbsp; <a href="https://cloud.google.com/tpu/"><img valign="middle" src="https://raw.githubusercontent.com/GoogleCloudPlatform/tensorflow-without-a-phd/master/tensorflow-rl-pong/images/tpu-hexagon.png" width="50"></a></h3> 1. Select a GPU or TPU backend (Runtime > Change runtime type) 1. Runtime > Run All <br/>(Watch out: the "Colab-only auth" cell requires user input. <br/>The "Deploy" part at the end requires cloud project and bucket configuration.) <h3><a href="https://cloud.google.com/ml-engine/"><img valign="middle" src="https://raw.githubusercontent.com/GoogleCloudPlatform/tensorflow-without-a-phd/master/tensorflow-rl-pong/images/mlengine-hexagon.png" width="50"></a> &nbsp;&nbsp;Deploy to AI Platform</h3> At the bottom of this notebook you can deploy your trained model to AI Platform for a serverless, autoscaled, REST API experience. You will need a Google Cloud project and a GCS (Google Cloud Storage) bucket for this last part. 
TPUs are located in Google Cloud, for optimal performance, they read data directly from Google Cloud Storage. ### Imports ``` import os, re, time, json import PIL.Image, PIL.ImageFont, PIL.ImageDraw import numpy as np import tensorflow as tf from matplotlib import pyplot as plt print("Tensorflow version " + tf.__version__) #@title visualization utilities [RUN ME] """ This cell contains helper functions used for visualization and downloads only. You can skip reading it. There is very little useful Keras/Tensorflow code here. """ # Matplotlib config plt.rc('image', cmap='gray_r') plt.rc('grid', linewidth=0) plt.rc('xtick', top=False, bottom=False, labelsize='large') plt.rc('ytick', left=False, right=False, labelsize='large') plt.rc('axes', facecolor='F8F8F8', titlesize="large", edgecolor='white') plt.rc('text', color='a8151a') plt.rc('figure', facecolor='F0F0F0')# Matplotlib fonts MATPLOTLIB_FONT_DIR = os.path.join(os.path.dirname(plt.__file__), "mpl-data/fonts/ttf") # pull a batch from the datasets. This code is not very nice, it gets much better in eager mode (TODO) def dataset_to_numpy_util(training_dataset, validation_dataset, N): # get one batch from each: 10000 validation digits, N training digits batch_train_ds = training_dataset.apply(tf.data.experimental.unbatch()).batch(N) # eager execution: loop through datasets normally if tf.executing_eagerly(): for validation_digits, validation_labels in validation_dataset: validation_digits = validation_digits.numpy() validation_labels = validation_labels.numpy() break for training_digits, training_labels in batch_train_ds: training_digits = training_digits.numpy() training_labels = training_labels.numpy() break else: v_images, v_labels = validation_dataset.make_one_shot_iterator().get_next() t_images, t_labels = batch_train_ds.make_one_shot_iterator().get_next() # Run once, get one batch. 
Session.run returns numpy results with tf.Session() as ses: (validation_digits, validation_labels, training_digits, training_labels) = ses.run([v_images, v_labels, t_images, t_labels]) # these were one-hot encoded in the dataset validation_labels = np.argmax(validation_labels, axis=1) training_labels = np.argmax(training_labels, axis=1) return (training_digits, training_labels, validation_digits, validation_labels) # create digits from local fonts for testing def create_digits_from_local_fonts(n): font_labels = [] img = PIL.Image.new('LA', (28*n, 28), color = (0,255)) # format 'LA': black in channel 0, alpha in channel 1 font1 = PIL.ImageFont.truetype(os.path.join(MATPLOTLIB_FONT_DIR, 'DejaVuSansMono-Oblique.ttf'), 25) font2 = PIL.ImageFont.truetype(os.path.join(MATPLOTLIB_FONT_DIR, 'STIXGeneral.ttf'), 25) d = PIL.ImageDraw.Draw(img) for i in range(n): font_labels.append(i%10) d.text((7+i*28,0 if i<10 else -4), str(i%10), fill=(255,255), font=font1 if i<10 else font2) font_digits = np.array(img.getdata(), np.float32)[:,0] / 255.0 # black in channel 0, alpha in channel 1 (discarded) font_digits = np.reshape(np.stack(np.split(np.reshape(font_digits, [28, 28*n]), n, axis=1), axis=0), [n, 28*28]) return font_digits, font_labels # utility to display a row of digits with their predictions def display_digits(digits, predictions, labels, title, n): plt.figure(figsize=(13,3)) digits = np.reshape(digits, [n, 28, 28]) digits = np.swapaxes(digits, 0, 1) digits = np.reshape(digits, [28, 28*n]) plt.yticks([]) plt.xticks([28*x+14 for x in range(n)], predictions) for i,t in enumerate(plt.gca().xaxis.get_ticklabels()): if predictions[i] != labels[i]: t.set_color('red') # bad predictions in red plt.imshow(digits) plt.grid(None) plt.title(title) # utility to display multiple rows of digits, sorted by unrecognized/recognized status def display_top_unrecognized(digits, predictions, labels, n, lines): idx = np.argsort(predictions==labels) # sort order: unrecognized first for i in range(lines): display_digits(digits[idx][i*n:(i+1)*n], predictions[idx][i*n:(i+1)*n], labels[idx][i*n:(i+1)*n], "{} sample validation digits out of {} with bad predictions in red and sorted first".format(n*lines, len(digits)) if i==0 else "", n) # utility to display training and validation curves def display_training_curves(training, validation, title, subplot): if subplot%10==1: # set up the subplots on the first call plt.subplots(figsize=(10,10), facecolor='#F0F0F0') plt.tight_layout() ax = plt.subplot(subplot) ax.grid(linewidth=1, color='white') ax.plot(training) ax.plot(validation) ax.set_title('model '+ title) ax.set_ylabel(title) ax.set_xlabel('epoch') ax.legend(['train', 'valid.']) ``` *(you can double-click on collapsed cells to view the non-essential code inside)* ### Colab-only auth for this notebook and the TPU ``` IS_COLAB_BACKEND = 'COLAB_GPU' in os.environ # this is always set on Colab, the value is 0 or 1 depending on GPU presence if IS_COLAB_BACKEND: from google.colab import auth # Authenticates the Colab machine and also the TPU using your # credentials so that they can access your private GCS buckets. 
auth.authenticate_user() ``` ### TPU or GPU detection ``` # Detect hardware try: tpu = tf.distribute.cluster_resolver.TPUClusterResolver() # TPU detection except ValueError: tpu = None gpus = tf.config.experimental.list_logical_devices("GPU") # Select appropriate distribution strategy if tpu: tf.tpu.experimental.initialize_tpu_system(tpu) strategy = tf.distribute.experimental.TPUStrategy(tpu, steps_per_run=128) # Going back and forth between TPU and host is expensive. Better to run 128 batches on the TPU before reporting back. print('Running on TPU ', tpu.cluster_spec().as_dict()['worker']) elif len(gpus) > 1: strategy = tf.distribute.MirroredStrategy([gpu.name for gpu in gpus]) print('Running on multiple GPUs ', [gpu.name for gpu in gpus]) elif len(gpus) == 1: strategy = tf.distribute.get_strategy() # default strategy that works on CPU and single GPU print('Running on single GPU ', gpus[0].name) else: strategy = tf.distribute.get_strategy() # default strategy that works on CPU and single GPU print('Running on CPU') print("Number of accelerators: ", strategy.num_replicas_in_sync) ``` ### Parameters ``` BATCH_SIZE = 64 * strategy.num_replicas_in_sync # Gobal batch size. # The global batch size will be automatically sharded across all # replicas by the tf.data.Dataset API. A single TPU has 8 cores. # The best practice is to scale the batch size by the number of # replicas (cores). The learning rate should be increased as well. LEARNING_RATE = 0.01 LEARNING_RATE_EXP_DECAY = 0.6 if strategy.num_replicas_in_sync == 1 else 0.7 # Learning rate computed later as LEARNING_RATE * LEARNING_RATE_EXP_DECAY**epoch # 0.7 decay instead of 0.6 means a slower decay, i.e. a faster learnign rate. training_images_file = 'gs://mnist-public/train-images-idx3-ubyte' training_labels_file = 'gs://mnist-public/train-labels-idx1-ubyte' validation_images_file = 'gs://mnist-public/t10k-images-idx3-ubyte' validation_labels_file = 'gs://mnist-public/t10k-labels-idx1-ubyte' ``` ### tf.data.Dataset: parse files and prepare training and validation datasets Please read the [best practices for building](https://www.tensorflow.org/guide/performance/datasets) input pipelines with tf.data.Dataset ``` def read_label(tf_bytestring): label = tf.io.decode_raw(tf_bytestring, tf.uint8) label = tf.reshape(label, []) label = tf.one_hot(label, 10) return label def read_image(tf_bytestring): image = tf.io.decode_raw(tf_bytestring, tf.uint8) image = tf.cast(image, tf.float32)/255.0 image = tf.reshape(image, [28*28]) return image def load_dataset(image_file, label_file): imagedataset = tf.data.FixedLengthRecordDataset(image_file, 28*28, header_bytes=16) imagedataset = imagedataset.map(read_image, num_parallel_calls=16) labelsdataset = tf.data.FixedLengthRecordDataset(label_file, 1, header_bytes=8) labelsdataset = labelsdataset.map(read_label, num_parallel_calls=16) dataset = tf.data.Dataset.zip((imagedataset, labelsdataset)) return dataset def get_training_dataset(image_file, label_file, batch_size): dataset = load_dataset(image_file, label_file) dataset = dataset.cache() # this small dataset can be entirely cached in RAM dataset = dataset.shuffle(5000, reshuffle_each_iteration=True) dataset = dataset.repeat() # Mandatory for Keras for now dataset = dataset.batch(batch_size, drop_remainder=True) # drop_remainder is important on TPU, batch size must be fixed dataset = dataset.prefetch(-1) # fetch next batches while training on the current one (-1: autotune prefetch buffer size) return dataset def get_validation_dataset(image_file, 
label_file): dataset = load_dataset(image_file, label_file) dataset = dataset.cache() # this small dataset can be entirely cached in RAM dataset = dataset.batch(10000, drop_remainder=True) # 10000 items in eval dataset, all in one batch dataset = dataset.repeat() # Mandatory for Keras for now return dataset # instantiate the datasets training_dataset = get_training_dataset(training_images_file, training_labels_file, BATCH_SIZE) validation_dataset = get_validation_dataset(validation_images_file, validation_labels_file) ``` ### Let's have a look at the data ``` N = 24 (training_digits, training_labels, validation_digits, validation_labels) = dataset_to_numpy_util(training_dataset, validation_dataset, N) display_digits(training_digits, training_labels, training_labels, "training digits and their labels", N) display_digits(validation_digits[:N], validation_labels[:N], validation_labels[:N], "validation digits and their labels", N) font_digits, font_labels = create_digits_from_local_fonts(N) ``` ### Keras model: 3 convolutional layers, 2 dense layers If you are not sure what cross-entropy, dropout, softmax or batch-normalization mean, head here for a crash-course: [Tensorflow and deep learning without a PhD](https://github.com/GoogleCloudPlatform/tensorflow-without-a-phd/#featured-code-sample) ``` # This model trains to 99.4% accuracy in 10 epochs (with a batch size of 64) def make_model(): model = tf.keras.Sequential( [ tf.keras.layers.Reshape(input_shape=(28*28,), target_shape=(28, 28, 1), name="image"), tf.keras.layers.Conv2D(filters=12, kernel_size=3, padding='same', use_bias=False), # no bias necessary before batch norm tf.keras.layers.BatchNormalization(scale=False, center=True), # no batch norm scaling necessary before "relu" tf.keras.layers.Activation('relu'), # activation after batch norm tf.keras.layers.Conv2D(filters=24, kernel_size=6, padding='same', use_bias=False, strides=2), tf.keras.layers.BatchNormalization(scale=False, center=True), tf.keras.layers.Activation('relu'), tf.keras.layers.Conv2D(filters=32, kernel_size=6, padding='same', use_bias=False, strides=2), tf.keras.layers.BatchNormalization(scale=False, center=True), tf.keras.layers.Activation('relu'), tf.keras.layers.Flatten(), tf.keras.layers.Dense(200, use_bias=False), tf.keras.layers.BatchNormalization(scale=False, center=True), tf.keras.layers.Activation('relu'), tf.keras.layers.Dropout(0.4), # Dropout on dense layer only tf.keras.layers.Dense(10, activation='softmax') ]) model.compile(optimizer='adam', # learning rate will be set by LearningRateScheduler loss='categorical_crossentropy', metrics=['accuracy']) return model with strategy.scope(): model = make_model() # print model layers model.summary() # set up learning rate decay lr_decay = tf.keras.callbacks.LearningRateScheduler( lambda epoch: LEARNING_RATE * LEARNING_RATE_EXP_DECAY**epoch, verbose=True) ``` ### Train and validate the model ``` EPOCHS = 10 steps_per_epoch = 60000//BATCH_SIZE # 60,000 items in this dataset print("Steps per epoch: ", steps_per_epoch) # Little wrinkle: in the present version of Tensorfow (1.14), switching a TPU # between training and evaluation is slow (approx. 10 sec). For small models, # it is recommeneded to run a single eval at the end. 
history = model.fit(training_dataset, steps_per_epoch=steps_per_epoch, epochs=EPOCHS, callbacks=[lr_decay]) final_stats = model.evaluate(validation_dataset, steps=1) print("Validation accuracy: ", final_stats[1]) ``` ### Visualize predictions ``` # recognize digits from local fonts probabilities = model.predict(font_digits, steps=1) predicted_labels = np.argmax(probabilities, axis=1) display_digits(font_digits, predicted_labels, font_labels, "predictions from local fonts (bad predictions in red)", N) # recognize validation digits probabilities = model.predict(validation_digits, steps=1) predicted_labels = np.argmax(probabilities, axis=1) display_top_unrecognized(validation_digits, predicted_labels, validation_labels, N, 7) ``` ## Deploy the trained model to AI Platform model serving Push your trained model to production on AI Platform for a serverless, autoscaled, REST API experience. You will need a GCS (Google Cloud Storage) bucket and a GCP project for this. Models deployed on AI Platform autoscale to zero if not used. There will be no AI Platform charges after you are done testing. Google Cloud Storage incurs charges. Empty the bucket after deployment if you want to avoid these. Once the model is deployed, the bucket is not useful anymore. ### Configuration ``` PROJECT = "" #@param {type:"string"} BUCKET = "gs://" #@param {type:"string", default:"jddj"} NEW_MODEL = True #@param {type:"boolean"} MODEL_NAME = "mnist" #@param {type:"string"} MODEL_VERSION = "v1" #@param {type:"string"} assert PROJECT, 'For this part, you need a GCP project. Head to http://console.cloud.google.com/ and create one.' assert re.search(r'gs://.+', BUCKET), 'For this part, you need a GCS bucket. Head to http://console.cloud.google.com/storage and create one.' ``` ### Export the model for serving from AI Platform ``` # Wrap the model so that we can add a serving function class ExportModel(tf.keras.Model): def __init__(self, model): super().__init__(self) self.model = model # The serving function performig data pre- and post-processing. # Pre-processing: images are received in uint8 format converted # to float32 before being sent to through the model. # Post-processing: the Keras model outputs digit probabilities. We want # the detected digits. An additional tf.argmax is needed. # @tf.function turns the code in this function into a Tensorflow graph that # can be exported. This way, the model itself, as well as its pre- and post- # processing steps are exported in the SavedModel and deployed in a single step. @tf.function(input_signature=[tf.TensorSpec([None, 28*28], dtype=tf.uint8)]) def my_serve(self, images): images = tf.cast(images, tf.float32)/255 # pre-processing probabilities = self.model(images) # prediction from model classes = tf.argmax(probabilities, axis=-1) # post-processing return {'digits': classes} # Must copy the model from TPU to CPU to be able to compose them. 
restored_model = make_model() restored_model.set_weights(model.get_weights()) # this copies the weights from TPU, does nothing on GPU # create the ExportModel and export it to the Tensorflow standard SavedModel format serving_model = ExportModel(restored_model) export_path = os.path.join(BUCKET, 'keras_export', str(time.time())) tf.keras.backend.set_learning_phase(0) # inference only tf.saved_model.save(serving_model, export_path, signatures={'serving_default': serving_model.my_serve}) print("Model exported to: ", export_path) # Note: in Tensorflow 2.0, it will also be possible to # export to the SavedModel format using model.save(): # serving_model.save(export_path, save_format='tf') # saved_model_cli: a useful too for troubleshooting SavedModels (the tool is part of the Tensorflow installation) !saved_model_cli show --dir {export_path} !saved_model_cli show --dir {export_path} --tag_set serve !saved_model_cli show --dir {export_path} --tag_set serve --signature_def serving_default # A note on naming: # The "serve" tag set (i.e. serving functionality) is the only one exported by tf.saved_model.save # All the other names are defined by the user in the fllowing lines of code: # def myserve(self, images): # ****** # return {'digits': classes} # ****** # tf.saved_model.save(..., signatures={'serving_default': serving_model.myserve}) # *************** ``` ### Deploy the model This uses the command-line interface. You can do the same thing through the AI Platform UI at https://console.cloud.google.com/mlengine/models ``` # Create the model if NEW_MODEL: !gcloud ai-platform models create {MODEL_NAME} --project={PROJECT} --regions=us-central1 # Create a version of this model (you can add --async at the end of the line to make this call non blocking) # Additional config flags are available: https://cloud.google.com/ml-engine/reference/rest/v1/projects.models.versions # You can also deploy a model that is stored locally by providing a --staging-bucket=... parameter !echo "Deployment takes a couple of minutes. You can watch your deployment here: https://console.cloud.google.com/mlengine/models/{MODEL_NAME}" !gcloud ai-platform versions create {MODEL_VERSION} --model={MODEL_NAME} --origin={export_path} --project={PROJECT} --runtime-version=1.14 --python-version=3.5 ``` ### Test the deployed model Your model is now available as a REST API. Let us try to call it. The cells below use the "gcloud ml-engine" command line tool but any tool that can send a JSON payload to a REST endpoint will work. ``` # prepare digits to send to online prediction endpoint digits_float32 = np.concatenate((font_digits, validation_digits[:100-N])) # pixel values in [0.0, 1.0] float range digits_uint8 = np.round(digits_float32*255).astype(np.uint8) # pixel values in [0, 255] int range labels = np.concatenate((font_labels, validation_labels[:100-N])) with open("digits.json", "w") as f: for digit in digits_uint8: # the format for AI Platform online predictions is: one JSON object per line data = json.dumps({"images": digit.tolist()}) # "images" because that was the name you gave this parametr in the serving funtion my_serve f.write(data+'\n') # Request online predictions from deployed model (REST API) using the "gcloud ml-engine" command line. 
predictions = !gcloud ai-platform predict --model={MODEL_NAME} --json-instances digits.json --project={PROJECT} --version {MODEL_VERSION} print(predictions) predictions = np.stack([json.loads(p) for p in predictions[1:]]) # first elemet is the name of the output layer: drop it, parse the rest display_top_unrecognized(digits_float32, predictions, labels, N, 100//N) ``` ## What's next * Learn about [Cloud TPUs](https://cloud.google.com/tpu/docs) that Google designed and optimized specifically to speed up and scale up ML workloads for training and inference and to enable ML engineers and researchers to iterate more quickly. * Explore the range of [Cloud TPU tutorials and Colabs](https://cloud.google.com/tpu/docs/tutorials) to find other examples that can be used when implementing your ML project. On Google Cloud Platform, in addition to GPUs and TPUs available on pre-configured [deep learning VMs](https://cloud.google.com/deep-learning-vm/), you will find [AutoML](https://cloud.google.com/automl/)*(beta)* for training custom models without writing code and [Cloud ML Engine](https://cloud.google.com/ml-engine/docs/) which will allows you to run parallel trainings and hyperparameter tuning of your custom models on powerful distributed hardware. ## License --- author: Martin Gorner<br> twitter: @martin_gorner --- Copyright 2019 Google LLC Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --- This is not an official Google product but sample code provided for an educational purpose
github_jupyter
# Chapter 6: Building a Small Language

```
# !pip install pegtree
import pegtree as pg
from pegtree.colab import peg, pegtree, example

%%peg
Program = {        // start non-terminal symbol
    Expression* #Program
} EOF
EOF = !.           // end of file
Expression =
    / FuncDecl     // function definition
    / VarDecl      // variable definition
    / IfExpr       // if expression
    / Binary       // binary operation
```

## 6.2.5 Generating the Parser

```
import pegtree as pg

peg = pg.grammar('chibi.pegtree')
parser = pg.generate(peg)
```

## Transcompiler

```
class Visitor(object):
    def visit(self, tree):
        tag = tree.getTag()
        name = f'accept{tag}'
        if hasattr(self, name):  # check whether an accept method exists
            # get the method from its name
            acceptMethod = getattr(self, name)
            return acceptMethod(tree)
        print(f'TODO: accept{tag}')
        return None


class Compiler(Visitor):  # inherit from the Visitor class
    def __init__(self):
        self.buffers = []
        peg = pg.grammar('chibi.pegtree')
        self.parser = pg.generate(peg)

    def compile(self, source):
        tree = self.parser(source)    # convert to a syntax tree
        self.buffers = []             # initialize the buffer
        self.visit(tree)
        return ''.join(self.buffers)  # join the buffer into one source string

    def push(self, s):  # append a code fragment to the buffer
        self.buffers.append(s)


c = Compiler()
code = c.compile('1+2*3')
print(code)
```

### Code Conversion for Each Node

```
BUILTIN_FUNCTIONS = {
    'print': 'console.log'
}


class Compiler(Visitor):  # inherit from the Visitor class
    def __init__(self):
        self.buffers = []
        peg = pg.grammar('chibi.pegtree')
        self.parser = pg.generate(peg)

    def compile(self, source):
        tree = self.parser(source)    # convert to a syntax tree
        self.buffers = []             # initialize the buffer
        self.visit(tree)
        return ''.join(self.buffers)  # join the buffer into one source string

    def push(self, s):  # append a code fragment to the buffer
        self.buffers.append(s)

    def acceptProgram(self, tree):
        for child in tree:     # list of child nodes
            self.visit(child)  # convert each child node
            self.push('\n')    # append a newline to the buffer

    def acceptInt(self, tree):
        v = tree.getToken()
        self.push(v)

    def acceptName(self, tree):
        name = tree.getToken()
        self.push(name)

    def acceptAdd(self, tree):
        self.push('(')
        self.visit(tree[0])
        self.push('+')
        self.visit(tree[1])
        self.push(')')

    def acceptEq(self, tree):
        self.push('(')
        self.visit(tree[0])
        self.push('===')
        self.visit(tree[1])
        self.push(') ? 1 : 0')

    def acceptFuncApp(self, tree):
        f = tree.getToken(0)
        self.push(BUILTIN_FUNCTIONS.get(f, f))
        self.push('(')
        self.visit(tree[1])
        self.push(')')

    def accepterr(self, tree):
        print(repr(tree))


c = Compiler()
code = c.compile('''
f(x) = x+1
print(x)
''')
print(code)
```

## Interpreter

```
class Interpreter(Visitor):
    def __init__(self):
        self.env = {}  # prepare an empty environment
        peg = pg.grammar('chibi.pegtree')
        self.parser = pg.generate(peg)

    def eval(self, source):
        tree = self.parser(source)
        return self.visit(tree)


chibi = Interpreter()
source = input('>>> ')
while source != '':
    result = chibi.eval(source)
    print(result)
    source = input('>>> ')


class Interpreter(Visitor):
    def __init__(self):
        self.env = {}  # prepare an empty environment
        peg = pg.grammar('chibi.pegtree')
        self.parser = pg.generate(peg)

    def eval(self, source):
        tree = self.parser(source)
        return self.visit(tree)

    def acceptProgram(self, tree):
        result = None
        for child in tree:
            result = self.visit(child)
        return result

    def acceptInt(self, tree):
        token = tree.getToken()
        return int(token)

    def acceptAdd(self, tree):
        v0 = self.visit(tree[0])
        v1 = self.visit(tree[1])
        return v0 + v1

    def acceptEq(self, tree):
        v0 = self.visit(tree[0])
        v1 = self.visit(tree[1])
        return 1 if v0 == v1 else 0

    def acceptIfExpr(self, tree):
        v0 = self.visit(tree[0])
        if v0 != 0:
            return self.visit(tree[1])
        else:
            return self.visit(tree[2])

    def acceptVarDecl(self, tree):
        v = self.visit(tree[1])
        x = str(tree[0])
        self.env[x] = v
        return v

    def acceptName(self, t):
        x = t.getToken()
        if x in self.env:
            return self.env[x]
        else:
            raise NameError(x)

    def acceptFuncDecl(self, tree):
        f = tree.getToken(0)
        x = tree.getToken(1)
        e = tree.get(2)
        self.env[f] = (x, e)
        return self.env[f]

    def acceptFuncApp(self, tree):
        f = tree.getToken(0)     # get the function name
        v = self.visit(tree[1])  # evaluate the argument first
        x, e = self.env[f]       # look up the parameter name and body expression
        self.env[x] = v          # bind x to v in the environment
        v = self.visit(e)        # evaluate the function body
        return v


source = '''
fib(n) = if n < 3 then 1 else fib(n-1)+fib(n-2)
fib(4)
'''
c = Interpreter()
print(c.eval(source))
```
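The visitors above deliberately cover only a few node tags (Int, Name, Add, Eq, FuncApp, ...), so an input such as `1+2*3` hits the `TODO: accept...` fallback for any tag without a handler. As a minimal sketch of how to extend them, and assuming the `chibi.pegtree` grammar (not shown here) tags products as `#Mul` with two operands, mirroring `#Add`, multiplication support could look like this; the subclass names are only for illustration.

```
# Sketch only: assumes the grammar produces #Mul nodes with two children, like #Add.

class Compiler2(Compiler):        # illustrative subclass of the Compiler above
    def acceptMul(self, tree):
        self.push('(')
        self.visit(tree[0])
        self.push('*')
        self.visit(tree[1])
        self.push(')')


class Interpreter2(Interpreter):  # illustrative subclass of the Interpreter above
    def acceptMul(self, tree):
        return self.visit(tree[0]) * self.visit(tree[1])


print(Compiler2().compile('1+2*3'))   # e.g. (1+(2*3)) if '*' binds tighter than '+'
print(Interpreter2().eval('1+2*3'))   # e.g. 7
```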
github_jupyter
# SQL in Python ### Packages - [Pandas.read_sql](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_sql.html) - [SQLite3](https://docs.python.org/3.6/library/sqlite3.html) ### Tutorials - https://www.tutorialspoint.com/sqlite/sqlite_python.htm - https://www.pythoncentral.io/introduction-to-sqlite-in-python/ - https://medium.com/swlh/reproducing-sql-queries-in-python-codes-35d90f716b1a - http://www.sqlitetutorial.net/sqlite-python/ ### Create a SQL database connection to a sample SQL database, and read records from that database Structured Query Language (SQL) is an [ANSI specified](https://docs.oracle.com/database/121/SQLRF/ap_standard_sql001.htm#SQLRF55514), powerful format for interacting with large databases efficiently. **SQLite** is a lightweight and somewhat restricted version of SQL. ``` # Imports import sqlite3 as sq3 import pandas.io.sql as pds import pandas as pd ``` ### Database connections Our first step will be to create a connection to our SQL database. A few common SQL databases used with Python include: - Microsoft SQL Server - Postgres - MySQL - AWS Redshift - AWS Aurora - Oracle DB - Terradata - Db2 Family - Many, many others Each of these databases will require a slightly different setup, and may require credentials (username & password), tokens, or other access requirements. We'll be using `sqlite3` to connect to our database, but other connection packages include: - [`SQLAlchemy`](https://www.sqlalchemy.org/) (most common) - [`psycopg2`](http://initd.org/psycopg/) - [`MySQLdb`](http://mysql-python.sourceforge.net/MySQLdb.html) ## Classic Rock Database ``` # Initialize path to SQLite database path = 'databases/classic_rock.db' con = sq3.Connection(path) # We now have a live connection to our SQL database ``` ### Reading data Now that we've got a connection to our database, we can perform queries, and load their results in as Pandas DataFrames ``` # Write the query query = ''' SELECT * FROM rock_songs; ''' # Execute the query observations = pds.read_sql(query, con) observations.head() # We can also run any supported SQL query # Write the query query = ''' SELECT Artist, Release_Year, COUNT(*) AS num_songs, AVG(PlayCount) AS avg_plays FROM rock_songs GROUP BY Artist, Release_Year ORDER BY num_songs desc; ''' # Execute the query observations = pds.read_sql(query, con) observations.head() ``` ### Common parameters There are a number of common paramters that can be used to read in SQL data with formatting: - **coerce_float**: Attempt to force numbers into floats - **parse_dates**: List of columns to parse as dates - **chunksize**: Number of rows to include in each chunk ``` query=''' SELECT Artist, Release_Year, COUNT(*) AS num_songs, AVG(PlayCount) AS avg_plays FROM rock_songs GROUP BY Artist, Release_Year ORDER BY num_songs desc; ''' # Execute the query observations_generator = pds.read_sql(query, con, coerce_float=True, # Doesn't effect this dataset, because floats were correctly parsed parse_dates=['Release_Year'], # Parse `Release_Year` as a date chunksize=5 # Allows for streaming results as a series of shorter tables ) for index, observations in enumerate(observations_generator): if index < 5: print(f'Observations index: {index}'.format(index)) display(observations) ``` ### Baseball Database Example ``` # Create a variable, `path`, containing the path to the `baseball.db` contained in `resources/` path = 'databases/baseball.db' # Create a connection, `con`, that is connected to database at `path` con = sq3.Connection(path) # Create a variable, 
`all_tables`, which reads in all data from the table `sqlite_master` all_tables = pd.read_sql('SELECT * FROM sqlite_master', con) all_tables # Displaying all tables in database pd.read_sql("select name from sqlite_master where type = 'table';", con) # Create a variable, `query`, containing a SQL query which reads in all data from the `allstarfull` table query = """ SELECT * FROM allstarfull ; """ allstar_observations = pd.read_sql(query, con) allstar_observations.head() best_query = """ SELECT playerID, sum(GP) AS num_games_played, AVG(startingPos) AS avg_starting_position FROM allstarfull GROUP BY playerID ORDER BY num_games_played DESC, avg_starting_position ASC LIMIT 3 """ best = pd.read_sql(best_query, con) best.head() ``` ### Artists Database Example ``` conn = sq3.connect("databases/artists.sqlite") # Displaying all tables in database pd.read_sql("select name from sqlite_master where type = 'table';", conn) query="SELECT * FROM artists" music_reviews = pd.read_sql_query(query, conn) music_reviews.tail() query1="SELECT * FROM artists WHERE artist='kleenex'" result=pd.read_sql_query(query1, conn) result # conn.commit() conn.close() ``` |![head.png](imgs/head.png)|![head.png](imgs/head.png)| |---|---| |![reading_nosql.png](imgs/reading_nosql.png)|![reading_online.png](imgs/reading_online.png)|
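The examples above use `sqlite3` directly. SQLAlchemy is listed earlier as the most common connection package but is not demonstrated; as a rough sketch (assuming SQLAlchemy is installed and reusing the `databases/classic_rock.db` file from the Classic Rock example), an equivalent query can be run through an SQLAlchemy engine, and switching to another backend from the list above is then mostly a matter of changing the connection URL.

```
# Sketch only: assumes SQLAlchemy is installed; reuses the classic_rock.db path from above.
import pandas as pd
from sqlalchemy import create_engine

engine = create_engine('sqlite:///databases/classic_rock.db')

query = '''
SELECT Artist, COUNT(*) AS num_songs
FROM rock_songs
GROUP BY Artist
ORDER BY num_songs DESC;
'''

top_artists = pd.read_sql(query, engine)  # pandas accepts an engine as well as a raw connection
top_artists.head()
```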
github_jupyter
# Data Space Report <img src="images/polito_logo.png" alt="Polito Logo" style="width: 200px;"/> ## Pittsburgh Bridges Data Set <img src="images/andy_warhol_bridge.jpg" alt="Andy Warhol Bridge" style="width: 200px;"/> Andy Warhol Bridge - Pittsburgh. Report created by Student Francesco Maria Chiarlo s253666, for A.A 2019/2020. **Abstract**:The aim of this report is to evaluate the effectiveness of distinct, different statistical learning approaches, in particular focusing on their characteristics as well as on their advantages and backwards when applied onto a relatively small dataset as the one employed within this report, that is Pittsburgh Bridgesdataset. **Key words**:Statistical Learning, Machine Learning, Bridge Design. ### Imports Section <a class="anchor" id="imports-section"></a> ``` from utils.all_imports import *; %matplotlib inline # Some global script variables # --------------------------------------------------------------------------- # dataset_path, dataset_name, column_names, TARGET_COL = \ get_dataset_location() # Info Data to be fetched estimators_list, estimators_names = get_estimators() # Estimator to be trained dataset, feature_vs_values = load_brdiges_dataset(dataset_path, dataset_name) # variables used for pass through arrays used to store results pos_gs = 0; pos_cv = 0 # Array used for storing graphs plots_names = list(map(lambda xi: f"{xi}_learning_curve.png", estimators_names)) pca_kernels_list = ['linear', 'poly', 'rbf', 'cosine', 'sigmoid'] cv_list = list(range(10, 1, -1)) columns_2_avoid = ['ERECTED', 'LENGTH', 'LOCATION'] # Make distinction between Target Variable and Predictors # --------------------------------------------------------------------------- # rescaledX, y, columns = prepare_data_for_train(dataset, target_col=TARGET_COL) ``` ## Pricipal Component Analysis ``` show_table_pc_analysis(X=rescaledX) ``` #### Major Pros & Cons of PCA ## Learning Models <a class="anchor" id="learning-models"></a> ``` # Parameters to be tested for Cross-Validation Approach # ----------------------------------------------------- param_grids = [] parmas_logreg = { 'penalty': ('l1', 'l2', 'elastic', None), 'solver': ('newton-cg', 'lbfgs', 'liblinear', 'sag', 'saga'), 'fit_intercept': (True, False), 'tol': (1e-4, 1e-3, 1e-2), 'class_weight': (None, 'balanced'), 'C': (10.0, 1.0, .1, .01, .001, .0001), # 'random_state': (0,), }; param_grids.append(parmas_logreg) parmas_knn_clf = { 'n_neighbors': (2,3,4,5,6,7,8,9,10), 'weights': ('uniform', 'distance'), 'metric': ('euclidean', 'minkowski', 'manhattan'), 'leaf_size': (5, 10, 15, 30), 'algorithm': ('ball_tree', 'kd_tree', 'brute'), }; param_grids.append(parmas_knn_clf) params_sgd_clf = { 'loss': ('log', 'modified_huber'), # ('hinge', 'log', 'modified_huber', 'squared_hinge', 'perceptron') 'penalty': ('l2', 'l1', 'elasticnet'), 'alpha': (1e-1, 1e-2, 1e-3, 1e-4), 'max_iter': (50, 100, 150, 200, 500, 1000, 1500, 2000, 2500), 'class_weight': (None, 'balanced'), 'learning_rate': ('optimal',), 'tol': (None, 1e-2, 1e-4, 1e-5, 1e-6), # 'random_state': (0,), }; param_grids.append(params_sgd_clf) kernel_type = 'svm-rbf-kernel' params_svm_clf = { # 'gamma': (1e-7, 1e-4, 1e-3, 1e-2, 0.1, 1.0, 10, 1e+2, 1e+3, 1e+5, 1e+7), 'gamma': (1e-5, 1e-3, 1e-2, 0.1, 1.0, 10, 1e+2, 1e+3, 1e+5), 'max_iter':(1e+2, 1e+3, 2 * 1e+3, 5 * 1e+3, 1e+4, 1.5 * 1e+3), 'degree': (1,2,4,8), 'coef0': (.001, .01, .1, 0.0, 1.0, 10.0), 'shrinking': (True, False), 'kernel': ['linear', 'poly', 'rbf', 'sigmoid',], 'class_weight': (None, 'balanced'), 'C': (1e-4, 1e-3, 1e-2, 
0.1, 1.0, 10, 1e+2, 1e+3), 'probability': (True,), }; param_grids.append(params_svm_clf) parmas_tree = { 'splitter': ('random', 'best'), 'criterion':('gini', 'entropy'), 'max_features': (None, 'sqrt', 'log2'), 'max_depth': (None, 3, 5, 7, 10,), 'splitter': ('best', 'random',), 'class_weight': (None, 'balanced'), }; param_grids.append(parmas_tree) parmas_random_forest = { 'n_estimators': (3, 5, 7, 10, 30, 50, 70, 100, 150, 200), 'criterion':('gini', 'entropy'), 'bootstrap': (True, False), 'min_samples_leaf': (1,2,3,4,5), 'max_features': (None, 'sqrt', 'log2'), 'max_depth': (None, 3, 5, 7, 10,), 'class_weight': (None, 'balanced', 'balanced_subsample'), }; param_grids.append(parmas_random_forest) # Some variables to perform different tasks # ----------------------------------------------------- N_CV, N_KERNEL, N_GS = 9, 5, 6; nrows = N_KERNEL // 2 if N_KERNEL % 2 == 0 else N_KERNEL // 2 + 1; ncols = 2; grid_size = [nrows, ncols] ``` | Learning Technique | Type of Learner | Type of Learning | Classification | Regression | Clustering | | --- | --- | --- | --- | --- | --- | | *Stochastic Gradient Descent (SGD)* | *Linear Model* | *Supervised Learning*| *Supported* | *Supported* | *Not-Supported*| ``` n_components=9 learning_curves_by_kernels( # learning_curves_by_components( estimators_list[:], estimators_names[:], rescaledX, y, train_sizes=np.linspace(.1, 1.0, 10), n_components=9, pca_kernels_list=pca_kernels_list[0], verbose=0, by_pairs=True, savefigs=True, scoring='accuracy', figs_dest=os.path.join('figures', 'learning_curve', f"Pcs_{n_components}"), ignore_func=True, # figsize=(20,5) ) %%javascript IPython.OutputArea.prototype._should_scroll = function(lines) { return false; } plot_dest = os.path.join("figures", "n_comp_9_analysis", "grid_search") X = rescaledX; pos = 3 df_gs, df_auc_gs, df_pvalue = grid_search_all_by_n_components( estimators_list=estimators_list[pos], \ param_grids=param_grids[pos - 1], estimators_names=estimators_names[pos], \ X=X, y=y, n_components=9, random_state=0, show_plots=False, show_errors=False, verbose=1, plot_dest=plot_dest, debug_var=False) df_9, df_9_auc = df_gs, df_auc_gs ``` Looking at the results obtained running *Sgd Classifier* against our dataset splitted into training set and test set and adopting a different kernel trick applied to *kernel-Pca* unsupervised preprocessing method we can state generally speaking that looking at the weighted values of *Recall, Precision, and F1-Scores* we obtained good performance and and except for one trial where we got lower and worst results, when *Polynomial and Rbf Tricks* is selected, in the remaning cases have gotten remarkable results. More precisely we can say what follows: - speaking about __Linear kernel Pca based Sgd Classifier__, when adoping the default threshold of *.5* for classification purposes we have a model that reaches an accuracy of *65%* at test time against an accuracy of *92%* at train step, while the Auc score reaches a value of *79%* with a Roc Curve that shows a behavior for which the model increases its *TPR* without affecting the *FPR* score, however at a given point the Roc Curve trend turns so that the two cited scores begin to increase linearly and with a slope lower than that of Random Classifier so that FPR increases faster. 
The model is very precise when predicting class 1 instances but it has a recall of just *54%* so misclassified more or less half of samples from class 1 and this fact influenced instead the precision of class 0 that is a bit low, just *32%*, while class 0 recall is very high. Since the test accuracy score loses nearly 30 percent points we can assume that sucha model quite overfit to train data, we are not really encouraged to adopt it except we decied to exploit it for including it in an ensemble classifier, more boosting like than bagging one. - observing __Polynomial kernel Pca based Sgd Estimator__, we can notice that such a model exploiting a default threshold of *.5* reaches an accuracy of *76%* at test time against an accuracy of *92%* at train step, while the Auc score reaches a value of *73%*. It represents the best result obtained running th SGD based Training Algorithm upon our input dataset, in particular it obtained high precision and high recall for class 1, in other words such a model is able to recognize and correctly classify most of the data examples whose true label is indeed class 1. However, even if the model has high recall related to class 0, since the dataset is unbalanced we cannot say the same things for precision score about the class 0. So the model is somewhat uncertain when predicting class 0 as label value for new observations. - review __Rbf kernel Pca based Sgd Classifier__, we can notice that such a model exploiting a default threshold of *.5* reaches an accuracy of *82%* at test time against an accuracy of *92%* at train step, while the Auc score reaches a value of *57%*. In particular such a trial along with the *PCosine kernel Pca based Sgd Classifier* are the two attempts that lead to worts results, since the model overfit against the data employed at training time, but also the model gained weights that tend to predict every thing as class 1 instance. So, the resulting scores tell us that the model is highly precise and obtained high recall related to class 1, convercely has very low performance for precision and recall referred to class 0. Since such a model is performing just a little bit better than random classifier, can be largely adopted along other similar models for building voting classifier, following boosting like classifier policy and behavior. - looking at __Cosine kernel Pca based Sgd Classifier__, we can notice that such a model exploiting a default threshold of *.5* reaches an accuracy of *32%* at test time against an accuracy of *95%* at train step, while the Auc score reaches a value of just *59%*. Here the fine tuned model obtained from grid-search approach tells us that we are able to classify with high precision a few data examples from class 1, and even if we correctly classify all instances from class 0, we also wrongly predict class labels for most of instances,. whose true label is class 1. This means that the model is highly uncertain when predicting class 0 as the output target label. Moreover, the model's ROC Curve performs slighltly better than the random classifier, and we end up saying that such a model has gained weights and hyper-params that tend to predict the unknown instances as belonging to class 0 most of the time. We cannot neither say that switching the class labels between the two classes will allow us to obtain a better result since the roc curve trend is just a little bit better than the random classifier. 
- finally, referring to __Sigmoid kernel Pca based Sgd Model__, we can notice that such a model exploiting a default threshold of *.5* reaches an accuracy of *44%* at test time against an accuracy of *92%* at train step, while the Auc score reaches a value of *66%*. This model behaves more or less as the model obtained from the first trial performed for Sgd-based classifier, so as the first model is slightly worst than the best model found here when adopting as classifier Sgd technique, that is the *Cosine kernel Pca based Sgd Classifier*. __Significance Analysis__: finally, when looking at the different graphics related to the test which aims at investigating the diagnostic power of our different models we have fine tuned for *SGD Classifier*, picking the best one for such a test we can notice that beacues of the *signficance level* $\alpha$ set equal to *0.05 that is 5% of chance to reject the Null-Hypothesis $H_{0}$*, we have obtained following results. Adopting the SGD statistical learning technique for classification fine tuned as above with hyper-params selectd also depending on the kind of *kernel-trick adopted for kernel-Pca unsupervised technique*, we can calim that only two out of five trials lead to a *p-vlaue* worst than *selected significance level equal to 5%*, which are *Linear- and Cosine-kernel Pca based Sgd Classifier*, so rejecting the *Null-Hypotesis* for those two cases will results into a *Type I Error*. While the remaining three cases, that are *Poly-, Rbf- and Sigmoid-kernel Pca based Sgd Classifier* have obtained a p-value over the range $[.9, 3]$ *in percet points*, so we are satisfyed for the results obtained in terms of significance scores, however, only *Poly-, and Rbf-kernel Pca based Sgd Classifier* really matter or are worth models since they do not overfit too much and do not go worstly as *Sigmoid-kernel Pca based Sgd Classifier* at test time. #### Table Fine Tuned Hyper-Params(SGD Classifier) ``` # create_widget_list_df([df_gs, df_auc_gs]) #print(df_gs); print(df_auc_gs) show_table_summary_grid_search(df_gs, df_auc_gs, df_pvalue) ``` Looking at the table dispalyed just above that shows the details about the selected values for hyper-parameters specified during grid search, in the different situations accordingly to the fixed kernel-trick for kernel Pca unsupervised method we can state that, referring to the first two columns of *Train and Test Accuracy*, we can recognize which trials lead to more overfit results such as for *Rbfd Trick* or less overfit solution such as in the case of *Linear, Polynomial, Cosine, and Sigmoid Tricks*. Speaking about the hyper-parameters, we can say what follows: - looking at __alpha hyper-parameter__, that is constant that multiplies the regularization term. The higher the value, the stronger the regularization. Also used to compute the learning rate when set to *learning_rate* is set to *'optimal'*, as was here, we can notice that the final choice through the different trials was more or less tha same, meanning that the adopted kernel trick for performing kernel-Pca does not affected appreciably such a hyper-param, which three cases out of five was set to *0.1*, and the remaining case adopted *0.0001*, *0.001* for respectively Cosine and Sigmoid based *kernel-Pca*. 
This also remind us that while training the classifiers was not necessary to force a high regularization contribute for reducing the overfit as well as the learning process, even if we know that *Rbf kernel Pca based Sgd Classifier* overfits mostly against train data, and gained weights that encourages predicting all samples as belonging to class 1. - reviewing __class_weight hyper-param__, what we can state about such a parameter is that it represents weights associated with classes. If not given, all classes are supposed to have weight one. The *“balanced” mode* uses the values of y to automatically adjust weights inversely proportional to class frequencies in the input data as __n_samples / (n_classes * np.bincount(y))__. In particular we can notice that three out five models that were fine tuned accepted or selected *balanced weights*, which are *Linear-, Sigomoid-, Cosine-kernel Pca based Sgd Classifier*, while the remaining obtain better, when setting uniform weights which are models *Polynomial-, Rbf-kernel Pca based Sgd Classifier*. So the choiche of the right *kernel-trick* affected the subsequent selection at fine tuning time of the *class_weight hyper-param*. What we can further notice is that *Polynomial-, Rbf-kernel Pca based Sgd Classifier* more or less adopted same kind of values for hyper-params, as an instance for penalty hyper-param, however Polynomial model got worst performance in terms of accuracy but considering the other metrics simultaneously we can understand that the Poly model overfits less than Rbf one and so get better performance in general. - speaking of __learning_rate hyper-param__, since we force this as the unique available choice it was just report for completeness. - interesting it is the discussion about __loss parameter__, if fact we know that the possible options are *‘hinge’, ‘log’, ‘modified_huber’, ‘squared_hinge’, ‘perceptron’*, where the *‘log’ loss* gives logistic regression, a probabilistic classifier. *‘modified_huber’* is another smooth loss that brings tolerance to outliers as well as probability estimates. *‘squared_hinge’* is like hinge but is quadratically penalized. ‘perceptron’ is the linear loss used by the perceptron algorithm. Here, speaking about loss parameter we can clearly understand that the choice of a particular kernel trick does not affect the following choice of the loss function to be optimized, in fact uniformly all the models adopted or tend to prefer *modified_huber* loss function, allowing the models to fit to the data taking into account the fact that such a loss function is less sensitive to outliers, recalling inn fact that the Huber loss function is used in robust statistics, M-estimation and additive modelling. This loss is so cllaed beacues it derives from the plain version normally exploited for regression problems. - also when referring to __max iteration parameter__, we can easily say that thte models evenly adopted somewhat small number of iteration before stopping the learning procedure, this might be also becaues we work with a small dataset and so a set of data points that is small tend to overfit quickly and this migth be the reason for which in order to avoid too much overfit the training procedure performed employing grid-search technique for fine-tuning tend to prefer tiny number of iterations set for training the model. - __penalty parameter__, we recall here that it represents regularization term to be used. More precisely, defaults to *‘l2’* which is the standard regularizer for linear SVM models. 
*‘l1’* and *‘elasticnet’* might bring *sparsity* to the model (feature selection) not achievable with *‘l2’*. Also for this hyper-param the choice of *kernel trick* used for *kernel-Pca* affected the subsequent selection of the penalty contribution that regularizes the learning task, as it did for the *class_weight hyper-param*. Here three out of five models, the *Linear-, Sigmoid- and Cosine-kernel Pca based Sgd Classifiers*, adopted the *l1-norm* as regularization term, so their weights tend to be more sparse, while the remaining *Polynomial- and Rbf-kernel Pca based Sgd Classifiers* adopted the *l2-norm*. For the trials we have done, the models with the *l1 regularization term* seem to get the worst performance; more precisely, the *Sigmoid- and Cosine-kernel Pca based Sgd Classifiers* were even worse than a random classifier, while the *Linear-kernel Pca based Sgd Classifier* was only slightly worse than the Polynomial one, so it does not overfit too much and could still be exploited by an ensemble method that follows a Boosting policy.

If we imagine building an *Ensemble Classifier* from the family of *Averaging Methods*, whose underlying principle is to build several separate classifiers and then average their predictions in a regression context or adopt a majority-vote strategy in a classification context, we can claim that among the proposed *Sgd classifiers* we could employ the classifiers found from all the trials except the *Rbf, Cosine and Sigmoid kernel Pca based Sgd Classifiers*: the first overfits the training data heavily and most of the time correctly predicts only samples from class 1 while misclassifying instances from class 0, and the other two show the opposite behaviour. This is also because of their performance metrics, and because ensemble methods such as the Bagging Classifier usually work well with an ensemble of independent, fine-tuned classifiers, unlike Boosting methods, which are instead based on weak learners.

```
# show_histogram_first_sample(Xtrain_transformed, ytrain_, estimators_)
```

### Improvements and Conclusions <a class="anchor" id="Improvements-and-conclusions"></a>

Extensions we can think of to further improve the analyses on such a relatively tiny dataset include, for the preprocessing phase:
- Selecting different *Feature Extraction and Dimensionality Reduction Techniques* other than Pca or kernel Pca, such as *linear discriminant analysis (LDA)* or *canonical correlation analysis (CCA)*, as a pre-processing step.

For the training phase:
- Selecting different *Ensemble Methods, investigating both Averaging based and Boosting based Statistical Learning Methods*.

For the diagnostic analyses after the train and test phases:
- Using other measures, indicators and graphical plots such as the *Total Operating Characteristic (TOC)*, since such a measure also characterizes diagnostic ability while revealing more information than the ROC. In fact, for each threshold, ROC reveals two ratios, TP/(TP + FN) and FP/(FP + TN). In other words, ROC reveals hits/(hits + misses) and false alarms/(false alarms + correct rejections).
On the other hand, TOC shows the total information in the contingency table for each threshold. Lastly, the TOC method reveals all of the information that the ROC method provides, plus additional important information that ROC does not reveal, i.e. the size of every entry in the contingency table for each threshold.

## References section <a class="anchor" id="references"></a>

### Main References
- Data Domain Information part:
  - (Deck) https://en.wikipedia.org/wiki/Deck_(bridge)
  - (Cantilever bridge) https://en.wikipedia.org/wiki/Cantilever_bridge
  - (Arch bridge) https://en.wikipedia.org/wiki/Arch_bridge
- Machine Learning part:
  - (Theory Book) https://jakevdp.github.io/PythonDataScienceHandbook/
  - (Feature Extraction: PCA) https://scikit-learn.org/stable/modules/generated/sklearn.decomposition.PCA.html
  - (Linear Model: Logistic Regression) https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression
  - (Neighbor-based Learning: Knn) https://scikit-learn.org/stable/modules/neighbors.html
  - (Stochastic Learning: SGD Classifier) https://scikit-learn.org/stable/modules/sgd.html#sgd
  - (Discriminative Model: SVM) https://scikit-learn.org/stable/modules/svm.html
  - (Non-Parametric Learning: Decision Trees) https://scikit-learn.org/stable/modules/tree.html#tree
  - (Ensemble, Non-Parametric Learning: RandomForest) https://scikit-learn.org/stable/modules/ensemble.html#forest
- Metrics:
  - (F1-Accuracy-Precision-Recall) https://towardsdatascience.com/beyond-accuracy-precision-and-recall-3da06bea9f6c
- Statistics:
  - (Correlation and dependence) https://en.wikipedia.org/wiki/Correlation_and_dependence
  - (KDE) https://jakevdp.github.io/blog/2013/12/01/kernel-density-estimation/
- Chart part:
  - (Seaborn Charts) https://acadgild.com/blog/data-visualization-using-matplotlib-and-seaborn
- Third Party Library:
  - (sklearn) https://scikit-learn.org/stable/index.html
  - (statsmodels) https://www.statsmodels.org/stable/index.html#

### Other References
- Plots:
  - (Python Plot) https://www.datacamp.com/community/tutorials/matplotlib-tutorial-python?utm_source=adwords_ppc&utm_campaignid=898687156&utm_adgroupid=48947256715&utm_device=c&utm_keyword=&utm_matchtype=b&utm_network=g&utm_adpostion=&utm_creative=255798340456&utm_targetid=aud-299261629574:dsa-473406587955&utm_loc_interest_ms=&utm_loc_physical_ms=1008025&gclid=Cj0KCQjw-_j1BRDkARIsAJcfmTFu4LAUDhRGK2D027PHiqIPSlxK3ud87Ek_lwOu8rt8A8YLrjFiHqsaAoLDEALw_wcB
- Markdown Math part:
  - (Math Symbols Latex) https://oeis.org/wiki/List_of_LaTeX_mathematical_symbols
  - (Tutorial 1) https://share.cocalc.com/share/b4a30ed038ee41d868dad094193ac462ccd228e2/Homework%20/HW%201.2%20-%20Markdown%20and%20LaTeX%20Cheatsheet.ipynb?viewer=share
  - (Tutorial 2) https://jupyter-notebook.readthedocs.io/en/stable/examples/Notebook/Typesetting%20Equations.html
github_jupyter
## Dataset

https://data.wprdc.org/dataset/allegheny-county-restaurant-food-facility-inspection-violations/resource/112a3821-334d-4f3f-ab40-4de1220b1a0a

This data set contains all of the restaurants in Allegheny County with geographic locations including zip code, size, description of use, and a "status" ranging from 0 to 7 that indicates whether the restaurant is currently open.

```
import pandas as pd

restaurants_all = pd.read_csv("r.csv")
```

First, I remove the few restaurants that are outside of Pittsburgh and those with a value of 0 or 1 for their status, which indicates that they are closed.

```
query_mask = restaurants_all['status'] > 1
zip_mask_low = restaurants_all['zip'] > 14999.0
zip_mask_high = restaurants_all['zip'] < 16000.0

open_restaurants = restaurants_all[query_mask]
open_restaurants = open_restaurants[zip_mask_low]
open_restaurants = open_restaurants[zip_mask_high]
open_restaurants = open_restaurants[open_restaurants['zip'].notnull()]
open_restaurants.head(5)
```

Then I count up the number of open restaurants within each zip code by keeping track of the data in a dictionary, using the zip code as a key and incrementing the value associated with it.

```
zipcode_counter = dict()
for row in open_restaurants.index:
    zipc = open_restaurants.loc[row, "zip"]
    if zipc not in zipcode_counter:
        zipcode_counter[zipc] = 1
    else:
        zipcode_counter[zipc] = zipcode_counter[zipc] + 1
zipcode_counter

zip_sorted = dict(sorted(zipcode_counter.items(), key=lambda item: item[1]))
zip_sorted

import matplotlib.pyplot as plt

names = list(zipcode_counter.keys())
values = list(zipcode_counter.values())
plt.bar(names, values)
plt.xlabel("Zipcodes")
plt.ylabel("Number of Restaurants")
plt.axis([15000, 15300, 0, 1060])
plt.show()

average = sum(zip_sorted.values()) / len(zip_sorted)
print(average)
```

Plotting this data, we find that there is a very wide range from 0 to 1041 and a mean of 124 restaurants per zip code.

```
#all_values = zipcode_counter.values()
#max_value = max(all_values)
#print(max_value)
max_key = max(zipcode_counter, key=zipcode_counter.get)
print(max_key)
min_key = min(zipcode_counter, key=zipcode_counter.get)
print(min_key)
```

The top ten zip codes with the most restaurants and their corresponding neighborhoods are:
* 15222.0: 1041 - Strip District
* 15212.0: 694 - North Shore/North Side
* 15213.0: 677 - Oakland
* 15219.0: 639 - Hill District
* 15237.0: 509 - Ross Township
* 15146.0: 482 - Monroeville
* 15205.0: 423 - Crafton
* 15108.0: 408 - Coraopolis
* 15235.0: 396 - Penn Hills
* 15203.0: 392 - South Side

According to our metric, we divide the data into fifths and award points to each zip code based on its total:

```
print(len(zip_sorted))

zipcode_points_restaurants = dict()
i = 1
for key in zip_sorted:
    zipcode_points_restaurants[key] = i // 28 + 1
    i = i + 1
zipcode_points_restaurants
```
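The hard-coded `28` above only works if there happen to be roughly 140 zip codes (five buckets of ~28). A minimal sketch of the same fifths-based scoring that derives the bucket size from the data instead, assuming the `zip_sorted` dictionary built above (the variable names below are otherwise hypothetical):

```
import numpy as np
import pandas as pd

# restaurant counts per zip code, in ascending order (mirrors zip_sorted)
counts = pd.Series(zip_sorted)

# rank the zip codes by count and split the ranking into five equal-sized groups:
# group 1 = fewest restaurants, group 5 = most restaurants
bucket_size = int(np.ceil(len(counts) / 5))
points = (counts.rank(method="first").sub(1) // bucket_size + 1).astype(int)

points.sort_values().tail()
```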
github_jupyter
```
from tensorflow import keras
from tensorflow.keras import *
from tensorflow.keras.models import *
from tensorflow.keras.layers import *
from tensorflow.keras.regularizers import l2  # L2 regularization
import tensorflow as tf
import numpy as np
import pandas as pd

normal = np.loadtxt(r'F:\张老师课题学习内容\code\数据集\试验数据(包括压力脉动和振动)\2013.9.12-未发生缠绕前\2013-9.12振动\2013-9-12振动-1450rmin-mat\1450r_normalviby.txt', delimiter=',')
chanrao = np.loadtxt(r'F:\张老师课题学习内容\code\数据集\试验数据(包括压力脉动和振动)\2013.9.17-发生缠绕后\振动\9-17下午振动1450rmin-mat\1450r_chanraoviby.txt', delimiter=',')
print(normal.shape,chanrao.shape,"***************************************************")

data_normal=normal[16:18]    # take two rows (rows 16 and 17)
data_chanrao=chanrao[16:18]  # take two rows (rows 16 and 17)
print(data_normal.shape,data_chanrao.shape)
print(data_normal,"\r\n",data_chanrao,"***************************************************")

data_normal=data_normal.reshape(1,-1)
data_chanrao=data_chanrao.reshape(1,-1)
print(data_normal.shape,data_chanrao.shape)
print(data_normal,"\r\n",data_chanrao,"***************************************************")

# Two kinds of pump signals: normal = healthy, chanrao = entangled (fault)
data_normal=data_normal.reshape(-1, 512)   # (65536,) -> (128, 512)
data_chanrao=data_chanrao.reshape(-1,512)
print(data_normal.shape,data_chanrao.shape)

import numpy as np

def yuchuli(data,label):  # "preprocess": train/test split (4:1)(51:13)
    # shuffle the row order
    np.random.shuffle(data)
    train = data[0:102,:]
    test = data[102:128,:]
    label_train = np.array([label for i in range(0,102)])
    label_test =np.array([label for i in range(0,26)])
    return train,test ,label_train ,label_test

def stackkk(a,b,c,d,e,f,g,h):
    aa = np.vstack((a, e))
    bb = np.vstack((b, f))
    cc = np.hstack((c, g))
    dd = np.hstack((d, h))
    return aa,bb,cc,dd

x_tra0,x_tes0,y_tra0,y_tes0 = yuchuli(data_normal,0)
x_tra1,x_tes1,y_tra1,y_tes1 = yuchuli(data_chanrao,1)
tr1,te1,yr1,ye1=stackkk(x_tra0,x_tes0,y_tra0,y_tes0 ,x_tra1,x_tes1,y_tra1,y_tes1)
x_train=tr1
x_test=te1
y_train = yr1
y_test = ye1

# shuffle the data, keeping features and labels aligned
state = np.random.get_state()
np.random.shuffle(x_train)
np.random.set_state(state)
np.random.shuffle(y_train)
state = np.random.get_state()
np.random.shuffle(x_test)
np.random.set_state(state)
np.random.shuffle(y_test)

# standardize the train and test sets
def ZscoreNormalization(x):
    """Z-score normalization"""
    x = (x - np.mean(x)) / np.std(x)
    return x
x_train=ZscoreNormalization(x_train)
x_test=ZscoreNormalization(x_test)
# print(x_test[0])

# reshape into 1-D sequences
x_train = x_train.reshape(-1,512,1)
x_test = x_test.reshape(-1,512,1)
print(x_train.shape,x_test.shape)

def to_one_hot(labels,dimension=2):
    results = np.zeros((len(labels),dimension))
    for i,label in enumerate(labels):
        results[i,label] = 1
    return results
one_hot_train_labels = to_one_hot(y_train)
one_hot_test_labels = to_one_hot(y_test)

x = layers.Input(shape=[512,1,1])
Flatten=layers.Flatten()(x)
Dense1=layers.Dense(12, activation='relu')(Flatten)
Dense2=layers.Dense(6, activation='relu')(Dense1)
Dense3=layers.Dense(2, activation='softmax')(Dense2)
model = keras.Model(x, Dense3)
model.summary()

# define the loss and optimizer
model.compile(loss='categorical_crossentropy', optimizer='adam',metrics=['accuracy'])

import time
time_begin = time.time()
history = model.fit(x_train,one_hot_train_labels, validation_split=0.1, epochs=50,batch_size=10, shuffle=True)
time_end = time.time()
time = time_end - time_begin
print('time:', time)

import time
time_begin = time.time()
score = model.evaluate(x_test,one_hot_test_labels, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
time_end = time.time()
time = time_end - time_begin
print('time:', time)

# plot the accuracy/loss curves
import matplotlib.pyplot as plt
plt.plot(history.history['loss'],color='r')
plt.plot(history.history['val_loss'],color='g')
plt.plot(history.history['accuracy'],color='b')
plt.plot(history.history['val_accuracy'],color='k')
plt.title('model loss and acc')
plt.ylabel('loss / accuracy')
plt.xlabel('epoch')
plt.legend(['train_loss', 'val_loss', 'train_acc', 'val_acc'], loc='center right')
# plt.legend(['train_loss','train_acc'], loc='upper left')
#plt.savefig('1.png')
plt.show()

import matplotlib.pyplot as plt
plt.plot(history.history['loss'],color='r')
plt.plot(history.history['accuracy'],color='b')
plt.title('model loss and accuracy')
plt.ylabel('loss / accuracy')
plt.xlabel('epoch')
plt.legend(['train_loss', 'train_accuracy'], loc='center right')
plt.show()
```
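Since the test split here is small, overall accuracy alone can hide which of the two classes is being confused. A minimal sketch of a per-class check on the test set, assuming `model`, `x_test` and `one_hot_test_labels` as defined above (scikit-learn is an extra dependency not used in the original cells):

```
import numpy as np
from sklearn.metrics import confusion_matrix, classification_report

# predicted class = argmax over the 2-unit softmax output
y_pred = np.argmax(model.predict(x_test), axis=1)
y_true = np.argmax(one_hot_test_labels, axis=1)

# rows = true class (0 = normal, 1 = chanrao/fault), columns = predicted class
print(confusion_matrix(y_true, y_pred))
print(classification_report(y_true, y_pred, target_names=["normal", "fault"]))
```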
github_jupyter
# Dimensionality Reduction Example Using the IMDB data, feature matrix and apply dimensionality reduction to this matrix via PCA and SVD. ``` %matplotlib inline import json import random import pandas as pd import numpy as np import matplotlib.pyplot as plt from scipy.sparse import lil_matrix from sklearn.neighbors import DistanceMetric from sklearn.metrics import jaccard_score from sklearn.metrics import pairwise_distances # Let's restrict ourselves just to US titles relevant_title_df = pd.read_csv("../data/us_relevant_titles.csv") # And create a set of just these titles, so we can filter them relevant_title_set = set(relevant_title_df["title"]) actor_id_to_name_map = {} # Map Actor IDs to actor names actor_id_to_index_map = {} # Map actor IDs to a unique index of known actors index_to_actor_ids = [] # Array mapping unique index back to actor ID (invert of actor_id_to_index_map) index_counter = 0 # Unique actor index; increment for each new actor known_actors = set() movie_actor_list = [] # List of all our movies and their actors test_count = 0 with open("../data/imdb_recent_movies.json", "r") as in_file: for line in in_file: this_movie = json.loads(line) # Restrict to American movies if this_movie["title_name"] not in relevant_title_set: continue # Keep track of all the actors in this movie for actor_id,actor_name in zip(this_movie['actor_ids'],this_movie['actor_names']): # Keep names and IDs actor_id_to_name_map[actor_id] = actor_name # If we've seen this actor before, skip... if actor_id in known_actors: continue # ... Otherwise, add to known actor set and create new index for them known_actors.add(actor_id) actor_id_to_index_map[actor_id] = index_counter index_to_actor_ids.append(actor_id) index_counter += 1 # Finished with this film movie_actor_list.append({ "movie": this_movie["title_name"], "actors": set(this_movie['actor_ids']), "genres": this_movie["title_genre"] }) print("Known Actors:", len(known_actors)) ``` ## Generate Same DataFrame using Sparse Matrics The above will break if you have too much data. We can get around that partially with sparse matrices, where we only store the non-zero elements of the feature matrix and their indices. ``` # With sparse matrix, initialize to size of Movies x Actors of 0s matrix_sparse = lil_matrix((len(movie_actor_list), len(known_actors)), dtype=bool) # Update the matrix, movie by movie, setting non-zero values for the appropriate actors for row,movie in enumerate(movie_actor_list): for actor_id in movie["actors"]: this_index = actor_id_to_index_map[actor_id] matrix_sparse[row,this_index] = 1 df = pd.DataFrame.sparse.from_spmatrix( matrix_sparse, index=[m["movie"] for m in movie_actor_list], columns=[index_to_actor_ids[i] for i in range(len(known_actors))] ) df top_k_actors = 1000 # Extract the most frequent actors, so we can deal with a reasonable dataset size actor_df = df.sum(axis=0) top_actors = set(actor_df.sort_values().tail(top_k_actors).index) # Restrict the data frame to just the movies containing #. 
the top k actors reduced_df = df[top_actors] # restrict to just these top actors # throw away movies that don't have any of these actors reduced_df = reduced_df.loc[reduced_df.sum(axis=1) > 0] reduced_df ``` ## Apply SVD to Feature Matrix ``` # https://scikit-learn.org/stable/modules/generated/sklearn.decomposition.TruncatedSVD.html from sklearn.decomposition import TruncatedSVD matrix_dense = reduced_df.to_numpy() reduced_df svd = TruncatedSVD(n_components=2) svd.fit(matrix_dense) matrix_reduced = svd.transform(matrix_dense) np.mean(matrix_reduced, axis=0) plt.scatter(matrix_reduced[:,0], matrix_reduced[:,1]) counter = 0 for index in np.argwhere((matrix_reduced[:,0] > 1.0) & (matrix_reduced[:,1] > 0.8)): movie_title = reduced_df.iloc[index[0]].name for this_movie in [m for m in movie_actor_list if m['movie'] == movie_title]: print(this_movie["movie"]) print("\tGenres:", ", ".join(this_movie["genres"])) print("\tActors:", ", ".join([actor_id_to_name_map[actor] for actor in this_movie["actors"]])) counter += 1 if counter > 10: print("...") break counter = 0 for index in np.argwhere((matrix_reduced[:,0] < 0.1) & (matrix_reduced[:,1] < 0.1)): movie_title = reduced_df.iloc[index[0]].name for this_movie in [m for m in movie_actor_list if m['movie'] == movie_title]: print(this_movie["movie"]) print("\tGenres:", ", ".join(this_movie["genres"])) print("\tActors:", ", ".join([actor_id_to_name_map[actor] for actor in this_movie["actors"]])) counter += 1 if counter > 10: print("...") break comp1_genre_map = {} comp1_actor_map = {} comp1_counter = 0 for index in np.argwhere((matrix_reduced[:,0] > 1.0) & (matrix_reduced[:,1] < 0.2)): movie_title = reduced_df.iloc[index[0]].name for this_movie in [m for m in movie_actor_list if m['movie'] == movie_title]: for g in this_movie["genres"]: comp1_genre_map[g] = comp1_genre_map.get(g, 0) + 1 for a in [actor_id_to_name_map[actor] for actor in this_movie["actors"]]: comp1_actor_map[a] = comp1_actor_map.get(a, 0) + 1 comp1_counter += 1 print("Movies in Component 1:", comp1_counter) print("Genres:") for g in sorted(comp1_genre_map, key=comp1_genre_map.get, reverse=True)[:10]: print("\t", g, comp1_genre_map[g]) print("Actors:") for a in sorted(comp1_actor_map, key=comp1_actor_map.get, reverse=True)[:10]: print("\t", a, comp1_actor_map[a]) comp2_genre_map = {} comp2_actor_map = {} comp2_counter = 0 for index in np.argwhere((matrix_reduced[:,0] < 0.1) & (matrix_reduced[:,1] < 0.1)): movie_title = reduced_df.iloc[index[0]].name for this_movie in [m for m in movie_actor_list if m['movie'] == movie_title]: for g in this_movie["genres"]: comp2_genre_map[g] = comp2_genre_map.get(g, 0) + 1 for a in [actor_id_to_name_map[actor] for actor in this_movie["actors"]]: comp2_actor_map[a] = comp2_actor_map.get(a, 0) + 1 comp2_counter += 1 print("Movies in Component 2:", comp2_counter) print("Genres:") for g in sorted(comp2_genre_map, key=comp2_genre_map.get, reverse=True)[:10]: print("\t", g, comp2_genre_map[g]) print("Actors:") for a in sorted(comp2_actor_map, key=comp2_actor_map.get, reverse=True)[:10]: print("\t", a, comp2_actor_map[a]) ``` ## Find Similar Movies in Reduced Dimensional Space ``` query_idx = [idx for idx,m in enumerate(reduced_df.index) if m == "The Lord of the Rings: The Fellowship of the Ring"][0] # query_idx = [idx for idx,m in enumerate(reduced_df.index) if m == "Heavy Metal 2000"][0] # query_idx = [idx for idx,m in enumerate(reduced_df.index) if m == "Casino Royale"][0] # query_idx = [idx for idx,m in enumerate(reduced_df.index) if m == "Star Wars: Episode 
II - Attack of the Clones"][0] query_idx query_v = matrix_reduced[query_idx,:] query_v # get distances between all films and query film distances = pairwise_distances(matrix_reduced, [query_v], metric='euclidean') distances_df = pd.DataFrame(distances, columns=["distance"]) for idx,row in distances_df.sort_values(by="distance", ascending=True).head(20).iterrows(): print(idx, reduced_df.iloc[idx].name, row["distance"]) ``` ## SVD and Column Feature Space Above, we focused on the *movies* in the reduced feature/"concept" space. Here, we will use SVD to map the *actors* into the reduced "concept" space. ``` # See that the shape of this matrix is *reduced space* X original features svd.components_.shape ``` We will use this reduced space to inspect the associations with a given actor and the concept set of concepts (i.e., the reduced space) ``` # query_actor = [idx for idx,name in actor_id_to_name_map.items() if name == "Ewan McGregor"][0] # query_actor = [idx for idx,name in actor_id_to_name_map.items() if name == "Eric Roberts"][0] # query_actor = [idx for idx,name in actor_id_to_name_map.items() if name == "Jason Statham"][0] # query_actor = [idx for idx,name in actor_id_to_name_map.items() if name == "Leonardo DiCaprio"][0] query_actor = [idx for idx,name in actor_id_to_name_map.items() if name == "George Clooney"][0] query_actor query_actor_index = np.argwhere(reduced_df.columns == query_actor)[0,0] query_actor_index # Show the actor strengths across these concepts svd.components_.T[query_actor_index,:] # And you can use this method to evaluate distances between actors in the concept space distances = pairwise_distances(svd.components_.T, [svd.components_.T[query_actor_index,:]], metric='euclidean') distances_df = pd.DataFrame(distances, columns=["distance"]) for idx,row in distances_df.sort_values(by="distance", ascending=True).head(20).iterrows(): print(idx, actor_id_to_name_map[reduced_df.columns[idx]], row["distance"]) ``` ## SVD is more scalable than PCA ``` from sklearn.decomposition import PCA matrix_sparse.shape # This will fail pca = PCA(n_components=2) pca.fit(matrix_sparse) svd = TruncatedSVD(n_components=2) svd.fit(matrix_sparse) matrix_reduced = svd.transform(matrix_sparse) print(np.mean(matrix_reduced, axis=0)) plt.scatter(matrix_reduced[:,0], matrix_reduced[:,1]) comp1_genre_map = {} comp1_actor_map = {} comp1_counter = 0 for index in np.argwhere((matrix_reduced[:,0] > 1.0) & (matrix_reduced[:,1] < 0.2)): movie_title = df.iloc[index[0]].name for this_movie in [m for m in movie_actor_list if m['movie'] == movie_title]: for g in this_movie["genres"]: comp1_genre_map[g] = comp1_genre_map.get(g, 0) + 1 for a in [actor_id_to_name_map[actor] for actor in this_movie["actors"]]: comp1_actor_map[a] = comp1_actor_map.get(a, 0) + 1 comp1_counter += 1 print("Movies in Component 1:", comp1_counter) print("Genres:") for g in sorted(comp1_genre_map, key=comp1_genre_map.get, reverse=True)[:10]: print("\t", g, comp1_genre_map[g]) print("Actors:") for a in sorted(comp1_actor_map, key=comp1_actor_map.get, reverse=True)[:10]: print("\t", a, comp1_actor_map[a]) comp2_genre_map = {} comp2_actor_map = {} comp2_counter = 0 for index in np.argwhere((matrix_reduced[:,0] < 0.1) & (matrix_reduced[:,1] < 0.1)): movie_title = df.iloc[index[0]].name for this_movie in [m for m in movie_actor_list if m['movie'] == movie_title]: for g in this_movie["genres"]: comp2_genre_map[g] = comp2_genre_map.get(g, 0) + 1 for a in [actor_id_to_name_map[actor] for actor in this_movie["actors"]]: comp2_actor_map[a] = 
comp2_actor_map.get(a, 0) + 1 comp2_counter += 1 print("Movies in Component 2:", comp2_counter) print("Genres:") for g in sorted(comp2_genre_map, key=comp2_genre_map.get, reverse=True)[:10]: print("\t", g, comp2_genre_map[g]) print("Actors:") for a in sorted(comp2_actor_map, key=comp2_actor_map.get, reverse=True)[:10]: print("\t", a, comp2_actor_map[a]) ```
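The `pca.fit(matrix_sparse)` call above is expected to fail because PCA mean-centers every column first, which would destroy the sparsity and effectively require materialising the full dense movie-by-actor matrix, whereas `TruncatedSVD` works on the sparse representation directly without centering. A rough, hedged back-of-the-envelope check of what that densification would cost, assuming `matrix_sparse` as built earlier in this notebook:

```
# Compare the storage needed for the dense (centered, float) matrix
# against the sparse boolean one we actually built.
n_movies, n_actors = matrix_sparse.shape
nnz = matrix_sparse.nnz  # number of non-zero entries actually stored

dense_bytes = n_movies * n_actors * 8   # float64 values after centering
sparse_bytes = nnz * (8 + 2 * 8)        # very rough: value + row/column bookkeeping

print(f"dense  ~ {dense_bytes / 1e9:.1f} GB")
print(f"sparse ~ {sparse_bytes / 1e6:.1f} MB")
```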
github_jupyter
# 1. Workflow for building and deploying interactive dashboards **Let's say you want to make it easy to explore some dataset. That is, you want to:** * Make a visualization of the data * Maybe add some custom widgets to see the effects of some variables * Then deploy the result as a web app. **You can definitely do that in Python, but you would expect to:** * Spend days of effort to get some initial prototype working in a Jupyter notebook * Work hard to tame the resulting opaque mishmash of domain-specific, widget, and plotting code * Start over nearly from scratch whenever you need to: - Deploy in a standalone server - Visualize different aspects of your data - Scale up to larger (>100K) datasets # Step-by-step data-science workflow Here we'll show a simple, flexible, powerful, step-by-step workflow, explaining which open-source tools solve each of the problems involved: - Step 1: Get some data - Step 2: Prototype a plot in a notebook - Step 3: Define your domain model - Step 4: Get a widget-based UI for free - Step 5: Link your domain model to your visualization - Step 6: Widgets now control your interactive plots - Step 7: Deploy your dashboard ``` import holoviews as hv import geoviews as gv import param, paramnb, parambokeh import dask.dataframe as dd from colorcet import cm from bokeh.models import WMTSTileSource from holoviews.operation.datashader import datashade from holoviews.operation import decimate from holoviews.streams import RangeXY, PlotSize ``` ## Step 1: Get some data * Here we'll use a subset of the often-studied NYC Taxi dataset * About 12 million points of GPS locations from taxis * Stored in the efficient Parquet format for easy access * Loaded into a Dask dataframe for multi-core<br>(and if needed, out-of-core or distributed) computation <div class="alert alert-warning" role="alert"> <strong>Warning!</strong> If you are low on memory (less than 8 GB) load only a subset of the data by changing the line below to: <br> <code>df = dd.read_parquet('../data/nyc_taxi_hours.parq/')[:10000].persist()</code> </div> ``` %time df = dd.read_parquet('../data/nyc_taxi_hours.parq/').persist() print(len(df)) df.head(2) ``` ## Step 2: Prototype a plot in a notebook * A text-based representation isn't very useful for big datasets like this, so we need to build a plot * But we don't want to start a software project, so we use HoloViews: - Simple, declarative way to annotate your data for visualization - Large library of Elements with associated visual representation - Elements combine (lay out or overlay) easily * And we'll want live interactivity, so we'll use a Bokeh plotting extension * Result: ``` hv.extension('bokeh') points = hv.Points(df, kdims=['pickup_x', 'pickup_y']) decimate(points) ``` Here ``Points`` declares an object wrapping `df` and visualized as a scatterplot, and `decimate` limits the number of points that will be sent to the browser to avoid crashing it. As you can see, HoloViews makes it very simple to pop up a visualization of your data, getting *something* on screen with only a few characters of typing. 
But it's not particularly pretty, so let's customize it a bit: ``` options = dict(width=700, height=600, xaxis=None, yaxis=None, bgcolor='black') points = points.opts(plot=options) decimate(points) ``` That looks a bit better, but it's still decimating the data nearly beyond recognition, so let's try using Datashader to rasterize it into a fixed-size image to send to the browser: ``` taxi_trips = datashade(points, x_sampling=1, y_sampling=1, cmap=cm['fire']).opts(plot=options) taxi_trips ``` Ok, that looks good now; there's clearly lots to explore in this dataset. To put it in context, let's overlay that on a map: ``` taxi_trips = datashade(points, x_sampling=1, y_sampling=1, cmap=cm['fire']).opts(plot=options) wmts = WMTSTileSource(url='https://server.arcgisonline.com/ArcGIS/rest/services/World_Imagery/MapServer/tile/{Z}/{Y}/{X}.jpg') tiles = gv.WMTS(wmts) tiles * taxi_trips ``` We could add lots more visual elements (laying out additional plots left and right, overlaying annotations, etc.), but let's say that this is our basic visualization we'll want to share. To sum up what we've done so far, here are the complete 11 lines of code required to generate this geo-located interactive plot of millions of datapoints in Jupyter: ``` import holoviews as hv, geoviews as gv, dask.dataframe as dd from colorcet import cm from holoviews.operation.datashader import datashade from bokeh.models import WMTSTileSource hv.extension('bokeh') df = dd.read_parquet('../data/nyc_taxi_hours.parq/').persist() options = dict(width=700, height=600, xaxis=None, yaxis=None, bgcolor='black') points = hv.Points(df, kdims=['pickup_x', 'pickup_y']) taxi_trips = datashade(points, x_sampling=1, y_sampling=1, cmap=cm['fire']).opts(plot=options) wmts = WMTSTileSource(url='https://server.arcgisonline.com/ArcGIS/rest/services/World_Imagery/MapServer/tile/{Z}/{Y}/{X}.jpg') gv.WMTS(wmts) * taxi_trips ``` ## Step 3: Define your domain model Now that we've prototyped a nice plot, we could keep editing the code above to explore whatever aspects of the data we wished. However, often at this point we will want to start sharing our workflow with people who aren't familar with how to program visualizations in this way. So the next step: figure out what we want our intended user to be able to change, and declare those variables or parameters with: - type and range checking - documentation strings - default values The Param library allows declaring Python attributes having these features (and more, such as dynamic values and inheritance), letting you set up a well-defined space for a user (or you!) to explore. ## NYC Taxi Parameters ``` class NYCTaxiExplorer(hv.streams.Stream): alpha = param.Magnitude(default=0.75, doc="Alpha value for the map opacity") plot = param.ObjectSelector(default="pickup", objects=["pickup","dropoff"]) colormap = param.ObjectSelector(default=cm["fire"], objects=cm.values()) passengers = param.Range(default=(0, 10), bounds=(0, 10), doc=""" Filter for taxi trips by number of passengers""") ``` Each Parameter is a normal Python attribute, but with special checks and functions run automatically when getting or setting. Parameters capture your goals and your knowledge about your domain, declaratively. 
### Class level parameters ``` NYCTaxiExplorer.alpha NYCTaxiExplorer.alpha = 0.5 NYCTaxiExplorer.alpha ``` ### Validation ``` try: NYCTaxiExplorer.alpha = '0' except Exception as e: print(e) ``` ### Instance parameters ``` explorer = NYCTaxiExplorer(alpha=0.6) explorer.alpha NYCTaxiExplorer.alpha ``` ## Step 4: Get a widget-based UI for free * Parameters are purely declarative and independent of any widget toolkit, but contain all the information needed to build interactive widgets * ParamNB generates UIs in Jupyter from Parameters, using ipywidgets ``` paramnb.Widgets(NYCTaxiExplorer) NYCTaxiExplorer.passengers ``` * ipywidgets work with Jupyter Dashboards Server for deployment * Declaration of parameters is independent of the UI library used * ParamBokeh generates UIs from the same Parameters, using Bokeh widgets, either in Jupyter or in Bokeh Server ``` parambokeh.Widgets(NYCTaxiExplorer) ``` ## Step 5: Link your domain model to your visualization We've now defined the space that's available for exploration, and the next step is to link up the parameter space with the code that specifies the plot: ``` class NYCTaxiExplorer(hv.streams.Stream): alpha = param.Magnitude(default=0.75, doc="Alpha value for the map opacity") colormap = param.ObjectSelector(default=cm["fire"], objects=cm.values()) plot = param.ObjectSelector(default="pickup", objects=["pickup","dropoff"]) passengers = param.Range(default=(0, 10), bounds=(0, 10)) def make_view(self, x_range=None, y_range=None, **kwargs): map_tiles = tiles.opts(style=dict(alpha=self.alpha), plot=options) points = hv.Points(df, kdims=[self.plot+'_x', self.plot+'_y'], vdims=['passenger_count']) selected = points.select(passenger_count=self.passengers) taxi_trips = datashade(selected, x_sampling=1, y_sampling=1, cmap=self.colormap, dynamic=False, x_range=x_range, y_range=y_range, width=800, height=475) return map_tiles * taxi_trips ``` Note that the `NYCTaxiExplorer` class is entirely declarative (no widgets), and can be used "by hand" to provide range-checked and type-checked plotting for values from the declared parameter space: ``` explorer = NYCTaxiExplorer(alpha=0.2, plot="dropoff") explorer.make_view() ``` ## Step 6: Widgets now control your interactive plots But in practice, why not pop up the widgets to make it fully interactive? ``` explorer = NYCTaxiExplorer() paramnb.Widgets(explorer, callback=explorer.event) hv.DynamicMap(explorer.make_view, streams=[explorer, RangeXY()]) explorer = NYCTaxiExplorer() parambokeh.Widgets(explorer, callback=explorer.event) hv.DynamicMap(explorer.make_view, streams=[explorer, RangeXY()]) ``` ## Step 7: Deploy your dashboard Ok, now you've got something worth sharing, running inside Jupyter. But if you want to share your work with people who don't use Python, you'll now want to run a server with this same code. 
* If you used **ParamBokeh**, deploy with **Bokeh Server**: - Write the above code to a file ``nyc_parambokeh.py`` - Add ``, mode='server'`` to the ``Widgets()`` call to declare which object should be served - ``bokeh serve nyc_parambokeh.py`` * If you used **ParamNB**, deploy with **Jupyter Dashboard Server**: - Use [Jupyter Dashboards Extension](https://github.com/jupyter/dashboards) to select cells from the notebook to display - Use preview mode to see layout - Use [Jupyter Dashboards Server](https://github.com/jupyter-incubator/dashboards_server) to deploy - Note various caveats below # Complete dashboard code ``` import holoviews as hv, geoviews as gv, param, parambokeh, dask.dataframe as dd from colorcet import cm from bokeh.models import WMTSTileSource from holoviews.operation.datashader import datashade from holoviews.streams import RangeXY, PlotSize hv.extension('bokeh') df = dd.read_parquet('./data/nyc_taxi.parq/').persist() url='https://server.arcgisonline.com/ArcGIS/rest/services/World_Imagery/MapServer/tile/{Z}/{Y}/{X}.jpg' tiles = gv.WMTS(WMTSTileSource(url=url)) tile_options = dict(width=800,height=475,xaxis=None,yaxis=None,bgcolor='black',show_grid=False) passenger_counts = (0, df.passenger_count.max().compute()+1) class NYCTaxiExplorer(hv.streams.Stream): alpha = param.Magnitude(default=0.75, doc="Alpha value for the map opacity") colormap = param.ObjectSelector(default=cm["fire"], objects=[cm[k] for k in cm.keys() if not '_' in k]) plot = param.ObjectSelector(default="pickup", objects=["pickup","dropoff"]) passengers = param.Range(default=passenger_counts, bounds=passenger_counts) output = parambokeh.view.Plot() def make_view(self, x_range, y_range, alpha, colormap, plot, passengers, **kwargs): map_tiles = tiles(style=dict(alpha=alpha), plot=tile_options) points = hv.Points(df, kdims=[plot+'_x', plot+'_y'], vdims=['passenger_count']) if passengers != passenger_counts: points = points.select(passenger_count=passengers) taxi_trips = datashade(points, x_sampling=1, y_sampling=1, cmap=colormap, dynamic=False, x_range=x_range, y_range=y_range) return map_tiles * taxi_trips selector = NYCTaxiExplorer(name="NYC Taxi Trips") selector.output = hv.DynamicMap(selector.make_view, streams=[selector, RangeXY(), PlotSize()]) parambokeh.Widgets(selector, view_position='right', callback=selector.event, mode='server') ``` # Branching out The other sections in this tutorial will expand on steps in this workflow, providing more step-by-step instructions for each of the major tasks. These techniques can create much more ambitious apps with very little additional code or effort: * Adding additional linked or separate subplots of any type; see [2 - Annotating your data](./02-annotating-data.ipynb) and [4 - Exploration with containers](./04-exploration-with-containers.ipynb). * Declaring code that runs for clicking or selecting *within* the Bokeh plot; see [7 - Custom interactivity](./07-custom-interactivity.ipynb). * Using multiple sets of widgets of many different types; see [ParamNB](https://github.com/ioam/paramnb) and [ParamBokeh](https://github.com/ioam/parambokeh). * Using datasets too big for any one machine, with [Dask.Distributed](https://distributed.readthedocs.io). 
# Future work * Jupyter Dashboards Server not currently maintained; requires older ipywidgets version * Bokeh Server is mature and well supported, but does not currently support drag-and-drop layout like Jupyter Dashboards does * ParamBokeh and ParamNB still need some polishing and work to make them ready for widespread use * E.g. ParamNB and ParamBokeh should provide more flexible widget layouts
github_jupyter
# SKLearn Spacy Reddit Text Classification Example In this example we will be buiding a text classifier using the reddit content moderation dataset. For this, we will be using SpaCy for the word tokenization and lemmatization. The classification will be done with a Logistic Regression binary classifier. The steps in this tutorial include: 1) Train and build your NLP model 2) Build your containerized model 3) Test your model as a docker container 4) Run Seldon in your kubernetes cluster 5) Deploy your model with Seldon 6) Interact with your model through API 7) Clean your environment ### Before you start Make sure you install the following dependencies, as they are critical for this example to work: * Helm v2.13.1+ * A Kubernetes cluster running v1.13 or above (minkube / docker-for-windows work well if enough RAM) * kubectl v1.14+ * Python 3.6+ * Python DEV requirements (we'll install them below) Let's get started! 🚀🔥 ## 1) Train and build your NLP model ``` # Let's first install any dependencies !pip install -r requirements.txt import pandas as pd from sklearn.model_selection import train_test_split import numpy as np from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.linear_model import LogisticRegression from seldon_core.seldon_client import SeldonClient import dill import sys, os # This import may take a while as it will download the Spacy ENGLISH model from ml_utils import CleanTextTransformer, SpacyTokenTransformer df_cols = ["prev_idx", "parent_idx", "body", "removed"] TEXT_COLUMN = "body" CLEAN_COLUMN = "clean_body" TOKEN_COLUMN = "token_body" # Downloading the 50k reddit dataset of moderated comments df = pd.read_csv("https://raw.githubusercontent.com/axsauze/reddit-classification-exploration/master/data/reddit_train.csv", names=df_cols, skiprows=1, encoding="ISO-8859-1") df.head() # Let's see how many examples we have of each class df["removed"].value_counts().plot.bar() x = df["body"].values y = df["removed"].values x_train, x_test, y_train, y_test = train_test_split( x, y, stratify=y, random_state=42, test_size=0.1, shuffle=True) # Clean the text clean_text_transformer = CleanTextTransformer() x_train_clean = clean_text_transformer.transform(x_train) # Tokenize the text and get the lemmas spacy_tokenizer = SpacyTokenTransformer() x_train_tokenized = spacy_tokenizer.transform(x_train_clean) # Build tfidf vectorizer tfidf_vectorizer = TfidfVectorizer( max_features=10000, preprocessor=lambda x: x, tokenizer=lambda x: x, token_pattern=None, ngram_range=(1, 3)) tfidf_vectorizer.fit(x_train_tokenized) # Transform our tokens to tfidf vectors x_train_tfidf = tfidf_vectorizer.transform( x_train_tokenized) # Train logistic regression classifier lr = LogisticRegression(C=0.1, solver='sag') lr.fit(x_train_tfidf, y_train) # These are the models we'll deploy with open('tfidf_vectorizer.model', 'wb') as model_file: dill.dump(tfidf_vectorizer, model_file) with open('lr.model', 'wb') as model_file: dill.dump(lr, model_file) ``` ## 2) Build your containerized model ``` # This is the class we will use to deploy !cat RedditClassifier.py # test that our model works from RedditClassifier import RedditClassifier # With one sample sample = x_test[0:1] print(sample) print(RedditClassifier().predict(sample, ["feature_name"])) ``` ### Create Docker Image with the S2i utility Using the S2I command line interface we wrap our current model to seve it through the Seldon interface ``` # To create a docker image we need to create the .s2i folder configuration as below: !cat 
.s2i/environment # As well as a requirements.txt file with all the relevant dependencies !cat requirements.txt !s2i build . seldonio/seldon-core-s2i-python3:0.11 reddit-classifier:0.1 ``` ## 3) Test your model as a docker container ``` # Remove previously deployed containers for this model !docker rm -f reddit_predictor !docker run --name "reddit_predictor" -d --rm -p 5001:5000 reddit-classifier:0.1 ``` ### Make sure you wait for language model SpaCy will download the English language model, so you have to make sure the container finishes downloading it before it can be used. You can view this by running the logs until you see "Linking successful". ``` # Here we need to wait until we see "Linking successful", as it's downloading the Spacy English model # You can hit stop when this happens !docker logs -t -f reddit_predictor # We now test the REST endpoint expecting the same result endpoint = "0.0.0.0:5001" batch = sample payload_type = "ndarray" sc = SeldonClient(microservice_endpoint=endpoint) response = sc.microservice( data=batch, method="predict", payload_type=payload_type, names=["tfidf"]) print(response) # We now stop it to run it in docker !docker stop reddit_predictor ``` ## 4) Run Seldon in your kubernetes cluster In order to run Seldon we need to make sure that Helm is initialised and Tiller is running. For this we can run the following initialisation and waiting commands. ``` # If not running you can install it # First initialise helm !kubectl create clusterrolebinding kube-system-cluster-admin --clusterrole=cluster-admin --serviceaccount=kube-system:default !helm init !kubectl rollout status deploy/tiller-deploy -n kube-system ``` Now we can install run the Seldon Operator using the latest Helm charts ``` !helm install seldon-core-operator --name seldon-core-operator --repo https://storage.googleapis.com/seldon-charts ``` And we can make sure that it is actually running with the following command ``` !kubectl get pod | grep seldon ``` In order for us to be able to reach the model, we will need to set up an ingress. For this we will use ambassador: ``` !helm install stable/ambassador --name ambassador --set crds.keep=false ``` We can now see the ambassador service is running. In our case we can reach it through the external IP which is our localhost, but if you are using a cloud provider, make sure you have access to the ambassador endpoint. ``` !kubectl get svc ambassador ``` ## 5) Deploy your model with Seldon We can now deploy our model by using the Seldon graph definition: ``` # We'll use our seldon deployment file !cat reddit_clf.json !kubectl apply -f reddit_clf.json !kubectl get pods ``` ## 6) Interact with your model through API Now that our Seldon Deployment is live, we are able to interact with it through its API. There are two options in which we can interact with our new model. 
These are: a) Using CURL from the CLI (or another rest client like Postman) b) Using the Python SeldonClient #### a) Using CURL from the CLI ``` %%bash curl -X POST -H 'Content-Type: application/json' \ -d "{'data': {'names': ['text'], 'ndarray': ['Hello world this is a test']}}" \ http://127.0.0.1/seldon/default/reddit-classifier/api/v0.1/predictions ``` #### b) Using the Python SeldonClient ``` from seldon_core.seldon_client import SeldonClient import numpy as np host = "localhost" port = "80" # Make sure you use the port above batch = np.array(["Hello world this is a test"]) payload_type = "ndarray" deployment_name="reddit-classifier" transport="rest" namespace="default" sc = SeldonClient( gateway="ambassador", ambassador_endpoint=host + ":" + port, namespace=namespace) client_prediction = sc.predict( data=batch, deployment_name=deployment_name, names=["text"], payload_type=payload_type, transport="rest") print(client_prediction) ``` ## 7) Clean your environment ``` !kubectl delete -f reddit_clf.json !helm del --purge ambassador !helm del --purge seldon-core-operator ```
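As a closing note on step 2 above: the contents of `RedditClassifier.py` are only displayed with `!cat` and are not reproduced in this text. A wrapper consistent with the calls made in this notebook (`RedditClassifier().predict(sample, ["feature_name"])`, the two dill model files, and the `ml_utils` transformers) would look roughly like the following sketch — an illustration of the pattern, not the actual file:

```
import dill
from ml_utils import CleanTextTransformer, SpacyTokenTransformer


class RedditClassifier:
    def __init__(self):
        # load the two artifacts trained and saved in step 1
        with open("tfidf_vectorizer.model", "rb") as model_file:
            self._tfidf_vectorizer = dill.load(model_file)
        with open("lr.model", "rb") as model_file:
            self._lr_model = dill.load(model_file)

        # same preprocessing steps used at training time
        self._clean_text_transformer = CleanTextTransformer()
        self._spacy_tokenizer = SpacyTokenTransformer()

    def predict(self, X, feature_names=None):
        # clean -> tokenize/lemmatize -> tf-idf -> logistic regression probabilities
        clean = self._clean_text_transformer.transform(X)
        tokens = self._spacy_tokenizer.transform(clean)
        tfidf = self._tfidf_vectorizer.transform(tokens)
        return self._lr_model.predict_proba(tfidf)
```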
github_jupyter
## Image segmentation with CamVid ``` %reload_ext autoreload %autoreload 2 %matplotlib inline from fastai import * from fastai.vision import * from fastai.callbacks.hooks import * ``` The One Hundred Layer Tiramisu paper used a modified version of Camvid, with smaller images and few classes. You can get it from the CamVid directory of this repo: git clone https://github.com/alexgkendall/SegNet-Tutorial.git ``` path = Path('./data/camvid-tiramisu') path.ls() ``` ## Data ``` fnames = get_image_files(path/'val') fnames[:3] lbl_names = get_image_files(path/'valannot') lbl_names[:3] img_f = fnames[0] img = open_image(img_f) img.show(figsize=(5,5)) def get_y_fn(x): return Path(str(x.parent)+'annot')/x.name codes = array(['Sky', 'Building', 'Pole', 'Road', 'Sidewalk', 'Tree', 'Sign', 'Fence', 'Car', 'Pedestrian', 'Cyclist', 'Void']) mask = open_mask(get_y_fn(img_f)) mask.show(figsize=(5,5), alpha=1) src_size = np.array(mask.shape[1:]) src_size,mask.data ``` ## Datasets ``` bs = 8 src = (SegmentationItemList.from_folder(path) .split_by_folder(valid='val') .label_from_func(get_y_fn, classes=codes)) data = (src.transform(get_transforms(), tfm_y=True) .databunch(bs=bs) .normalize(imagenet_stats)) data.show_batch(2, figsize=(10,7)) ``` ## Model ``` name2id = {v:k for k,v in enumerate(codes)} void_code = name2id['Void'] def acc_camvid(input, target): target = target.squeeze(1) mask = target != void_code return (input.argmax(dim=1)[mask]==target[mask]).float().mean() metrics=acc_camvid wd=1e-2 learn = unet_learner(data, models.resnet34, metrics=metrics, wd=wd, bottle=True) lr_find(learn) learn.recorder.plot() lr=2e-3 learn.fit_one_cycle(10, slice(lr), pct_start=0.8) learn.save('stage-1') learn.load('stage-1'); learn.unfreeze() lrs = slice(lr/100,lr) learn.fit_one_cycle(12, lrs, pct_start=0.8) learn.save('stage-2'); ``` ## Go big ``` learn=None gc.collect() ``` You may have to restart your kernel and come back to this stage if you run out of memory, and may also need to decrease `bs`. ``` size = src_size bs=8 data = (src.transform(get_transforms(), size=size, tfm_y=True) .databunch(bs=bs) .normalize(imagenet_stats)) learn = unet_learner(data, models.resnet34, metrics=metrics, wd=wd, bottle=True).load('stage-2'); lr_find(learn) learn.recorder.plot() lr=1e-3 learn.fit_one_cycle(10, slice(lr), pct_start=0.8) learn.save('stage-1-big') learn.load('stage-1-big'); learn.unfreeze() lrs = slice(lr/1000,lr/10) learn.fit_one_cycle(10, lrs) learn.save('stage-2-big') learn.load('stage-2-big'); learn.show_results(rows=3, figsize=(9,11)) ``` ## fin ``` # start: 480x360 print(learn.summary()) ```
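A quick side note on the `acc_camvid` metric defined above: it drops every pixel whose label is `Void` before averaging, so unlabeled pixels never count for or against the model. A tiny hedged example with made-up tensors (not CamVid data) shows the effect:

```
import torch

# one fake "image": 2 classes over a 2x2 grid of pixels
logits = torch.tensor([[[[2.0, 0.1],
                         [0.3, 0.9]],     # scores for class 0
                        [[0.5, 0.05],
                         [2.0, 0.1]]]])   # scores for class 1 -> shape (1, 2, 2, 2)
target = torch.tensor([[[[0, 1],
                         [11, 0]]]])      # 11 == codes.index('Void') in this notebook

void = 11
t = target.squeeze(1)
mask = t != void
acc = (logits.argmax(dim=1)[mask] == t[mask]).float().mean()
print(acc)  # tensor(0.6667): 2 of the 3 non-void pixels are predicted correctly
```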
github_jupyter
##Laboratorio 2 #cumplir con cada uno de los 9 retos en sus grupos de trabajo #y subir el Colab a el repositorio de un seleccionado, for: 30/05/2022 12:59 ##1. Define a procedure histogram () that takes a list of whole numbers and prints a histogram on the screen. Example: procedure ([4, 9, 7]) should print the following: #**** #********* #******* ``` def histogram(lista): for i in lista: if i != lista[0]: print((i-lista[0])*'*') else: print(lista[0]*'*') histogram([4,9,7]) # profe esto fue lo que yo entendi ya que en el ejemplo lo mostraba de esa forma, # si se puede corregir lo corrijo, gracias!! ``` #2. Write a longer_long () function that takes a list of words and returns the longest. ``` def longer_long(lista): la_mas_larga = 0 index_del_mas_largo = 0 for i in list(enumerate(lista)): if len(i[1]) > la_mas_larga: la_mas_larga = len(i[1]) index_del_mas_largo = i[0] print(lista[index_del_mas_largo]) longer_long(['uno', 'dos', 'tres', 'larga', 'cuatro', 'demasiado', 'cinco']) ``` #3. Write a program that tells the user to enter a string. The program has to evaluate the string and say how many capital letters it has. ``` def filt_words(string): num_of_capital = 0 for i in string: if i.isupper() == True: num_of_capital = num_of_capital + 1 return f'This string have {num_of_capital} capital letters' filt_words(str(input('Write a string for be processed: '))) ``` #4.Build a small program that converts binary numbers to integers. Example: #![image.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAALsAAABTCAYAAADHh3aFAAAF/UlEQVR4Ae2aa47cOAyE+xJzgPmzx9p77HlyyPnZCwb4ggohWZKt9rMCNPim6FKZPUnm9fPz8/bHGDyBA68nPKSf0S9zcMBk9zfbY77ZTXaT3WT3V7+/+u/GAW92b3Zv9ru91X4ef1N5s3uze7OfbRP++vp6x+dsc/XOc/X5e5/zzHmX2exLZCGGrAEe8Vos/GvjnIssnbEUK+XbN//HrtuQHXLUCNsi25a4nqk6M4Vs9ddc6/OJHpg2yf56vX5vw5DoehnZpzY1Jak9tuglcmUfNjKfhx85Ei/VlHy5p+3PEHoJ1y6yZwJrQ42FX21Ijp8YUvus1TOxws4+etf8W+K559L5nGO5P9ED8y6y6+Vkoi7ZGlvSI5Y/euaSDtmUZPhyXc1P3po4NSFVp6flMcQu4X442UtDjfiUZNRBOmxkzb8lPnI+51ge8wIcTva80bF7CVEicMkX/Wp+zloTL9WUfJxheQzRA/epZM9EDZvLrenEt0gll+q551IsctfGtU71fL7t44ge2G8m++8m8i81NVLX/DMIEATTT+6pMXTNwadyS1xrrR9LcMW/SXZNtn6ei/NdjN+Fye7fjfnzo+bdXyCT3WQ32e/+lvv5xn8MuDpm3uze7N7sV3+LPf/zNnfrzr3Zvdm92VtviePenFfjgDe7N7s3+8y39r9//3nHZ2bPWi/9n9pazt7+mOlTc9G7t39v3iyMWue14rPmiD67bPatZO99UdYAFzX6mQkuveiP/QnZ8+w9OUfMttdcu5B9K4A9ZF8LWK7L9tbZ96pvzd2Kf3rO1vmt+Iz5mmSHaGxnbA7HJo4fiZ88/Ej85OEPiS9LzUFvgRXxUk6Pj1okZyLxI/GHxBdS/TmmeUu5uQd2qYYYZ6mNTh3n40cSL9nUlCT5yNwHP7IVJ2+L7CI7hIyDIB6HlmxiKrVH9mtMdfJKPmIhW0BpXPVareaEvmTXeuh8tRztnXWtz+drDF1z8CFbMY2rHvVLdsSIZ52zVZKrPtVbcc1do3eRPTdW8qkeedmmttdfyiv56BtyBKScm+3cb01cZ0Mf6VPKXeqTZyYXOdIv5y7ZGqvpzNCasSeuvdbo08leG6JG2OzPdvQr+fQcBVr96BHXD/6QpVr1aZ3q9NBcfCVZylNfTWfGiPPp7U+e9saHzLERW3NrOueE1Bz1o7fi5K2Vtyd7BrBlB5Cao3oJ5FacmlKe+nr0PBu9l/yjMZ2jVKvxHr13xtJZWjtDHyZ73rLZrg1Vy8v+bEe/kk/PUdDVH7rGQlc7x2t2rtEzSj01jl7qob5eXfPoHbLmH43lPmqHnm1mqPmJt+boiWuvNXoX2YNsfPIhLSJSlyV9cn22NY8e+FQq2OoPnUtCahwfUmPoxFQSC6n+0Jdi5FJHrtapTh51rRh59EXmupq/lKc9Nd6j187Bj9Re+GbLLrLPPvQT/fYA6xNz79XzaHxa57fiM3C6DdkDjD0AmwH6UT2Owqd1bis+C69bkd2Eb/8m5l7EgqCt81px+syQTbLPOMQ92iQ0Rp/HyGT3r/j+9RfqO790JrvJbrLf+Q33s33+R4YzYuzN7s3uzX7GN9MzPXMjz7p3b3Zvdm/2WW+T+3gbn4UD3uze7N7sZ3kbPYe/GWZxwJvdm92bfdbb5D7ezGfhwOvtP0bgIQiY7A+5aD/m+22ymwWPQcBkf8xV+0FNdnPgMQiY7I+5aj+oyW4OPAYBk/0xV+0HNdnNgccgYLI/5qr9oCa7OfAYBEz2x1y1H7Sb7N/f31W0IrYUrxY6YAR2RKCL7C0yt+I7Po+PMgJVBJpkZ2Mjcyf8yBy3bQTOgkCT7AxaInP48CPJtzQCZ0NgM9l5IJMdJCzPisBqsmdyZ/usD+y5novAJrIHwUuf58LpJz8zAqvJnh/Kmz0jYvtsCDTJ3ru5TfazXa3nyQg0yZ4LbBuBqyJgsl/15jz3MAIm+zBkLrgqAib7VW/Ocw8jYLIPQ+aCqyJgsl/15jz3MAIm+zBkLrgqAib7
VW/Ocw8jYLIPQ+aCqyJgsl/15jz3MAL/A3KAUM/EuUhhAAAAAElFTkSuQmCC) ``` def aDecimal(num_bin): num_deci = 0 for position, digit_string in enumerate(num_bin[::-1]): num_deci += int(digit_string) * 2 ** position return num_deci print(aDecimal('01110')) ``` #5.Create a function count_vols (), which receives a word and counts how many letters "a" it has, how many letters "e" it has, and so on until all the vowels are completed. You can make the user who chooses the word. only in lower letters ``` def contar_vocales(cadena): dicc = {'a': 0, 'e': 0, 'i': 0, 'o': 0, 'u': 0} n = 1 for i in cadena: for x in dicc.keys(): if i.lower() == x and i.lower() in dicc and dicc[x] != 0: dicc[x] = dicc.get(x) + 1 elif i.lower() == x: dicc[x] = 1 elif i.lower() == x: dicc[x] return f'vocal ___ cantidad\na\t|\t{dicc["a"]}\ne\t|\t{dicc["e"]}\ni\t|\t{dicc["i"]}\no\t|\t{dicc["o"]}\nu\t|\t{dicc["u"]}' print(contar_vocales('hlas sdaliruw sowlasail')) ``` #6.Write a function is leap () that determines if a given year is a leap year A leap year is divisible by 4, but not by 100. It is also divisible by 400 ``` agno = int(input()) if (agno % 4 == 0 and agno % 100 != 0) or (agno % 100 == 0 and agno % 400 == 0): print("Es un año bisiesto") elif agno % 100 == 0 and agno % 400 != 0: print("No es un año bisiesto") else: print("No es un año bisiesto") ``` #7. Write a program that asks for two words and tells whether or not they rhyme. If the last three letters match, you have to say that they rhyme. If only the last two coincide, he has to say that they rhyme a bit and if not, that they do not rhyme. Example: ![image.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAATsAAACKCAYAAADVPe2jAAATbklEQVR4Ae1cS64kNw58l/ABvJljzT3mPHOKvoiXRi9n1dte1oBoBDocoEQpK5WfynhAgp8gKSnEpLPt1/76/v37S5+fP3++/JgD94B74JN64EsHXdifdECfxS+se8A9ED3gYeevWP/DzT3wiB7wsHOjP6LR/XXnrzsPOw87Dzv3wCN6wMPOjf6IRveXnb/sPOw87Dzs3AOP6IFTht1//v2vVzxH/NP26+trep3I2ZKXnadV579//PGKJ8tZ7Tti7dVrrK6/+g7eqX/E2VtrtPoZ56lwxJ0hbznsRgflVuIjb2suX2KvRquZOH+VfsTaq9dYXb/Hfazdw1djR5y9t0avr+PsFb6an1b9U4ZdazOj/pFhdzbh1fq9Zhrl4cpxq8+3un6P21i7h38CVvFb9XeFn8HR9LDDoAmJhzeuOGOhIwdxGc5xjHMu6xwDvUU2/CGhIyck/BXGca1Yrjur95oNGKTWhh+ScfhCsp91jtE42IjhvCvouj/Y2Bvs3v6BQXIufCyBQzIWOvwhYSOGMejAIOFHfuZvxUSsYlyHsRk963nOr3COPUrfNOx4UGHoYMOZDYwl11A/Y6wjLvMBC9kjOjDGWa9qcK7qVS7jlc4NyjryMh+wkBXei4lczh+xee2z9ZH96vl4z1V+jztgWr+yeX3UUJ/aXJOxav8ZzvkzeuvdQY0KR9xRctOw083x8GE94tRG7qg/i8t8qBuyR7JiaqNO5mdfS6/WR/1RqU2tttapcMS34jI/+1iPWmqj/lky2w/7WM/2r/hoDM5b5Ste2airUvOAZ372sR45aqPOiOR3IIuv8CxnpW/5sGttvjWw1K921Mt8vE6P5B5W1eDclh41GOOao3o0ID+cVzVnhaNWKy7zs4/1qKU26p8ls/2wj/Vsj4FnD8f2amS5HM961KxsXpd1zQOW+dnHerY+6ozIqs8rfGSNPWM87Bq/Y5VdFPtaelwOY7OXVTWj4lq/whHfisv87GM9aqmN+mfJbD/sYz3bY4VXZ67yFa/sbI+9PWg9jVVc7dZ6mb/q8wrPaq70vT3s9CtL7dbmW3HqVzvqZT5ep0dyD6tqcG5LjxqMcc0RnZsvdLYjP/Nx3QpHrNZlP2NajzHsB7kzMjh6h6fWWtX+FNc6el7Fw+7VqPI1N7PVN7MHXT+zud7IWhzPenV/Fc61jtA3DbsYNnh0k9UgQp5K1NF8tTkONeBj2SK65Udu4NkTOOe2dK4DfVaiQSE1H37IGRw5KrlGhWks26M6OB6NH43r7T1qBF7V0hpZDsdoPcagI0ZrqY09Ik9x9rOO+iP5Gsv2qM79n+VUeJaz2rdp2K3e1B71zyb77PX34HB1jRUc6XBYfYan1q/ursLP4O1jh12QeTbhZ69/RkONrBm8rOLGw279X/iv7q7CR3pkRcxHD7sg7Gziz15/RdNcuaaH3dphV/VzhZ/ZO9PD7szNeu21jWx+ze8n94CHXeNXTz750n02D7Un9oCHnYdd+V8nn/hi+Myf9w8EDzsPOw8798AjesDDzo3+iEb3l9rnfanN3qmHnYedh5174BE9cMqw6/3Nh9lpXcVv+U/hkbMlL9tLqw5++z3LuYNv9f5X178yx0ecvbVGq1/BV4Uj7orylsOu9VfIlOCtFxN5W3N5D70arWbj/FV6rP1u7dX7X12/d/49+OnVr7Ajzt5bo9e3sfcKr853Fn7KsHv3sCPD7uwLqdbvNdu7/FT5e7zMq/e/un6Poz346dW/AlbxW/VvhV/hjLqH6WGHQYM/isJGYdjA4YeEH3HwQ8KPOPhDwqeSY6C3LgP+kNCRExL+CuO4VizXndH1ZctsNGuGxVrAeV34VHIMdMTAvoqMfcVesD/Y2B9s4PCzB
AYJDLZK4JA9PLCIQwxyWAKDbGHsZx15kIqFnWEcV+lZT3NOhXPsVfRNww4DKQ6BwYMDZTYwllxD/YyxjrjMByxk7yICY5z1qgbnql7lMl7p0agcw7Y28YjNtULneoopXsVm+St9I+flPbOOs7FP6yGmdQaNH7G1Fq+vGOxWzMh6nMs6ao/K1ruB/ApH3FXkpmGnm+fhw3rEqY3cUX8Wl/lQN2TvEhRTG3UyP/taerU+6vdk1tCIz5qXfaxHjtotH+qrzPI15kg72w/7WI99VfZoDM6o9TRf8cpGXZWaBzzzs4913RtqjEru8SynwrOcM33Lh13rcK2BpX61o17m43V6l9DDqhqc29KjBmNcc0ZH00IiN+zsYRx6SM1v+TSH12DsbL06T4bznvlcrGsM26xzDuuICR/0kJXNsaxrHrDMzz7Ws/VRZ0RWfVzhI2scGeNh1/gdq+wi2dfS4/IY23qZaFpI1FEbfkjF1Y64zDeTj9gzZLZ39rGe7a/CI6cX08OyXI1XO9tjVgdxWT77WO/VQb2erPq4wnu1z8DeHnb6laV261CtOPWrHfUyH6/Tu4QeVtXg3JYeNRjjmjM6mhYSuWGrD1hIxdTOYlr51Vqcp3pwsAcPWlfPU9lZvuZkMeqDXXGitTNbfajNshUTfsYye6QOx7T06v4qvFX3LP+mYRfDBo9uvBpEyFOJOpqvNsehBnwsWxfR8iM38OwJnHNbOteBvlVyU3MNNDhL4JqjNschHz5I+CHhn5HgcCZnJBZ7gtSc8KtPbeSy7MX0MNRAjK6vdsQhBxK5GTYSo/k9m7Gezv2dxVV4lnO2b9OwO3vTI+uffRlnrz/C0eqYFRxkw2P1OZ5Yv7q7Cr8iZx877ILssy/k7PXParg496qze9it/wv91d1V+Fl9V6370cPOA2/9i1E12N64h93aO60GWYXvfd971psednsu7lprG9f8ml/3wO8e8LBr/OqJm+R3k5gLc/EJPTA17H78+PHy0+bg27dvr95j7trcmZtrcxN/fI2nd0+93g+sl3sE5mG34wCPC/3r+1/pc4XLPqKhvMa1h9bW+xkddlfu/03D7hM+aVecIQba3//7O30CW7Gma/qPmEf0AIZdb62r97+H3Y7/zu7ql91rVGMemr0e8LDbOCh6f/OhR/gWLC5pNm/kYrOa2bCLWvG1p1922W/GZzXVh7wzfwUDe9C9HWmv3sPq+iu5WrF3fSeyNaLH0e/6Jxz0f+Arz96rfcqX3bvDrvVXyPSgW4nVi9W6LVuHHV88Lhu5WbMAG5GRPxK3Iqba+xF7q/bw7rlX1393f738FXvXdyJbA/3PfY+hx/0feG//q7BTht27hxkZdmcQisuOC9YL58uO82fNMsNL5M/EHxl7xN7e5a/iY3X9av2r4dHP/E5l/Mz0P9c66qzTww6DBl9nsLFh2MDhh4QfcfBDwo84+EPCp5JjoLfIhD8kdOSEhL/COA6xM5fNa2Z61kwcFzjbrCOXZQ9XLGzkMsb+bH3kqEQN9bONGF0jYhi7io69Z/sBBqkx8EMyHr6wMwx+SMRqfi8XmMpWDayBfuc41Wf6H++M1lhpbxp2GEixMQwebDKzgbHkGupnjHXEZT5gIXtE6qW1YjM/56oe6+KyA8PnO2RgvMeejgaLGNY5Z9SvcWGzb8TmdaFzDfggWxivpTrncj7HIeZsqfvT/TCeYYzr+TKba1Q4YnkN9sHPdeCLOPazzf2OeirR/+h5fg+y/g9ca6y0Nw073RAPH9YjTm3kjvqzuMyHuiF7JCqmNupkfvZlOi47MFw4ZHbZWKsnuRE5btSvcWpHTfaxrtjI+qM5vE5Lx3qMw3cVqXtTW/eZ4exjPXJnbayneVqL8ZbOtaKnueeBsUT/o+f5Pcj6v6rHtffQlw+71iZbA0v9ake9zMfr9EjsYVUNzs10XHZguHDI7LJ5Pdaj+fhhDDo3KHwh1V/ZmqPxXJv1XlwLY/+IjvU4Fr4zZeyHH95LtVfOYx01RvIR25NZHfaN6KgfsdHT3PPAWKL/0fP8HmT9X9Xj2nvoHnaNX5/JLoJ9mY7LDgwXDplddnaB3ISBq42cnj8wPIiHzPLYxzpyMtmLa2HsH9GxLsfCd5bUvVS27lPj98ZRL1uHfSM614qe5p4HxhL9j57n9yDr/6oe195Df3vY6VeW2q1NtuLUr3bUy3y8To/EHlbV4NxMx2UHhguHzC6b14OuTcg2YkLO+pEbeZyb2YjtSa6hcS2M/T1dMbZ1rZYdd8B31Iqb9fNeQmc7amU+XmME53jVdT3FYWdx7OvpioU9wif6Hz3P70HW/yvuB+fP5KZhF8MGjxatBhHyVKKO5qvNcagBH8sWkS0/cgPPnsA5N9P5sgPHpYfMLhtrqozm4odx9rOOGPZBBwYJPyT8IcPHturIUdmLA8a1W3rEVrVRrydxh72YrVi1v1m8x4XukWMVC1vXhg0MOVyHdcQhD9gInzP9z+8P9rRabhp2qze1R/0zyJy57D3OmNVAczKW+Rj/VP2MHvhkLis+Z/q/qrWCx48ddkHW0YTyZcfXXKyPr7vAVlyg1swGW+bTvE+yg/ej7/6T+MvOMsIp+p/7Puv/s+7mo4ddXNqRxOKyccE88I4adnHmGG78ZM1rn//i/0wPjA67bNDFe4D+P/J91PNND7ut/z+sJ+TFhV75/+f1hDvwGdf8//Qw7Hr8Xr3/Pex2/p93xoW3nl6jGFvzkprXfXgdHXat3g//2XcxNez0s9C2/yjkHnAP3KUHPOwav1R8lwv0Pj1s3ANjPeBh52F3yH8l9gs59kKap3U8edh52HnYuQce0QMedm70RzS6v5jWfTHdhVsPOw87Dzv3wCN6wMPOjf6IRr/L14f3ue4L1MPOw87Dzj3wiB7wsHOjP6LR/cW07ovpLtx62HnYedi5Bx7RA18v/5gBM2AGHsCAh90DLtlHNANm4PXysHMXmAEz8AgGPOwecc0+pBkwAx527gEzYAYewYCH3SOu2Yc0A2bAw849YAbMwCMY8LB7xDX7kGbADHjYuQfMgBl4BAMedo+4Zh/SDJgBDzv3gBkwA49gYPdh9+eff77i8Y8ZMANm4EoMTA07DDLI7CA9LIu3zwyYATNwBAPTw4435S84ZsO6GTADV2Zg12GHr7psCDKmuhIEPPOHr4VrvG0zYAbMABjYddihaAwj/eEBpTrHci7rEcN5sDnXuhkwA2agxcD0sMPA0UHEC2QY+1o61wid40ZszbdtBsyAGQAD08MOiTqI4A+ZYexr6cgNHE+vLtfhOOtmwAyYAWXgrWHXGjaZn30jemyU40ZsPZxtM2AGzAAY2DzsooAOIxTN/Owb1TkuW09xrG9pBsyAGVAG3h52PHBCz55YVOOwEfYjjmsgDljPZsy6GTADZoAZmBp2nGjdDJgBM3AnBjzs7nRb3qsZMAObGfCw20ydE82AGbgTAx52d7ot79UMmIHNDHjYbabOiWbADNyJAQ+7O92W92oGzMBmBjzsNlPnRDNgBu7EwO2GHX4H704ke69mwAycz8DUsMsG
)

```
def rim(uno, dos):
    # Compare the word endings: three matching letters -> rhyme, two -> partial rhyme
    if uno[-1] == dos[-1] and uno[-2] == dos[-2] and uno[-3] == dos[-3]:
        return 'They rhyme'
    elif uno[-1] == dos[-1] and uno[-2] == dos[-2]:
        return 'They rhyme a little'
    else:
        return 'They do not rhyme'

print(rim('accion', 'pretencion'))
print(rim('venta', 'pretencion'))
print(rim('algoritmo', 'acerrimo'))
```

#8. Write a program that asks the user for a dollar amount, an interest rate, and a number of years. It should show on the screen what the initial capital will have grown to after those years if the entered interest rate is applied each year. Remember that a capital of C dollars at an interest rate of x percent for n years becomes C * (1 + x / 100) raised to the power n. Test the program knowing that $10,000 at 4.5% annual interest becomes $24,117.14 after 20 years.

```
def calculo(dinero, inte, cant_anos):
    resultado = dinero * (1 + inte / 100)**cant_anos
    print(f'After {cant_anos} years, your {dinero} USD at {inte}% annual interest grow to {resultado:.2f} USD')

calculo(10000, 4.5, 20)
```

##9.
Create an algorithm that iteratively generates 10 files named 'tabla_del_n.txt', where n takes each value from 0 to 9, and where each file contains the multiplication table of n (from n x 0 to n x 9) in the following format:

*(embedded screenshot of the expected file format omitted)*

```
def tablas():
    for i in range(10):
        archivo = open(f'/content/drive/MyDrive/tabla_del_{i}.txt', 'w')
        archivo.write(f'Tabla del {i}')            # header line
        for x in range(10):
            archivo.write(f'\n{i} x {x} = {i*x}')  # one multiplication per line
        archivo.close()                            # flush and close the file

tablas()
```

## Using the same files, write an algorithm that asks the user for a number n and a number m, and shows the user the line of tabla_del_n.txt containing the multiplication of n by m.

```
def tabla_request(n, m):
    archivo = open(f'/content/drive/MyDrive/tabla_del_{n}.txt', 'r')
    linea = archivo.readlines()
    archivo.close()
    print(linea[m+1])  # line 0 is the header, so multiplication m is on line m + 1

tabla_request(6, 6)
```
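As a design note on the two helpers above: opening files inside a `with` block closes them automatically, so the explicit `close()` calls are not needed. This is a minimal sketch only, using the same (hypothetical) Google Drive paths as the exercise code.

```
def tablas():
    for i in range(10):
        with open(f'/content/drive/MyDrive/tabla_del_{i}.txt', 'w') as archivo:
            archivo.write(f'Tabla del {i}')
            for x in range(10):
                archivo.write(f'\n{i} x {x} = {i*x}')

def tabla_request(n, m):
    with open(f'/content/drive/MyDrive/tabla_del_{n}.txt', 'r') as archivo:
        lineas = archivo.readlines()
    print(lineas[m + 1])  # line 0 is the header, so multiplication m is on line m + 1
```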
github_jupyter
##### Copyright 2018 The TensorFlow Authors. ``` #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # Image Classification using tf.keras <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/examples/blob/master/courses/udacity_intro_to_tensorflow_for_deep_learning/l05c04_exercise_flowers_with_data_augmentation_solution.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/examples/blob/master/courses/udacity_intro_to_tensorflow_for_deep_learning/l05c04_exercise_flowers_with_data_augmentation_solution.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> </td> </table> In this Colab you will classify images of flowers. You will build an image classifier using `tf.keras.Sequential` model and load data using `tf.keras.preprocessing.image.ImageDataGenerator`. # Importing Packages Let's start by importing required packages. **os** package is used to read files and directory structure, **numpy** is used to convert python list to numpy array and to perform required matrix operations and **matplotlib.pyplot** is used to plot the graph and display images in our training and validation data. ``` import os import numpy as np import glob import shutil import matplotlib.pyplot as plt ``` ### TODO: Import TensorFlow and Keras Layers In the cell below, import Tensorflow and the Keras layers and models you will use to build your CNN. Also, import the `ImageDataGenerator` from Keras so that you can perform image augmentation. ``` import tensorflow as tf from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D from tensorflow.keras.preprocessing.image import ImageDataGenerator ``` # Data Loading In order to build our image classifier, we can begin by downloading the flowers dataset. We first need to download the archive version of the dataset and after the download we are storing it to "/tmp/" directory. After downloading the dataset, we need to extract its contents. ``` _URL = "https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz" zip_file = tf.keras.utils.get_file(origin=_URL, fname="flower_photos.tgz", extract=True) base_dir = os.path.join(os.path.dirname(zip_file), 'flower_photos') ``` The dataset we downloaded contains images of 5 types of flowers: 1. Rose 2. Daisy 3. Dandelion 4. Sunflowers 5. Tulips So, let's create the labels for these 5 classes: ``` classes = ['roses', 'daisy', 'dandelion', 'sunflowers', 'tulips'] ``` Also, the dataset we have downloaded has following directory structure. 
n <pre style="font-size: 10.0pt; font-family: Arial; line-height: 2; letter-spacing: 1.0pt;" > <b>flower_photos</b> |__ <b>daisy</b> |__ <b>dandelion</b> |__ <b>roses</b> |__ <b>sunflowers</b> |__ <b>tulips</b> </pre> As you can see there are no folders containing training and validation data. Therefore, we will have to create our own training and validation set. Let's write some code that will do this. The code below creates a `train` and a `val` folder each containing 5 folders (one for each type of flower). It then moves the images from the original folders to these new folders such that 80% of the images go to the training set and 20% of the images go into the validation set. In the end our directory will have the following structure: <pre style="font-size: 10.0pt; font-family: Arial; line-height: 2; letter-spacing: 1.0pt;" > <b>flower_photos</b> |__ <b>daisy</b> |__ <b>dandelion</b> |__ <b>roses</b> |__ <b>sunflowers</b> |__ <b>tulips</b> |__ <b>train</b> |______ <b>daisy</b>: [1.jpg, 2.jpg, 3.jpg ....] |______ <b>dandelion</b>: [1.jpg, 2.jpg, 3.jpg ....] |______ <b>roses</b>: [1.jpg, 2.jpg, 3.jpg ....] |______ <b>sunflowers</b>: [1.jpg, 2.jpg, 3.jpg ....] |______ <b>tulips</b>: [1.jpg, 2.jpg, 3.jpg ....] |__ <b>val</b> |______ <b>daisy</b>: [507.jpg, 508.jpg, 509.jpg ....] |______ <b>dandelion</b>: [719.jpg, 720.jpg, 721.jpg ....] |______ <b>roses</b>: [514.jpg, 515.jpg, 516.jpg ....] |______ <b>sunflowers</b>: [560.jpg, 561.jpg, 562.jpg .....] |______ <b>tulips</b>: [640.jpg, 641.jpg, 642.jpg ....] </pre> Since we don't delete the original folders, they will still be in our `flower_photos` directory, but they will be empty. The code below also prints the total number of flower images we have for each type of flower. ``` for cl in classes: img_path = os.path.join(base_dir, cl) images = glob.glob(img_path + '/*.jpg') print("{}: {} Images".format(cl, len(images))) num_train = int(round(len(images)*0.8)) train, val = images[:num_train], images[num_train:] for t in train: if not os.path.exists(os.path.join(base_dir, 'train', cl)): os.makedirs(os.path.join(base_dir, 'train', cl)) shutil.move(t, os.path.join(base_dir, 'train', cl)) for v in val: if not os.path.exists(os.path.join(base_dir, 'val', cl)): os.makedirs(os.path.join(base_dir, 'val', cl)) shutil.move(v, os.path.join(base_dir, 'val', cl)) round(len(images)*0.8) ``` For convenience, let us set up the path for the training and validation sets ``` train_dir = os.path.join(base_dir, 'train') val_dir = os.path.join(base_dir, 'val') ``` # Data Augmentation Overfitting generally occurs when we have small number of training examples. One way to fix this problem is to augment our dataset so that it has sufficient number of training examples. Data augmentation takes the approach of generating more training data from existing training samples, by augmenting the samples via a number of random transformations that yield believable-looking images. The goal is that at training time, your model will never see the exact same picture twice. This helps expose the model to more aspects of the data and generalize better. In **tf.keras** we can implement this using the same **ImageDataGenerator** class we used before. We can simply pass different transformations we would want to our dataset as a form of arguments and it will take care of applying it to the dataset during our training process. ## Experiment with Various Image Transformations In this section you will get some practice doing some basic image transformations. 
Before we begin making transformations let's define our `batch_size` and our image size. Remember that the input to our CNN are images of the same size. We therefore have to resize the images in our dataset to the same size. ### TODO: Set Batch and Image Size In the cell below, create a `batch_size` of 100 images and set a value to `IMG_SHAPE` such that our training data consists of images with width of 150 pixels and height of 150 pixels. ``` batch_size = 100 IMG_SHAPE = 150 ``` ### TODO: Apply Random Horizontal Flip In the cell below, use ImageDataGenerator to create a transformation that rescales the images by 255 and then applies a random horizontal flip. Then use the `.flow_from_directory` method to apply the above transformation to the images in our training set. Make sure you indicate the batch size, the path to the directory of the training images, the target size for the images, and to shuffle the images. ``` image_gen = ImageDataGenerator(rescale=1./255, horizontal_flip=True) train_data_gen = image_gen.flow_from_directory( batch_size=batch_size, directory=train_dir, shuffle=True, target_size=(IMG_SHAPE,IMG_SHAPE) ) ``` Let's take 1 sample image from our training examples and repeat it 5 times so that the augmentation can be applied to the same image 5 times over randomly, to see the augmentation in action. ``` # This function will plot images in the form of a grid with 1 row and 5 columns where images are placed in each column. def plotImages(images_arr): fig, axes = plt.subplots(1, 5, figsize=(20,20)) axes = axes.flatten() for img, ax in zip( images_arr, axes): ax.imshow(img) plt.tight_layout() plt.show() augmented_images = [train_data_gen[0][0][0] for i in range(5)] plotImages(augmented_images) ``` ### TODO: Apply Random Rotation In the cell below, use ImageDataGenerator to create a transformation that rescales the images by 255 and then applies a random 45 degree rotation. Then use the `.flow_from_directory` method to apply the above transformation to the images in our training set. Make sure you indicate the batch size, the path to the directory of the training images, the target size for the images, and to shuffle the images. ``` image_gen = ImageDataGenerator(rescale=1./255, rotation_range=45) train_data_gen = image_gen.flow_from_directory(batch_size=batch_size, directory=train_dir, shuffle=True, target_size=(IMG_SHAPE, IMG_SHAPE)) ``` Let's take 1 sample image from our training examples and repeat it 5 times so that the augmentation can be applied to the same image 5 times over randomly, to see the augmentation in action. ``` augmented_images = [train_data_gen[0][0][0] for i in range(5)] plotImages(augmented_images) ``` ### TODO: Apply Random Zoom In the cell below, use ImageDataGenerator to create a transformation that rescales the images by 255 and then applies a random zoom of up to 50%. Then use the `.flow_from_directory` method to apply the above transformation to the images in our training set. Make sure you indicate the batch size, the path to the directory of the training images, the target size for the images, and to shuffle the images. ``` image_gen = ImageDataGenerator(rescale=1./255, zoom_range=0.5) train_data_gen = image_gen.flow_from_directory( batch_size=batch_size, directory=train_dir, shuffle=True, target_size=(IMG_SHAPE, IMG_SHAPE) ) ``` Let's take 1 sample image from our training examples and repeat it 5 times so that the augmentation can be applied to the same image 5 times over randomly, to see the augmentation in action. 
``` augmented_images = [train_data_gen[0][0][0] for i in range(5)] plotImages(augmented_images) ``` ### TODO: Put It All Together In the cell below, use ImageDataGenerator to create a transformation that rescales the images by 255 and that applies: - random 45 degree rotation - random zoom of up to 50% - random horizontal flip - width shift of 0.15 - height shift of 0.15 Then use the `.flow_from_directory` method to apply the above transformation to the images in our training set. Make sure you indicate the batch size, the path to the directory of the training images, the target size for the images, to shuffle the images, and to set the class mode to `sparse`. ``` image_gen_train = ImageDataGenerator( rescale=1./255, rotation_range=45, width_shift_range=.15, height_shift_range=.15, horizontal_flip=True, zoom_range=0.5 ) train_data_gen = image_gen_train.flow_from_directory( batch_size=batch_size, directory=train_dir, shuffle=True, target_size=(IMG_SHAPE,IMG_SHAPE), class_mode='sparse' ) ``` Let's visualize how a single image would look like 5 different times, when we pass these augmentations randomly to our dataset. ``` augmented_images = [train_data_gen[0][0][0] for i in range(5)] plotImages(augmented_images) ``` ### TODO: Create a Data Generator for the Validation Set Generally, we only apply data augmentation to our training examples. So, in the cell below, use ImageDataGenerator to create a transformation that only rescales the images by 255. Then use the `.flow_from_directory` method to apply the above transformation to the images in our validation set. Make sure you indicate the batch size, the path to the directory of the validation images, the target size for the images, and to set the class mode to `sparse`. Remember that it is not necessary to shuffle the images in the validation set. ``` image_gen_val = ImageDataGenerator(rescale=1./255) val_data_gen = image_gen_val.flow_from_directory(batch_size=batch_size, directory=val_dir, target_size=(IMG_SHAPE, IMG_SHAPE), class_mode='sparse') ``` # TODO: Create the CNN In the cell below, create a convolutional neural network that consists of 3 convolution blocks. Each convolutional block contains a `Conv2D` layer followed by a max pool layer. The first convolutional block should have 16 filters, the second one should have 32 filters, and the third one should have 64 filters. All convolutional filters should be 3 x 3. All max pool layers should have a `pool_size` of `(2, 2)`. After the 3 convolutional blocks you should have a flatten layer followed by a fully connected layer with 512 units. The CNN should output class probabilities based on 5 classes which is done by the **softmax** activation function. All other layers should use a **relu** activation function. You should also add Dropout layers with a probability of 20%, where appropriate. ``` model = Sequential() model.add(Conv2D(16, 3, padding='same', activation='relu', input_shape=(IMG_SHAPE,IMG_SHAPE, 3))) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Conv2D(32, 3, padding='same', activation='relu')) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Conv2D(64, 3, padding='same', activation='relu')) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Flatten()) model.add(Dropout(0.2)) model.add(Dense(512, activation='relu')) model.add(Dropout(0.2)) model.add(Dense(5)) ``` # TODO: Compile the Model In the cell below, compile your model using the ADAM optimizer, the sparse cross entropy function as a loss function. 
We would also like to look at training and validation accuracy on each epoch as we train our network, so make sure you also pass the metrics argument. ``` model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=['accuracy']) ``` # TODO: Train the Model In the cell below, train your model using the **fit_generator** function instead of the usual **fit** function. We have to use the `fit_generator` function because we are using the **ImageDataGenerator** class to generate batches of training and validation data for our model. Train the model for 80 epochs and make sure you use the proper parameters in the `fit_generator` function. ``` epochs = 80 history = model.fit_generator( train_data_gen, steps_per_epoch=int(np.ceil(train_data_gen.n / float(batch_size))), epochs=epochs, validation_data=val_data_gen, validation_steps=int(np.ceil(val_data_gen.n / float(batch_size))) ) ``` # TODO: Plot Training and Validation Graphs. In the cell below, plot the training and validation accuracy/loss graphs. ``` acc = history.history['accuracy'] val_acc = history.history['val_accuracy'] loss = history.history['loss'] val_loss = history.history['val_loss'] epochs_range = range(epochs) plt.figure(figsize=(8, 8)) plt.subplot(1, 2, 1) plt.plot(epochs_range, acc, label='Training Accuracy') plt.plot(epochs_range, val_acc, label='Validation Accuracy') plt.legend(loc='lower right') plt.title('Training and Validation Accuracy') plt.subplot(1, 2, 2) plt.plot(epochs_range, loss, label='Training Loss') plt.plot(epochs_range, val_loss, label='Validation Loss') plt.legend(loc='upper right') plt.title('Training and Validation Loss') plt.show() ```
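(Added note, not part of the original exercise.) Once training has finished, the classifier can be tried on a single image. The sketch below assumes a hypothetical local file `my_flower.jpg`; class names are recovered from `train_data_gen.class_indices`, since `flow_from_directory` assigns indices alphabetically by folder name, and `tf.nn.softmax` is applied because the model outputs raw logits.

```
import numpy as np
import tensorflow as tf

img_path = 'my_flower.jpg'  # hypothetical image to classify

img = tf.keras.preprocessing.image.load_img(img_path, target_size=(IMG_SHAPE, IMG_SHAPE))
x = tf.keras.preprocessing.image.img_to_array(img) / 255.0  # same rescaling as the generators
x = np.expand_dims(x, axis=0)                               # shape (1, 150, 150, 3)

logits = model.predict(x)
probs = tf.nn.softmax(logits[0]).numpy()

index_to_class = {v: k for k, v in train_data_gen.class_indices.items()}
print(index_to_class[int(np.argmax(probs))], probs.max())
```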
github_jupyter
# Equilibrium analysis Chemical reaction

Number (code) of assignment: 2N4

Description of activity: Report

on behalf of:
name: Pieter van Halem, student number: 4597591
name: Dennis Dane, student number: 4592239

Data of student taking the role of contact person:
name :
email address :

```
import numpy as np
import matplotlib.pyplot as plt
```

# Function definitions:

In the following block the functions that are used for the numerical analysis are defined. These are functions for calculating the various time steps, functions for printing tables, and functions for plotting graphs.

```
def f(t,y,a,b,i):
    if (t>round(i,2)):
        a = 0
    du = a-(b+1)*y[0,0]+y[0,0]**2*y[0,1]
    dv = b*y[0,0]-y[0,0]**2*y[0,1]
    return np.matrix([du,dv])

def FE(t,y,h,a,b,i):
    f1 = f(t,y,a,b,i)
    pred = y + f1*h
    corr = y + (h/2)*(f(t,pred,a,b,i) + f1)
    return corr

def Integrate(y0, t0, tend, N,a,b,i):
    h = (tend-t0)/N
    t_arr = np.zeros(N+1)
    t_arr[0] = t0
    w_arr = np.zeros((2,N+1))
    w_arr[:,0] = y0
    t = t0
    y = y0
    for k in range(1,N+1):
        y = FE(t,y,h,a,b,i)
        w_arr[:,k] = y
        t = round(t + h,4)
        t_arr[k] = t
    return t_arr, w_arr

def PrintTable(t_arr, w_arr):
    print ("%6s %6s: %17s %17s" % ("index", "t", "u(t)", "v(t)"))
    for k in range(0,N+1):
        print ("{:6d} {:6.2f}: {:17.7e} {:17.7e}".format(k,t_arr[k], w_arr[0,k],w_arr[1,k]))

def PlotGraphs(t_arr, w_arr):
    plt.figure("Initial value problem")
    plt.plot(t_arr,w_arr[0,:],'r',t_arr,w_arr[1,:],'--')
    plt.legend(("$u(t)$", "$v(t)$"),loc="best", shadow=True)
    plt.xlabel("$t$")
    plt.ylabel("$u$ and $v$")
    plt.title("Graphs of $u(t)$ and $v(t)$")
    plt.show()

def PlotGraphs2(t_arr, w_arr):
    plt.figure("Initial value problem")
    plt.plot(w_arr[0,:],w_arr[1,:],'g')
    plt.legend(("$u,v$",""),loc="best", shadow=True)
    plt.xlabel("$u(t)$")
    plt.ylabel("$v(t)$")
    plt.title("$Phase$ $plane$ $(u,v)$")
    plt.axis("scaled")
    plt.show()

def PlotGraphs3(t_arr, w_arr,t_arr2, w_arr2):
    plt.figure("Initial value problem")
    plt.plot(t_arr,w_arr[0,:],'r',t_arr,w_arr[1,:],'b--')
    plt.plot(t_arr2,w_arr2[0,:],'r',t_arr2,w_arr2[1,:],'b--')
    #plt.plot([t_array[80],t_array2[0]],[w_array[0,80],w_array2[0,0]],'r')
    plt.legend(("$u(t)$", "$v(t)$"),loc="best", shadow=True)
    plt.xlabel("$t$")
    plt.ylabel("$u$ and $v$")
    plt.title("Graphs of $u(t)$ and $v(t)$")
    plt.show()

def PlotGraphs4(t_arr, w_arr,t_arr2, w_arr2):
    #plt.figure("Initial value problem")
    plt.plot(w_arr[0,:],w_arr[1,:],'g')
    plt.plot(w_arr2[0,:],w_arr2[1,:],'g')
    #plt.legend(("$u,v$",""),loc="best", shadow=True)
    plt.xlabel("$u(t)$")
    plt.ylabel("$v(t)$")
    plt.title("$Phase$ $plane$ $(u,v)$")
    plt.axis("scaled")
    plt.show()
```

# Assignment 2.9

Integrate the system with Modified Euler and time step h = 0.15. Make a table of u and v on the time interval 0 ≤ t ≤ 1.5. The table needs to give u and v in an 8-digit floating-point format.

```
y0 = np.matrix([0.0,0.0])
t0 = 0.0
tend = 1.5
N = 10
t_array, w_array = Integrate(y0, t0, tend, N,2,4.5,11)
print("The integrated system using Modified Euler with time step h = 0.15 is shown in the following table: \n")
PrintTable(t_array, w_array)
```

# Assignment 2.10

Integrate the system with Modified Euler and time step h = 0.05 for the interval [0,20]. Make plots of u and v as functions of t (put them in one figure). Also make a plot of u and v in the phase plane (u,v-plane). Do your plots correspond to your results of part 2?
```
y0 = np.matrix([0.0,0.0])
t0 = 0.0
tend = 20
N = 400
t_array, w_array = Integrate(y0, t0, tend, N,2,4.5, 25)
print("In this assignment the system has to be integrated using Modified Euler with a time step of h = 0.05 on \nan interval of [0,20].")
print("The first graph is u(t) and v(t) against time (t).")
PlotGraphs(t_array, w_array)
print("The second graph shows the u-v plane")
PlotGraphs2(t_array, w_array)
print("\n The system is stable and a spiral. Therefore it is consistent with the conclusion from assignment 1.3.")
```

# Assignment 2.11

Using the formula derived in question 7, estimate the accuracy of u and v computed with h = 0.05 at t = 8. To do so, integrate once more with time step h = 0.1.

The error can be estimated with Richardson's method, where we use α = 1/3 as found in assignment 7. The estimated error is: E ≈ α( w(h) - w(2h) ).

```
y0 = np.matrix([0.0,0.0])
t0 = 0.0
tend = 20
N = 400
t_array, w_array = Integrate(y0, t0, tend, N, 2, 4.5,25)

y0 = np.matrix([0.0,0.0])
t0 = 0.0
tend = 20
N = 200
t_array2, w_array2 = Integrate(y0, t0, tend, N, 2, 4.5, 25)

print("The value for u and v at t = 8 with h = 0.05 is:",t_array[160], w_array[:,160])
print("The value for u and v at t = 8 with h = 0.10 is:",t_array2[80], w_array2[:,80])
E1 = (w_array[0,160]-w_array2[0,80])*(1/3)
E2 = (w_array[1,160]-w_array2[1,80])*(1/3)
print("The estimated accuracy for u is:", E1)
print("The estimated accuracy for v is:", E2)
```

# Assignment 2.12

Apply Modified Euler with h = 0.05. For 0 ≤ t ≤ t1 it holds that a = 2. At t = t1 the supply of material A fails, and therefore a = 0 for t > t1. Take t1 = 4.0. Make a plot of u and v as functions of t on the interval [0, 10] in one figure, and a plot of u and v in the uv-plane. Evaluate your results by comparing them to your findings from part 8.

```
y0 = np.matrix([0.0,0.0])
t0 = 0.0
tend = 10.0
N = 200
t_array, w_array = Integrate(y0, t0, tend, N, 2, 4.5, 4)
PlotGraphs(t_array, w_array)
```

The first plot shows that u and v indeed converge to a certain value, as predicted in assignment 8. The phase plane shows that (u,v) goes to a point on the u-axis. This was also predicted in assignment 8. The first plot shows a corner in the u and v graphs (a discontinuity in the first derivative). This does not contradict the theory, because when the system of differential equations changes, the first derivative does not have to be continuous. The solution itself is continuous because it is fixed by the initial values.

# Assignment 2.13

Take t1 = 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 6.0. Make a table of the values of v-tilde and t-tilde. Evaluate your results.

```
for i in np.arange(3.0,6.5,0.50):
    t0 = 0.0
    tend = 10.0
    N = 200
    t_array2, w_array2 = Integrate(y0, t0, tend, N, 2.0, 4.5,i)
    indices = np.nonzero(w_array2[0,:] >= 0.01)
    index = np.max(indices[0])
    t_tilde = t_array2[index+1]
    v_tilde = w_array2[1,N]
    if i == 3:
        print("%6s %17s: %17s " % ("t1", "t_tilde", "v_tilde"))
    print("{:6.2f} {:17.2f} {:17.7e}".format(i,t_tilde,v_tilde))
```

The values should be: t1 = 6.0, v-tilde = 3.34762, t-tilde = 7.35
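The Richardson estimate used in assignment 2.11 can be wrapped in a small helper so it can be reused at other times or step sizes. This is only a sketch; `w_array` and `w_array2` are the h = 0.05 and h = 0.10 solutions computed above.

```
def richardson_error(w_fine, w_coarse, alpha=1/3):
    """Estimate the error of the fine-step solution as alpha * (w(h) - w(2h))."""
    return alpha * (w_fine - w_coarse)

# t = 8 corresponds to index 160 for h = 0.05 and index 80 for h = 0.10
print("Estimated error in (u, v) at t = 8:", richardson_error(w_array[:, 160], w_array2[:, 80]))
```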
github_jupyter
# Creating a simple PDE model In the [previous notebook](./1-an-ode-model.ipynb) we show how to create, discretise and solve an ODE model in pybamm. In this notebook we show how to create and solve a PDE problem, which will require meshing of the spatial domain. As an example, we consider the problem of linear diffusion on a unit sphere, \begin{equation*} \frac{\partial c}{\partial t} = \nabla \cdot (\nabla c), \end{equation*} with the following boundary and initial conditions: \begin{equation*} \left.\frac{\partial c}{\partial r}\right\vert_{r=0} = 0, \quad \left.\frac{\partial c}{\partial r}\right\vert_{r=1} = 2, \quad \left.c\right\vert_{t=0} = 1. \end{equation*} As before, we begin by importing the pybamm library into this notebook, along with any other packages we require: ``` %pip install pybamm -q # install PyBaMM if it is not installed import pybamm import numpy as np import matplotlib.pyplot as plt ``` ## Setting up the model As in the previous example, we start with a `pybamm.BaseModel` object and define our model variables. Since we are now solving a PDE we need to tell pybamm the domain each variable belongs to so that it can be discretised in space in the correct way. This is done by passing the keyword argument `domain`, and in this example we choose the domain "negative particle". ``` model = pybamm.BaseModel() c = pybamm.Variable("Concentration", domain="negative particle") ``` Note that we have given our variable the (useful) name "Concentration", but the symbol representing this variable is simply `c`. We then state out governing equations. Sometime it is useful to define intermediate quantities in order to express the governing equations more easily. In this example we define the flux, then define the rhs to be minus the divergence of the flux. The equation is then added to the dictionary `model.rhs` ``` N = -pybamm.grad(c) # define the flux dcdt = -pybamm.div(N) # define the rhs equation model.rhs = {c: dcdt} # add the equation to rhs dictionary ``` Unlike ODE models, PDE models require both initial and boundary conditions. Similar to initial conditions, boundary conditions can be added using the dictionary `model.boundary_conditions`. Boundary conditions for each variable are provided as a dictionary of the form `{side: (value, type)`, where, in 1D, side can be "left" or "right", value is the value of the boundary conditions, and type is the type of boundary condition (at present, this can be "Dirichlet" or "Neumann"). ``` # initial conditions model.initial_conditions = {c: pybamm.Scalar(1)} # boundary conditions lbc = pybamm.Scalar(0) rbc = pybamm.Scalar(2) model.boundary_conditions = {c: {"left": (lbc, "Neumann"), "right": (rbc, "Neumann")}} ``` Note that in our example the boundary conditions take constant values, but the value can be any valid pybamm expression. Finally, we add any variables of interest to the dictionary `model.variables` ``` model.variables = {"Concentration": c, "Flux": N} ``` ## Using the model Now the model is now completely defined all that remains is to discretise and solve. Since this model is a PDE we need to define the geometry on which it will be solved, and choose how to mesh the geometry and discretise in space. ### Defining a geometry and mesh We can define spatial variables in a similar way to how we defined model variables, providing a domain and a coordinate system. The geometry on which we wish to solve the model is defined using a nested dictionary. 
The first key is the domain name (here "negative particle") and the entry is a dictionary giving the limits of the domain. ``` # define geometry r = pybamm.SpatialVariable( "r", domain=["negative particle"], coord_sys="spherical polar" ) geometry = {"negative particle": {r: {"min": pybamm.Scalar(0), "max": pybamm.Scalar(1)}}} ``` We then create a mesh using the `pybamm.MeshGenerator` class. As inputs this class takes the type of mesh and any parameters required by the mesh. In this case we choose a uniform one-dimensional mesh which doesn't require any parameters. ``` # mesh and discretise submesh_types = {"negative particle": pybamm.MeshGenerator(pybamm.Uniform1DSubMesh)} var_pts = {r: 20} mesh = pybamm.Mesh(geometry, submesh_types, var_pts) ``` Example of meshes that do require parameters include the `pybamm.Exponential1DSubMesh` which clusters points close to one or both boundaries using an exponential rule. It takes a parameter which sets how closely the points are clustered together, and also lets the users select the side on which more points should be clustered. For example, to create a mesh with more nodes clustered to the right (i.e. the surface in the particle problem), using a stretch factor of 2, we pass an instance of the exponential submesh class and a dictionary of parameters into the `MeshGenerator` class as follows: `pybamm.MeshGenerator(pybamm.Exponential1DSubMesh, submesh_params={"side": "right", "stretch": 2})` After defining a mesh we choose a spatial method. Here we choose the Finite Volume Method. We then set up a discretisation by passing the mesh and spatial methods to the class `pybamm.Discretisation`. The model is then processed, turning the variables into (slices of) a statevector, spatial variables into vector and spatial operators into matrix-vector multiplications. ``` spatial_methods = {"negative particle": pybamm.FiniteVolume()} disc = pybamm.Discretisation(mesh, spatial_methods) disc.process_model(model); ``` Now that the model has been discretised we are ready to solve. ### Solving the model As before, we choose a solver and times at which we want the solution returned. We then solve, extract the variables we are interested in, and plot the result. ``` # solve solver = pybamm.ScipySolver() t = np.linspace(0, 1, 100) solution = solver.solve(model, t) # post-process, so that the solution can be called at any time t or space r # (using interpolation) c = solution["Concentration"] # plot fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(13, 4)) ax1.plot(solution.t, c(solution.t, r=1)) ax1.set_xlabel("t") ax1.set_ylabel("Surface concentration") r = np.linspace(0, 1, 100) ax2.plot(r, c(t=0.5, r=r)) ax2.set_xlabel("r") ax2.set_ylabel("Concentration at t=0.5") plt.tight_layout() plt.show() ``` In the [next notebook](./3-negative-particle-problem.ipynb) we build on the example here to to solve the problem of diffusion in the negative electrode particle within the single particle model. In doing so we will also cover how to include parameters in a model. ## References The relevant papers for this notebook are: ``` pybamm.print_citations() ```
github_jupyter
# Writing Functions This lecture discusses the mechanics of writing functions and how to encapsulate scripts as functions. ``` # Example: We're going to use Pandas dataframes to create a gradebook for this course import pandas as pd # Student Rosters: students = ['Hao', 'Jennifer', 'Alex'] # Gradebook columns: columns = ['raw_grade', 'did_extra_credit', 'final_grade'] # Let's create two dataframes, one for each class section gradebook = pd.DataFrame(index=students, columns=columns) print("Gradebook:") print(gradebook) # Now let's add some data # (in real life we might load this from a CSV or other file) gradebook.loc['Hao']['raw_grade'] = 80 gradebook.loc['Hao']['did_extra_credit'] = True # python supports boolean (True/False) values gradebook.loc['Jennifer']['raw_grade'] = 98 gradebook.loc['Jennifer']['did_extra_credit'] = False gradebook.loc['Alex']['raw_grade'] = 85 gradebook.loc['Alex']['did_extra_credit'] = True print("Gradebook:") print(gradebook) ``` ## Copying and pasting code can introduce bugs: You might forget to change a variable name. If you later make a change (like making extra credit worth 10 points instead of 5), you need to remember to change it in multiple places. If we put the code in a function, we can avoid these problems! ``` # Let's put our extra credit code in a function! def add_final_grades(student, grade): print("in add_final_grades") gradebook.loc[student, 'final_grade'] = grade add_final_grades('Jennifer', 99) print(gradebook) ``` ## Why write functions? 1. Easily reuse code (without introducing bugs) 2. Easy testing of components <ul> <li>Later in the course we will learn about writing unit tests. You will create a set of input values for a function representing potential scenarios, and will test that the function is generating the expected output. </ul> 3. Better readability <ul> <li>Functions encapsulate your code into components with meaningful names. You can get a high-level view of what the code is doing, then dive into the function definitions if you need more detail. </ul> ## A function should have one task Functions should usually be pretty short. It's good to think about functions as trying to do one single thing. ## Mechanics of Writing a Function - Function definition line - How python knows that this is a function - Function body - Code that does the computation of the function - Arguments - the values passed to a function - Formal parameters - the values accepted by the function (the arguments become the formal parameters once they are inside the function) - Return values - value returned to the caller If you are familiar with other languages like Java, you may have needed to declare the types of the parameters and return value. This is not necessary in Python. ``` def example_addition_function(num_1, num_2): """ This function adds two numbers. example_addition_function is the function name Parameters: num_1: This is the first formal parameter num_2: This is the second formal parameter Returns: sum of num_1 and num_2 """ added_value = num_1 + num_2 return added_value arg_1 = 5 arg_2 = 10 result_value = example_addition_function(arg_1, arg_2) # arg_1 and arg_2 are the arguments to the function ``` # Variable names and scope In Python, variables have a scope (a context in which they are valid). Variables created in a function cannot be referenced outside of a function ``` # Let's put our extra credit code in a function! 
section = "Section 1" def add_final_grades(student, grade): print("in add_final_grades %s" % section) gradebook.loc[student, 'final_grade'] = grade add_final_grades('Jennifer', 99) print(gradebook) # Let's put our extra credit code in a function! section = "Section 1" def add_final_grades(student, grade): print("in add_final_grades %s" % section) gradebook.loc[student, 'final_grade'] = grade if False: section = "new" add_final_grades('Jennifer', 99) print(gradebook) def print_message(message): message_to_print = "Here is your message: " + message print(message_to_print) my_message = "Hello, class!" print_message(my_message) #print(message_to_print) # this will cause an error. This variable only exists within the function. ``` If you modify an object (like a list or a dataframe) inside of a function, the modifications will affect its value outside of the function ``` def add_name_to_list(name_list, new_name): name_list.append(new_name) teachers = ["Bernease", "Dave", "Joe"] print(teachers) add_name_to_list(teachers, "Colin") print(teachers) ``` ## Exercise: Write a function to determine if a number is prime Below is some code that checks if a number is prime. The code has a bug in it! ``` # Determine if num is prime # This code has a bug. What is it? # Also, the efficiency of the code can be improved. How? num = 3 is_prime = True for integer in range(1, num): if num % integer == 0: # The "==" operator checks for equality and returns True or False. # Note the difference between "==" and "=", which assigns a value to a variable. # # The "%" operator calculates the remainder of a division operation # if the remainder is zero, integer is a divisor of num, so num is not prime print("Not prime!") is_prime = False if is_prime: print("Is prime!") ``` Once you've identified the bug in the above code, take the code and turn it into a function that takes a number as input and returns True if the number is prime and False if it is not. See if you can find any ways to make the code more efficient.
github_jupyter
# Identifying and modeling relationships between pairs of variables

![correlation](https://static.thenounproject.com/png/1569699-200.png)

> In the previous session we introduced the Python programming language and the Python data-analysis library **Pandas**. With Pandas, we learned to:
- Load data from files.
- Manipulate the data in basic ways:
  - Get certain records.
  - Get certain columns.
  - Get records based on conditions.
- Compute aggregate statistics for each variable (mean, median, standard deviation).

> With the statistics of each variable we can get an idea of how each variable behaves on its own; however, we cannot infer relationships between one variable and another. In this session we review practical methods for identifying relationships between variables, and we also study a method for modeling those relationships linearly.

___
# 1. Correlation coefficient

The correlation coefficient measures how strong the relationship between two variables is. We will set aside how it is computed and focus instead on its properties and how to interpret it:

- The correlation between any two variables $x,y$ satisfies: $$-1\leq\rho_{xy}\leq 1.$$
- The correlation of a variable with itself equals one (1): $$\rho_{xx}=1.$$
- The correlation is symmetric: $$\rho_{xy} = \rho_{yx}.$$

```
# Import numpy
import numpy as np
# Import pyplot
import matplotlib.pyplot as plt
%matplotlib inline
```

- A positive correlation $\rho_{xy}>0$ indicates that the relative movements of the variables $x,y$ go in the same direction.

```
# Variables with positive correlation
x = np.random.rand(100)
y = x + 0.5 * np.random.rand(100)
plt.scatter(x, y)
plt.xlabel('x')
plt.ylabel('y')
# Correlation
np.corrcoef(x, y)
```

- A negative correlation $\rho_{xy}<0$ indicates that the relative movements of the variables $x,y$ go in opposite directions.

```
# Variables with negative correlation
x = np.random.rand(100)
y = 1 - x + 0.5 * np.random.rand(100)
plt.scatter(x, y)
plt.xlabel('x')
plt.ylabel('y')
# Correlation
np.corrcoef(x, y)
```

- A correlation of zero $\rho_{xy}=0$ indicates that there is no apparent relationship between the variables $x, y$.

```
# Variables with (approximately) zero correlation
x = np.random.rand(100)
y = 0.5 * np.random.rand(100)
plt.scatter(x, y)
plt.xlabel('x')
plt.ylabel('y')
# Correlation
np.corrcoef(x, y)
```

Now that we understand the correlation coefficient, let's use it to uncover the relationship between house prices and house sizes:

```
# Import pandas
import pandas as pd
# Load the housing data
data = pd.read_csv("house_pricing.csv")
# Plot price vs. size
plt.scatter(data['size'], data['price'])
plt.xlabel('Size ($ft^2$)')
plt.ylabel('Price (USD)')
# Correlation
np.corrcoef(data['size'], data['price'])
```

# 2. Linear regression with one variable

Once we have identified the relationship between two variables, how can we model it? The answer is: **linear regression**. In simple terms, the goal of linear regression is to find the model of a line:

$$ y = m x + b $$

that **"best" fits the points (in the least-squares sense)**. In other words, the goal is to find the line model that "learns" the data.
Mathematically, if the points are

$$ \{(x_1, y_1), (x_2, y_2), \dots, (x_n, y_n)\}, $$

what we want is to estimate the values of the parameters $m$ and $b$ that minimize the following function:

$$ J(m, b) = \sum_{i=1}^{n}(y_i - (m x_i + b))^2 $$

### How do we do this in Python?

With the `LinearRegression` class from the `sklearn` library:

```
# Import sklearn.linear_model.LinearRegression
from sklearn.linear_model import LinearRegression
# Fit the best line to the size and price data
model = LinearRegression().fit(data[['size']], data['price'])
# Get the fitted parameters
m = model.coef_
b = model.intercept_
m, b
# Plot the data and the fitted model
plt.plot(data['size'], model.predict(data[['size']]))
plt.scatter(data['size'], data['price'])
plt.xlabel('Size ($ft^2$)')
plt.ylabel('Price (USD)')
```

### And what is this good for? An automatic appraisal system

Suppose a friend of ours has a 2000-square-foot house in Portland and wants to know how much it could sell for. They just found out that we have built an automatic system that estimates the price of houses in Portland from their size. How much could our friend sell the house for?

```
# Estimate the price
model.predict([[2000]])
# Plot
plt.plot(data['size'], model.predict(data[['size']]))
plt.plot(2000, model.predict([[2000]]), '*r', ms=10)
plt.scatter(data['size'], data['price'])
plt.xlabel('Size ($ft^2$)')
plt.ylabel('Price (USD)')
```

<footer id="attribution" style="float:right; color:#808080; background:#fff;">
Created with Jupyter by Esteban Jiménez Rodríguez.
</footer>
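As a cross-check on the fitted parameters, the least-squares slope and intercept for a single feature can also be computed directly from the covariance formulas, m = cov(x, y) / var(x) and b = mean(y) - m * mean(x). This is a minimal sketch that assumes the `data` DataFrame loaded earlier is still in memory; the values should match `model.coef_` and `model.intercept_` up to floating-point error.

```
import numpy as np

x = data['size'].to_numpy(dtype=float)
y = data['price'].to_numpy(dtype=float)

# Closed-form least-squares estimates for one feature
m_hat = np.cov(x, y, bias=True)[0, 1] / np.var(x)  # cov(x, y) / var(x)
b_hat = y.mean() - m_hat * x.mean()                # intercept from the means

print(m_hat, b_hat)
```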
github_jupyter
# 线性回归 :label:`sec_linear_regression` *回归*(regression)是能为一个或多个自变量与因变量之间关系建模的一类方法。 在自然科学和社会科学领域,回归经常用来表示输入和输出之间的关系。 在机器学习领域中的大多数任务通常都与*预测*(prediction)有关。 当我们想预测一个数值时,就会涉及到回归问题。 常见的例子包括:预测价格(房屋、股票等)、预测住院时间(针对住院病人等)、 预测需求(零售销量等)。 但不是所有的*预测*都是回归问题。 在后面的章节中,我们将介绍分类问题。分类问题的目标是预测数据属于一组类别中的哪一个。 ## 线性回归的基本元素 *线性回归*(linear regression)可以追溯到19世纪初, 它在回归的各种标准工具中最简单而且最流行。 线性回归基于几个简单的假设: 首先,假设自变量$\mathbf{x}$和因变量$y$之间的关系是线性的, 即$y$可以表示为$\mathbf{x}$中元素的加权和,这里通常允许包含观测值的一些噪声; 其次,我们假设任何噪声都比较正常,如噪声遵循正态分布。 为了解释*线性回归*,我们举一个实际的例子: 我们希望根据房屋的面积(平方英尺)和房龄(年)来估算房屋价格(美元)。 为了开发一个能预测房价的模型,我们需要收集一个真实的数据集。 这个数据集包括了房屋的销售价格、面积和房龄。 在机器学习的术语中,该数据集称为*训练数据集*(training data set) 或*训练集*(training set)。 每行数据(比如一次房屋交易相对应的数据)称为*样本*(sample), 也可以称为*数据点*(data point)或*数据样本*(data instance)。 我们把试图预测的目标(比如预测房屋价格)称为*标签*(label)或*目标*(target)。 预测所依据的自变量(面积和房龄)称为*特征*(feature)或*协变量*(covariate)。 通常,我们使用$n$来表示数据集中的样本数。 对索引为$i$的样本,其输入表示为$\mathbf{x}^{(i)} = [x_1^{(i)}, x_2^{(i)}]^\top$, 其对应的标签是$y^{(i)}$。 ### 线性模型 :label:`subsec_linear_model` 线性假设是指目标(房屋价格)可以表示为特征(面积和房龄)的加权和,如下面的式子: $$\mathrm{price} = w_{\mathrm{area}} \cdot \mathrm{area} + w_{\mathrm{age}} \cdot \mathrm{age} + b.$$ :eqlabel:`eq_price-area` :eqref:`eq_price-area`中的$w_{\mathrm{area}}$和$w_{\mathrm{age}}$ 称为*权重*(weight),权重决定了每个特征对我们预测值的影响。 $b$称为*偏置*(bias)、*偏移量*(offset)或*截距*(intercept)。 偏置是指当所有特征都取值为0时,预测值应该为多少。 即使现实中不会有任何房子的面积是0或房龄正好是0年,我们仍然需要偏置项。 如果没有偏置项,我们模型的表达能力将受到限制。 严格来说, :eqref:`eq_price-area`是输入特征的一个 *仿射变换*(affine transformation)。 仿射变换的特点是通过加权和对特征进行*线性变换*(linear transformation), 并通过偏置项来进行*平移*(translation)。 给定一个数据集,我们的目标是寻找模型的权重$\mathbf{w}$和偏置$b$, 使得根据模型做出的预测大体符合数据里的真实价格。 输出的预测值由输入特征通过*线性模型*的仿射变换决定,仿射变换由所选权重和偏置确定。 而在机器学习领域,我们通常使用的是高维数据集,建模时采用线性代数表示法会比较方便。 当我们的输入包含$d$个特征时,我们将预测结果$\hat{y}$ (通常使用“尖角”符号表示$y$的估计值)表示为: $$\hat{y} = w_1 x_1 + ... + w_d x_d + b.$$ 将所有特征放到向量$\mathbf{x} \in \mathbb{R}^d$中, 并将所有权重放到向量$\mathbf{w} \in \mathbb{R}^d$中, 我们可以用点积形式来简洁地表达模型: $$\hat{y} = \mathbf{w}^\top \mathbf{x} + b.$$ :eqlabel:`eq_linreg-y` 在 :eqref:`eq_linreg-y`中, 向量$\mathbf{x}$对应于单个数据样本的特征。 用符号表示的矩阵$\mathbf{X} \in \mathbb{R}^{n \times d}$ 可以很方便地引用我们整个数据集的$n$个样本。 其中,$\mathbf{X}$的每一行是一个样本,每一列是一种特征。 对于特征集合$\mathbf{X}$,预测值$\hat{\mathbf{y}} \in \mathbb{R}^n$ 可以通过矩阵-向量乘法表示为: $${\hat{\mathbf{y}}} = \mathbf{X} \mathbf{w} + b$$ 这个过程中的求和将使用广播机制 (广播机制在 :numref:`subsec_broadcasting`中有详细介绍)。 给定训练数据特征$\mathbf{X}$和对应的已知标签$\mathbf{y}$, 线性回归的目标是找到一组权重向量$\mathbf{w}$和偏置$b$: 当给定从$\mathbf{X}$的同分布中取样的新样本特征时, 这组权重向量和偏置能够使得新样本预测标签的误差尽可能小。 虽然我们相信给定$\mathbf{x}$预测$y$的最佳模型会是线性的, 但我们很难找到一个有$n$个样本的真实数据集,其中对于所有的$1 \leq i \leq n$,$y^{(i)}$完全等于$\mathbf{w}^\top \mathbf{x}^{(i)}+b$。 无论我们使用什么手段来观察特征$\mathbf{X}$和标签$\mathbf{y}$, 都可能会出现少量的观测误差。 因此,即使确信特征与标签的潜在关系是线性的, 我们也会加入一个噪声项来考虑观测误差带来的影响。 在开始寻找最好的*模型参数*(model parameters)$\mathbf{w}$和$b$之前, 我们还需要两个东西: (1)一种模型质量的度量方式; (2)一种能够更新模型以提高模型预测质量的方法。 ### 损失函数 在我们开始考虑如何用模型*拟合*(fit)数据之前,我们需要确定一个拟合程度的度量。 *损失函数*(loss function)能够量化目标的*实际*值与*预测*值之间的差距。 通常我们会选择非负数作为损失,且数值越小表示损失越小,完美预测时的损失为0。 回归问题中最常用的损失函数是平方误差函数。 当样本$i$的预测值为$\hat{y}^{(i)}$,其相应的真实标签为$y^{(i)}$时, 平方误差可以定义为以下公式: $$l^{(i)}(\mathbf{w}, b) = \frac{1}{2} \left(\hat{y}^{(i)} - y^{(i)}\right)^2.$$ :eqlabel:`eq_mse` 常数$\frac{1}{2}$不会带来本质的差别,但这样在形式上稍微简单一些 (因为当我们对损失函数求导后常数系数为1)。 由于训练数据集并不受我们控制,所以经验误差只是关于模型参数的函数。 为了进一步说明,来看下面的例子。 我们为一维情况下的回归问题绘制图像,如 :numref:`fig_fit_linreg`所示。 ![用线性模型拟合数据。](../img/fit-linreg.svg) :label:`fig_fit_linreg` 由于平方误差函数中的二次方项, 估计值$\hat{y}^{(i)}$和观测值$y^{(i)}$之间较大的差异将导致更大的损失。 为了度量模型在整个数据集上的质量,我们需计算在训练集$n$个样本上的损失均值(也等价于求和)。 $$L(\mathbf{w}, b) =\frac{1}{n}\sum_{i=1}^n l^{(i)}(\mathbf{w}, b) =\frac{1}{n} \sum_{i=1}^n 
\frac{1}{2}\left(\mathbf{w}^\top \mathbf{x}^{(i)} + b - y^{(i)}\right)^2.$$ 在训练模型时,我们希望寻找一组参数($\mathbf{w}^*, b^*$), 这组参数能最小化在所有训练样本上的总损失。如下式: $$\mathbf{w}^*, b^* = \operatorname*{argmin}_{\mathbf{w}, b}\ L(\mathbf{w}, b).$$ ### 解析解 线性回归刚好是一个很简单的优化问题。 与我们将在本书中所讲到的其他大部分模型不同,线性回归的解可以用一个公式简单地表达出来, 这类解叫作解析解(analytical solution)。 首先,我们将偏置$b$合并到参数$\mathbf{w}$中,合并方法是在包含所有参数的矩阵中附加一列。 我们的预测问题是最小化$\|\mathbf{y} - \mathbf{X}\mathbf{w}\|^2$。 这在损失平面上只有一个临界点,这个临界点对应于整个区域的损失极小点。 将损失关于$\mathbf{w}$的导数设为0,得到解析解: $$\mathbf{w}^* = (\mathbf X^\top \mathbf X)^{-1}\mathbf X^\top \mathbf{y}.$$ 像线性回归这样的简单问题存在解析解,但并不是所有的问题都存在解析解。 解析解可以进行很好的数学分析,但解析解对问题的限制很严格,导致它无法广泛应用在深度学习里。 ### 随机梯度下降 即使在我们无法得到解析解的情况下,我们仍然可以有效地训练模型。 在许多任务上,那些难以优化的模型效果要更好。 因此,弄清楚如何训练这些难以优化的模型是非常重要的。 本书中我们用到一种名为*梯度下降*(gradient descent)的方法, 这种方法几乎可以优化所有深度学习模型。 它通过不断地在损失函数递减的方向上更新参数来降低误差。 梯度下降最简单的用法是计算损失函数(数据集中所有样本的损失均值) 关于模型参数的导数(在这里也可以称为梯度)。 但实际中的执行可能会非常慢:因为在每一次更新参数之前,我们必须遍历整个数据集。 因此,我们通常会在每次需要计算更新的时候随机抽取一小批样本, 这种变体叫做*小批量随机梯度下降*(minibatch stochastic gradient descent)。 在每次迭代中,我们首先随机抽样一个小批量$\mathcal{B}$, 它是由固定数量的训练样本组成的。 然后,我们计算小批量的平均损失关于模型参数的导数(也可以称为梯度)。 最后,我们将梯度乘以一个预先确定的正数$\eta$,并从当前参数的值中减掉。 我们用下面的数学公式来表示这一更新过程($\partial$表示偏导数): $$(\mathbf{w},b) \leftarrow (\mathbf{w},b) - \frac{\eta}{|\mathcal{B}|} \sum_{i \in \mathcal{B}} \partial_{(\mathbf{w},b)} l^{(i)}(\mathbf{w},b).$$ 总结一下,算法的步骤如下: (1)初始化模型参数的值,如随机初始化; (2)从数据集中随机抽取小批量样本且在负梯度的方向上更新参数,并不断迭代这一步骤。 对于平方损失和仿射变换,我们可以明确地写成如下形式: $$\begin{aligned} \mathbf{w} &\leftarrow \mathbf{w} - \frac{\eta}{|\mathcal{B}|} \sum_{i \in \mathcal{B}} \partial_{\mathbf{w}} l^{(i)}(\mathbf{w}, b) = \mathbf{w} - \frac{\eta}{|\mathcal{B}|} \sum_{i \in \mathcal{B}} \mathbf{x}^{(i)} \left(\mathbf{w}^\top \mathbf{x}^{(i)} + b - y^{(i)}\right),\\ b &\leftarrow b - \frac{\eta}{|\mathcal{B}|} \sum_{i \in \mathcal{B}} \partial_b l^{(i)}(\mathbf{w}, b) = b - \frac{\eta}{|\mathcal{B}|} \sum_{i \in \mathcal{B}} \left(\mathbf{w}^\top \mathbf{x}^{(i)} + b - y^{(i)}\right). 
\end{aligned}$$ :eqlabel:`eq_linreg_batch_update` 公式 :eqref:`eq_linreg_batch_update`中的$\mathbf{w}$和$\mathbf{x}$都是向量。 在这里,更优雅的向量表示法比系数表示法(如$w_1, w_2, \ldots, w_d$)更具可读性。 $|\mathcal{B}|$表示每个小批量中的样本数,这也称为*批量大小*(batch size)。 $\eta$表示*学习率*(learning rate)。 批量大小和学习率的值通常是手动预先指定,而不是通过模型训练得到的。 这些可以调整但不在训练过程中更新的参数称为*超参数*(hyperparameter)。 *调参*(hyperparameter tuning)是选择超参数的过程。 超参数通常是我们根据训练迭代结果来调整的, 而训练迭代结果是在独立的*验证数据集*(validation dataset)上评估得到的。 在训练了预先确定的若干迭代次数后(或者直到满足某些其他停止条件后), 我们记录下模型参数的估计值,表示为$\hat{\mathbf{w}}, \hat{b}$。 但是,即使我们的函数确实是线性的且无噪声,这些估计值也不会使损失函数真正地达到最小值。 因为算法会使得损失向最小值缓慢收敛,但却不能在有限的步数内非常精确地达到最小值。 线性回归恰好是一个在整个域中只有一个最小值的学习问题。 但是对于像深度神经网络这样复杂的模型来说,损失平面上通常包含多个最小值。 深度学习实践者很少会去花费大力气寻找这样一组参数,使得在*训练集*上的损失达到最小。 事实上,更难做到的是找到一组参数,这组参数能够在我们从未见过的数据上实现较低的损失, 这一挑战被称为*泛化*(generalization)。 ### 用模型进行预测 给定“已学习”的线性回归模型$\hat{\mathbf{w}}^\top \mathbf{x} + \hat{b}$, 现在我们可以通过房屋面积$x_1$和房龄$x_2$来估计一个(未包含在训练数据中的)新房屋价格。 给定特征估计目标的过程通常称为*预测*(prediction)或*推断*(inference)。 本书将尝试坚持使用*预测*这个词。 虽然*推断*这个词已经成为深度学习的标准术语,但其实*推断*这个词有些用词不当。 在统计学中,*推断*更多地表示基于数据集估计参数。 当深度学习从业者与统计学家交谈时,术语的误用经常导致一些误解。 ## 矢量化加速 在训练我们的模型时,我们经常希望能够同时处理整个小批量的样本。 为了实现这一点,需要(**我们对计算进行矢量化, 从而利用线性代数库,而不是在Python中编写开销高昂的for循环**)。 ``` %matplotlib inline import math import time import numpy as np import tensorflow as tf from d2l import tensorflow as d2l ``` 为了说明矢量化为什么如此重要,我们考虑(**对向量相加的两种方法**)。 我们实例化两个全为1的10000维向量。 在一种方法中,我们将使用Python的for循环遍历向量; 在另一种方法中,我们将依赖对`+`的调用。 ``` n = 10000 a = tf.ones(n) b = tf.ones(n) ``` 由于在本书中我们将频繁地进行运行时间的基准测试,所以[**我们定义一个计时器**]: ``` class Timer: #@save """记录多次运行时间""" def __init__(self): self.times = [] self.start() def start(self): """启动计时器""" self.tik = time.time() def stop(self): """停止计时器并将时间记录在列表中""" self.times.append(time.time() - self.tik) return self.times[-1] def avg(self): """返回平均时间""" return sum(self.times) / len(self.times) def sum(self): """返回时间总和""" return sum(self.times) def cumsum(self): """返回累计时间""" return np.array(self.times).cumsum().tolist() ``` 现在我们可以对工作负载进行基准测试。 首先,[**我们使用for循环,每次执行一位的加法**]。 ``` c = tf.Variable(tf.zeros(n)) timer = Timer() for i in range(n): c[i].assign(a[i] + b[i]) f'{timer.stop():.5f} sec' ``` (**或者,我们使用重载的`+`运算符来计算按元素的和**)。 ``` timer.start() d = a + b f'{timer.stop():.5f} sec' ``` 结果很明显,第二种方法比第一种方法快得多。 矢量化代码通常会带来数量级的加速。 另外,我们将更多的数学运算放到库中,而无须自己编写那么多的计算,从而减少了出错的可能性。 ## 正态分布与平方损失 :label:`subsec_normal_distribution_and_squared_loss` 接下来,我们通过对噪声分布的假设来解读平方损失目标函数。 正态分布和线性回归之间的关系很密切。 正态分布(normal distribution),也称为*高斯分布*(Gaussian distribution), 最早由德国数学家高斯(Gauss)应用于天文学研究。 简单的说,若随机变量$x$具有均值$\mu$和方差$\sigma^2$(标准差$\sigma$),其正态分布概率密度函数如下: $$p(x) = \frac{1}{\sqrt{2 \pi \sigma^2}} \exp\left(-\frac{1}{2 \sigma^2} (x - \mu)^2\right).$$ 下面[**我们定义一个Python函数来计算正态分布**]。 ``` def normal(x, mu, sigma): p = 1 / math.sqrt(2 * math.pi * sigma**2) return p * np.exp(-0.5 / sigma**2 * (x - mu)**2) ``` 我们现在(**可视化正态分布**)。 ``` # 再次使用numpy进行可视化 x = np.arange(-7, 7, 0.01) # 均值和标准差对 params = [(0, 1), (0, 2), (3, 1)] d2l.plot(x, [normal(x, mu, sigma) for mu, sigma in params], xlabel='x', ylabel='p(x)', figsize=(4.5, 2.5), legend=[f'mean {mu}, std {sigma}' for mu, sigma in params]) ``` 就像我们所看到的,改变均值会产生沿$x$轴的偏移,增加方差将会分散分布、降低其峰值。 均方误差损失函数(简称均方损失)可以用于线性回归的一个原因是: 我们假设了观测中包含噪声,其中噪声服从正态分布。 噪声正态分布如下式: $$y = \mathbf{w}^\top \mathbf{x} + b + \epsilon,$$ 其中,$\epsilon \sim \mathcal{N}(0, \sigma^2)$。 因此,我们现在可以写出通过给定的$\mathbf{x}$观测到特定$y$的*似然*(likelihood): $$P(y \mid \mathbf{x}) = \frac{1}{\sqrt{2 \pi \sigma^2}} \exp\left(-\frac{1}{2 \sigma^2} (y - \mathbf{w}^\top \mathbf{x} - b)^2\right).$$ 现在,根据极大似然估计法,参数$\mathbf{w}$和$b$的最优值是使整个数据集的*似然*最大的值: $$P(\mathbf 
y \mid \mathbf X) = \prod_{i=1}^{n} p(y^{(i)}|\mathbf{x}^{(i)}).$$ 根据极大似然估计法选择的估计量称为*极大似然估计量*。 虽然使许多指数函数的乘积最大化看起来很困难, 但是我们可以在不改变目标的前提下,通过最大化似然对数来简化。 由于历史原因,优化通常是说最小化而不是最大化。 我们可以改为*最小化负对数似然*$-\log P(\mathbf y \mid \mathbf X)$。 由此可以得到的数学公式是: $$-\log P(\mathbf y \mid \mathbf X) = \sum_{i=1}^n \frac{1}{2} \log(2 \pi \sigma^2) + \frac{1}{2 \sigma^2} \left(y^{(i)} - \mathbf{w}^\top \mathbf{x}^{(i)} - b\right)^2.$$ 现在我们只需要假设$\sigma$是某个固定常数就可以忽略第一项, 因为第一项不依赖于$\mathbf{w}$和$b$。 现在第二项除了常数$\frac{1}{\sigma^2}$外,其余部分和前面介绍的均方误差是一样的。 幸运的是,上面式子的解并不依赖于$\sigma$。 因此,在高斯噪声的假设下,最小化均方误差等价于对线性模型的极大似然估计。 ## 从线性回归到深度网络 到目前为止,我们只谈论了线性模型。 尽管神经网络涵盖了更多更为丰富的模型,我们依然可以用描述神经网络的方式来描述线性模型, 从而把线性模型看作一个神经网络。 首先,我们用“层”符号来重写这个模型。 ### 神经网络图 深度学习从业者喜欢绘制图表来可视化模型中正在发生的事情。 在 :numref:`fig_single_neuron`中,我们将线性回归模型描述为一个神经网络。 需要注意的是,该图只显示连接模式,即只显示每个输入如何连接到输出,隐去了权重和偏置的值。 ![线性回归是一个单层神经网络。](../img/singleneuron.svg) :label:`fig_single_neuron` 在 :numref:`fig_single_neuron`所示的神经网络中,输入为$x_1, \ldots, x_d$, 因此输入层中的*输入数*(或称为*特征维度*,feature dimensionality)为$d$。 网络的输出为$o_1$,因此输出层中的*输出数*是1。 需要注意的是,输入值都是已经给定的,并且只有一个*计算*神经元。 由于模型重点在发生计算的地方,所以通常我们在计算层数时不考虑输入层。 也就是说, :numref:`fig_single_neuron`中神经网络的*层数*为1。 我们可以将线性回归模型视为仅由单个人工神经元组成的神经网络,或称为单层神经网络。 对于线性回归,每个输入都与每个输出(在本例中只有一个输出)相连, 我们将这种变换( :numref:`fig_single_neuron`中的输出层) 称为*全连接层*(fully-connected layer)或称为*稠密层*(dense layer)。 下一章将详细讨论由这些层组成的网络。 ### 生物学 线性回归发明的时间(1795年)早于计算神经科学,所以将线性回归描述为神经网络似乎不合适。 当控制学家、神经生物学家沃伦·麦库洛奇和沃尔特·皮茨开始开发人工神经元模型时, 他们为什么将线性模型作为一个起点呢? 我们来看一张图片 :numref:`fig_Neuron`: 这是一张由*树突*(dendrites,输入终端)、 *细胞核*(nucleu,CPU)组成的生物神经元图片。 *轴突*(axon,输出线)和*轴突端子*(axon terminal,输出端子) 通过*突触*(synapse)与其他神经元连接。 ![真实的神经元。](../img/neuron.svg) :label:`fig_Neuron` 树突中接收到来自其他神经元(或视网膜等环境传感器)的信息$x_i$。 该信息通过*突触权重*$w_i$来加权,以确定输入的影响(即,通过$x_i w_i$相乘来激活或抑制)。 来自多个源的加权输入以加权和$y = \sum_i x_i w_i + b$的形式汇聚在细胞核中, 然后将这些信息发送到轴突$y$中进一步处理,通常会通过$\sigma(y)$进行一些非线性处理。 之后,它要么到达目的地(例如肌肉),要么通过树突进入另一个神经元。 当然,许多这样的单元可以通过正确连接和正确的学习算法拼凑在一起, 从而产生的行为会比单独一个神经元所产生的行为更有趣、更复杂, 这种想法归功于我们对真实生物神经系统的研究。 当今大多数深度学习的研究几乎没有直接从神经科学中获得灵感。 我们援引斯图尔特·罗素和彼得·诺维格谁,在他们的经典人工智能教科书 *Artificial Intelligence:A Modern Approach* :cite:`Russell.Norvig.2016` 中所说:虽然飞机可能受到鸟类的启发,但几个世纪以来,鸟类学并不是航空创新的主要驱动力。 同样地,如今在深度学习中的灵感同样或更多地来自数学、统计学和计算机科学。 ## 小结 * 机器学习模型中的关键要素是训练数据、损失函数、优化算法,还有模型本身。 * 矢量化使数学表达上更简洁,同时运行的更快。 * 最小化目标函数和执行极大似然估计等价。 * 线性回归模型也是一个简单的神经网络。 ## 练习 1. 假设我们有一些数据$x_1, \ldots, x_n \in \mathbb{R}$。我们的目标是找到一个常数$b$,使得最小化$\sum_i (x_i - b)^2$。 1. 找到最优值$b$的解析解。 1. 这个问题及其解与正态分布有什么关系? 1. 推导出使用平方误差的线性回归优化问题的解析解。为了简化问题,可以忽略偏置$b$(我们可以通过向$\mathbf X$添加所有值为1的一列来做到这一点)。 1. 用矩阵和向量表示法写出优化问题(将所有数据视为单个矩阵,将所有目标值视为单个向量)。 1. 计算损失对$w$的梯度。 1. 通过将梯度设为0、求解矩阵方程来找到解析解。 1. 什么时候可能比使用随机梯度下降更好?这种方法何时会失效? 1. 假定控制附加噪声$\epsilon$的噪声模型是指数分布。也就是说,$p(\epsilon) = \frac{1}{2} \exp(-|\epsilon|)$ 1. 写出模型$-\log P(\mathbf y \mid \mathbf X)$下数据的负对数似然。 1. 你能写出解析解吗? 1. 提出一种随机梯度下降算法来解决这个问题。哪里可能出错?(提示:当我们不断更新参数时,在驻点附近会发生什么情况)你能解决这个问题吗? [Discussions](https://discuss.d2l.ai/t/1776)
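(Added note, not part of the original chapter.) The analytical-solution section above states that, once a column of ones is appended to $\mathbf X$ to absorb the bias, the least-squares minimizer is $\mathbf{w}^* = (\mathbf X^\top \mathbf X)^{-1}\mathbf X^\top \mathbf{y}$. Below is a minimal NumPy sketch on synthetic data (all names are hypothetical); in practice one would solve the normal equations or use `np.linalg.lstsq` rather than forming the inverse explicitly.

```
import numpy as np

rng = np.random.default_rng(0)
n, d = 1000, 2
true_w, true_b = np.array([2.0, -3.4]), 4.2

X = rng.normal(size=(n, d))
y = X @ true_w + true_b + 0.01 * rng.normal(size=n)

# Append a column of ones so the bias is absorbed into the weight vector
X_aug = np.hstack([X, np.ones((n, 1))])

# Normal-equation solution w* = (X^T X)^{-1} X^T y, computed without an explicit inverse
w_star = np.linalg.solve(X_aug.T @ X_aug, X_aug.T @ y)
print(w_star)  # approximately [2.0, -3.4, 4.2]
```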
github_jupyter
# Project Submission Continuous Control for the Udacity Ud893 Deep Reinforcement Learning Nanodegree (DRLND) ## Imports and Dependencies ``` import sys sys.path.append("../python") import random import numpy as np import torch from collections import deque import matplotlib.pyplot as plt from datetime import datetime from unityagents import UnityEnvironment %matplotlib inline ``` ## Unity Environment Note that if your operating system is Windows (64-bit), the Unity environment is included and you can run the below environment instantiation cell. However, if you're using a different operating system, download the file you require from one of the following links: - Linux: [click here](https://s3-us-west-1.amazonaws.com/udacity-drlnd/P2/Reacher/Reacher_Linux.zip) - Mac OSX: [click here](https://s3-us-west-1.amazonaws.com/udacity-drlnd/P2/Reacher/Reacher.app.zip) - Windows (32-bit): [click here](https://s3-us-west-1.amazonaws.com/udacity-drlnd/P2/Reacher/Reacher_Windows_x86.zip) Then, place the file in the main project directory folder and unzip (or decompress) the file. Modify the file_name in the below cell and then run the cell. ``` env = UnityEnvironment(file_name="Reacher_20_Windows_x86_64/Reacher.exe") ``` ## Get Default Brain ``` # get the default brain brain_name = env.brain_names[0] brain = env.brains[brain_name] ``` ## Main Training Loop Function ``` def training_loop_20(agent, actor_model_filename='ckpnt_actor_20.pth', critic_model_filename='ckpnt_critic_20.pth', n_episodes=1000, max_t=3000): """DDPG Training Loop Params ====== agent (function): agent function n_episodes (int): maximum number of training episodes max_t (int): maximum number of timesteps per episode """ start_time = datetime.now() scores = [] # list containing scores from each episode scores_window = deque(maxlen=100) # last 100 scores for i_episode in range(1, n_episodes+1): env_info = env.reset(train_mode=True)[brain_name] # reset the environment state = env_info.vector_observations # get the current state agent.reset() score = np.zeros(len(env_info.agents)) for t in range(max_t): action = agent.act(state) env_info = env.step(action)[brain_name] # send the action to the environment next_state = env_info.vector_observations # get the next state reward = env_info.rewards # get the reward done = env_info.local_done # see if episode has finished agent.step(state, action, reward, next_state, done, t) state = next_state score += reward if np.any(done): break scores_window.append(np.mean(score)) # save most recent score scores.append(np.mean(score)) print('\rEpisode {}\tAverage Score: {:.2f}\tTime: {}'.format(i_episode, np.mean(scores_window), datetime.now()-start_time), end="") if i_episode % 1 == 0: print('\rEpisode {}\tAverage Score: {:.2f}\tTime: {}'.format(i_episode, np.mean(scores_window), datetime.now()-start_time)) if np.mean(scores_window)>=30.0: print('\nEnvironment solved in {:d} episodes!\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window))) torch.save(agent.actor_local.state_dict(), actor_model_filename) torch.save(agent.critic_local.state_dict(), critic_model_filename) break return scores ``` ## DDPG Agent ``` from ddpg_agent_20 import Agent agent = Agent(state_size=33, action_size=4, random_seed=15, lr_a=1e-4, lr_c=1e-3,weight_decay=0, fc1_units=400, fc2_units=300) start = datetime.now() scores = training_loop_20(agent) end = datetime.now() time_taken = end - start print('Time: ',time_taken) # plot the scores plt.plot(np.arange(len(scores)), scores) plt.ylabel('Score') plt.xlabel('Episodes') 
plt.title('DDPG Agent') plt.show() # purposely left commented. Leave as is. # env.close() ``` ## Run Smart Agent ``` from ddpg_agent_20 import Agent env_info = env.reset(train_mode=True)[brain_name] agent = Agent(state_size=33, action_size=4, random_seed=15, lr_a=1e-4, lr_c=1e-3,weight_decay=0, fc1_units=400, fc2_units=300) agent.actor_local.load_state_dict(torch.load('ckpnt_actor_20.pth')) # load weights from file agent.critic_local.load_state_dict(torch.load('ckpnt_critic_20.pth')) # load weights from file num_agents = len(env_info.agents) episodes = 1 for i in range(episodes): env_info = env.reset(train_mode=False)[brain_name] # reset the environment state = env_info.vector_observations # get the current state scores = np.zeros(num_agents) # initialize the score (for each agent) agent.reset() for j in range(1000): action = agent.act(state, add_noise=False) env_info = env.step(action)[brain_name] # send the action to the environment state = env_info.vector_observations # get the next state reward = env_info.rewards # get the reward done = env_info.local_done # see if episode has finished if np.any(done): break scores += reward score = np.mean(scores) if score > 30: break print('\rEpisode: {}\tStep: {}\tScore: {}'.format(i+1, j+1, score), end="") print('\rEpisode: {}\tStep: {}\tScore: {}'.format(i+1, j+1, score)) env.close() ```
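The cells above rely on the `Agent` class from `ddpg_agent_20.py`, which is not shown in this notebook. For readers without that file, the sketch below mirrors only the interface the training and inference loops call (`reset`, `act`, `step`, plus `actor_local`/`critic_local` for checkpointing). It is a hypothetical stand-in that emits random actions, not the actual DDPG implementation.

```
import numpy as np
import torch.nn as nn

class RandomAgent:
    """Stand-in with the same interface training_loop_20 expects from Agent."""

    def __init__(self, state_size=33, action_size=4, num_agents=20, seed=0):
        self.action_size = action_size
        self.num_agents = num_agents
        self.rng = np.random.default_rng(seed)
        # Dummy networks so torch.save(agent.actor_local.state_dict(), ...) still works.
        self.actor_local = nn.Linear(state_size, action_size)
        self.critic_local = nn.Linear(state_size + action_size, 1)

    def reset(self):
        # The real agent would typically reset its exploration noise here.
        pass

    def act(self, states, add_noise=True):
        # One action vector per agent, clipped to the Reacher action range [-1, 1].
        return np.clip(self.rng.standard_normal((self.num_agents, self.action_size)), -1, 1)

    def step(self, states, actions, rewards, next_states, dones, t):
        # The real agent would store the transition in a replay buffer and learn from it.
        pass
```

Swapping `RandomAgent` into `training_loop_20` runs the loop end to end, although it will of course never reach the +30 score threshold.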
## Load Model, plain 2D Conv ``` import os os.chdir("../..") os.getcwd() import numpy as np import torch import json from distributed.model_util import choose_model, choose_old_model, load_model, extend_model_config from distributed.util import q_value_index_to_action import matplotlib.pyplot as plt model_name = "conv2d" model_config_path = "src/config/model_spec/conv_agents_slim.json" trained_model_path = "threshold_networks/5/72409/conv2d_5_72409.pt" with open(model_config_path, "r") as jsonfile: model_config = json.load(jsonfile)[model_name] code_size, stack_depth = 5, 5 syndrome_size = code_size + 1 model_config = extend_model_config(model_config, syndrome_size, stack_depth) model_config["network_size"] = "slim" model_config["rl_type"] = "q" model = choose_model(model_name, model_config, transfer_learning=0) model, *_ = load_model(model, trained_model_path, model_device="cpu") from evaluation.final_evaluation import main_evaluation all_ground_states = 0 for i in range(10): is_ground_state, n_syndromes, n_loops = main_evaluation( model, model.device, epsilon=0.0, code_size=code_size, stack_depth=stack_depth, block=False, verbosity=0, rl_type=model_config["rl_type"] ) all_ground_states += is_ground_state print(all_ground_states) print(all_ground_states) ``` ## Prepare States ``` all_states = [] state = torch.zeros((stack_depth, syndrome_size, syndrome_size), dtype=torch.float32) state[-2:, 1, 2] = 1 all_states.append(state) state = torch.zeros((stack_depth, syndrome_size, syndrome_size), dtype=torch.float32) state[-1, 1, 2] = 1 state[-1, 2, 3] = 1 all_states.append(state) state = torch.zeros((stack_depth, syndrome_size, syndrome_size), dtype=torch.float32) state[-1, 2, 3] = 1 all_states.append(state) state = torch.zeros((stack_depth, syndrome_size, syndrome_size), dtype=torch.float32) state[-2:, 1, 2] = 1 state[-2:, 2, 3] = 1 state[-1:, 2, 3] = 0 all_states.append(state) state = torch.zeros((stack_depth, syndrome_size, syndrome_size), dtype=torch.float32) state[:, 1, 2] = 1 state[:, 2, 3] = 1 state[-1, 2, 3] = 0 all_states.append(state) full_error_state = torch.zeros((stack_depth, syndrome_size, syndrome_size), dtype=torch.float32) full_error_state[:, 1, 2] = 1 full_error_state[:, 2, 3] = 1 all_states.append(full_error_state) torch_all_states = torch.stack(all_states) # for i in range(0, stack_depth, 2): # state = torch.zeros((stack_depth, syndrome_size, syndrome_size), dtype=torch.float32) # state[:, 1, 2] = 1 # state[:, 2, 3] = 1 # state[i, 2, 3] = 0 # all_states.append(state) # for i in range(0, stack_depth, 2): # state = torch.zeros((stack_depth, syndrome_size, syndrome_size), dtype=torch.float32) # state[i, 2, 3] = 1 # all_states.append(state) # state = torch.zeros((stack_depth, syndrome_size, syndrome_size), dtype=torch.float32) # state[-1, 1, 2] = 1 # state[-1, 2, 3] = 1 # all_states.append(state) # state = torch.zeros((stack_depth, syndrome_size, syndrome_size), dtype=torch.float32) # state[-2:, 1, 2] = 1 # state[-2:, 2, 3] = 1 # state[-1:, 2, 3] = 0 # all_states.append(state) # state = torch.zeros((stack_depth, syndrome_size, syndrome_size), dtype=torch.float32) # state[-2:, 1, 2] = 1 # all_states.append(state) # torch_all_states = torch.stack(all_states) def calculate_state_image(state, stack_depth, syndrome_size): layer_discount_factor = 0.3 layer_exponents = np.arange(stack_depth - 1, -1, -1) layer_rewards = np.power(layer_discount_factor, layer_exponents) layer_rewards = torch.tensor(layer_rewards, dtype=torch.float32) state_image = torch.zeros((syndrome_size, 
syndrome_size), dtype=torch.float32) for j, layer in enumerate(state): tmp_layer = layer * layer_rewards[j] state_image += tmp_layer return state_image ``` ## Do the plotting ``` k = 1 # stack_depth = 5 # syndrome_size = 5 from matplotlib import colors plt.rcParams.update({"font.size": 15}) fig, ax = plt.subplots(1, 3, figsize=(18, 8), gridspec_kw={"width_ratios": [4, 4, 8], "wspace": 0.02, "hspace": 0.0},) plot_colors = ["#ffffff", "#404E5C", "#F76C5E", "#E9B44C", "#7F95D1", "#CF1259", "#669900"] markers = ["o", "v", "^", "X", "d", "P"] cmap = colors.ListedColormap(plot_colors) boundaries = range(len(torch_all_states)) norm = colors.BoundaryNorm(boundaries, cmap.N, clip=True) markersize = 70 img_separation = 1 column_width = 8 syndrome_locations = np.array([[1, 2],[2, 3]]) column_filler = np.zeros((stack_depth, 1)) image_filler = np.zeros((stack_depth, img_separation)) img_width = 2 * column_width + img_separation vline_locations = np.array([ column_width + i * img_width for i in range(len(torch_all_states)) ]) image_separators_left = np.array([ (i+1) * 2 * column_width + i * img_separation for i in range(len(torch_all_states)) ]) # image_separators_left[1:] += img_separation image_separators_right = [ i * img_width for i in range(len(torch_all_states)) ] hline_locations = range(0, stack_depth + 1) complete_image_list = [] for i, state in enumerate(torch_all_states): ii = i + 1 # TODO: concat the columns multi-pixel wide with an empty space in between # and empty spaces between each state's column column1 = np.vstack(state[:, syndrome_locations[0, 0], syndrome_locations[0, 1]]) repeated_column1 = np.repeat(column1, column_width, axis=1) column2 = np.vstack(state[:, syndrome_locations[1, 0], syndrome_locations[1, 1]]) repeated_column2 = np.repeat(column2, column_width, axis=1) state_img = np.concatenate((repeated_column1, repeated_column2), axis=1) * ii complete_image_list.append(state_img) if i < len(torch_all_states) - 1: complete_image_list.append(image_filler) complete_image_array = np.concatenate(complete_image_list, axis=1) ax2 = ax[2].twinx() for i, state in enumerate(torch_all_states): ii = i + 1 q_values = model(state.unsqueeze(0)) q_values = q_values.detach().squeeze().clone().numpy() ind = np.argpartition(q_values, -k)[-k:] max_ind = ind action = q_value_index_to_action(ind[0], code_size) ind = np.append(ind, [max(ind[0]-1, 0), min(ind[0]+1, len(q_values)-1)]) ind = np.sort(ind) print(f"{ind=}") q_hist = np.histogram(q_values) if i < 3: ax[0].plot(range(len(q_values)), q_values, label=str(ii), color=plot_colors[ii]) ax[0].scatter( max_ind, q_values[max_ind], marker=markers[i], c=plot_colors[ii], s=markersize ) # marker=markers[i], c=plot_colors[ii] else: ax[1].plot(range(len(q_values)), q_values, label=str(ii), color=plot_colors[ii]), ax[1].scatter( max_ind, q_values[max_ind], marker=markers[i], c=plot_colors[ii], s=markersize ) ax2.imshow( complete_image_array, vmin=0, vmax=6, cmap=cmap, aspect='auto', origin='lower' ) ax2.axvline(x=vline_locations[i] - 0.5, linestyle=':', color='black') ax2.axhline(y=hline_locations[i] - 0.5, linestyle=':', color='black') ax2.axvline(x=image_separators_left[i] - 0.5, color='black') ax2.axvline(x=image_separators_right[i] - 0.5, color='black') ax2.text(x=image_separators_left[i] - 1.8 * column_width, y=1, s=f"{action}") ax[0].set( ylim=(40, 120), xlabel="Q Value Index", ylabel="Q Value", title=f"Q Activation", ) ax[1].set( ylim=(40, 120), xlabel="Q Value Index", title=f"Q Activation", ) all_vline_locations = np.concatenate( [vline_locations - 
0.5 * column_width, vline_locations + 0.5 * column_width] ) x_tick_labels = [f"{tuple(syndrome_locations[0])}"] * len(torch_all_states) x_tick_labels2 = [f"{tuple(syndrome_locations[1])}"] * len(torch_all_states) x_tick_labels.extend(x_tick_labels2) # , f"{tuple(syndrome_locations[1])}"] * len(torch_all_states) print(f"{x_tick_labels}") ax2.set(xlabel="", ylabel="h", title="Isolated Syndrome States") ax[2].set_yticks(all_vline_locations) ax[2].set_yticks([]) ax[2].set_yticklabels([]) ax2.set_xticklabels(x_tick_labels) ax2.set_xticks(all_vline_locations) ax[1].set_yticklabels([]) ax[0].legend() ax[1].legend() plt.savefig("plots/q_value_activation.pdf", bbox_inches="tight") ``` ## 3D Conv ``` model_name = "conv3d" model_config_path_3d = "src/config/model_spec/conv_agents_slim.json" trained_model_path_3d = "threshold_networks/5/69312/conv3d_5_69312.pt" with open(model_config_path_3d, "r") as jsonfile: model_config_3d = json.load(jsonfile)[model_name] code_size, stack_depth = 5, 5 syndrome_size = code_size + 1 model_config_3d = extend_model_config(model_config_3d, syndrome_size, stack_depth) model_config_3d["network_size"] = "slim" model_config_3d["rl_type"] = "q" model3d = choose_old_model(model_name, model_config_3d) model3d, *_ = load_model(model3d, trained_model_path_3d, model_device="cpu") from evaluation.final_evaluation import main_evaluation all_ground_states = 0 for i in range(10): is_ground_state, n_syndromes, n_loops = main_evaluation( model3d, model3d.device, epsilon=0.0, code_size=code_size, stack_depth=stack_depth, block=False, verbosity=0, rl_type=model_config_3d["rl_type"] ) all_ground_states += is_ground_state print(all_ground_states) print(all_ground_states) k = 1 # stack_depth = 5 # syndrome_size = 5 from matplotlib import colors plt.rcParams.update({"font.size": 15}) fig, ax = plt.subplots(1, 3, figsize=(18, 8), gridspec_kw={"width_ratios": [4, 4, 8], "wspace": 0.02, "hspace": 0.0},) plot_colors = ["#ffffff", "#404E5C", "#F76C5E", "#E9B44C", "#7F95D1", "#CF1259", "#669900"] markers = ["o", "v", "^", "X", "d", "P"] cmap = colors.ListedColormap(plot_colors) boundaries = range(len(torch_all_states)) norm = colors.BoundaryNorm(boundaries, cmap.N, clip=True) markersize = 70 img_separation = 1 column_width = 8 syndrome_locations = np.array([[1, 2],[2, 3]]) column_filler = np.zeros((stack_depth, 1)) image_filler = np.zeros((stack_depth, img_separation)) img_width = 2 * column_width + img_separation vline_locations = np.array([ column_width + i * img_width for i in range(len(torch_all_states)) ]) image_separators_left = np.array([ (i+1) * 2 * column_width + i * img_separation for i in range(len(torch_all_states)) ]) # image_separators_left[1:] += img_separation image_separators_right = [ i * img_width for i in range(len(torch_all_states)) ] hline_locations = range(0, stack_depth + 1) complete_image_list = [] for i, state in enumerate(torch_all_states): ii = i + 1 # TODO: concat the columns multi-pixel wide with an empty space in between # and empty spaces between each state's column column1 = np.vstack(state[:, syndrome_locations[0, 0], syndrome_locations[0, 1]]) repeated_column1 = np.repeat(column1, column_width, axis=1) column2 = np.vstack(state[:, syndrome_locations[1, 0], syndrome_locations[1, 1]]) repeated_column2 = np.repeat(column2, column_width, axis=1) state_img = np.concatenate((repeated_column1, repeated_column2), axis=1) * ii complete_image_list.append(state_img) if i < len(torch_all_states) - 1: complete_image_list.append(image_filler) complete_image_array = 
np.concatenate(complete_image_list, axis=1) ax2 = ax[2].twinx() for i, state in enumerate(torch_all_states): ii = i + 1 q_values = model3d(state.unsqueeze(0)) q_values = q_values.detach().squeeze().clone().numpy() ind = np.argpartition(q_values, -k)[-k:] max_ind = ind action = q_value_index_to_action(ind[0], code_size) ind = np.append(ind, [max(ind[0]-1, 0), min(ind[0]+1, len(q_values)-1)]) ind = np.sort(ind) print(f"{ind=}") q_hist = np.histogram(q_values) if i < 3: ax[0].plot(range(len(q_values)), q_values, label=str(ii), color=plot_colors[ii]) ax[0].scatter( max_ind, q_values[max_ind], marker=markers[i], c=plot_colors[ii], s=markersize ) # marker=markers[i], c=plot_colors[ii] else: ax[1].plot(range(len(q_values)), q_values, label=str(ii), color=plot_colors[ii]), ax[1].scatter( max_ind, q_values[max_ind], marker=markers[i], c=plot_colors[ii], s=markersize ) ax2.imshow( complete_image_array, vmin=0, vmax=6, cmap=cmap, aspect='auto', origin='lower' ) ax2.axvline(x=vline_locations[i] - 0.5, linestyle=':', color='black') ax2.axhline(y=hline_locations[i] - 0.5, linestyle=':', color='black') ax2.axvline(x=image_separators_left[i] - 0.5, color='black') ax2.axvline(x=image_separators_right[i] - 0.5, color='black') ax2.text(x=image_separators_left[i] - 1.8 * column_width, y=1, s=f"{action}") ax[0].set( ylim=(40, 100), xlabel="Q Value Index", ylabel="Q Value", title=f"Q Activation", ) ax[1].set( ylim=(40, 100), xlabel="Q Value Index", title=f"Q Activation", ) all_vline_locations = np.concatenate( [vline_locations - 0.5 * column_width, vline_locations + 0.5 * column_width] ) x_tick_labels = [f"{tuple(syndrome_locations[0])}"] * len(torch_all_states) x_tick_labels2 = [f"{tuple(syndrome_locations[1])}"] * len(torch_all_states) x_tick_labels.extend(x_tick_labels2) # , f"{tuple(syndrome_locations[1])}"] * len(torch_all_states) print(f"{x_tick_labels}") ax2.set(xlabel="", ylabel="h", title="Isolated Syndrome States") ax[2].set_yticks(all_vline_locations) ax[2].set_yticks([]) ax[2].set_yticklabels([]) ax2.set_xticklabels(x_tick_labels) ax2.set_xticks(all_vline_locations) ax[1].set_yticklabels([]) ax[0].legend() ax[1].legend() plt.savefig("plots/q_value_activation_3d.pdf", bbox_inches="tight") from distributed.util import select_actions from surface_rl_decoder.surface_code import SurfaceCode from surface_rl_decoder.surface_code_util import create_syndrome_output_stack # q_values = model3d(full_error_state) action, _ = select_actions(full_error_state.unsqueeze(0), model3d, code_size) sc = SurfaceCode(code_size=code_size, stack_depth=stack_depth) sc.qubits[:, 1, 2] = 1 sc.state = create_syndrome_output_stack( sc.qubits, sc.vertex_mask, sc.plaquette_mask ) np.argwhere(sc.state) from copy import deepcopy torch_state = torch.tensor(deepcopy(sc.state), dtype=torch.float32) action, _ = select_actions(torch_state.unsqueeze(0), model3d, code_size) action new_state, *_ = sc.step(action[0]) torch_state = torch.tensor(deepcopy(sc.state), dtype=torch.float32) action, _ = select_actions(torch_state.unsqueeze(0), model3d, code_size) action new_state, *_ = sc.step(action[0]) new_state ```
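For reference, here is a small standalone usage example of the `calculate_state_image` helper defined earlier in this notebook (the function body is repeated so the cell runs on its own). It shows how the layer discount of 0.3 weights the most recent syndrome layer the highest.

```
import numpy as np
import torch

def calculate_state_image(state, stack_depth, syndrome_size):
    # Same logic as the helper above: layer j is weighted by 0.3 ** (stack_depth - 1 - j),
    # so the newest layer (j = stack_depth - 1) contributes with weight 1.0.
    layer_discount_factor = 0.3
    layer_exponents = np.arange(stack_depth - 1, -1, -1)
    layer_rewards = torch.tensor(
        np.power(layer_discount_factor, layer_exponents), dtype=torch.float32
    )
    state_image = torch.zeros((syndrome_size, syndrome_size), dtype=torch.float32)
    for j, layer in enumerate(state):
        state_image += layer * layer_rewards[j]
    return state_image

stack_depth, syndrome_size = 5, 6   # code_size 5 as above, syndrome_size = code_size + 1
state = torch.zeros((stack_depth, syndrome_size, syndrome_size), dtype=torch.float32)
state[-2:, 1, 2] = 1                # syndrome present only in the two most recent layers
state_image = calculate_state_image(state, stack_depth, syndrome_size)
print(state_image[1, 2])            # tensor(1.3000) = 1.0 (newest layer) + 0.3 (previous layer)
```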
``` ls ../test-data/ %matplotlib inline import matplotlib.pyplot as plt import numpy as np import pandas as pd import tables as tb import h5py import dask.dataframe as dd import dask.bag as db import blaze fname = '../test-data/EQY_US_ALL_BBO_201402/EQY_US_ALL_BBO_20140206.h5' max_sym = '/SPY/no_suffix' fname = '../test-data/small_test_data_public.h5' max_sym = '/IXQAJE/no_suffix' # by default, this will be read-only taq_tb = tb.open_file(fname) %%time rec_counts = {curr._v_pathname: len(curr) for curr in taq_tb.walk_nodes('/', 'Table')} # What's our biggest table? (in bytes) max(rec_counts.values()) * 91 / 2 ** 20 # I think it's 91 bytes... ``` Anyway, under a gigabyte. So, nothing to worry about even if we have 24 cores. ``` # But what symbol is that? max_sym = None max_rows = 0 for sym, rows in rec_counts.items(): if rows > max_rows: max_rows = rows max_sym = sym max_sym, max_rows ``` Interesting... the S&P 500 ETF ``` # Most symbols also have way less rows - note this is log xvals plt.hist(list(rec_counts.values()), bins=50, log=True) plt.show() ``` ## Doing some compute We'll use a "big" table to get some sense of timings ``` spy = taq_tb.get_node(max_sym) # PyTables is record oriented... %timeit np.mean(list(x['Bid_Price'] for x in spy.iterrows())) # But this is faster... %timeit np.mean(spy[:]['Bid_Price']) np.mean(spy[:]['Bid_Price']) ``` # Using numexpr? numexpr is currently not set up to do reductions via HDF5. I've opened an issue here: https://github.com/PyTables/PyTables/issues/548 ``` spy_bp = spy.cols.Bid_Price # this works... np.mean(spy_bp) # But it can't use numexpr expr = tb.Expr('sum(spy_bp)') # You can use numexpr to get the values of the column... but that's silly # (sum doesn't work right, and the axis argument is non-functional) %timeit result = expr.eval().mean() tb.Expr('spy_bp').eval().mean() ``` # h5py ``` taq_tb.close() %%time spy_h5py = h5py.File(fname)[max_sym] np.mean(spy_h5py['Bid_Price']) ``` h5py may be a *touch* faster than pytables for this kind of usage. But why does pandas use pytables? ``` %%timeit np.mean(spy_h5py['Bid_Price']) ``` # Dask It seems that there should be no need to, e.g., use h5py - but dask's read_hdf doens't seem to be working nicely... ``` taq_tb.close() ``` spy_h5py = h5py.File(fname)[max_sym] ``` store = pd.HDFStore(fname) store = pd.HDFStore('../test-data/') # this is a fine way to iterate over our datasets (in addition to what's available in PyTables and h5py) it = store.items() key, tab = next(it) tab # The columns argument doesn't seem to work... store.select(max_sym, columns=['Bid_Price']).head() # columns also doesn't work here... pd.read_hdf(fname, max_sym, columns=['Bid_Price']).head() # So we use h5py (actually, pytables appears faster...) spy_dask = dd.from_array(spy_h5py) mean_job = spy_dask['Bid_Price'].mean() mean_job.compute() # This is appreciably slower than directly computing the mean w/ numpy %timeit mean_job.compute() ``` ## Dask for an actual distributed task (but only on one file for now) ``` class DDFs: # A (key, table) list datasets = [] dbag = None def __init__(self, h5fname): h5in = h5py.File(h5fname) h5in.visititems(self.collect_dataset) def collect_dataset(self, key, table): if isinstance(table, h5py.Dataset): self.datasets.append(dd.from_array(table)['Bid_Price'].mean()) def compute_mean(self): # This is still very slow! self.results = {key: result for key, result in dd.compute(*self.datasets)} %%time ddfs = DDFs(fname) ddfs.datasets[:5] len(ddfs.datasets) dd.compute? 
%%time results = dd.compute(*ddfs.datasets[:20]) import dask.multiprocessing %%time # This crashes out throwing lots of KeyErrors results = dd.compute(*ddfs.datasets[:20], get=dask.multiprocessing.get) results[0] ``` This ends up being a *little* faster than just using blaze (see below), but about half the time is spent setting thigs up in Dask. ``` from dask import delayed @delayed def mean_column(key, data, column='Bid_Price'): return key, blaze.data(data)[column].mean() class DDFs: # A (key, table) list datasets = [] def __init__(self, h5fname): h5in = h5py.File(h5fname) h5in.visititems(self.collect_dataset) def collect_dataset(self, key, table): if isinstance(table, h5py.Dataset): self.datasets.append(mean_column(key, table)) def compute_mean(self, limit=None): # Note that a limit of None includes all values self.results = {key: result for key, result in dd.compute(*self.datasets[:limit])} %%time ddfs = DDFs(fname) %%time ddfs.compute_mean() next(iter(ddfs.results.items())) # You can also compute individual results as needed ddfs.datasets[0].compute() ``` # Blaze? Holy crap! ``` spy_blaze = blaze.data(spy_h5py) %time spy_blaze['Ask_Price'].mean() taq_tb = tb.open_file(fname) spy_tb = taq_tb.get_node(max_sym) spy_blaze = blaze.data(spy_tb) %time spy_blaze['Bid_Price'].mean() taq_tb.close() ``` ## Read directly with Blaze Somehow this is not as impressive ``` %%time blaze_h5_file = blaze.data(fname) # This is rather nice blaze_h5_file.SPY.no_suffix.Bid_Price.mean() blaze_h5_file.ZFKOJB.no_suffix.Bid_Price.mean() ``` # Do some actual compute with Blaze ``` taq_h5py = h5py.File(fname) class SymStats: means = {} def compute_stats(self, key, table): if isinstance(table, h5py.Dataset): self.means[key] = blaze.data(table)['Bid_Price'].mean() ss = SymStats() %time taq_h5py.visititems(ss.compute_stats) means = iter(ss.means.items()) next(means) ss.means['SPY/no_suffix'] ``` # Pandas? ### To load with Pandas, you need to close the pytables session ``` taq_tb = tb.open_file(fname) taq_tb.close() pd.read_hdf? pd.read_hdf(fname, max_sym, start=0, stop=1, chunksize=1) max_sym fname %%timeit node = taq_tb.get_node(max_sym) pd.DataFrame.from_records(node[0:1]) %%timeit # I've also tried this with `.get_node()`, same speed pd.DataFrame.from_records(taq_tb.root.IXQAJE.no_suffix) %%timeit pd.read_hdf(fname, max_sym) # Pandas has optimizations it likes to do with %timeit spy_df = pd.read_hdf(fname, max_sym) # Actually do it spy_df = pd.read_hdf(fname, max_sym) # This is fast, but loading is slow... %timeit spy_df.Bid_Price.mean() ```
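One more option worth noting: if neither Dask nor Blaze is available, the same per-symbol reduction can be done with plain h5py and NumPy by streaming the table in row chunks, so memory use stays bounded by the chunk size. A minimal sketch (file and node names follow the small public test file used above; the chunk size is an arbitrary choice):

```
import h5py
import numpy as np

def chunked_column_mean(dataset, column='Bid_Price', chunk_rows=1_000_000):
    """Mean of one field of an HDF5 compound dataset, read chunk by chunk."""
    total, count = 0.0, 0
    for start in range(0, dataset.shape[0], chunk_rows):
        # Read a slab of rows (all fields), then pick the field in NumPy.
        block = dataset[start:start + chunk_rows][column]
        total += block.sum(dtype=np.float64)
        count += block.size
    return total / count

with h5py.File('../test-data/small_test_data_public.h5', 'r') as f:
    print(chunked_column_mean(f['/IXQAJE/no_suffix']))
```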
**Copyright 2021 The TensorFlow Authors.** ``` #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://www.tensorflow.org/model_optimization/guide/combine/pcqat_example"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/model-optimization/blob/master/tensorflow_model_optimization/g3doc/guide/combine/pcqat_example.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/model-optimization/blob/master/tensorflow_model_optimization/g3doc/guide/combine/pcqat_example.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View on GitHub</a> </td> <td> <a href="https://storage.googleapis.com/tensorflow_docs/model-optimization/tensorflow_model_optimization/g3doc/guide/combine/pcqat_example.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> </td> </table> # Sparsity and cluster preserving quantization aware training (PCQAT) Keras example ## Overview This is an end to end example showing the usage of the **sparsity and cluster preserving quantization aware training (PCQAT)** API, part of the TensorFlow Model Optimization Toolkit's collaborative optimization pipeline. ### Other pages For an introduction to the pipeline and other available techniques, see the [collaborative optimization overview page](https://www.tensorflow.org/model_optimization/guide/combine/collaborative_optimization). ### Contents In the tutorial, you will: 1. Train a `tf.keras` model for the MNIST dataset from scratch. 2. Fine-tune the model with pruning and see the accuracy and observe that the model was successfully pruned. 3. Apply sparsity preserving clustering on the pruned model and observe that the sparsity applied earlier has been preserved. 4. Apply QAT and observe the loss of sparsity and clusters. 5. Apply PCQAT and observe that both sparsity and clustering applied earlier have been preserved. 6. Generate a TFLite model and observe the effects of applying PCQAT on it. 7. Compare the sizes of the different models to observe the compression benefits of applying sparsity followed by the collaborative optimization techniques of sparsity preserving clustering and PCQAT. 8. Compare the accurracy of the fully optimized model with the un-optimized baseline model accuracy. ## Setup You can run this Jupyter Notebook in your local [virtualenv](https://www.tensorflow.org/install/pip?lang=python3#2.-create-a-virtual-environment-recommended) or [colab](https://colab.sandbox.google.com/). For details of setting up dependencies, please refer to the [installation guide](https://www.tensorflow.org/model_optimization/guide/install). ``` ! 
pip install -q tensorflow-model-optimization import tensorflow as tf import numpy as np import tempfile import zipfile import os ``` ## Train a tf.keras model for MNIST to be pruned and clustered ``` # Load MNIST dataset mnist = tf.keras.datasets.mnist (train_images, train_labels), (test_images, test_labels) = mnist.load_data() # Normalize the input image so that each pixel value is between 0 to 1. train_images = train_images / 255.0 test_images = test_images / 255.0 model = tf.keras.Sequential([ tf.keras.layers.InputLayer(input_shape=(28, 28)), tf.keras.layers.Reshape(target_shape=(28, 28, 1)), tf.keras.layers.Conv2D(filters=12, kernel_size=(3, 3), activation=tf.nn.relu), tf.keras.layers.MaxPooling2D(pool_size=(2, 2)), tf.keras.layers.Flatten(), tf.keras.layers.Dense(10) ]) opt = tf.keras.optimizers.Adam(learning_rate=1e-3) # Train the digit classification model model.compile(optimizer=opt, loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=['accuracy']) model.fit( train_images, train_labels, validation_split=0.1, epochs=10 ) ``` ### Evaluate the baseline model and save it for later usage ``` _, baseline_model_accuracy = model.evaluate( test_images, test_labels, verbose=0) print('Baseline test accuracy:', baseline_model_accuracy) _, keras_file = tempfile.mkstemp('.h5') print('Saving model to: ', keras_file) tf.keras.models.save_model(model, keras_file, include_optimizer=False) ``` ## Prune and fine-tune the model to 50% sparsity Apply the `prune_low_magnitude()` API to achieve the pruned model that is to be clustered in the next step. Refer to the [pruning comprehensive guide](https://www.tensorflow.org/model_optimization/guide/pruning/comprehensive_guide) for more information on the pruning API. ### Define the model and apply the sparsity API Note that the pre-trained model is used. ``` import tensorflow_model_optimization as tfmot prune_low_magnitude = tfmot.sparsity.keras.prune_low_magnitude pruning_params = { 'pruning_schedule': tfmot.sparsity.keras.ConstantSparsity(0.5, begin_step=0, frequency=100) } callbacks = [ tfmot.sparsity.keras.UpdatePruningStep() ] pruned_model = prune_low_magnitude(model, **pruning_params) # Use smaller learning rate for fine-tuning opt = tf.keras.optimizers.Adam(learning_rate=1e-5) pruned_model.compile( loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), optimizer=opt, metrics=['accuracy']) ``` ### Fine-tune the model, check sparsity, and evaluate the accuracy against baseline Fine-tune the model with pruning for 3 epochs. ``` # Fine-tune model pruned_model.fit( train_images, train_labels, epochs=3, validation_split=0.1, callbacks=callbacks) ``` Define helper functions to calculate and print the sparsity and clusters of the model. 
``` def print_model_weights_sparsity(model): for layer in model.layers: if isinstance(layer, tf.keras.layers.Wrapper): weights = layer.trainable_weights else: weights = layer.weights for weight in weights: if "kernel" not in weight.name or "centroid" in weight.name: continue weight_size = weight.numpy().size zero_num = np.count_nonzero(weight == 0) print( f"{weight.name}: {zero_num/weight_size:.2%} sparsity ", f"({zero_num}/{weight_size})", ) def print_model_weight_clusters(model): for layer in model.layers: if isinstance(layer, tf.keras.layers.Wrapper): weights = layer.trainable_weights else: weights = layer.weights for weight in weights: # ignore auxiliary quantization weights if "quantize_layer" in weight.name: continue if "kernel" in weight.name: unique_count = len(np.unique(weight)) print( f"{layer.name}/{weight.name}: {unique_count} clusters " ) ``` Let's strip the pruning wrapper first, then check that the model kernels were correctly pruned. ``` stripped_pruned_model = tfmot.sparsity.keras.strip_pruning(pruned_model) print_model_weights_sparsity(stripped_pruned_model) ``` ## Apply sparsity preserving clustering and check its effect on model sparsity in both cases Next, apply sparsity preserving clustering on the pruned model and observe the number of clusters and check that the sparsity is preserved. ``` import tensorflow_model_optimization as tfmot from tensorflow_model_optimization.python.core.clustering.keras.experimental import ( cluster, ) cluster_weights = tfmot.clustering.keras.cluster_weights CentroidInitialization = tfmot.clustering.keras.CentroidInitialization cluster_weights = cluster.cluster_weights clustering_params = { 'number_of_clusters': 8, 'cluster_centroids_init': CentroidInitialization.KMEANS_PLUS_PLUS, 'preserve_sparsity': True } sparsity_clustered_model = cluster_weights(stripped_pruned_model, **clustering_params) sparsity_clustered_model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=['accuracy']) print('Train sparsity preserving clustering model:') sparsity_clustered_model.fit(train_images, train_labels,epochs=3, validation_split=0.1) ``` Strip the clustering wrapper first, then check that the model is correctly pruned and clustered. ``` stripped_clustered_model = tfmot.clustering.keras.strip_clustering(sparsity_clustered_model) print("Model sparsity:\n") print_model_weights_sparsity(stripped_clustered_model) print("\nModel clusters:\n") print_model_weight_clusters(stripped_clustered_model) ``` ## Apply QAT and PCQAT and check effect on model clusters and sparsity Next, apply both QAT and PCQAT on the sparse clustered model and observe that PCQAT preserves weight sparsity and clusters in your model. Note that the stripped model is passed to the QAT and PCQAT API. 
``` # QAT qat_model = tfmot.quantization.keras.quantize_model(stripped_clustered_model) qat_model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=['accuracy']) print('Train qat model:') qat_model.fit(train_images, train_labels, batch_size=128, epochs=1, validation_split=0.1) # PCQAT quant_aware_annotate_model = tfmot.quantization.keras.quantize_annotate_model( stripped_clustered_model) pcqat_model = tfmot.quantization.keras.quantize_apply( quant_aware_annotate_model, tfmot.experimental.combine.Default8BitClusterPreserveQuantizeScheme(preserve_sparsity=True)) pcqat_model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=['accuracy']) print('Train pcqat model:') pcqat_model.fit(train_images, train_labels, batch_size=128, epochs=1, validation_split=0.1) print("QAT Model clusters:") print_model_weight_clusters(qat_model) print("\nQAT Model sparsity:") print_model_weights_sparsity(qat_model) print("\nPCQAT Model clusters:") print_model_weight_clusters(pcqat_model) print("\nPCQAT Model sparsity:") print_model_weights_sparsity(pcqat_model) ``` ## See compression benefits of PCQAT model Define helper function to get zipped model file. ``` def get_gzipped_model_size(file): # It returns the size of the gzipped model in kilobytes. _, zipped_file = tempfile.mkstemp('.zip') with zipfile.ZipFile(zipped_file, 'w', compression=zipfile.ZIP_DEFLATED) as f: f.write(file) return os.path.getsize(zipped_file)/1000 ``` Observe that applying sparsity, clustering and PCQAT to a model yields significant compression benefits. ``` # QAT model converter = tf.lite.TFLiteConverter.from_keras_model(qat_model) converter.optimizations = [tf.lite.Optimize.DEFAULT] qat_tflite_model = converter.convert() qat_model_file = 'qat_model.tflite' # Save the model. with open(qat_model_file, 'wb') as f: f.write(qat_tflite_model) # PCQAT model converter = tf.lite.TFLiteConverter.from_keras_model(pcqat_model) converter.optimizations = [tf.lite.Optimize.DEFAULT] pcqat_tflite_model = converter.convert() pcqat_model_file = 'pcqat_model.tflite' # Save the model. with open(pcqat_model_file, 'wb') as f: f.write(pcqat_tflite_model) print("QAT model size: ", get_gzipped_model_size(qat_model_file), ' KB') print("PCQAT model size: ", get_gzipped_model_size(pcqat_model_file), ' KB') ``` ## See the persistence of accuracy from TF to TFLite Define a helper function to evaluate the TFLite model on the test dataset. ``` def eval_model(interpreter): input_index = interpreter.get_input_details()[0]["index"] output_index = interpreter.get_output_details()[0]["index"] # Run predictions on every image in the "test" dataset. prediction_digits = [] for i, test_image in enumerate(test_images): if i % 1000 == 0: print(f"Evaluated on {i} results so far.") # Pre-processing: add batch dimension and convert to float32 to match with # the model's input data format. test_image = np.expand_dims(test_image, axis=0).astype(np.float32) interpreter.set_tensor(input_index, test_image) # Run inference. interpreter.invoke() # Post-processing: remove batch dimension and find the digit with highest # probability. output = interpreter.tensor(output_index) digit = np.argmax(output()[0]) prediction_digits.append(digit) print('\n') # Compare prediction results with ground truth labels to calculate accuracy. 
prediction_digits = np.array(prediction_digits) accuracy = (prediction_digits == test_labels).mean() return accuracy ``` Evaluate the model, which has been pruned, clustered and quantized, and then see that the accuracy from TensorFlow persists in the TFLite backend. ``` interpreter = tf.lite.Interpreter(pcqat_model_file) interpreter.allocate_tensors() pcqat_test_accuracy = eval_model(interpreter) print('Pruned, clustered and quantized TFLite test_accuracy:', pcqat_test_accuracy) print('Baseline TF test accuracy:', baseline_model_accuracy) ``` ## Conclusion In this tutorial, you learned how to create a model, prune it using the `prune_low_magnitude()` API, and apply sparsity preserving clustering using the `cluster_weights()` API to preserve sparsity while clustering the weights. Next, sparsity and cluster preserving quantization aware training (PCQAT) was applied to preserve model sparsity and clusters while using QAT. The final PCQAT model was compared to the QAT one to show that sparsity and clusters are preserved in the former and lost in the latter. Next, the models were converted to TFLite to show the compression benefits of chaining sparsity, clustering, and PCQAT model optimization techniques and the TFLite model was evaluated to ensure that the accuracy persists in the TFLite backend. Finally, the PCQAT TFLite model accuracy was compared to the pre-optimization baseline model accuracy to show that collaborative optimization techniques managed to achieve the compression benefits while maintaining a similar accuracy compared to the original model.
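As an optional extra check (not part of the tutorial above), the cluster and sparsity statistics can also be inspected directly on the converted TFLite file by reading its constant weight tensors from the interpreter. This is a rough sketch; which tensors are readable and how they are named varies between TFLite converter versions.

```
import numpy as np
import tensorflow as tf

def print_tflite_weight_stats(tflite_file):
    interpreter = tf.lite.Interpreter(model_path=tflite_file)
    interpreter.allocate_tensors()
    for detail in interpreter.get_tensor_details():
        try:
            tensor = interpreter.get_tensor(detail['index'])
        except ValueError:
            continue  # some (non-constant) tensors cannot be read this way
        if tensor.ndim < 2:
            continue  # skip biases and scalars, keep kernel-like tensors
        sparsity = np.mean(tensor == 0)
        unique_values = len(np.unique(tensor))
        print(f"{detail['name']}: {unique_values} unique values, {sparsity:.2%} zeros")

print_tflite_weight_stats('pcqat_model.tflite')  # file written above
```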
# Testing cnn for classifying universes Nov 10, 2020 ``` import argparse import os import random import torch import torch.nn as nn import torch.nn.parallel import torch.backends.cudnn as cudnn import torch.optim as optim import torch.utils.data from torchsummary import summary from torch.utils.data import DataLoader, TensorDataset import numpy as np import pandas as pd import matplotlib.pyplot as plt import matplotlib.animation as animation from IPython.display import HTML import time from datetime import datetime import glob import pickle import yaml import logging %matplotlib widget ``` ## Modules ``` def f_load_config(config_file): with open(config_file) as f: config = yaml.load(f, Loader=yaml.SafeLoader) return config ### Transformation functions for image pixel values def f_transform(x): return 2.*x/(x + 4.) - 1. def f_invtransform(s): return 4.*(1. + s)/(1. - s) # custom weights initialization called on netG and netD def weights_init(m): classname = m.__class__.__name__ if classname.find('Conv') != -1: nn.init.normal_(m.weight.data, 0.0, 0.02) elif classname.find('BatchNorm') != -1: nn.init.normal_(m.weight.data, 1.0, 0.02) nn.init.constant_(m.bias.data, 0) # Generator Code class View(nn.Module): def __init__(self, shape): super(View, self).__init__() self.shape = shape def forward(self, x): return x.view(*self.shape) class Discriminator(nn.Module): def __init__(self, ngpu, nz,nc,ndf,n_classes,kernel_size,stride,d_padding): super(Discriminator, self).__init__() self.ngpu = ngpu self.main = nn.Sequential( # input is (nc) x 64 x 64 # nn.Conv2d(in_channels, out_channels, kernel_size,stride,padding,output_padding,groups,bias, Dilation,padding_mode) nn.Conv2d(nc, ndf,kernel_size, stride, d_padding, bias=True), nn.BatchNorm2d(ndf,eps=1e-05, momentum=0.9, affine=True), nn.LeakyReLU(0.2, inplace=True), # state size. (ndf) x 32 x 32 nn.Conv2d(ndf, ndf * 2, kernel_size, stride, d_padding, bias=True), nn.BatchNorm2d(ndf * 2,eps=1e-05, momentum=0.9, affine=True), nn.LeakyReLU(0.2, inplace=True), # state size. (ndf*2) x 16 x 16 nn.Conv2d(ndf * 2, ndf * 4, kernel_size, stride, d_padding, bias=True), nn.BatchNorm2d(ndf * 4,eps=1e-05, momentum=0.9, affine=True), nn.LeakyReLU(0.2, inplace=True), # state size. (ndf*4) x 8 x 8 nn.Conv2d(ndf * 4, ndf * 8, kernel_size, stride, d_padding, bias=True), nn.BatchNorm2d(ndf * 8,eps=1e-05, momentum=0.9, affine=True), nn.LeakyReLU(0.2, inplace=True), # state size. 
(ndf*8) x 4 x 4 nn.Flatten(), nn.Linear(nc*ndf*8*8*8, n_classes) # nn.Sigmoid() ) def forward(self, input): return self.main(input) ``` ## Main code ``` torch.backends.cudnn.benchmark=True t0=time.time() ################################# ###### Initialize variables ####### config_file='config_128.yaml' config_dict=f_load_config(config_file) print(config_dict) workers=config_dict['training']['workers'] nc,nz,ngf,ndf=config_dict['training']['nc'],config_dict['training']['nz'],config_dict['training']['ngf'],config_dict['training']['ndf'] lr,beta1=config_dict['training']['lr'],config_dict['training']['beta1'] kernel_size,stride=config_dict['training']['kernel_size'],config_dict['training']['stride'] g_padding,d_padding=config_dict['training']['g_padding'],config_dict['training']['d_padding'] flip_prob=config_dict['training']['flip_prob'] image_size=config_dict['data']['image_size'] checkpoint_size=config_dict['data']['checkpoint_size'] num_imgs=config_dict['data']['num_imgs'] ip_fname=config_dict['data']['ip_fname'] op_loc=config_dict['data']['op_loc'] # Overriding configs in .yaml file (different for jupyter notebook) ngpu=1 batch_size=128 spec_loss_flag=True checkpoint_size=50 num_imgs=2000 # Number of images to use num_epochs=4 lr=0.0002 n_classes=6 ### Initialize random seed (different for Jpt notebook) manualSeed=21245 print("Random Seed: ", manualSeed) random.seed(manualSeed) torch.manual_seed(manualSeed) device = torch.device("cuda" if (torch.cuda.is_available() and ngpu > 0) else "cpu") print('Device:',device) # ################################# # ####### Read data and precompute ###### # # ip_fname='/global/cfs/cdirs/m3363/vayyar/cosmogan_data/raw_data/128_square/dataset_2_smoothing_200k/norm_1_train_val.npy' # ip_fname='/global/cfs/cdirs/m3363/vayyar/cosmogan_data/raw_data/128_square/dataset_4_four_universes_6k_cnn/data_x.npy' # labels='/global/cfs/cdirs/m3363/vayyar/cosmogan_data/raw_data/128_square/dataset_4_four_universes_6k_cnn/data_y.npy' # img=np.load(ip_fname)[:num_imgs].transpose(0,1,2,3) # t_img=torch.from_numpy(img) # print(img.shape,t_img.shape) # dataset=TensorDataset(t_img) # dataloader=DataLoader(dataset,batch_size=batch_size,shuffle=True,num_workers=1,drop_last=True) ################################# ###### Build Networks ### print("Building CNN") # Create Discriminator netD = Discriminator(ngpu, nz,nc,ndf,n_classes,kernel_size,stride,g_padding).to(device) netD.apply(weights_init) print(netD) summary(netD,(1,128,128)) # Handle multi-gpu if desired ngpu=torch.cuda.device_count() print("Number of GPUs used",ngpu) if (device.type == 'cuda') and (ngpu > 1): netD = nn.DataParallel(netD, list(range(ngpu))) # Initialize BCELoss function criterion = nn.CrossEntropyLoss() optimizer = optim.SGD(netD.parameters(), lr=0.001, momentum=0.9) # fixed_noise = torch.randn(batch_size, 1, 1, nz, device=device) #Latent vectors to view G progress # Setup Adam optimizers for both G and D # optimizerD = optim.Adam(netD.parameters(), lr=lr, betas=(beta1, 0.999),eps=1e-7) ################################# ###### Set up directories ####### (different for Jpt notebook) # run_suffix='_nb_test' # ### Create prefix for foldername # now=datetime.now() # fldr_name=now.strftime('%Y%m%d_%H%M%S') ## time format # # print(fldr_name) # save_dir=op_loc+fldr_name+run_suffix # if not os.path.exists(save_dir): # os.makedirs(save_dir+'/models') # os.makedirs(save_dir+'/images') # Fresh start # iters = 0; start_epoch=0 # best_chi1,best_chi2=1e10,1e10 # 
ip_fname='/global/cfs/cdirs/m3363/vayyar/cosmogan_data/raw_data/128_square/dataset_4_four_universes_6k_cnn/data_x.npy' # labels_file='/global/cfs/cdirs/m3363/vayyar/cosmogan_data/raw_data/128_square/dataset_4_four_universes_6k_cnn/data_y.npy' # ids_file='/global/cfs/cdirs/m3363/vayyar/cosmogan_data/raw_data/128_square/dataset_4_four_universes_6k_cnn/data_id.npy' # img=np.load(ip_fname) # labels=np.load(labels_file) # ids=np.load(ids_file) # t_img=torch.from_numpy(img) # print(img.shape,t_img.shape) ## Read data from dataframe data_dir='/global/cfs/cdirs/m3363/vayyar/cosmogan_data/raw_data/128_square/dataset_4_four_universes_6k_cnn/' df_data=pd.read_pickle(data_dir+'/df_data.pkle') df_data=df_data.sample(frac=1,random_state=20).reset_index(drop=True) train_size,val_size,test_size=0.7,0.1,0.1 data_size=df_data.shape[0] df_data[['ID','label']].head() idx1,idx2,idx3=int(train_size*data_size),int((train_size+val_size)*data_size),int((train_size+val_size+test_size)*data_size) print(idx1,idx2,idx3) df_temp=df_data.loc[np.arange(0,idx1)] dataset=TensorDataset(torch.Tensor(np.stack(df_temp.img.values)),torch.Tensor(df_temp.label.values)) train_loader=DataLoader(dataset,batch_size=batch_size,shuffle=True,num_workers=1,drop_last=True) df_temp=df_data.loc[np.arange(idx1,idx2)] dataset=TensorDataset(torch.Tensor(np.stack(df_temp.img.values)),torch.Tensor(df_temp.label.values)) val_loader=DataLoader(dataset,batch_size=16,shuffle=True,num_workers=1,drop_last=True) df_temp=df_data.loc[np.arange(idx2,idx3)] dataset=TensorDataset(torch.Tensor(np.stack(df_temp.img.values)),torch.Tensor(df_temp.label.values)) test_loader=DataLoader(dataset,batch_size=8,shuffle=True,num_workers=1,drop_last=True) ## Test model def f_test(data_loader,netD): netD.eval() correct,total=0,0 with torch.no_grad(): for count,data in enumerate(data_loader): images,labels=data[0].to(device),data[1].to(device) outputs=netD(images) _,predictions=torch.max(outputs,1) total+=labels.size(0) correct+=(predictions==labels).sum().item() accuracy=(correct/total)*100 # print("Accuracy %",accuracy) # print(correct,total) return accuracy accuracy=[] for epoch in range(0,4): running_loss=0.0 print("Epoch",epoch) for i, data in enumerate(train_loader): # print(images.shape,labels.shape) images,labels=data[0].to(device),data[1].to(device) optimizer.zero_grad() # netD.train(); ### Need to add these after inference and before training netD.zero_grad() labels=labels.long() output = netD(images) loss= criterion(output, labels) loss.backward() optimizer.step() running_loss+=loss.item() if i%10==0: accuracy.append(f_test(val_loader,netD)) netD.train() plt.figure() plt.plot(accuracy) ## Test model f_test(test_loader,netD) ```
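Since the class counts differ between universes, the single accuracy number from `f_test` can hide weak classes. The helper below is a small additional sketch (not part of the original experiment) that reports accuracy per class; the call at the bottom assumes the `test_loader`, `netD`, `n_classes` and `device` objects defined above.

```
import torch

def f_test_per_class(data_loader, model, n_classes, device):
    """Accuracy of each class separately, in percent."""
    correct = torch.zeros(n_classes)
    total = torch.zeros(n_classes)
    model.eval()
    with torch.no_grad():
        for images, labels in data_loader:
            images, labels = images.to(device), labels.to(device).long()
            _, predictions = torch.max(model(images), 1)
            for cls in range(n_classes):
                mask = labels == cls
                total[cls] += mask.sum().item()
                correct[cls] += (predictions[mask] == cls).sum().item()
    return [100.0 * (c / t).item() if t > 0 else float('nan')
            for c, t in zip(correct, total)]

print(f_test_per_class(test_loader, netD, n_classes, device))
```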
# User Demo ``` url = "http://127.0.0.1:5000" filepath = 'C:\\Users\\reonh\Documents\\NUS\AY2022_S1\Capstone\capstone_21\python_backend\database\lpdlprnet\plate_2.jpg' folderpath = 'C:\\Users\\reonh\Documents\\NUS\AY2022_S1\Capstone\capstone_21\python_backend\database\lpdlprnet\\' filename = 'plate.jpg' ``` ## Check Server Status ``` import requests response = requests.get( url + "/api/lpdlprnet/" + 'internal') print(response.json(), flush=True) ``` ## Scenario: Developer needs to recognise license plates for the following images ### Get Predictions ``` import matplotlib.pyplot as plt import matplotlib.image as mpimg files = [folderpath + 'plate.jpg'] def process(filename: str=None): """ View multiple images stored in files, stacking vertically Arguments: filename: str - path to filename containing image """ image = mpimg.imread(filename) plt.figure() plt.imshow(image) return image im = process(files[0]) M = im.shape[0]//10 N = im.shape[1]//10 tile_coord = [[x,x+M,y,y+N] for x in range(0,im.shape[0],M) for y in range(0,im.shape[1],N)] ts = im.copy() a = tile_coord[0][0] b = tile_coord[0][1] c = tile_coord[0][2] d = tile_coord[0][3] for i,matrix in enumerate(tile_coord): a,b,c,d = matrix ts = im.copy() ts[a:b, c:d] = 255 plt.imsave(str(i)+'range_rover.jpg', ts) # Used for benchmarking # [x1, x2, y1, y2] and [x3, x4, y3, y4] def calculate_iou_from_coords(bx1, bx2): assert (is_bounding_box(bx1) and is_bounding_box(bx2)) #map list of coordinates into bounding box bb1, bb2 = {},{} bb1['x1'], bb1['x2'], bb1['y1'], bb1['y2'] = bx1 bb2['x1'], bb2['x2'], bb2['y1'], bb2['y2'] = bx2 assert bb1['x1'] < bb1['x2'] assert bb1['y1'] < bb1['y2'] assert bb2['x1'] < bb2['x2'] assert bb2['y1'] < bb2['y2'] # determine the coordinates of the intersection rectangle x_left = max(bb1['x1'], bb2['x1']) y_top = max(bb1['y1'], bb2['y1']) x_right = min(bb1['x2'], bb2['x2']) y_bottom = min(bb1['y2'], bb2['y2']) # print(x_left, y_top, x_right, y_bottom) if x_right < x_left or y_bottom < y_top: return 0.0 # The intersection of two axis-aligned bounding boxes is always an # axis-aligned bounding box intersection_area = (x_right - x_left) * (y_bottom - y_top) # compute the area of both AABBs bb1_area = (bb1['x2'] - bb1['x1']) * (bb1['y2'] - bb1['y1']) bb2_area = (bb2['x2'] - bb2['x1']) * (bb2['y2'] - bb2['y1']) # compute the intersection over union by taking the intersection # area and dividing it by the sum of prediction + ground-truth # areas - the interesection area iou = intersection_area / float(bb1_area + bb2_area - intersection_area) # print(iou, intersection_area, bb1_area, bb2_area) assert iou >= 0.0 assert iou <= 1.0 return iou def is_bounding_box(box): return type(box) in [list, tuple, pd.Series, np.ndarray] and len(box) == 4 # [x1, y1, w1, h1] and [x2, y2, w2, h2] def calculate_iou_from_dim(bx1, bx2): return calculate_iou_from_coords(*list(map(convert_dim_to_coord, [bx1, bx2]))) def convert_dim_to_coord(args): x,y,w,h = args return [x, x+w, y, y+h] def chop_image(im, n): """ Chop image into n segments """ import matplotlib.pyplot as plt org = plt.imread(im) im = org.copy() M = im.shape[0]//n N = im.shape[1]//n tile_coord = [[x,x+M,y,y+N] for x in range(0,im.shape[0],M) for y in range(0,im.shape[1],N)] response =[org] for i,matrix in enumerate(tile_coord): a,b,c,d = matrix ts = im.copy() ts[a:b, c:d] = 0 response.append(ts) return response plate = plt.imread('C:\\Users\\reonh\\Documents\\NUS\\AY2022_S1\\Capstone\\capstone_21\\python_backend\\database\\lpdlprnet\\plate.jpg') oplate = 
plt.imread('C:\\Users\\reonh\\Documents\\NUS\\AY2022_S1\\Capstone\\capstone_21\\python_backend\\triton_client\\lpdnet\\input\\internal\\081021_042837\\0plate.jpg') plt.imshow(plate-oplate) import pandas as pd import matplotlib.pyplot as plt responses = pd.read_pickle('python_backend/response') responses conf = {} for r in responses: key = r['file_name'].replace('plate.jpg','') try: sc = r['all_bboxes'][0]['confidence_score'] except: sc = 0.01 conf[key] = sc conf chunks = chop_image(folderpath + 'plate_2.jpg', 10) def conf_color(x): return 255/x org = plt.imread(folderpath + 'plate.jpg') n = 10 im = org.copy() M = im.shape[0]//n N = im.shape[1]//n tile_coord = [[x,x+M,y,y+N] for x in range(0,im.shape[0],M) for y in range(0,im.shape[1],N)] response =[] for i,matrix in enumerate(tile_coord): a,b,c,d = matrix im[a:b,c:d] = conf_color(conf[str(i)]) plt.imshow(org) plt.imshow(im, alpha=0.8) import requests baseURL = url request_files=[ ('image',(files[0],open(files[0],'rb'),'image/jpeg')) , ('image',(files[1],open(files[1],'rb'),'image/jpeg'))] headers = {} payload = {'filename':['plate.jpg', 'plate_2.jpg']} response = requests.post( baseURL + "/api/lpdlprnet/internal", headers=headers, data=payload, files=request_files) print(response.json()['0']['0_lpr']['license_plate']) print(response.json()['1']['0_lpr']['license_plate']) ``` ### Can we explain this output? ``` import requests baseURL = url filename = filename filepath = filepath files=[ ('image',(filename,open(filepath,'rb'),'image/jpeg')) ] headers = {} response = requests.post( baseURL + "/api/lpdlprnet/explain/internal", headers=headers, data=payload, files=files) from IPython.display import Markdown, display display(Markdown(response.json()['explain_markdown'])) ``` ### How to write this code? ``` import requests baseURL = url files=[ ('image',(filename,open(filepath,'rb'),'image/jpeg')) ] headers = {} response = requests.post( baseURL + "/api/lpdlprnet/internal", headers=headers, data=payload, files=files) response.json() ```
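To make the coordinate convention of `calculate_iou_from_coords` above concrete ([x1, x2, y1, y2], i.e. both x-bounds first, then both y-bounds), here is a tiny self-contained re-statement with a hand-checkable case: two unit squares overlapping by half give IoU = 0.5 / 1.5 = 1/3.

```
def iou(bx1, bx2):
    # Boxes as [x1, x2, y1, y2], same convention as calculate_iou_from_coords.
    x_left, y_top = max(bx1[0], bx2[0]), max(bx1[2], bx2[2])
    x_right, y_bottom = min(bx1[1], bx2[1]), min(bx1[3], bx2[3])
    if x_right < x_left or y_bottom < y_top:
        return 0.0
    intersection = (x_right - x_left) * (y_bottom - y_top)
    area1 = (bx1[1] - bx1[0]) * (bx1[3] - bx1[2])
    area2 = (bx2[1] - bx2[0]) * (bx2[3] - bx2[2])
    return intersection / (area1 + area2 - intersection)

print(iou([0, 1, 0, 1], [0.5, 1.5, 0, 1]))  # 0.333...: intersection 0.5, union 1.5
print(iou([0, 1, 0, 1], [2, 3, 2, 3]))      # 0.0: no overlap
```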
#### Abstract Classes: contain abstract methods

Abstract methods are methods that are only declared; they have no implementation.

** All abstract methods must be implemented in the child class (mandatory)

Module -- abc
 |
 |---> ABC (class)
 |
 |---> abstractmethod (used as a decorator)

** You cannot create objects of an abstract class (even if it contains only one abstract method)

```
from abc import ABC, abstractmethod

class Automobile(ABC):
    def __init__(self):
        print("Automobile Created")

    def start(self):
        pass

    def stop(self):
        pass

    def drive(self):
        pass

# Works: without @abstractmethod these are ordinary methods,
# so Automobile can still be instantiated
c = Automobile()

from abc import ABC, abstractmethod

class Automobile(ABC):
    def __init__(self):
        print("Automobile Created")

    @abstractmethod
    def start(self):
        pass

    @abstractmethod
    def stop(self):
        pass

    @abstractmethod
    def drive(self):
        pass

# Raises TypeError: Automobile now has abstract methods,
# so it cannot be instantiated directly
c = Automobile()

from abc import ABC, abstractmethod

class Automobile(ABC):
    def __init__(self):
        print("Automobile Created")

    @abstractmethod
    def start(self):
        pass

    @abstractmethod
    def stop(self):
        pass

    @abstractmethod
    def drive(self):
        pass

class Car(Automobile):
    def __init__(self, name):
        print("Car created")
        self.name = name

    def start(self):
        pass

    def stop(self):
        pass

    def drive(self):
        pass

class Bus(Automobile):
    def __init__(self, name):
        print("Bus Created")
        self.name = name

    def start(self):
        pass

    def stop(self):
        pass

    def drive(self):
        pass

c = Car("Honda")
d = Bus("Delhi Metro Bus")
```

#### 1) An object of an abstract class cannot be created
#### 2) Implement all the abstract methods in the child class

```
# Predict the output:
from abc import ABC, abstractmethod

class A(ABC):
    @abstractmethod
    def fun1(self):
        pass

    @abstractmethod
    def fun2(self):
        pass

o = A()
o.fun1()

# Predict the output:
from abc import ABC, abstractmethod

class A(ABC):
    @abstractmethod
    def fun1(self):
        pass

    @abstractmethod
    def fun2(self):
        pass

class B(A):
    def fun1(self):
        print("function 1 called")

o = B()
o.fun1()

# Predict the output:
from abc import ABC, abstractmethod

class A(ABC):
    @abstractmethod
    def fun1(self):
        pass

    @abstractmethod
    def fun2(self):
        pass

class B(A):
    def fun1(self):
        print("function 1 called")

    def fun2(self):
        print("function 2 called")

o = B()
o.fun1()
```

** In the third example all the abstract methods of class A are implemented: class B inherits from class A, an object of class B is created, and fun1() is called on it, so the output is printed. The first two examples raise a TypeError instead, either because an object of the abstract class is created directly (Example 1) or because only one of the abstract methods is implemented (Example 2).
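As an extra illustration beyond the lesson above (using the same `A`/`B` classes), the snippet below shows how Python itself reports which methods are still abstract: `ABCMeta` records them in `__abstractmethods__`, and the `TypeError` message names the missing ones (exact wording varies between Python versions).

```
from abc import ABC, abstractmethod

class A(ABC):
    @abstractmethod
    def fun1(self):
        pass

    @abstractmethod
    def fun2(self):
        pass

class B(A):          # implements only fun1, so B is still abstract
    def fun1(self):
        print("function 1 called")

print(A.__abstractmethods__)   # frozenset({'fun1', 'fun2'})
print(B.__abstractmethods__)   # frozenset({'fun2'})

try:
    B()
except TypeError as err:
    print(err)   # e.g. "Can't instantiate abstract class B with abstract method fun2"
```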
``` #imports import numpy as np import matplotlib.pyplot as plt from sklearn.neighbors import KNeighborsClassifier from sklearn.model_selection import GridSearchCV from sklearn.svm import SVC from sklearn import metrics from sklearn.tree import DecisionTreeClassifier from sklearn.linear_model import LogisticRegression from sklearn.ensemble import RandomForestClassifier, VotingClassifier from sklearn.neural_network import MLPClassifier from sklearn.decomposition import PCA from imblearn.over_sampling import RandomOverSampler, SMOTE from imblearn.pipeline import make_pipeline from imblearn.pipeline import Pipeline import time ``` # 1. Classification of Facial Expressions ### 1.1 Importing the data ``` #load the data data = np.load("2023683_face.npz") data.files data["X_train"] #put the data into corresponding arrays X_train = data["X_train"] y_train = data["y_train"] X_test = data["X_test"] y_test = data["y_test"] X_valid = data["X_valid"] y_valid = data["y_valid"] ``` ### 1.2 EDA ``` print(X_train.shape) print(y_train.shape) #unique categories np.unique(y_train) X_train X_train[0].shape ``` #### 1.2.1 Plotting an image from each category in the data ``` #finding the indexes of the first occurances of the unique categories unique_categories = np.unique(y_train, return_index=True)[1] #idea adopted from practical 10 fig, axs = plt.subplots(1, 3, figsize=(10, 10), subplot_kw={'xticks': (), 'yticks': ()}) for index, ax in zip(unique_categories, [0,1,2]): image = X_train[index] # an instance has 2304 features meaning that the square root of this number # gives the the dimensions of the image image.resize(48, 48) axs[ax].imshow(image) axs[ax].set_title(y_train[index]) ``` #### 1.2.2 Displaying the number of observations for each class ``` #combining all the target labels combined_labels = np.concatenate((y_train, y_test, y_valid)) combined_labels.shape labels, counts = np.unique(combined_labels, return_counts=True) plt.bar(labels, counts) plt.xticks(labels) plt.title("Number of observations for each class") plt.xlabel("Class") plt.ylabel("Count") #balancing the data using RandomOverSampler ros = RandomOverSampler(random_state=202, sampling_strategy='not majority') X_train_res, y_train_res = ros.fit_resample(X_train, y_train) X_test_res, y_test_res = ros.fit_resample(X_test, y_test) X_valid_res, y_valid_res = ros.fit_resample(X_valid, y_valid) ``` The data seems to be unbalanced because category 0 is observed 1200 times, 1 - a little less than 800 and 2 - around 400. Fitting a PCA: ``` #starting with a PCA with 250 components pca = PCA(n_components=250, random_state=202) pca.fit(X_train_res) X_train_pca = pca.transform(X_train_res) X_test_pca = pca.transform(X_test_res) X_train_pca.shape #plotting the explained variance ratio: looks like the best number of components is around 75 plt.plot(np.cumsum(pca.explained_variance_ratio_)) plt.axhline(y=0.95, color='r', linestyle='-') #initializing a PCA with 75 components pca = PCA(n_components=75, random_state=202) pca.fit(X_train_res) X_train_pca = pca.transform(X_train_res) X_test_pca = pca.transform(X_test_res) X_valid_pca = pca.transform(X_valid_res) ``` ### 1.3 Classfication task #### 1.3.1 Training KNN Classifier Initializing the baseline model. 
``` start = time.time() knn = KNeighborsClassifier(n_neighbors=1) knn.fit(X_train_pca, y_train_res) pred = knn.predict(X_test_pca) print(f"Test set score: {knn.score(X_test_pca, y_test_res)}") end = time.time() print(f"Time elapsed: {end-start}") print(f"Confusion matrix:\n {metrics.confusion_matrix(y_test_res, pred)}") target_names = ['class 0', 'class 1', 'class 2'] print(f"Classification report:\n {metrics.classification_report(y_test_res, pred, target_names=target_names)}") ``` Plotting missclassified images. ``` #displaying two missclassified images for each class (code adapted from workbook 11) index = 0 misclassifiedIndexes = [] lab_0 = 0 lab_1 = 0 lab_2 = 0 for label, predict in zip(y_test_res, pred): if label != predict: #we need this part in order to have only 2 images from each class if label == 0 and lab_0 < 2: misclassifiedIndexes.append(index) lab_0 += 1 elif label == 1 and lab_1 < 2: misclassifiedIndexes.append(index) lab_1 += 1 elif label == 2 and lab_2 < 2: misclassifiedIndexes.append(index) lab_2 += 1 index +=1 # plot the corresponding image of the 6th to 9th element in the array plt.figure(figsize=(20, 20)) for plotIndex, badIndex in enumerate(misclassifiedIndexes): plt.subplot(1, 6, plotIndex + 1) plt.imshow(np.reshape(X_test[badIndex], (48,48))) plt.title(f'Predicted: {pred[badIndex]}, Actual: {y_test_res[badIndex]}', fontsize = 15) ``` #### 1.3.2 Finding the optimal number of neighbors for KNN ``` #finding the best number of neighbors using a loop n_neighbors = [1, 2, 3, 4, 5, 6] max_accuracy = 0 best_neighbors = 0 start = time.time() for n in n_neighbors: knn = KNeighborsClassifier(n_neighbors=n) knn.fit(X_train_pca, y_train_res) if knn.score(X_valid_pca, y_valid_res) > max_accuracy: max_accuracy = knn.score(X_valid_pca, y_valid_res) best_neighbors = n print(f"Best accuracy: {max_accuracy:.2f}") print(f"Optimal number of neighbors: {best_neighbors}") end = time.time() print(f"Time elapsed: {end-start}") #evaluate model on test set knn = KNeighborsClassifier(n_neighbors=best_neighbors) knn.fit(X_train_pca, y_train_res) test_score = knn.score(X_test_pca, y_test_res) print(f"Test set score with best parameters: {test_score}") print(f"Classification report:\n {metrics.classification_report(y_test_res, knn.predict(X_test_pca), target_names=target_names)}") ``` #### 1.3.3 Fitting different classifiers For the hyperparameter tunings I used this resource. It had some useful tips for the different models. 
https://machinelearningmastery.com/hyperparameters-for-classification-machine-learning-algorithms/ ``` #SVM Classifier without hyperparameter tuning start = time.time() svc = SVC() svc.fit(X_train_pca, y_train_res) pred = svc.predict(X_test_pca) end = time.time() print(f"Time elapsed: {end-start}") print(f"Test set score: {svc.score(X_test_pca, y_test_res)}") print(f"Confusion matrix:\n{metrics.confusion_matrix(pred, y_test_res)}") print(f"Classification report:\n {metrics.classification_report(y_test_res, pred, target_names=target_names)}") #SVM Classifier with hyperparameter tuning best_score = 0 best_parameters = {} start = time.time() for C in [0.01, 0.1, 1, 10, 100]: for kernel in ["rbf", 'poly']: #fitting a model for each combination of hyperparemeters svc = SVC(C=C, kernel=kernel) svc.fit(X_train_pca, y_train_res) #evaluating the model on the validation set score = svc.score(X_valid_pca, y_valid_res) if score > best_score: best_score = score best_parameters = {'C': C, 'kernel':kernel} print(f"Best score: {best_score:.2f}") print(f"Best hyperparameters: {best_parameters}") end = time.time() print(f"Time elapsed: {end-start}") # fitting the model with best parameters on the test set svc = SVC(**best_parameters) svc.fit(X_train_pca, y_train_res) test_score = svc.score(X_test_pca, y_test_res) print(f"Test set score with best parameters: {test_score}") print(f"Classification report:\n {metrics.classification_report(y_test_res, svc.predict(X_test_pca), target_names=target_names)}") #Decision Tree Classifier without hyperparameter tuning start = time.time() dtc = DecisionTreeClassifier(random_state = 202) dtc.fit(X_train_pca, y_train_res) end = time.time() print(f"Time elapsed: {end-start}") pred = dtc.predict(X_test_pca) print(f"Test set score: {dtc.score(X_test_pca, y_test_res)}") print(f"Confusion matrix:\n{metrics.confusion_matrix(pred, y_test_res)}") print(f"Classification report:\n {metrics.classification_report(y_test_res, pred, target_names=target_names)}") #Decision Tree Classifier with hyperparameter tuning best_score = 0 best_parameters = {} start = time.time() for criterion in ['gini', 'entropy']: for max_depth in [3,6,9,12,15,17,20]: for splitter in ['best', 'random']: #fitting a model for each combination of hyperparemeters dtc = DecisionTreeClassifier(random_state=202, criterion=criterion, max_depth=max_depth, splitter=splitter) dtc.fit(X_train_pca, y_train_res) #evaluating the model on the validation set score = dtc.score(X_valid_pca, y_valid_res) if score > best_score: best_score = score best_parameters = {'criterion': criterion, 'max_depth':max_depth, 'splitter':splitter} print(f"Best score: {best_score:.2f}") print(f"Best hyperparameters: {best_parameters}") end = time.time() print(f"Time elapsed: {end-start}") # fitting the model with best parameters on the test set dtc = DecisionTreeClassifier(**best_parameters, random_state=202) dtc.fit(X_train_pca, y_train_res) test_score = dtc.score(X_test_pca, y_test_res) print(f"Test set score with best parameters: {test_score}") print(f"Confusion matrix:\n{metrics.confusion_matrix(dtc.predict(X_test_pca), y_test_res)}") print(f"Classification report:\n {metrics.classification_report(y_test_res, dtc.predict(X_test_pca), target_names=target_names)}") #Logistic Regression without hyperparameter tuning start = time.time() log_reg = LogisticRegression(max_iter = 10000) log_reg.fit(X_train_pca, y_train_res) end = time.time() print(f"Time elapsed: {end-start}") pred = log_reg.predict(X_test_pca) print(f"Test set score: 
{log_reg.score(X_test_pca, y_test_res)}") print(f"Confusion matrix:\n{metrics.confusion_matrix(pred, y_test_res)}") print(f"Classification report:\n {metrics.classification_report(y_test_res, pred, target_names=target_names)}") #Logistic Regression with hyperparameter tuning best_score = 0 best_parameters = {} start = time.time() for C in [0.0001, 0.01, 0.1, 1, 10]: for solver in ['newton-cg', 'lbfgs', 'sag']: #fitting a model for each combination of hyperparemeters log_reg = LogisticRegression(solver=solver, C=C,max_iter = 10000) log_reg.fit(X_train_pca, y_train_res) #evaluating the model on the validation set score = log_reg.score(X_valid_pca, y_valid_res) if score > best_score: best_score = score best_parameters = {'solver':solver, 'C':C} print(f"Best score: {best_score:.2f}") print(f"Best hyperparameters: {best_parameters}") end = time.time() print(f"Time elapsed: {end-start}") # fitting the model with best parameters on the test set log_reg = LogisticRegression(**best_parameters, max_iter = 10000) log_reg.fit(X_train_pca, y_train_res) test_score = log_reg.score(X_test_pca, y_test_res) print(f"Test set score with best parameters: {test_score}") print(f"Confusion matrix:\n{metrics.confusion_matrix(log_reg.predict(X_test_pca), y_test_res)}") print(f"Classification report:\n {metrics.classification_report(y_test_res, log_reg.predict(X_test_pca), target_names=target_names)}") #MLP classifier without hyperparameter tuning start = time.time() mnb = MLPClassifier(random_state=202, max_iter=1000) mnb.fit(X_train_pca, y_train_res) end = time.time() print(f"Time elapsed: {end-start}") pred = mnb.predict(X_test_pca) print(f"Test set score: {mnb.score(X_test_pca, y_test_res)}") print(f"Confusion matrix:\n{metrics.confusion_matrix(pred, y_test_res)}") print(f"Classification report:\n {metrics.classification_report(y_test_res, pred, target_names=target_names)}") #MLP Classifier with hyperparameter tuning best_score = 0 best_parameters = {} start = time.time() for alpha in [0.001, 0.01, 0.1, 0.5, 0.7 ,0.8]: #fitting a model for each combination of hyperparemeters mnb = MLPClassifier(alpha=alpha, max_iter = 1000, random_state=202) mnb.fit(X_train_pca, y_train_res) #evaluating the model on the validation set score = mnb.score(X_valid_pca, y_valid_res) if score > best_score: best_score = score best_parameters = {'alpha': alpha} print(f"Best score: {best_score:.2f}") print(f"Best hyperparameters: {best_parameters}") end = time.time() print(f"Time elapsed: {end-start}") # fitting the model with best parameters on the test set mnb = MLPClassifier(**best_parameters, max_iter = 1000, random_state=202) mnb.fit(X_train_pca, y_train_res) test_score = mnb.score(X_test_pca, y_test_res) print(f"Test set score with best parameters: {test_score}") print(f"Confusion matrix:\n{metrics.confusion_matrix(mnb.predict(X_test_pca), y_test_res)}") print(f"Classification report:\n {metrics.classification_report(y_test_res, mnb.predict(X_test_pca), target_names=target_names)}") # Random Forest Classifier without hyperparameter tuning start = time.time() rfc = RandomForestClassifier(random_state=202) rfc.fit(X_train_pca, y_train_res) end = time.time() print(f"Time elapsed: {end-start}") pred = rfc.predict(X_test_pca) print(f"Test set score: {rfc.score(X_test_pca, y_test_res)}") print(f"Confusion matrix:\n{metrics.confusion_matrix(pred, y_test_res)}") print(f"Classification report:\n {metrics.classification_report(y_test_res, rfc.predict(X_test_pca), target_names=target_names)}") print(rfc.get_params()) #Random Forest Classifier 
with hyperparameter tuning best_score = 0 best_parameters = {} start = time.time() for max_features in [8, 9, 10, 11]: for n_estimators in [10, 100, 1000]: for max_depth in [10, 20, 30]: #fitting a model for each combination of hyperparemeters rfc = RandomForestClassifier(max_features=max_features, n_estimators=n_estimators, max_depth=max_depth, random_state=202) rfc.fit(X_train_pca, y_train_res) #evaluating the model on the validation set score = rfc.score(X_valid_pca, y_valid_res) if score > best_score: best_score = score best_parameters = {'max_features': max_features, 'n_estimators': n_estimators, 'max_depth':max_depth} print(f"Best score: {best_score:.2f}") print(f"Best hyperparameters: {best_parameters}") end = time.time() print(f"Time elapsed: {end-start}") # fitting the model with best parameters on the test set rfc = RandomForestClassifier(**best_parameters, random_state=202) rfc.fit(X_train_pca, y_train_res) test_score = rfc.score(X_test_pca, y_test_res) print(f"Test set score with best parameters: {test_score}") print(f"Confusion matrix:\n{metrics.confusion_matrix(rfc.predict(X_test_pca), y_test_res)}") print(f"Classification report:\n {metrics.classification_report(y_test_res, rfc.predict(X_test_pca), target_names=target_names)}") #Voting model1 = LogisticRegression(max_iter=10000, solver='newton-cg', C=0.0001) model2 = DecisionTreeClassifier(criterion = 'entropy', max_depth = 9, splitter = 'best') model3 = SVC(C = 10, kernel = 'rbf') voting_model = VotingClassifier(estimators = [('log_reg', model1), ('dt', model2), ('svc', model3)], voting='hard') voting_model.fit(X_train_pca, y_train_res) pred = voting_model.predict(X_test_pca) print("Test set score:", voting_model.score(X_test_pca, y_test_res)) print(f"Confusion matrix:\n{metrics.confusion_matrix(pred, y_test_res)}") ```
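The imports at the top include `GridSearchCV`, `SMOTE`, and the imblearn `Pipeline`, which are never used above. As a hedged sketch (not the approach actually taken in this notebook), the manual validation-set loops could be folded into a cross-validated pipeline search; the grid values below are illustrative only, not the tuned parameters reported above.
```
from sklearn.svm import SVC
from sklearn.decomposition import PCA
from sklearn.model_selection import GridSearchCV
from imblearn.over_sampling import RandomOverSampler
from imblearn.pipeline import Pipeline

# Oversampling is applied only when fitting each fold, not when scoring,
# because RandomOverSampler is a sampler step in an imblearn Pipeline.
pipe = Pipeline([
    ("oversample", RandomOverSampler(random_state=202)),
    ("pca", PCA(n_components=75, random_state=202)),
    ("svc", SVC()),
])
param_grid = {"svc__C": [0.1, 1, 10], "svc__kernel": ["rbf", "poly"]}

search = GridSearchCV(pipe, param_grid, cv=3, n_jobs=-1)
search.fit(X_train, y_train)  # X_train, y_train loaded in section 1.1
print(search.best_params_, search.best_score_)
print("Test accuracy:", search.score(X_test, y_test))
```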
``` # This mounts your Google Drive to the Colab VM. from google.colab import drive drive.mount('/content/drive') # TODO: Enter the foldername in your Drive where you have saved the unzipped # assignment folder, e.g. 'cs231n/assignments/assignment1/' FOLDERNAME = None assert FOLDERNAME is not None, "[!] Enter the foldername." # Now that we've mounted your Drive, this ensures that # the Python interpreter of the Colab VM can load # python files from within it. import sys sys.path.append('/content/drive/My Drive/{}'.format(FOLDERNAME)) # This downloads the CIFAR-10 dataset to your Drive # if it doesn't already exist. %cd /content/drive/My\ Drive/$FOLDERNAME/cs231n/datasets/ !bash get_datasets.sh %cd /content/drive/My\ Drive/$FOLDERNAME ``` # Introduction to PyTorch You've written a lot of code in this assignment to provide a whole host of neural network functionality. Dropout, Batch Norm, and 2D convolutions are some of the workhorses of deep learning in computer vision. You've also worked hard to make your code efficient and vectorized. For the last part of this assignment, though, we're going to leave behind your beautiful codebase and instead migrate to one of two popular deep learning frameworks: in this instance, PyTorch (or TensorFlow, if you choose to work with that notebook). ## Why do we use deep learning frameworks? * Our code will now run on GPUs! This will allow our models to train much faster. When using a framework like PyTorch or TensorFlow you can harness the power of the GPU for your own custom neural network architectures without having to write CUDA code directly (which is beyond the scope of this class). * In this class, we want you to be ready to use one of these frameworks for your project so you can experiment more efficiently than if you were writing every feature you want to use by hand. * We want you to stand on the shoulders of giants! TensorFlow and PyTorch are both excellent frameworks that will make your lives a lot easier, and now that you understand their guts, you are free to use them :) * Finally, we want you to be exposed to the sort of deep learning code you might run into in academia or industry. ## What is PyTorch? PyTorch is a system for executing dynamic computational graphs over Tensor objects that behave similarly as numpy ndarray. It comes with a powerful automatic differentiation engine that removes the need for manual back-propagation. ## How do I learn PyTorch? One of our former instructors, Justin Johnson, made an excellent [tutorial](https://github.com/jcjohnson/pytorch-examples) for PyTorch. You can also find the detailed [API doc](http://pytorch.org/docs/stable/index.html) here. If you have other questions that are not addressed by the API docs, the [PyTorch forum](https://discuss.pytorch.org/) is a much better place to ask than StackOverflow. # Table of Contents This assignment has 5 parts. You will learn PyTorch on **three different levels of abstraction**, which will help you understand it better and prepare you for the final project. 1. Part I, Preparation: we will use CIFAR-10 dataset. 2. Part II, Barebones PyTorch: **Abstraction level 1**, we will work directly with the lowest-level PyTorch Tensors. 3. Part III, PyTorch Module API: **Abstraction level 2**, we will use `nn.Module` to define arbitrary neural network architecture. 4. Part IV, PyTorch Sequential API: **Abstraction level 3**, we will use `nn.Sequential` to define a linear feed-forward network very conveniently. 5. 
Part V, CIFAR-10 open-ended challenge: please implement your own network to get as high accuracy as possible on CIFAR-10. You can experiment with any layer, optimizer, hyperparameters or other advanced features. Here is a table of comparison: | API | Flexibility | Convenience | |---------------|-------------|-------------| | Barebone | High | Low | | `nn.Module` | High | Medium | | `nn.Sequential` | Low | High | # GPU You can manually switch to a GPU device on Colab by clicking `Runtime -> Change runtime type` and selecting `GPU` under `Hardware Accelerator`. You should do this before running the following cells to import packages, since the kernel gets restarted upon switching runtimes. ``` import torch import torch.nn as nn import torch.optim as optim from torch.utils.data import DataLoader from torch.utils.data import sampler import torchvision.datasets as dset import torchvision.transforms as T import numpy as np USE_GPU = True dtype = torch.float32 # We will be using float throughout this tutorial. if USE_GPU and torch.cuda.is_available(): device = torch.device('cuda') else: device = torch.device('cpu') # Constant to control how frequently we print train loss. print_every = 100 print('using device:', device) ``` # Part I. Preparation Now, let's load the CIFAR-10 dataset. This might take a couple minutes the first time you do it, but the files should stay cached after that. In previous parts of the assignment we had to write our own code to download the CIFAR-10 dataset, preprocess it, and iterate through it in minibatches; PyTorch provides convenient tools to automate this process for us. ``` NUM_TRAIN = 49000 # The torchvision.transforms package provides tools for preprocessing data # and for performing data augmentation; here we set up a transform to # preprocess the data by subtracting the mean RGB value and dividing by the # standard deviation of each RGB value; we've hardcoded the mean and std. transform = T.Compose([ T.ToTensor(), T.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)) ]) # We set up a Dataset object for each split (train / val / test); Datasets load # training examples one at a time, so we wrap each Dataset in a DataLoader which # iterates through the Dataset and forms minibatches. We divide the CIFAR-10 # training set into train and val sets by passing a Sampler object to the # DataLoader telling how it should sample from the underlying Dataset. cifar10_train = dset.CIFAR10('./cs231n/datasets', train=True, download=True, transform=transform) loader_train = DataLoader(cifar10_train, batch_size=64, sampler=sampler.SubsetRandomSampler(range(NUM_TRAIN))) cifar10_val = dset.CIFAR10('./cs231n/datasets', train=True, download=True, transform=transform) loader_val = DataLoader(cifar10_val, batch_size=64, sampler=sampler.SubsetRandomSampler(range(NUM_TRAIN, 50000))) cifar10_test = dset.CIFAR10('./cs231n/datasets', train=False, download=True, transform=transform) loader_test = DataLoader(cifar10_test, batch_size=64) ``` # Part II. Barebones PyTorch PyTorch ships with high-level APIs to help us define model architectures conveniently, which we will cover in Part II of this tutorial. In this section, we will start with the barebone PyTorch elements to understand the autograd engine better. After this exercise, you will come to appreciate the high-level model API more. We will start with a simple fully-connected ReLU network with two hidden layers and no biases for CIFAR classification. 
This implementation computes the forward pass using operations on PyTorch Tensors, and uses PyTorch autograd to compute gradients. It is important that you understand every line, because you will write a harder version after the example. When we create a PyTorch Tensor with `requires_grad=True`, then operations involving that Tensor will not just compute values; they will also build up a computational graph in the background, allowing us to easily backpropagate through the graph to compute gradients of some Tensors with respect to a downstream loss. Concretely if x is a Tensor with `x.requires_grad == True` then after backpropagation `x.grad` will be another Tensor holding the gradient of x with respect to the scalar loss at the end. ### PyTorch Tensors: Flatten Function A PyTorch Tensor is conceptionally similar to a numpy array: it is an n-dimensional grid of numbers, and like numpy PyTorch provides many functions to efficiently operate on Tensors. As a simple example, we provide a `flatten` function below which reshapes image data for use in a fully-connected neural network. Recall that image data is typically stored in a Tensor of shape N x C x H x W, where: * N is the number of datapoints * C is the number of channels * H is the height of the intermediate feature map in pixels * W is the height of the intermediate feature map in pixels This is the right way to represent the data when we are doing something like a 2D convolution, that needs spatial understanding of where the intermediate features are relative to each other. When we use fully connected affine layers to process the image, however, we want each datapoint to be represented by a single vector -- it's no longer useful to segregate the different channels, rows, and columns of the data. So, we use a "flatten" operation to collapse the `C x H x W` values per representation into a single long vector. The flatten function below first reads in the N, C, H, and W values from a given batch of data, and then returns a "view" of that data. "View" is analogous to numpy's "reshape" method: it reshapes x's dimensions to be N x ??, where ?? is allowed to be anything (in this case, it will be C x H x W, but we don't need to specify that explicitly). ``` def flatten(x): N = x.shape[0] # read in N, C, H, W return x.view(N, -1) # "flatten" the C * H * W values into a single vector per image def test_flatten(): x = torch.arange(12).view(2, 1, 3, 2) print('Before flattening: ', x) print('After flattening: ', flatten(x)) test_flatten() ``` ### Barebones PyTorch: Two-Layer Network Here we define a function `two_layer_fc` which performs the forward pass of a two-layer fully-connected ReLU network on a batch of image data. After defining the forward pass we check that it doesn't crash and that it produces outputs of the right shape by running zeros through the network. You don't have to write any code here, but it's important that you read and understand the implementation. ``` import torch.nn.functional as F # useful stateless functions def two_layer_fc(x, params): """ A fully-connected neural networks; the architecture is: NN is fully connected -> ReLU -> fully connected layer. Note that this function only defines the forward pass; PyTorch will take care of the backward pass for us. The input to the network will be a minibatch of data, of shape (N, d1, ..., dM) where d1 * ... * dM = D. The hidden layer will have H units, and the output layer will produce scores for C classes. 
Inputs: - x: A PyTorch Tensor of shape (N, d1, ..., dM) giving a minibatch of input data. - params: A list [w1, w2] of PyTorch Tensors giving weights for the network; w1 has shape (D, H) and w2 has shape (H, C). Returns: - scores: A PyTorch Tensor of shape (N, C) giving classification scores for the input data x. """ # first we flatten the image x = flatten(x) # shape: [batch_size, C x H x W] w1, w2 = params # Forward pass: compute predicted y using operations on Tensors. Since w1 and # w2 have requires_grad=True, operations involving these Tensors will cause # PyTorch to build a computational graph, allowing automatic computation of # gradients. Since we are no longer implementing the backward pass by hand we # don't need to keep references to intermediate values. # you can also use `.clamp(min=0)`, equivalent to F.relu() x = F.relu(x.mm(w1)) x = x.mm(w2) return x def two_layer_fc_test(): hidden_layer_size = 42 x = torch.zeros((64, 50), dtype=dtype) # minibatch size 64, feature dimension 50 w1 = torch.zeros((50, hidden_layer_size), dtype=dtype) w2 = torch.zeros((hidden_layer_size, 10), dtype=dtype) scores = two_layer_fc(x, [w1, w2]) print(scores.size()) # you should see [64, 10] two_layer_fc_test() ``` ### Barebones PyTorch: Three-Layer ConvNet Here you will complete the implementation of the function `three_layer_convnet`, which will perform the forward pass of a three-layer convolutional network. Like above, we can immediately test our implementation by passing zeros through the network. The network should have the following architecture: 1. A convolutional layer (with bias) with `channel_1` filters, each with shape `KW1 x KH1`, and zero-padding of two 2. ReLU nonlinearity 3. A convolutional layer (with bias) with `channel_2` filters, each with shape `KW2 x KH2`, and zero-padding of one 4. ReLU nonlinearity 5. Fully-connected layer with bias, producing scores for C classes. Note that we have **no softmax activation** here after our fully-connected layer: this is because PyTorch's cross entropy loss performs a softmax activation for you, and by bundling that step in makes computation more efficient. **HINT**: For convolutions: http://pytorch.org/docs/stable/nn.html#torch.nn.functional.conv2d; pay attention to the shapes of convolutional filters! ``` def three_layer_convnet(x, params): """ Performs the forward pass of a three-layer convolutional network with the architecture defined above. Inputs: - x: A PyTorch Tensor of shape (N, 3, H, W) giving a minibatch of images - params: A list of PyTorch Tensors giving the weights and biases for the network; should contain the following: - conv_w1: PyTorch Tensor of shape (channel_1, 3, KH1, KW1) giving weights for the first convolutional layer - conv_b1: PyTorch Tensor of shape (channel_1,) giving biases for the first convolutional layer - conv_w2: PyTorch Tensor of shape (channel_2, channel_1, KH2, KW2) giving weights for the second convolutional layer - conv_b2: PyTorch Tensor of shape (channel_2,) giving biases for the second convolutional layer - fc_w: PyTorch Tensor giving weights for the fully-connected layer. Can you figure out what the shape should be? - fc_b: PyTorch Tensor giving biases for the fully-connected layer. Can you figure out what the shape should be? 
Returns: - scores: PyTorch Tensor of shape (N, C) giving classification scores for x """ conv_w1, conv_b1, conv_w2, conv_b2, fc_w, fc_b = params scores = None ################################################################################ # TODO: Implement the forward pass for the three-layer ConvNet. # ################################################################################ # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** x = F.conv2d(x, conv_w1, conv_b1, padding=2) x = F.relu(x) x = F.conv2d(x, conv_w2, conv_b2, padding=1) x = F.relu(x) scores = flatten(x).mm(fc_w) + fc_b # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** ################################################################################ # END OF YOUR CODE # ################################################################################ return scores ``` After defining the forward pass of the ConvNet above, run the following cell to test your implementation. When you run this function, scores should have shape (64, 10). ``` def three_layer_convnet_test(): x = torch.zeros((64, 3, 32, 32), dtype=dtype) # minibatch size 64, image size [3, 32, 32] conv_w1 = torch.zeros((6, 3, 5, 5), dtype=dtype) # [out_channel, in_channel, kernel_H, kernel_W] conv_b1 = torch.zeros((6,)) # out_channel conv_w2 = torch.zeros((9, 6, 3, 3), dtype=dtype) # [out_channel, in_channel, kernel_H, kernel_W] conv_b2 = torch.zeros((9,)) # out_channel # you must calculate the shape of the tensor after two conv layers, before the fully-connected layer fc_w = torch.zeros((9 * 32 * 32, 10)) fc_b = torch.zeros(10) scores = three_layer_convnet(x, [conv_w1, conv_b1, conv_w2, conv_b2, fc_w, fc_b]) print(scores.size()) # you should see [64, 10] three_layer_convnet_test() ``` ### Barebones PyTorch: Initialization Let's write a couple utility methods to initialize the weight matrices for our models. - `random_weight(shape)` initializes a weight tensor with the Kaiming normalization method. - `zero_weight(shape)` initializes a weight tensor with all zeros. Useful for instantiating bias parameters. The `random_weight` function uses the Kaiming normal initialization method, described in: He et al, *Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification*, ICCV 2015, https://arxiv.org/abs/1502.01852 ``` def random_weight(shape): """ Create random Tensors for weights; setting requires_grad=True means that we want to compute gradients for these Tensors during the backward pass. We use Kaiming normalization: sqrt(2 / fan_in) """ if len(shape) == 2: # FC weight fan_in = shape[0] else: fan_in = np.prod(shape[1:]) # conv weight [out_channel, in_channel, kH, kW] # randn is standard normal distribution generator. w = torch.randn(shape, device=device, dtype=dtype) * np.sqrt(2. / fan_in) w.requires_grad = True return w def zero_weight(shape): return torch.zeros(shape, device=device, dtype=dtype, requires_grad=True) # create a weight of shape [3 x 5] # you should see the type `torch.cuda.FloatTensor` if you use GPU. # Otherwise it should be `torch.FloatTensor` random_weight((3, 5)) ``` ### Barebones PyTorch: Check Accuracy When training the model we will use the following function to check the accuracy of our model on the training or validation sets. When checking accuracy we don't need to compute any gradients; as a result we don't need PyTorch to build a computational graph for us when we compute scores. To prevent a graph from being built we scope our computation under a `torch.no_grad()` context manager. 
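As a tiny standalone illustration of that point (separate from the assignment's accuracy-checking function below), results computed inside `torch.no_grad()` carry no gradient history:
```
import torch

x = torch.ones(3, requires_grad=True)
y = (x * 2).sum()
print(y.requires_grad)        # True: a computational graph was recorded

with torch.no_grad():
    z = (x * 2).sum()
print(z.requires_grad)        # False: no graph, so no backpropagation possible
```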
``` def check_accuracy_part2(loader, model_fn, params): """ Check the accuracy of a classification model. Inputs: - loader: A DataLoader for the data split we want to check - model_fn: A function that performs the forward pass of the model, with the signature scores = model_fn(x, params) - params: List of PyTorch Tensors giving parameters of the model Returns: Nothing, but prints the accuracy of the model """ split = 'val' if loader.dataset.train else 'test' print('Checking accuracy on the %s set' % split) num_correct, num_samples = 0, 0 with torch.no_grad(): for x, y in loader: x = x.to(device=device, dtype=dtype) # move to device, e.g. GPU y = y.to(device=device, dtype=torch.int64) scores = model_fn(x, params) _, preds = scores.max(1) num_correct += (preds == y).sum() num_samples += preds.size(0) acc = float(num_correct) / num_samples print('Got %d / %d correct (%.2f%%)' % (num_correct, num_samples, 100 * acc)) ``` ### BareBones PyTorch: Training Loop We can now set up a basic training loop to train our network. We will train the model using stochastic gradient descent without momentum. We will use `torch.functional.cross_entropy` to compute the loss; you can [read about it here](http://pytorch.org/docs/stable/nn.html#cross-entropy). The training loop takes as input the neural network function, a list of initialized parameters (`[w1, w2]` in our example), and learning rate. ``` def train_part2(model_fn, params, learning_rate): """ Train a model on CIFAR-10. Inputs: - model_fn: A Python function that performs the forward pass of the model. It should have the signature scores = model_fn(x, params) where x is a PyTorch Tensor of image data, params is a list of PyTorch Tensors giving model weights, and scores is a PyTorch Tensor of shape (N, C) giving scores for the elements in x. - params: List of PyTorch Tensors giving weights for the model - learning_rate: Python scalar giving the learning rate to use for SGD Returns: Nothing """ for t, (x, y) in enumerate(loader_train): # Move the data to the proper device (GPU or CPU) x = x.to(device=device, dtype=dtype) y = y.to(device=device, dtype=torch.long) # Forward pass: compute scores and loss scores = model_fn(x, params) loss = F.cross_entropy(scores, y) # Backward pass: PyTorch figures out which Tensors in the computational # graph has requires_grad=True and uses backpropagation to compute the # gradient of the loss with respect to these Tensors, and stores the # gradients in the .grad attribute of each Tensor. loss.backward() # Update parameters. We don't want to backpropagate through the # parameter updates, so we scope the updates under a torch.no_grad() # context manager to prevent a computational graph from being built. with torch.no_grad(): for w in params: w -= learning_rate * w.grad # Manually zero the gradients after running the backward pass w.grad.zero_() if t % print_every == 0: print('Iteration %d, loss = %.4f' % (t, loss.item())) check_accuracy_part2(loader_val, model_fn, params) print() ``` ### BareBones PyTorch: Train a Two-Layer Network Now we are ready to run the training loop. We need to explicitly allocate tensors for the fully connected weights, `w1` and `w2`. Each minibatch of CIFAR has 64 examples, so the tensor shape is `[64, 3, 32, 32]`. After flattening, `x` shape should be `[64, 3 * 32 * 32]`. This will be the size of the first dimension of `w1`. The second dimension of `w1` is the hidden layer size, which will also be the first dimension of `w2`. 
Finally, the output of the network is a 10-dimensional vector that represents the probability distribution over 10 classes. You don't need to tune any hyperparameters but you should see accuracies above 40% after training for one epoch. ``` hidden_layer_size = 4000 learning_rate = 1e-2 w1 = random_weight((3 * 32 * 32, hidden_layer_size)) w2 = random_weight((hidden_layer_size, 10)) train_part2(two_layer_fc, [w1, w2], learning_rate) ``` ### BareBones PyTorch: Training a ConvNet In the below you should use the functions defined above to train a three-layer convolutional network on CIFAR. The network should have the following architecture: 1. Convolutional layer (with bias) with 32 5x5 filters, with zero-padding of 2 2. ReLU 3. Convolutional layer (with bias) with 16 3x3 filters, with zero-padding of 1 4. ReLU 5. Fully-connected layer (with bias) to compute scores for 10 classes You should initialize your weight matrices using the `random_weight` function defined above, and you should initialize your bias vectors using the `zero_weight` function above. You don't need to tune any hyperparameters, but if everything works correctly you should achieve an accuracy above 42% after one epoch. ``` learning_rate = 3e-3 channel_1 = 32 channel_2 = 16 conv_w1 = None conv_b1 = None conv_w2 = None conv_b2 = None fc_w = None fc_b = None ################################################################################ # TODO: Initialize the parameters of a three-layer ConvNet. # ################################################################################ # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** conv_w1 = random_weight((channel_1, 3, 5, 5)) conv_b1 = zero_weight((channel_1, )) conv_w2 = random_weight((channel_2, channel_1, 3, 3)) conv_b2 = zero_weight((channel_2, )) fc_w = random_weight((channel_2 * 32 * 32, 10)) fc_b = zero_weight((10, )) # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** ################################################################################ # END OF YOUR CODE # ################################################################################ params = [conv_w1, conv_b1, conv_w2, conv_b2, fc_w, fc_b] train_part2(three_layer_convnet, params, learning_rate) ``` # Part III. PyTorch Module API Barebone PyTorch requires that we track all the parameter tensors by hand. This is fine for small networks with a few tensors, but it would be extremely inconvenient and error-prone to track tens or hundreds of tensors in larger networks. PyTorch provides the `nn.Module` API for you to define arbitrary network architectures, while tracking every learnable parameters for you. In Part II, we implemented SGD ourselves. PyTorch also provides the `torch.optim` package that implements all the common optimizers, such as RMSProp, Adagrad, and Adam. It even supports approximate second-order methods like L-BFGS! You can refer to the [doc](http://pytorch.org/docs/master/optim.html) for the exact specifications of each optimizer. To use the Module API, follow the steps below: 1. Subclass `nn.Module`. Give your network class an intuitive name like `TwoLayerFC`. 2. In the constructor `__init__()`, define all the layers you need as class attributes. Layer objects like `nn.Linear` and `nn.Conv2d` are themselves `nn.Module` subclasses and contain learnable parameters, so that you don't have to instantiate the raw tensors yourself. `nn.Module` will track these internal parameters for you. 
Refer to the [doc](http://pytorch.org/docs/master/nn.html) to learn more about the dozens of builtin layers. **Warning**: don't forget to call the `super().__init__()` first! 3. In the `forward()` method, define the *connectivity* of your network. You should use the attributes defined in `__init__` as function calls that take tensor as input and output the "transformed" tensor. Do *not* create any new layers with learnable parameters in `forward()`! All of them must be declared upfront in `__init__`. After you define your Module subclass, you can instantiate it as an object and call it just like the NN forward function in part II. ### Module API: Two-Layer Network Here is a concrete example of a 2-layer fully connected network: ``` class TwoLayerFC(nn.Module): def __init__(self, input_size, hidden_size, num_classes): super().__init__() # assign layer objects to class attributes self.fc1 = nn.Linear(input_size, hidden_size) # nn.init package contains convenient initialization methods # http://pytorch.org/docs/master/nn.html#torch-nn-init nn.init.kaiming_normal_(self.fc1.weight) self.fc2 = nn.Linear(hidden_size, num_classes) nn.init.kaiming_normal_(self.fc2.weight) def forward(self, x): # forward always defines connectivity x = flatten(x) scores = self.fc2(F.relu(self.fc1(x))) return scores def test_TwoLayerFC(): input_size = 50 x = torch.zeros((64, input_size), dtype=dtype) # minibatch size 64, feature dimension 50 model = TwoLayerFC(input_size, 42, 10) scores = model(x) print(scores.size()) # you should see [64, 10] test_TwoLayerFC() ``` ### Module API: Three-Layer ConvNet It's your turn to implement a 3-layer ConvNet followed by a fully connected layer. The network architecture should be the same as in Part II: 1. Convolutional layer with `channel_1` 5x5 filters with zero-padding of 2 2. ReLU 3. Convolutional layer with `channel_2` 3x3 filters with zero-padding of 1 4. ReLU 5. Fully-connected layer to `num_classes` classes You should initialize the weight matrices of the model using the Kaiming normal initialization method. **HINT**: http://pytorch.org/docs/stable/nn.html#conv2d After you implement the three-layer ConvNet, the `test_ThreeLayerConvNet` function will run your implementation; it should print `(64, 10)` for the shape of the output scores. ``` class ThreeLayerConvNet(nn.Module): def __init__(self, in_channel, channel_1, channel_2, num_classes): super().__init__() ######################################################################## # TODO: Set up the layers you need for a three-layer ConvNet with the # # architecture defined above. # ######################################################################## # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** self.conv1 = nn.Conv2d(in_channel, channel_1, (5, 5), padding=2) nn.init.kaiming_normal_(self.conv1.weight) self.conv2 = nn.Conv2d(channel_1, channel_2, (3, 3), padding=1) nn.init.kaiming_normal_(self.conv2.weight) self.fc = nn.Linear(channel_2 * 32 * 32, num_classes) nn.init.kaiming_normal_(self.fc.weight) # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** ######################################################################## # END OF YOUR CODE # ######################################################################## def forward(self, x): scores = None ######################################################################## # TODO: Implement the forward function for a 3-layer ConvNet. 
you # # should use the layers you defined in __init__ and specify the # # connectivity of those layers in forward() # ######################################################################## # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** x = F.relu(self.conv1(x)) x = F.relu(self.conv2(x)) scores = self.fc(flatten(x)) # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** ######################################################################## # END OF YOUR CODE # ######################################################################## return scores def test_ThreeLayerConvNet(): x = torch.zeros((64, 3, 32, 32), dtype=dtype) # minibatch size 64, image size [3, 32, 32] model = ThreeLayerConvNet(in_channel=3, channel_1=12, channel_2=8, num_classes=10) scores = model(x) print(scores.size()) # you should see [64, 10] test_ThreeLayerConvNet() ``` ### Module API: Check Accuracy Given the validation or test set, we can check the classification accuracy of a neural network. This version is slightly different from the one in part II. You don't manually pass in the parameters anymore. ``` def check_accuracy_part34(loader, model): if loader.dataset.train: print('Checking accuracy on validation set') else: print('Checking accuracy on test set') num_correct = 0 num_samples = 0 model.eval() # set model to evaluation mode with torch.no_grad(): for x, y in loader: x = x.to(device=device, dtype=dtype) # move to device, e.g. GPU y = y.to(device=device, dtype=torch.long) scores = model(x) _, preds = scores.max(1) num_correct += (preds == y).sum() num_samples += preds.size(0) acc = float(num_correct) / num_samples print('Got %d / %d correct (%.2f)' % (num_correct, num_samples, 100 * acc)) ``` ### Module API: Training Loop We also use a slightly different training loop. Rather than updating the values of the weights ourselves, we use an Optimizer object from the `torch.optim` package, which abstract the notion of an optimization algorithm and provides implementations of most of the algorithms commonly used to optimize neural networks. ``` def train_part34(model, optimizer, epochs=1): """ Train a model on CIFAR-10 using the PyTorch Module API. Inputs: - model: A PyTorch Module giving the model to train. - optimizer: An Optimizer object we will use to train the model - epochs: (Optional) A Python integer giving the number of epochs to train for Returns: Nothing, but prints model accuracies during training. """ model = model.to(device=device) # move the model parameters to CPU/GPU for e in range(epochs): for t, (x, y) in enumerate(loader_train): model.train() # put model to training mode x = x.to(device=device, dtype=dtype) # move to device, e.g. GPU y = y.to(device=device, dtype=torch.long) scores = model(x) loss = F.cross_entropy(scores, y) # Zero out all of the gradients for the variables which the optimizer # will update. optimizer.zero_grad() # This is the backwards pass: compute the gradient of the loss with # respect to each parameter of the model. loss.backward() # Actually update the parameters of the model using the gradients # computed by the backwards pass. optimizer.step() if t % print_every == 0: print('Iteration %d, loss = %.4f' % (t, loss.item())) check_accuracy_part34(loader_val, model) print() ``` ### Module API: Train a Two-Layer Network Now we are ready to run the training loop. In contrast to part II, we don't explicitly allocate parameter tensors anymore. Simply pass the input size, hidden layer size, and number of classes (i.e. 
output size) to the constructor of `TwoLayerFC`. You also need to define an optimizer that tracks all the learnable parameters inside `TwoLayerFC`. You don't need to tune any hyperparameters, but you should see model accuracies above 40% after training for one epoch. ``` hidden_layer_size = 4000 learning_rate = 1e-2 model = TwoLayerFC(3 * 32 * 32, hidden_layer_size, 10) optimizer = optim.SGD(model.parameters(), lr=learning_rate) train_part34(model, optimizer) ``` ### Module API: Train a Three-Layer ConvNet You should now use the Module API to train a three-layer ConvNet on CIFAR. This should look very similar to training the two-layer network! You don't need to tune any hyperparameters, but you should achieve above above 45% after training for one epoch. You should train the model using stochastic gradient descent without momentum. ``` learning_rate = 3e-3 channel_1 = 32 channel_2 = 16 model = None optimizer = None ################################################################################ # TODO: Instantiate your ThreeLayerConvNet model and a corresponding optimizer # ################################################################################ # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** model = ThreeLayerConvNet(3, channel_1, channel_2, 10) optimizer = optim.SGD(model.parameters(), lr=learning_rate) # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** ################################################################################ # END OF YOUR CODE # ################################################################################ train_part34(model, optimizer) ``` # Part IV. PyTorch Sequential API Part III introduced the PyTorch Module API, which allows you to define arbitrary learnable layers and their connectivity. For simple models like a stack of feed forward layers, you still need to go through 3 steps: subclass `nn.Module`, assign layers to class attributes in `__init__`, and call each layer one by one in `forward()`. Is there a more convenient way? Fortunately, PyTorch provides a container Module called `nn.Sequential`, which merges the above steps into one. It is not as flexible as `nn.Module`, because you cannot specify more complex topology than a feed-forward stack, but it's good enough for many use cases. ### Sequential API: Two-Layer Network Let's see how to rewrite our two-layer fully connected network example with `nn.Sequential`, and train it using the training loop defined above. Again, you don't need to tune any hyperparameters here, but you shoud achieve above 40% accuracy after one epoch of training. ``` # We need to wrap `flatten` function in a module in order to stack it # in nn.Sequential class Flatten(nn.Module): def forward(self, x): return flatten(x) hidden_layer_size = 4000 learning_rate = 1e-2 model = nn.Sequential( Flatten(), nn.Linear(3 * 32 * 32, hidden_layer_size), nn.ReLU(), nn.Linear(hidden_layer_size, 10), ) # you can use Nesterov momentum in optim.SGD optimizer = optim.SGD(model.parameters(), lr=learning_rate, momentum=0.9, nesterov=True) train_part34(model, optimizer) ``` ### Sequential API: Three-Layer ConvNet Here you should use `nn.Sequential` to define and train a three-layer ConvNet with the same architecture we used in Part III: 1. Convolutional layer (with bias) with 32 5x5 filters, with zero-padding of 2 2. ReLU 3. Convolutional layer (with bias) with 16 3x3 filters, with zero-padding of 1 4. ReLU 5. 
Fully-connected layer (with bias) to compute scores for 10 classes You can use the default PyTorch weight initialization. You should optimize your model using stochastic gradient descent with Nesterov momentum 0.9. Again, you don't need to tune any hyperparameters but you should see accuracy above 55% after one epoch of training. ``` channel_1 = 32 channel_2 = 16 learning_rate = 1e-2 model = None optimizer = None ################################################################################ # TODO: Rewrite the 2-layer ConvNet with bias from Part III with the # # Sequential API. # ################################################################################ # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** model = nn.Sequential( nn.Conv2d(3, channel_1, (5, 5), padding=2), nn.ReLU(), nn.Conv2d(channel_1, channel_2, (3, 3), padding=1), nn.ReLU(), Flatten(), nn.Linear(channel_2 * 32 * 32, 10) ) optimizer = optim.SGD(model.parameters(), lr=learning_rate, momentum=0.9, nesterov=True) # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** ################################################################################ # END OF YOUR CODE # ################################################################################ train_part34(model, optimizer) ``` # Part V. CIFAR-10 open-ended challenge In this section, you can experiment with whatever ConvNet architecture you'd like on CIFAR-10. Now it's your job to experiment with architectures, hyperparameters, loss functions, and optimizers to train a model that achieves **at least 70%** accuracy on the CIFAR-10 **validation** set within 10 epochs. You can use the check_accuracy and train functions from above. You can use either `nn.Module` or `nn.Sequential` API. Describe what you did at the end of this notebook. Here are the official API documentation for each component. One note: what we call in the class "spatial batch norm" is called "BatchNorm2D" in PyTorch. * Layers in torch.nn package: http://pytorch.org/docs/stable/nn.html * Activations: http://pytorch.org/docs/stable/nn.html#non-linear-activations * Loss functions: http://pytorch.org/docs/stable/nn.html#loss-functions * Optimizers: http://pytorch.org/docs/stable/optim.html ### Things you might try: - **Filter size**: Above we used 5x5; would smaller filters be more efficient? - **Number of filters**: Above we used 32 filters. Do more or fewer do better? - **Pooling vs Strided Convolution**: Do you use max pooling or just stride convolutions? - **Batch normalization**: Try adding spatial batch normalization after convolution layers and vanilla batch normalization after affine layers. Do your networks train faster? - **Network architecture**: The network above has two layers of trainable parameters. Can you do better with a deep network? Good architectures to try include: - [conv-relu-pool]xN -> [affine]xM -> [softmax or SVM] - [conv-relu-conv-relu-pool]xN -> [affine]xM -> [softmax or SVM] - [batchnorm-relu-conv]xN -> [affine]xM -> [softmax or SVM] - **Global Average Pooling**: Instead of flattening and then having multiple affine layers, perform convolutions until your image gets small (7x7 or so) and then perform an average pooling operation to get to a 1x1 image picture (1, 1 , Filter#), which is then reshaped into a (Filter#) vector. This is used in [Google's Inception Network](https://arxiv.org/abs/1512.00567) (See Table 1 for their architecture). - **Regularization**: Add l2 weight regularization, or perhaps use Dropout. 
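To make a few of the suggestions in the list above concrete, here is a small sketch (not a recommended final architecture; layer sizes are arbitrary) combining spatial batch norm after a convolution, max pooling, dropout, and L2 regularization via the optimizer's `weight_decay` argument:
```
import torch.nn as nn
import torch.optim as optim

sketch = nn.Sequential(
    nn.Conv2d(3, 32, 3, padding=1),
    nn.BatchNorm2d(32),        # "spatial batch norm" is BatchNorm2d in PyTorch
    nn.ReLU(),
    nn.MaxPool2d(2),           # 32x32 -> 16x16
    nn.Flatten(),              # equivalent to the Flatten module defined in Part IV
    nn.Dropout(p=0.5),
    nn.Linear(32 * 16 * 16, 10),
)

# weight_decay adds L2 regularization on all parameters during the update.
sketch_optimizer = optim.SGD(sketch.parameters(), lr=1e-2, momentum=0.9,
                             nesterov=True, weight_decay=1e-4)
```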
### Tips for training For each network architecture that you try, you should tune the learning rate and other hyperparameters. When doing this there are a couple important things to keep in mind: - If the parameters are working well, you should see improvement within a few hundred iterations - Remember the coarse-to-fine approach for hyperparameter tuning: start by testing a large range of hyperparameters for just a few training iterations to find the combinations of parameters that are working at all. - Once you have found some sets of parameters that seem to work, search more finely around these parameters. You may need to train for more epochs. - You should use the validation set for hyperparameter search, and save your test set for evaluating your architecture on the best parameters as selected by the validation set. ### Going above and beyond If you are feeling adventurous there are many other features you can implement to try and improve your performance. You are **not required** to implement any of these, but don't miss the fun if you have time! - Alternative optimizers: you can try Adam, Adagrad, RMSprop, etc. - Alternative activation functions such as leaky ReLU, parametric ReLU, ELU, or MaxOut. - Model ensembles - Data augmentation - New Architectures - [ResNets](https://arxiv.org/abs/1512.03385) where the input from the previous layer is added to the output. - [DenseNets](https://arxiv.org/abs/1608.06993) where inputs into previous layers are concatenated together. - [This blog has an in-depth overview](https://chatbotslife.com/resnets-highwaynets-and-densenets-oh-my-9bb15918ee32) ### Have fun and happy training! ``` ################################################################################ # TODO: # # Experiment with any architectures, optimizers, and hyperparameters. # # Achieve AT LEAST 70% accuracy on the *validation set* within 10 epochs. # # # # Note that you can use the check_accuracy function to evaluate on either # # the test set or the validation set, by passing either loader_test or # # loader_val as the second argument to check_accuracy. You should not touch # # the test set until you have finished your architecture and hyperparameter # # tuning, and only run the test set once at the end to report a final value. 
# ################################################################################ model = None optimizer = None # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** learning_rate = 1e-2 model = nn.Sequential( nn.Conv2d(3, 32, (3, 3), padding=1), nn.ReLU(), nn.Conv2d(32, 32, (3, 3), padding=1), nn.BatchNorm2d(32), nn.ReLU(), nn.MaxPool2d((2, 2)), nn.Conv2d(32, 64, (3, 3), padding=1), nn.ReLU(), nn.Conv2d(64, 64, (3, 3), padding=1), nn.BatchNorm2d(64), nn.ReLU(), nn.MaxPool2d((2, 2)), nn.Conv2d(64, 128, (3, 3), padding=1), nn.ReLU(), nn.Conv2d(128, 128, (3, 3), padding=1), nn.ReLU(), nn.Conv2d(128, 128, (3, 3), padding=1), nn.BatchNorm2d(128), nn.ReLU(), nn.MaxPool2d((2, 2)), Flatten(), nn.Linear(128 * 4 * 4, 512), nn.ReLU(), nn.Linear(512, 128), nn.ReLU(), nn.Linear(128, 10), ) optimizer = optim.SGD(model.parameters(), lr=learning_rate, momentum=0.9, nesterov=True) # train_part34(model, optimizer, epochs=1) # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** ################################################################################ # END OF YOUR CODE # ################################################################################ # You should get at least 70% accuracy train_part34(model, optimizer, epochs=10) ``` ## Describe what you did In the cell below you should write an explanation of what you did, any additional features that you implemented, and/or any graphs that you made in the process of training and evaluating your network. **Answer:** ## Test set -- run this only once Now that we've gotten a result we're happy with, we test our final model on the test set (which you should store in best_model). Think about how this compares to your validation set accuracy. ``` best_model = model check_accuracy_part34(loader_test, best_model) ```
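An optional aside, not part of the assignment: if you want the weights stored in `best_model` to survive a Colab runtime reset, you can save the state dict to your Drive folder (the filename below is only an example).
```
import torch

# Save the trained parameters.
torch.save(best_model.state_dict(), 'best_model.pt')

# Later, rebuild the same architecture and reload the weights before testing:
# best_model.load_state_dict(torch.load('best_model.pt', map_location=device))
```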
# [Dictionaries](https://docs.python.org/3/library/stdtypes.html#dict) Collections of `key`-`value` pairs. ``` my_empty_dict = {} # alternative: my_empty_dict = dict() print('dict: {}, type: {}'.format(my_empty_dict, type(my_empty_dict))) ``` ## Initialization ``` dict1 = {'value1': 1.6, 'value2': 10, 'name': 'John Doe'} dict2 = dict(value1=1.6, value2=10, name='John Doe') print(dict1) print(dict2) print('equal: {}'.format(dict1 == dict2)) print('length: {}'.format(len(dict1))) ``` ## `dict.keys(), dict.values(), dict.items()` ``` print('keys: {}'.format(dict1.keys())) print('values: {}'.format(dict1.values())) print('items: {}'.format(dict1.items())) ``` ## Accessing and setting values ``` my_dict = {} my_dict['key1'] = 'value1' my_dict['key2'] = 99 my_dict['key1'] = 'new value' # overriding existing value print(my_dict) print('value of key1: {}'.format(my_dict['key1'])) ``` Accessing a nonexistent key will raise `KeyError` (see [`dict.get()`](#dict_get) for workaround): ``` # print(my_dict['nope']) ``` ## Deleting ``` my_dict = {'key1': 'value1', 'key2': 99, 'keyX': 'valueX'} del my_dict['keyX'] print(my_dict) # Usually better to make sure that the key exists (see also pop() and popitem()) key_to_delete = 'my_key' if key_to_delete in my_dict: del my_dict[key_to_delete] else: print('{key} is not in {dictionary}'.format(key=key_to_delete, dictionary=my_dict)) ``` ## Dictionaries are mutable ``` my_dict = {'ham': 'good', 'carrot': 'semi good'} my_other_dict = my_dict my_other_dict['carrot'] = 'super tasty' my_other_dict['sausage'] = 'best ever' print('my_dict: {}\nother: {}'.format(my_dict, my_other_dict)) print('equal: {}'.format(my_dict == my_other_dict)) ``` Create a new `dict` if you want to have a copy: ``` my_dict = {'ham': 'good', 'carrot': 'semi good'} my_other_dict = dict(my_dict) my_other_dict['beer'] = 'decent' print('my_dict: {}\nother: {}'.format(my_dict, my_other_dict)) print('equal: {}'.format(my_dict == my_other_dict)) ``` <a id='dict_get'></a> ## `dict.get()` Returns `None` if `key` is not in `dict`. However, you can also specify `default` return value which will be returned if `key` is not present in the `dict`. ``` my_dict = {'a': 1, 'b': 2, 'c': 3} d = my_dict.get('d') print('d: {}'.format(d)) d = my_dict.get('d', 'my default value') print('d: {}'.format(d)) ``` ## `dict.pop()` ``` my_dict = dict(food='ham', drink='beer', sport='football') print('dict before pops: {}'.format(my_dict)) food = my_dict.pop('food') print('food: {}'.format(food)) print('dict after popping food: {}'.format(my_dict)) food_again = my_dict.pop('food', 'default value for food') print('food again: {}'.format(food_again)) print('dict after popping food again: {}'.format(my_dict)) ``` ## `dict.setdefault()` Returns the `value` of `key` defined as first parameter. If the `key` is not present in the dict, adds `key` with default value (second parameter). ``` my_dict = {'a': 1, 'b': 2, 'c': 3} a = my_dict.setdefault('a', 'my default value') d = my_dict.setdefault('d', 'my default value') print('a: {}\nd: {}\nmy_dict: {}'.format(a, d, my_dict)) ``` ## `dict.update()` Merge two `dict`s ``` dict1 = {'a': 1, 'b': 2} dict2 = {'c': 3} dict1.update(dict2) print(dict1) # If they have same keys: dict1.update({'c': 4}) print(dict1) ``` ## The keys of a `dict` have to be immutable Thus you can not use e.g. 
a `list` or a `dict` as a key because they are mutable types:

```
# bad_dict = {['my_list']: 'value'}  # Raises TypeError: unhashable type: 'list'
```

Values can be mutable:

```
good_dict = {'my key': ['Python', 'is', 'still', 'cool']}
print(good_dict)
```
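To round out the point: immutable types such as tuples work fine as keys (handy for composite keys), while the commented-out list key above fails with a `TypeError`.

```
# Tuples are immutable, so they can be dict keys:
coordinates = {(0, 0): 'origin', (1, 2): 'point A'}
print(coordinates[(1, 2)])

# A list key raises TypeError because lists are mutable (unhashable):
try:
    bad_dict = {['my_list']: 'value'}
except TypeError as error:
    print('TypeError: {}'.format(error))
```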
``` import re import tweepy from tweepy import OAuthHandler from textblob import TextBlob class TwitterClient(object): ''' Generic Twitter Class for sentiment analysis. ''' def __init__(self): ''' Class constructor or initialization method. ''' # keys and tokens from the Twitter Dev Console #consumer_key = 'VabVIqTlwNiAFg7NxqQtuSQ8g' consumer_key = 'DRh9fiZ6OOeTIyywoRteKXcqJ' #consumer_secret = '5rPgXh5gyhvGDZDyJuYNpMtYwEGMYG52q3akk4wkN3I5KGR5MM' consumer_secret = 'I6wlXOItz1ryxOSM540lhydDqHvmegfyNoXXTSUOz6DwcrwMMk' #access_token = '85755855-3RPlt0XAVOrKeZjHSsehRLsdKfosE0XUuRBljptgL' access_token = '4214012714-34yS5BD4p6wXG55Di6Sm8DL5nmS5zGBqbfn4Ik1' #access_token_secret = 'Gx9VvkXcb5xUR1cUB7LBxtkHjhwtrb4bXQo0b9wf525rY' access_token_secret = 'mhBR7wtOQBpIrAFec4UQlV4vsAMpGsWh9EwcWe2N9tJWe' # attempt authentication try: # create OAuthHandler object self.auth = OAuthHandler(consumer_key, consumer_secret) # set access token and secret self.auth.set_access_token(access_token, access_token_secret) # create tweepy API object to fetch tweets self.api = tweepy.API(self.auth) except: print("Error: Authentication Failed") def clean_tweet(self, tweet): ''' Utility function to clean tweet text by removing links, special characters using simple regex statements. ''' return ' '.join(re.sub("(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])|(\w+:\/\/\S+)", " ", tweet).split()) def get_tweet_sentiment(self, tweet): ''' Utility function to classify sentiment of passed tweet using textblob's sentiment method ''' # create TextBlob object of passed tweet text analysis = TextBlob(self.clean_tweet(tweet)) # set sentiment if analysis.sentiment.polarity > 0: return 'positive' elif analysis.sentiment.polarity == 0: return 'neutral' else: return 'negative' def get_tweets(self, query, count = 10): ''' Main function to fetch tweets and parse them. 
''' # empty list to store parsed tweets tweets = [] try: # call twitter api to fetch tweets fetched_tweets = self.api.search(q = query, count = count) # parsing tweets one by one for tweet in fetched_tweets: # empty dictionary to store required params of a tweet parsed_tweet = {} # saving text of tweet parsed_tweet['text'] = tweet.text # saving sentiment of tweet parsed_tweet['sentiment'] = self.get_tweet_sentiment(tweet.text) # appending parsed tweet to tweets list if tweet.retweet_count > 0: # if tweet has retweets, ensure that it is appended only once if parsed_tweet not in tweets: tweets.append(parsed_tweet) else: tweets.append(parsed_tweet) # return parsed tweets return tweets except tweepy.TweepError as e: # print error (if any) print("Error : " + str(e)) def main(): # creating object of TwitterClient Class api = TwitterClient() # calling function to get tweets tweets = api.get_tweets(query = 'Narendra MOdi', count = 200) # picking positive tweets from tweets ptweets = [tweet for tweet in tweets if tweet['sentiment'] == 'positive'] # percentage of positive tweets print("Positive tweets percentage: {} %".format(100*len(ptweets)/len(tweets))) # picking negative tweets from tweets ntweets = [tweet for tweet in tweets if tweet['sentiment'] == 'negative'] # percentage of negative tweets #print("Negative tweets percentage: {} %".format(100*len(ntweets)/len(tweets))) # percentage of neutral tweets #print("Neutral tweets percentage: {} % \ # ".format(100*len(tweets - ntweets - ptweets)/len(tweets))) # printing first 5 positive tweets print("\n\nPositive tweets:") for tweet in ptweets[:10]: print(tweet['text']) # printing first 5 negative tweets print("\n\nNegative tweets:") for tweet in ntweets[:10]: print(tweet['text']) if __name__ == "__main__": main() ```
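As a quick sanity check of the polarity thresholds used in `get_tweet_sentiment()`, the snippet below applies the same rule to a few hand-written sentences with TextBlob only, so it runs without Twitter credentials; the example sentences are arbitrary.
```
from textblob import TextBlob

# Same thresholding as get_tweet_sentiment(), applied to offline examples.
for text in ["I love this!", "This is a phone.", "I hate waiting."]:
    polarity = TextBlob(text).sentiment.polarity
    if polarity > 0:
        label = 'positive'
    elif polarity == 0:
        label = 'neutral'
    else:
        label = 'negative'
    print("{!r}: polarity = {:.2f} -> {}".format(text, polarity, label))
```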
github_jupyter
##### Copyright 2018 The TensorFlow Authors. [Licensed under the Apache License, Version 2.0](#scrollTo=bPJq2qP2KE3u). ``` // #@title Licensed under the Apache License, Version 2.0 (the "License"); { display-mode: "form" } // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. ``` <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://www.tensorflow.org/swift/tutorials/raw_tensorflow_operators"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/swift/blob/main/docs/site/tutorials/raw_tensorflow_operators.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/swift/blob/main/docs/site/tutorials/raw_tensorflow_operators.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> </td> </table> # Raw TensorFlow operators Building on TensorFlow, Swift for TensorFlow takes a fresh approach to API design. APIs are carefully curated from established libraries and combined with new language idioms. This means that not all TensorFlow APIs will be directly available as Swift APIs, and our API curation needs time and dedicated effort to evolve. However, do not worry if your favorite TensorFlow operator is not available in Swift -- the TensorFlow Swift library gives you transparent access to most TensorFlow operators, under the `_Raw` namespace. Import `TensorFlow` to get started. ``` import TensorFlow ``` ## Calling raw operators Simply find the function you need under the `_Raw` namespace via code completion. ``` print(_Raw.mul(Tensor([2.0, 3.0]), Tensor([5.0, 6.0]))) ``` ## Defining a new multiply operator Multiply is already available as operator `*` on `Tensor`, but let us pretend that we wanted to make it available under a new name as `.*`. Swift allows you to retroactively add methods or computed properties to existing types using `extension` declarations. Now, let us add `.*` to `Tensor` by declaring an extension and make it available when the tensor's `Scalar` type conforms to [`Numeric`](https://developer.apple.com/documentation/swift/numeric). ``` infix operator .* : MultiplicationPrecedence extension Tensor where Scalar: Numeric { static func .* (_ lhs: Tensor, _ rhs: Tensor) -> Tensor { return _Raw.mul(lhs, rhs) } } let x: Tensor<Double> = [[1.0, 2.0], [3.0, 4.0]] let y: Tensor<Double> = [[8.0, 7.0], [6.0, 5.0]] print(x .* y) ``` ## Defining a derivative of a wrapped function Not only can you easily define a Swift API for a raw TensorFlow operator, you can also make it differentiable to work with Swift's first-class automatic differentiation. To make `.*` differentiable, use the `@derivative` attribute on the derivative function and specify the original function as an attribute argument under the `of:` label. 
Since the `.*` operator is defined when the generic type `Scalar` conforms to `Numeric`, it is not enough for making `Tensor<Scalar>` conform to the `Differentiable` protocol. Born with type safety, Swift will remind us to add a generic constraint on the `@differentiable` attribute to require `Scalar` to conform to `TensorFlowFloatingPoint` protocol, which would make `Tensor<Scalar>` conform to `Differentiable`. ```swift @differentiable(where Scalar: TensorFlowFloatingPoint) ``` ``` infix operator .* : MultiplicationPrecedence extension Tensor where Scalar: Numeric { @differentiable(where Scalar: TensorFlowFloatingPoint) static func .* (_ lhs: Tensor, _ rhs: Tensor) -> Tensor { return _Raw.mul(lhs, rhs) } } extension Tensor where Scalar : TensorFlowFloatingPoint { @derivative(of: .*) static func multiplyDerivative( _ lhs: Tensor, _ rhs: Tensor ) -> (value: Tensor, pullback: (Tensor) -> (Tensor, Tensor)) { return (lhs * rhs, { v in ((rhs * v).unbroadcasted(to: lhs.shape), (lhs * v).unbroadcasted(to: rhs.shape)) }) } } // Now, we can take the derivative of a function that calls `.*` that we just defined. print(gradient(at: x, y) { x, y in (x .* y).sum() }) ``` ## More examples ``` let matrix = Tensor<Float>([[1, 2], [3, 4]]) print(_Raw.matMul(matrix, matrix, transposeA: true, transposeB: true)) print(_Raw.matMul(matrix, matrix, transposeA: true, transposeB: false)) print(_Raw.matMul(matrix, matrix, transposeA: false, transposeB: true)) print(_Raw.matMul(matrix, matrix, transposeA: false, transposeB: false)) ```
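As a cross-check on the pullback defined above, here is a NumPy analogue (Python, not Swift, and not part of the Swift for TensorFlow API) of the same elementwise-multiply derivative. It illustrates why the pullback scales the incoming gradient by the *other* operand, and why an `unbroadcasted(to:)` step is needed when the operands were broadcast to a common shape.

```python
import numpy as np

x = np.array([[1.0, 2.0], [3.0, 4.0]])
y = np.array([[8.0, 7.0], [6.0, 5.0]])

# Forward: z = x * y, loss = z.sum()
# Backward: the gradient of sum() w.r.t. z is all ones, and the pullback of
# elementwise multiply scales it by the other operand, as in multiplyDerivative.
v = np.ones_like(x)   # incoming gradient
grad_x = y * v        # counterpart of (rhs * v)
grad_y = x * v        # counterpart of (lhs * v)
print(grad_x)         # equals y, matching gradient(at: x, y) { (x .* y).sum() }
print(grad_y)         # equals x

# If x had been broadcast, e.g. a (1, 2) row against a (2, 2) matrix, the raw
# product would have shape (2, 2); summing over the broadcast axis is the NumPy
# counterpart of `.unbroadcasted(to: lhs.shape)`.
x_row = np.array([[1.0, 2.0]])
grad_x_row = (y * np.ones((2, 2))).sum(axis=0, keepdims=True)
print(grad_x_row)     # shape (1, 2)
```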
github_jupyter
<h1>Table of Contents<span class="tocSkip"></span></h1> <div class="toc"><ul class="toc-item"><li><span><a href="#Libraries-and-functions" data-toc-modified-id="Libraries-and-functions-1"><span class="toc-item-num">1&nbsp;&nbsp;</span>Libraries and functions</a></span><ul class="toc-item"><li><span><a href="#Import-libraries" data-toc-modified-id="Import-libraries-1.1"><span class="toc-item-num">1.1&nbsp;&nbsp;</span>Import libraries</a></span></li><li><span><a href="#Definition-of--functions-used-locally" data-toc-modified-id="Definition-of--functions-used-locally-1.2"><span class="toc-item-num">1.2&nbsp;&nbsp;</span>Definition of functions used locally</a></span></li></ul></li><li><span><a href="#Options" data-toc-modified-id="Options-2"><span class="toc-item-num">2&nbsp;&nbsp;</span>Options</a></span></li><li><span><a href="#Load-data" data-toc-modified-id="Load-data-3"><span class="toc-item-num">3&nbsp;&nbsp;</span>Load data</a></span></li><li><span><a href="#Create-base-indices" data-toc-modified-id="Create-base-indices-4"><span class="toc-item-num">4&nbsp;&nbsp;</span>Create base indices</a></span><ul class="toc-item"><li><span><a href="#Market-cap-weighted" data-toc-modified-id="Market-cap-weighted-4.1"><span class="toc-item-num">4.1&nbsp;&nbsp;</span>Market cap weighted</a></span></li><li><span><a href="#Equal-weights" data-toc-modified-id="Equal-weights-4.2"><span class="toc-item-num">4.2&nbsp;&nbsp;</span>Equal weights</a></span></li><li><span><a href="#compute-index-returns" data-toc-modified-id="compute-index-returns-4.3"><span class="toc-item-num">4.3&nbsp;&nbsp;</span>compute index returns</a></span></li></ul></li><li><span><a href="#Index-based-on-model-predictions" data-toc-modified-id="Index-based-on-model-predictions-5"><span class="toc-item-num">5&nbsp;&nbsp;</span>Index based on model predictions</a></span><ul class="toc-item"><li><span><a href="#Weight-generation" data-toc-modified-id="Weight-generation-5.1"><span class="toc-item-num">5.1&nbsp;&nbsp;</span>Weight generation</a></span></li><li><span><a href="#Build-your-strategy" data-toc-modified-id="Build-your-strategy-5.2"><span class="toc-item-num">5.2&nbsp;&nbsp;</span>Build your strategy</a></span></li><li><span><a href="#Build-index" data-toc-modified-id="Build-index-5.3"><span class="toc-item-num">5.3&nbsp;&nbsp;</span>Build index</a></span><ul class="toc-item"><li><span><a href="#Monthly-index-levels" data-toc-modified-id="Monthly-index-levels-5.3.1"><span class="toc-item-num">5.3.1&nbsp;&nbsp;</span>Monthly index levels</a></span></li></ul></li></ul></li></ul></div> # Libraries and functions ## Import libraries Need to be able to access functions in base_dir/src ``` # libraries # general import sys import os import itertools import dateutil.relativedelta as relativedelta import datetime import numpy as np import pandas as pd # plotting %matplotlib inline import matplotlib.pyplot as plt import matplotlib as mpl import seaborn as sns # # statistics and machine learning from IPython.display import display pd.options.display.max_columns = None # # add the base path to python system path path = os.getcwd() #dir_up = os.path.abspath(os.path.join(path, os.pardir)) base_path = os.path.abspath(os.path.join(path, os.pardir)) sys.path.append(base_path) # from mpl_toolkits.axes_grid.anchored_artists import AnchoredText from matplotlib import gridspec # # libraries within package from src.finance_functions import multiple_returns_from_levels_vec, project_to_first from src.finance_functions import df_restrict_dates from 
src.automotive_dictionaries import equity_name2first_date from src.index_functionality import index_levels_from_returns from src.financial_metrics import extract_performance %load_ext autoreload %autoreload 2 %reload_ext autoreload ``` # Options # Load data ``` filename = '../data/data_sample_monthly.csv' infile = filename df_comb_long = pd.read_csv(infile) df_comb_long['date'] = pd.to_datetime(df_comb_long['date']) #df_comb_long.head() #eq_name = 'Equity Parent' eq_name = 'company' df_prices = df_comb_long.pivot(values='stock_price', index='date', columns=eq_name) # monthly returns df_returns = multiple_returns_from_levels_vec(df_prices.ffill()) ``` # Create base indices ``` df_market_cap = df_comb_long.pivot(values='MarketCap_Mlns', index='date', columns=eq_name) df_market_cap.index = df_market_cap.index.map(project_to_first) # deal with the missing values by taking the previously available one df_market_cap.ffill(inplace=True) # set to zero when not available, this takes care of the market cap weights for col in df_market_cap.columns: first_date = project_to_first(equity_name2first_date[col]) mask = df_market_cap.index < first_date df_market_cap.loc[mask, col] = 0.0 #print(first_date) #df_market_cap.head() ``` ## Market cap weighted ``` total_market_cap = df_market_cap.sum(axis=1) # weights determined in the same month as market cap df_weights = df_market_cap.div(total_market_cap, axis=0) # the weights for the index should be determined by past information, # i.e.by previous month market_cap #df_weights_mc = df_weights.shift(1).bfill() df_weights_mc = df_weights.shift(1) ``` ## Equal weights ``` df_temp = (df_weights_mc > 0.0).astype(int) df_weights_equal = df_temp.div(df_temp.sum(axis=1), axis=0) ``` ## compute index returns ``` df_mc_index_returns = pd.DataFrame((df_returns * df_weights_mc).sum(axis=1),columns=['mc_return']) df_mc_index_returns.dropna(inplace=True) df_equal_index_returns = pd.DataFrame((df_returns * df_weights_equal).sum(axis=1),columns=['equal_return']) df_equal_index_returns.dropna(inplace=True) ``` # Index based on model predictions Base indices * market cap weighted * equal weighted ## Weight generation ``` l_base_weights = ['Market Cap', 'Equal'] l_weighting_schemes = ['0'] # Cartesian product tuples l_weights = list(itertools.product(*[l_base_weights,l_weighting_schemes])) # WEIGHT ADJUSTMENTS OPTIONS d_weights = {} #execution loop for base_mod, scheme in l_weights: print(base_mod, scheme) if base_mod == 'Market Cap' : df_base_weights = df_weights_mc.copy() if base_mod == 'Equal' : df_base_weights = df_weights_equal.copy() df_mod_weights = df_base_weights.copy() name_d = base_mod + ' ' + scheme d_weights[name_d] = df_mod_weights ``` ## Build your strategy ``` # do something better than random df_rand = pd.DataFrame(np.random.uniform(low=0.0, high=0.01, size=(len(df_weights_mc.index), len(df_weights_mc.columns))), columns=list(df_weights_mc.columns), index=df_weights_mc.index) df_w = df_weights_mc + df_rand df_w = df_w.div(df_w.sum(axis=1), axis=0) d_weights['Market Cap smart modify'] = df_w # # SCHEMATIC: CODE DOES NOT EXECUTE LIKE THIS # # possible loop for training a model # # and producing df_oos_predictions # # alternatively use portfolio optimization # x_names = ['feature1',...] 
# y_name = 'returns' # prediction_dates = df_weights_mc.index[24:] # for date in prediction_dates: # #print(date) # train_ini_date = date + relativedelta.relativedelta(months=-24) # train_final_date = date + relativedelta.relativedelta(months=-1) # df1 = df_restrict_dates(df_comb_long, train_ini_date, train_final_date) # df_x_train = df1[x_names].copy() # df_y_train = df1[[y_name]].copy() # X_train_full = df_x_train.values # y_train_full = df_y_train[y_name].values # model.fit(X_train_full, y_train_full, sample_weight=sample_weights) # ##### oos results # df2 = df_restrict_dates(df_comb_long, date, date) # df_x = df2[x_names].copy() # X_oos = df_x.values # predictions = model.predict(X_oos) # df_oos_predictions.loc[date] = predictions # # SCHEMATIC: CODE DOES NOT EXECUTE LIKE THIS # # possible loop for weight updates (schematic) # # based on model predictions df_oos_predictions # df_base_weights = df_weights_mc.copy() # df_mod_weights = df_base_weights.copy() # for date in prediction_dates: # # assume you have made some predictions # predictions = df_oos_predictions.loc[date].values # # relate predictions to weight updates # weights_mod = .... # # possibly apply capping rules # df_mod_weights.loc[date] = weights_mod # name_d = 'xx' # d_weights[name_d] = df_mod_weights ``` ## Build index ### Monthly index levels ``` # build date frame with indices (no rebalancing) start_date = datetime.datetime(2009,1,1) end_date = datetime.datetime(2015,12,31) starting_level = 100 df_r_in = df_restrict_dates(df_returns, start_date, end_date) frequency = 'monthly' for k, name in enumerate(sorted(d_weights.keys())): print(name) df_w_in = df_restrict_dates(d_weights[name], start_date, end_date) df_temp = index_levels_from_returns(df_w_in, df_r_in, out_field=name, starting_level=starting_level, transaction_costs=False, frequency=frequency) if k == 0: df_i_comb = df_temp else: df_i_comb = df_i_comb.merge(df_temp, left_index=True, right_index=True) # plot without rebalancing costs font = {'size' : 24} mpl.rc('font', **font) cm = plt.get_cmap('jet') #cm = plt.get_cmap('viridis') sns.set(font_scale=2.5) sns.set_style("whitegrid") #fields = 'Equal' #fields = 'Market Cap' fields = None headers = df_i_comb.columns if fields is not None: headers = list(filter(lambda s: fields in s, df_i_comb.columns)) df_i_comb[headers].plot(figsize=(20,12), colormap=cm) plt.title(frequency.title() + ' Index performance (no rebalancing costs)') print() extract_performance(df_i_comb[headers]) ```
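The `index_levels_from_returns` helper used above comes from `src.index_functionality` and its implementation is not shown in this notebook. As a rough mental model only (an assumption, not the actual library code), chaining the weighted one-period returns into index levels without rebalancing costs looks like the following sketch.

```python
import pandas as pd

def index_levels_sketch(df_weights, df_returns, starting_level=100):
    """Illustrative only: compound weighted one-period returns into levels.

    Assumes df_weights already holds the weights decided at the start of each
    period (i.e. shifted, like df_weights_mc) and ignores transaction costs.
    """
    period_returns = (df_weights * df_returns).sum(axis=1)
    return starting_level * (1.0 + period_returns.fillna(0.0)).cumprod()

# toy example: two assets over three months
idx = pd.date_range("2009-01-01", periods=3, freq="MS")
weights = pd.DataFrame({"A": [0.6, 0.6, 0.5], "B": [0.4, 0.4, 0.5]}, index=idx)
returns = pd.DataFrame({"A": [0.02, -0.01, 0.03], "B": [0.00, 0.01, -0.02]}, index=idx)
print(index_levels_sketch(weights, returns))
```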
github_jupyter
# Segmentation

Image segmentation is another early and important image processing task. Segmentation is the process of breaking an image into groups based on similarities of the pixels. Pixels can be similar to each other in multiple ways, such as brightness, color, or texture. Segmentation algorithms aim to find a partition of the image into sets of similar pixels, which usually indicate objects or certain scenes in the image. The segmentation methods in this chapter can be categorized in two complementary ways: one focusing on detecting the boundaries of these groups, and the other on detecting the groups themselves, typically called regions. We will introduce the principles of some algorithms in this notebook to present the basic ideas of segmentation.

## Probability Boundary Detection

A boundary curve passing through a pixel $(x,y)$ in an image will have an orientation $\theta$, so we can formulate boundary detection as a classification problem. Based on features from a local neighborhood, we want to compute the probability $P_b(x,y,\theta)$ that there is indeed a boundary curve at that pixel along that orientation. One way to estimate $P_b(x,y,\theta)$ is to generate a series of discs, each sub-divided into two half disks by a diameter oriented at $\theta$. If there is a boundary at $(x, y, \theta)$, the two half disks can be expected to differ significantly in their brightness, color, and texture. For a detailed derivation of this algorithm, please refer to this [article](https://people.eecs.berkeley.edu/~malik/papers/MFM-boundaries.pdf).

### Implementation

We implemented a simple demonstration of the probability boundary detector as `probability_contour_detection` in `perception4e.py`. This method takes three inputs:

- image: an image already converted to a numpy ndarray.
- discs: a list of sub-divided discs.
- threshold: the criterion used to decide whether the difference between the intensities of the two half discs implies a boundary passing through the current pixel.

We also provide a helper function `gen_discs` to generate a list of discs. It takes `scales` as the number of disc sizes to generate, which defaults to 1. Please note that for each scale, 8 sub discs are generated, oriented along the horizontal, vertical and two diagonal directions. Another parameter, `init_scale`, indicates the starting scale size. For instance, if we use an `init_scale` of 10 and `scales` of 2, then discs of sizes 10 and 20 will be generated and thus we will have 16 sub-divided discs.

### Example

Now let's demonstrate the inner mechanism with our naive implementation of the algorithm. First, let's generate some very simple test images. We already generated a grayscale image with only three steps of gray scales in `perception4e.py`:

```
import os, sys
sys.path = [os.path.abspath("../../")] + sys.path
from perception4e import *
from notebook4e import *
import matplotlib.pyplot as plt
```

Let's take a look at it:

```
plt.imshow(gray_scale_image, cmap='gray', vmin=0, vmax=255)
plt.axis('off')
plt.show()
```

You can also generate your own grayscale images by calling `gen_gray_scale_picture` and passing the image size and the number of grayscale levels needed:

```
gray_img = gen_gray_scale_picture(100, 5)
plt.imshow(gray_img, cmap='gray', vmin=0, vmax=255)
plt.axis('off')
plt.show()
```

Now let's generate the discs we are going to use as sampling masks to measure the intensity difference between the two halves of the area of interest in an image. We can generate discs of size 100 pixels and show them:

```
discs = gen_discs(100, 1)
fig = plt.figure(figsize=(10, 10))
for i in range(8):
    img = discs[0][i]
    fig.add_subplot(1, 8, i+1)
    plt.axis('off')
    plt.imshow(img, cmap='gray', vmin=0, vmax=255)
plt.show()
```

The white part of each disc image has value 1 while the dark part has value 0. Thus convolving a half-disc image with the corresponding area of an image will yield only half of its content. Of course, discs of size 100 are too large for an image of the same size. We will use discs of size 10 and pass them to the detector.

```
discs = gen_discs(10, 1)
contours = probability_contour_detection(gray_img, discs[0])
show_edges(contours)
```

As we are using discs of size 10 and some boundary conditions are not handled in our naive algorithm, the extracted contour has a bold edge with gaps near the image border. But the main structures of the contours are extracted correctly, which shows the ability of this algorithm.

## Group Contour Detection

The alternative approach is based on trying to "cluster" the pixels into regions based on their brightness, color and texture properties. There are multiple grouping algorithms, and the simplest and most popular one is k-means clustering. Basically, the k-means algorithm starts with k randomly selected centroids, which are used as the starting points for every cluster, and then performs iterative calculations to optimize the positions of the centroids. For a detailed description, please refer to the chapter on unsupervised learning.

### Implementation

Here we will use the `cv2` module to perform k-means clustering and show the image. To use it you need to have `opencv-python` pre-installed. Using `cv2.kmeans` is quite simple: you only need to specify the input image and the parameters of cluster initialization. Here we use modules provided by `cv2` to initialize the clusters. `cv2.KMEANS_RANDOM_CENTERS` randomly generates the initial cluster centers, and the number of clusters is defined by the user. The `kmeans` method will return the centers and labels of the clusters, which can be used to classify the pixels of an image. Let's try this algorithm again on the small grayscale image we imported:

```
contours = group_contour_detection(gray_scale_image, 3)
```

Now let's show the extracted contours:

```
show_edges(contours)
```

It is not obvious, as our generated image already has very clear boundaries. Let's apply the algorithm to the stapler example to see whether it will be more obvious:

```
import numpy as np
import matplotlib.image as mpimg

stapler_img = mpimg.imread('images/stapler.png', format="gray")
contours = group_contour_detection(stapler_img, 5)
plt.axis('off')
plt.imshow(contours, cmap="gray")
```

The segmentation is very rough when using only 5 clusters. Increasing the number of clusters captures more subtle variation within each group, so the whole picture will look more like the original one:

```
contours = group_contour_detection(stapler_img, 15)
plt.axis('off')
plt.imshow(contours, cmap="gray")
```

## Minimum Cut Segmentation

Another way to do clustering is by applying the minimum cut algorithm from graph theory. Roughly speaking, the criterion for partitioning the graph is to minimize the sum of weights of connections across the groups and maximize the sum of weights of connections within the groups.

### Implementation

There are several kinds of representations of a graph, such as a matrix or an adjacency list. Here we are using a util function `image_to_graph` to convert an image in ndarray form to an adjacency list. It is integrated into the `Graph` class. `Graph` takes an image as input and offers implementations of some graph theory algorithms:

- bfs: performs a breadth-first search from a source vertex to a terminal vertex. Returns `True` if there is a path between the two nodes, else returns `False`.
- min_cut: performs a minimum cut on the graph from a source vertex to a sink vertex. The method returns the edges to be cut.

Now let's try the minimum cut method on a simple generated grayscale image of size 10:

```
image = gen_gray_scale_picture(size=10, level=2)
show_edges(image)
graph = Graph(image)
graph.min_cut((0,0), (9,9))
```

There are ten edges to be cut. By cutting these ten edges, we can separate the picture into two parts by their pixel intensities.
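The `Graph` class used above is the notebook's own utility, so its internals are not repeated here. For readers who want to experiment with the same idea using a standard library, the sketch below builds a 4-connected pixel graph with `networkx`, giving high capacities between similar pixels and low capacities across intensity jumps, then runs a max-flow/min-cut between two seed pixels. The capacity formula `1 / (1 + |difference|)` is an illustrative choice, not the one used by `perception4e`.

```python
import numpy as np
import networkx as nx

def image_min_cut(img, source, sink):
    """Sketch: s-t minimum cut on a 4-connected pixel graph."""
    h, w = img.shape
    G = nx.DiGraph()
    for i in range(h):
        for j in range(w):
            for di, dj in ((0, 1), (1, 0)):
                ni, nj = i + di, j + dj
                if ni < h and nj < w:
                    cap = 1.0 / (1.0 + abs(float(img[i, j]) - float(img[ni, nj])))
                    # symmetric capacities, one edge in each direction
                    G.add_edge((i, j), (ni, nj), capacity=cap)
                    G.add_edge((ni, nj), (i, j), capacity=cap)
    cut_value, (part_a, part_b) = nx.minimum_cut(G, source, sink)
    return cut_value, part_a, part_b

# two flat regions separated by an intensity step between columns 2 and 3
img = np.zeros((6, 6))
img[:, 3:] = 255
cut_value, a, b = image_min_cut(img, (0, 0), (5, 5))
print(len(a), len(b))  # 18 and 18: the cut follows the intensity step
```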
github_jupyter
<center> <img src="https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-ML0101EN-SkillsNetwork/labs/Module%203/images/IDSNlogo.png" width="300" alt="cognitiveclass.ai logo" /> </center> # Decision Trees Estimated time needed: **15** minutes ## Objectives After completing this lab you will be able to: * Develop a classification model using Decision Tree Algorithm In this lab exercise, you will learn a popular machine learning algorithm, Decision Trees. You will use this classification algorithm to build a model from the historical data of patients, and their response to different medications. Then you will use the trained decision tree to predict the class of a unknown patient, or to find a proper drug for a new patient. <h1>Table of contents</h1> <div class="alert alert-block alert-info" style="margin-top: 20px"> <ol> <li><a href="#about_dataset">About the dataset</a></li> <li><a href="#downloading_data">Downloading the Data</a></li> <li><a href="#pre-processing">Pre-processing</a></li> <li><a href="#setting_up_tree">Setting up the Decision Tree</a></li> <li><a href="#modeling">Modeling</a></li> <li><a href="#prediction">Prediction</a></li> <li><a href="#evaluation">Evaluation</a></li> <li><a href="#visualization">Visualization</a></li> </ol> </div> <br> <hr> Import the Following Libraries: <ul> <li> <b>numpy (as np)</b> </li> <li> <b>pandas</b> </li> <li> <b>DecisionTreeClassifier</b> from <b>sklearn.tree</b> </li> </ul> ``` import numpy as np import pandas as pd from sklearn.tree import DecisionTreeClassifier ``` <div id="about_dataset"> <h2>About the dataset</h2> Imagine that you are a medical researcher compiling data for a study. You have collected data about a set of patients, all of whom suffered from the same illness. During their course of treatment, each patient responded to one of 5 medications, Drug A, Drug B, Drug c, Drug x and y. <br> <br> Part of your job is to build a model to find out which drug might be appropriate for a future patient with the same illness. The features of this dataset are Age, Sex, Blood Pressure, and the Cholesterol of the patients, and the target is the drug that each patient responded to. <br> <br> It is a sample of multiclass classifier, and you can use the training part of the dataset to build a decision tree, and then use it to predict the class of a unknown patient, or to prescribe a drug to a new patient. </div> <div id="downloading_data"> <h2>Downloading the Data</h2> To download the data, we will use !wget to download it from IBM Object Storage. </div> ``` !wget -O drug200.csv https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-ML0101EN-SkillsNetwork/labs/Module%203/data/drug200.csv ``` **Did you know?** When it comes to Machine Learning, you will likely be working with large datasets. As a business, where can you host your data? IBM is offering a unique opportunity for businesses, with 10 Tb of IBM Cloud Object Storage: [Sign up now for free](http://cocl.us/ML0101EN-IBM-Offer-CC) Now, read the data using pandas dataframe: ``` my_data = pd.read_csv("drug200.csv", delimiter=",") my_data[0:5] ``` <div id="practice"> <h3>Practice</h3> What is the size of data? 
</div> ``` # write your code here my_data.shape ``` <details><summary>Click here for the solution</summary> ```python my_data.shape ``` </details> <div href="pre-processing"> <h2>Pre-processing</h2> </div> Using <b>my_data</b> as the Drug.csv data read by pandas, declare the following variables: <br> <ul> <li> <b> X </b> as the <b> Feature Matrix </b> (data of my_data) </li> <li> <b> y </b> as the <b> response vector </b> (target) </li> </ul> Remove the column containing the target name since it doesn't contain numeric values. ``` X = my_data[['Age', 'Sex', 'BP', 'Cholesterol', 'Na_to_K']].values X[0:5] ``` As you may figure out, some features in this dataset are categorical, such as **Sex** or **BP**. Unfortunately, Sklearn Decision Trees does not handle categorical variables. We can still convert these features to numerical values using **pandas.get_dummies()** to convert the categorical variable into dummy/indicator variables. ``` from sklearn import preprocessing le_sex = preprocessing.LabelEncoder() le_sex.fit(['F','M']) X[:,1] = le_sex.transform(X[:,1]) le_BP = preprocessing.LabelEncoder() le_BP.fit([ 'LOW', 'NORMAL', 'HIGH']) X[:,2] = le_BP.transform(X[:,2]) le_Chol = preprocessing.LabelEncoder() le_Chol.fit([ 'NORMAL', 'HIGH']) X[:,3] = le_Chol.transform(X[:,3]) X[0:5] ``` Now we can fill the target variable. ``` y = my_data["Drug"] y[0:5] ``` <hr> <div id="setting_up_tree"> <h2>Setting up the Decision Tree</h2> We will be using <b>train/test split</b> on our <b>decision tree</b>. Let's import <b>train_test_split</b> from <b>sklearn.cross_validation</b>. </div> ``` from sklearn.model_selection import train_test_split ``` Now <b> train_test_split </b> will return 4 different parameters. We will name them:<br> X_trainset, X_testset, y_trainset, y_testset <br> <br> The <b> train_test_split </b> will need the parameters: <br> X, y, test_size=0.3, and random_state=3. <br> <br> The <b>X</b> and <b>y</b> are the arrays required before the split, the <b>test_size</b> represents the ratio of the testing dataset, and the <b>random_state</b> ensures that we obtain the same splits. ``` X_trainset, X_testset, y_trainset, y_testset = train_test_split(X, y, test_size=0.3, random_state=3) ``` <h3>Practice</h3> Print the shape of X_trainset and y_trainset. Ensure that the dimensions match. ``` # your code X_trainset.shape y_trainset.shape ``` <details><summary>Click here for the solution</summary> ```python print('Shape of X training set {}'.format(X_trainset.shape),'&',' Size of Y training set {}'.format(y_trainset.shape)) ``` </details> Print the shape of X_testset and y_testset. Ensure that the dimensions match. ``` # your code X_testset.shape ``` <details><summary>Click here for the solution</summary> ```python print('Shape of X training set {}'.format(X_testset.shape),'&',' Size of Y training set {}'.format(y_testset.shape)) ``` </details> ``` y_testset.shape ``` <hr> <div id="modeling"> <h2>Modeling</h2> We will first create an instance of the <b>DecisionTreeClassifier</b> called <b>drugTree</b>.<br> Inside of the classifier, specify <i> criterion="entropy" </i> so we can see the information gain of each node. 
</div> ``` drugTree = DecisionTreeClassifier(criterion="entropy", max_depth = 4) drugTree # it shows the default parameters ``` Next, we will fit the data with the training feature matrix <b> X_trainset </b> and training response vector <b> y_trainset </b> ``` drugTree.fit(X_trainset,y_trainset) ``` <hr> <div id="prediction"> <h2>Prediction</h2> Let's make some <b>predictions</b> on the testing dataset and store it into a variable called <b>predTree</b>. </div> ``` predTree = drugTree.predict(X_testset) ``` You can print out <b>predTree</b> and <b>y_testset</b> if you want to visually compare the predictions to the actual values. ``` print (predTree [0:5]) print (y_testset [0:5]) ``` <hr> <div id="evaluation"> <h2>Evaluation</h2> Next, let's import <b>metrics</b> from sklearn and check the accuracy of our model. </div> ``` from sklearn import metrics import matplotlib.pyplot as plt print("DecisionTrees's Accuracy: ", metrics.accuracy_score(y_testset, predTree)) ``` **Accuracy classification score** computes subset accuracy: the set of labels predicted for a sample must exactly match the corresponding set of labels in y_true. In multilabel classification, the function returns the subset accuracy. If the entire set of predicted labels for a sample strictly match with the true set of labels, then the subset accuracy is 1.0; otherwise it is 0.0. ``` ``` <h2>Want to learn more?</h2> IBM SPSS Modeler is a comprehensive analytics platform that has many machine learning algorithms. It has been designed to bring predictive intelligence to decisions made by individuals, by groups, by systems – by your enterprise as a whole. A free trial is available through this course, available here: <a href="https://www.ibm.com/analytics/spss-statistics-software?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkML0101ENSkillsNetwork20718538-2021-01-01">SPSS Modeler</a> Also, you can use Watson Studio to run these notebooks faster with bigger datasets. Watson Studio is IBM's leading cloud solution for data scientists, built by data scientists. With Jupyter notebooks, RStudio, Apache Spark and popular libraries pre-packaged in the cloud, Watson Studio enables data scientists to collaborate on their projects without having to install anything. Join the fast-growing community of Watson Studio users today with a free account at <a href="https://www.ibm.com/cloud/watson-studio?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkML0101ENSkillsNetwork20718538-2021-01-01">Watson Studio</a> ### Thank you for completing this lab! ## Author Saeed Aghabozorgi ### Other Contributors <a href="https://www.linkedin.com/in/joseph-s-50398b136/?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkML0101ENSkillsNetwork20718538-2021-01-01" target="_blank">Joseph Santarcangelo</a> ## Change Log | Date (YYYY-MM-DD) | Version | Changed By | Change Description | |---|---|---|---| | 2020-11-20 | 2.2 | Lakshmi | Changed import statement of StringIO| | 2020-11-03 | 2.1 | Lakshmi | Changed URL of the csv | | 2020-08-27 | 2.0 | Lavanya | Moved lab to course repo in GitLab | | | | | | | | | | | ## <h3 align="center"> © IBM Corporation 2020. All rights reserved. <h3/>
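The table of contents above lists a Visualization step that is not shown in this copy of the lab. If graphviz is not available, one self-contained alternative is scikit-learn's built-in `plot_tree`; the sketch below reuses `drugTree` and the feature list from this notebook, and passing the sorted target labels as `class_names` is an assumption that matches how scikit-learn orders `classes_`.

```python
import matplotlib.pyplot as plt
from sklearn import tree

feature_names = ['Age', 'Sex', 'BP', 'Cholesterol', 'Na_to_K']

plt.figure(figsize=(16, 10))
tree.plot_tree(drugTree,
               feature_names=feature_names,
               class_names=sorted(y_trainset.unique()),
               filled=True,
               fontsize=8)
plt.show()
```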
github_jupyter
# Results Analysis This notebook analyzes results produced by the _anti-entropy reinforcement learning_ experiments. The practical purpose of this notebook is to create graphs that can be used to display anti-entropy topologies, but also to extract information relevant to each experimental run. ``` %matplotlib notebook import os import re import glob import json import unicodedata import numpy as np import pandas as pd import seaborn as sns import networkx as nx import matplotlib as mpl import graph_tool.all as gt import matplotlib.pyplot as plt from nx2gt import nx2gt from datetime import timedelta from collections import defaultdict ``` ## Data Loading The data directory contains directories whose names are the hosts along with configuration files for each run. Each run is stored in its own `metrics.json` file, suffixed by the run number. The data loader yields _all_ rows from _all_ metric files and appends them with the correct configuration data. ``` DATA = "../data" FIGS = "../figures" GRAPHS = "../graphs" HOSTS = "hosts.json" RESULTS = "metrics-*.json" CONFIGS = "config-*.json" NULLDATE = "0001-01-01T00:00:00Z" DURATION = re.compile("^([\d\.]+)(\w+)$") def suffix(path): # Get the run id from the path name, _ = os.path.splitext(path) return int(name.split("-")[-1]) def parse_duration(d): match = DURATION.match(d) if match is None: raise TypeError("could not parse duration '{}'".format(d)) amount, units = match.groups() amount = float(amount) unitkw = { "µs": "microseconds", "ms": "milliseconds", "s": "seconds", }[units] return timedelta(**{unitkw:amount}).total_seconds() def load_hosts(path=DATA): with open(os.path.join(path, HOSTS), 'r') as f: return json.load(f) def load_configs(path=DATA): configs = {} for name in glob.glob(os.path.join(path, CONFIGS)): with open(name, 'r') as f: configs[suffix(name)] = json.load(f) return configs def slugify(name): slug = unicodedata.normalize('NFKD', name) slug = str(slug.encode('ascii', 'ignore')).lower() slug = re.sub(r'[^a-z0-9]+', '-', slug).strip('-') slug = re.sub(r'[-]+', '-', slug) return slug def load_results(path=DATA): hosts = load_hosts(path) configs = load_configs(path) for host in os.listdir(path): for name in glob.glob(os.path.join(path, host, "metrics-*.json")): run = suffix(name) with open(name, 'r', encoding='utf-8') as f: for line in f: row = json.loads(line.strip()) row['name'] = host row['host'] = hosts[host]["hostname"] + ":3264" row['runid'] = run row['config'] = configs[run] yield row def merge_results(path, data=DATA): # Merge all of the results into a single unified file with open(path, 'w') as f: for row in load_results(data): f.write(json.dumps(row)) f.write("\n") ``` ## Graph Extraction This section extracts a NeworkX graph for each of the experimental runs such that each graph defines an anti-entropy topology. 
``` def extract_graphs(path=DATA, outdir=None): graphs = defaultdict(nx.DiGraph) for row in load_results(path): # Get the graph for the topology G = graphs[row["runid"]] # Update the graph information name = row["bandit"]["strategy"].title() epsilon = row["config"]["replicas"].get("epsilon", None) if epsilon: name += " ε={}".format(epsilon) G.graph.update({ "name": name + " (E{})".format(row["runid"]), "experiment": row["runid"], "uptime": row["config"]["replicas"]["uptime"], "bandit": row["config"]["replicas"]["bandit"], "epsilon": epsilon or "", "anti_entropy_interval": row["config"]["replicas"]["delay"], "workload_duration": row["config"]["clients"]["config"]["duration"], "n_clients": len(row["config"]["clients"]["hosts"]), # "workload": row["config"]["clients"]["hosts"], "store": row["store"], }) # Update the vertex information vnames = row["name"].split("-") vertex = { "duration": row["duration"], "finished": row["finished"] if row["finished"] != NULLDATE else "", "started": row["started"] if row["started"] != NULLDATE else "", "keys_stored": row["nkeys"], "reads": row["reads"], "writes": row["writes"], "throughput": row["throughput"], "location": " ".join(vnames[1:-1]).title(), "pid": int(vnames[-1]), "name": row["name"] } source_id = row["host"] source = G.add_node(source_id, **vertex) # Get bandit edge information bandit_counts = dict(zip(row["peers"], row["bandit"]["counts"])) bandit_values = dict(zip(row["peers"], row["bandit"]["values"])) # Add the edges from the sync table for target_id, stats in row["syncs"].items(): edge = { "count": bandit_counts[target_id], "reward": bandit_values[target_id], "misses": stats["Misses"], "pulls": stats["Pulls"], "pushes": stats["Pushes"], "syncs": stats["Syncs"], "versions": stats["Versions"], "mean_pull_latency": parse_duration(stats["PullLatency"]["mean"]), "mean_push_latency": parse_duration(stats["PushLatency"]["mean"]), } G.add_edge(source_id, target_id, **edge) # Write Graphs if outdir: for G in graphs.values(): opath = os.path.join(outdir, slugify(G.name)+".graphml.gz") nx.write_graphml(G, opath) return graphs # for G in extract_graphs(outdir=GRAPHS).values(): for G in extract_graphs().values(): print(nx.info(G)) print() LOCATION_COLORS = { "Virginia": "#D91E18", "Ohio": "#E26A6A", "California": "#8E44AD", "Sao Paulo": "#6BB9F0", "London": "#2ECC71", "Frankfurt": "#6C7A89", "Seoul": "#F9690E", "Sydney": "#F7CA18", } LOCATION_GROUPS = sorted(list(LOCATION_COLORS.keys())) LOCATION_CODES = { "Virginia": "VA", "Ohio": "OH", "California": "CA", "Sao Paulo": "BR", "London": "GB", "Frankfurt": "DE", "Seoul": "KR", "Sydney": "AU", } def filter_edges(h, pulls=0, pushes=0): # Create a view of the graph with only edges with syncs > 0 efilt = h.new_edge_property('bool') for edge in h.edges(): efilt[edge] = (h.ep['pulls'][edge] > pulls or h.ep['pushes'][edge] > pushes) return gt.GraphView(h, efilt=efilt) def mklabel(name, loc): code = LOCATION_CODES[loc] parts = name.split("-") return "{}{}".format(code, parts[-1]) def visualize_graph(G, layout='sfdp', filter=True, save=True): print(G.name) output = None if save: output = os.path.join(FIGS, slugify(G.name) + ".pdf") # Convert the nx Graph to a gt Graph g = nx2gt(G) if filter: g = filter_edges(g) # Vertex Properties vgroup = g.new_vertex_property('int32_t') vcolor = g.new_vertex_property('string') vlabel = g.new_vertex_property('string') for vertex in g.vertices(): vcolor[vertex] = LOCATION_COLORS[g.vp['location'][vertex]] vgroup[vertex] = LOCATION_GROUPS.index(g.vp['location'][vertex]) 
vlabel[vertex] = mklabel(g.vp['name'][vertex], g.vp['location'][vertex]) vsize = gt.prop_to_size(g.vp['writes'], ma=65, mi=35) # Edge Properties esize = gt.prop_to_size(g.ep['versions'], mi=.01, ma=6) ecolor = gt.prop_to_size(g.ep['mean_pull_latency'], mi=1, ma=5, log=True) # Compute the layout and draw if layout == 'fruchterman_reingold': pos = gt.fruchterman_reingold_layout(g, weight=esize, circular=True, grid=False) elif layout == 'sfdp': pos = gt.sfdp_layout(g, eweight=esize, groups=vgroup) else: raise ValueError("unknown layout '{}".format(layout)) gt.graph_draw( g, pos=pos, output_size=(1200,1200), output=output, inline=True, vertex_size=vsize, vertex_fill_color=vcolor, vertex_text=vlabel, vertex_halo=False, vertex_pen_width=1.2, edge_pen_width=esize, ) visualize_graph(extract_graphs()[5]) ``` ## Rewards DataFrame This section extracts a timeseries of rewards on a per-replica basis. ``` def extract_rewards(path=DATA): for row in load_results(path): bandit = row["bandit"] history = bandit["history"] strategy = bandit["strategy"] epsilon = row["config"]["replicas"].get("epsilon") if epsilon: strategy += " ε={}".format(epsilon) values = np.array(list(map(float, history["rewards"]))) series = pd.Series(values, name=row["name"] + " " + strategy) yield series, row['runid'] total_rewards = {} for series, rowid in extract_rewards(): if rowid not in total_rewards: total_rewards[rowid] = series else: total_rewards[rowid] += series cumulative_rewards = { rowid: s.cumsum() for rowid, s in total_rewards.items() } from pandas.plotting import autocorrelation_plot df = pd.DataFrame({ " ".join(s.name.split(" ")[1:]): s for s in total_rewards.values() }).iloc[15:361] df.reset_index(inplace=True, drop=True) fig,ax = plt.subplots(figsize=(9,6)) df.rolling(window=15,center=False).mean().plot(ax=ax) ax.set_ylabel("Rolling Mean of Total System Reward (w=15)") ax.set_xlabel("Timesteps (Anti-Entropy Sessions)") ax.grid(True, ls='--') ax.set_xlim(12, 346) plt.savefig(os.path.join(FIGS, "rewards.pdf")) ```
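The `counts` and `values` arrays read from each replica's `bandit` field suggest an incremental sample-average update of the kind used by ε-greedy bandits, but the replica implementation itself is not part of this notebook. The sketch below is therefore only an illustration of that update rule with made-up rewards, useful for interpreting what the reward curves above are tracking.

```python
import random

class EpsilonGreedySketch:
    """Illustrative epsilon-greedy peer selector (not the replica code)."""

    def __init__(self, peers, epsilon=0.2):
        self.peers = list(peers)
        self.epsilon = epsilon
        self.counts = {p: 0 for p in self.peers}    # analogous to bandit["counts"]
        self.values = {p: 0.0 for p in self.peers}  # analogous to bandit["values"]

    def select(self):
        if random.random() < self.epsilon:
            return random.choice(self.peers)                  # explore
        return max(self.peers, key=lambda p: self.values[p])  # exploit

    def update(self, peer, reward):
        # incremental sample average: Q_{n+1} = Q_n + (r - Q_n) / n
        self.counts[peer] += 1
        self.values[peer] += (reward - self.values[peer]) / self.counts[peer]

# toy anti-entropy loop where peer "b" yields useful syncs more often
random.seed(42)
bandit = EpsilonGreedySketch(["a", "b", "c"], epsilon=0.2)
for _ in range(200):
    peer = bandit.select()
    p_useful = 0.8 if peer == "b" else 0.3
    bandit.update(peer, 1.0 if random.random() < p_useful else 0.0)

print(bandit.counts)
print({p: round(v, 2) for p, v in bandit.values.items()})
```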
github_jupyter
``` # reload packages %load_ext autoreload %autoreload 2 ``` ### Choose GPU (this may not be needed on your computer) ``` %env CUDA_DEVICE_ORDER=PCI_BUS_ID %env CUDA_VISIBLE_DEVICES=1 import tensorflow as tf gpu_devices = tf.config.experimental.list_physical_devices('GPU') if len(gpu_devices)>0: tf.config.experimental.set_memory_growth(gpu_devices[0], True) print(gpu_devices) tf.keras.backend.clear_session() ``` ### load packages ``` from tfumap.umap import tfUMAP import tensorflow as tf import numpy as np import matplotlib.pyplot as plt from tqdm.autonotebook import tqdm import umap import pandas as pd ``` ### Load dataset ``` dataset = 'fmnist' from tensorflow.keras.datasets import fashion_mnist # load dataset (train_images, Y_train), (test_images, Y_test) = fashion_mnist.load_data() X_train = (train_images/255.).astype('float32') X_test = (test_images/255.).astype('float32') X_train = X_train.reshape((len(X_train), np.product(np.shape(X_train)[1:]))) X_test = X_test.reshape((len(X_test), np.product(np.shape(X_test)[1:]))) # subset a validation set n_valid = 10000 X_valid = X_train[-n_valid:] Y_valid = Y_train[-n_valid:] X_train = X_train[:-n_valid] Y_train = Y_train[:-n_valid] # flatten X X_train_flat = X_train.reshape((len(X_train), np.product(np.shape(X_train)[1:]))) X_test_flat = X_test.reshape((len(X_test), np.product(np.shape(X_test)[1:]))) X_valid_flat= X_valid.reshape((len(X_valid), np.product(np.shape(X_valid)[1:]))) print(len(X_train), len(X_valid), len(X_test)) ``` ### define networks ``` dims = (28,28,1) n_components = 64 encoder = tf.keras.Sequential([ tf.keras.layers.InputLayer(input_shape=dims), tf.keras.layers.Conv2D( filters=64, kernel_size=3, strides=(2, 2), activation="relu" ), tf.keras.layers.Conv2D( filters=128, kernel_size=3, strides=(2, 2), activation="relu" ), tf.keras.layers.Flatten(), tf.keras.layers.Dense(units=512, activation="relu"), tf.keras.layers.Dense(units=n_components), ]) decoder = tf.keras.Sequential([ tf.keras.layers.InputLayer(input_shape=(n_components)), tf.keras.layers.Dense(units=512, activation="relu"), tf.keras.layers.Dense(units=7 * 7 * 256, activation="relu"), tf.keras.layers.Reshape(target_shape=(7, 7, 256)), tf.keras.layers.Conv2DTranspose( filters=128, kernel_size=3, strides=(2, 2), padding="SAME", activation="relu" ), tf.keras.layers.Conv2DTranspose( filters=64, kernel_size=3, strides=(2, 2), padding="SAME", activation="relu" ), tf.keras.layers.Conv2DTranspose( filters=1, kernel_size=3, strides=(1, 1), padding="SAME", activation="sigmoid" ) ]) input_img = tf.keras.Input(dims) output_img = decoder(encoder(input_img)) autoencoder = tf.keras.Model(input_img, output_img) autoencoder.compile(optimizer='adam', loss='binary_crossentropy') X_train = X_train.reshape([len(X_train)] + list(dims)) history = autoencoder.fit(X_train, X_train, epochs=50, batch_size=256, shuffle=True, #validation_data=(X_valid, X_valid) ) z = encoder.predict(X_train) ``` ### Plot model output ``` fig, ax = plt.subplots( figsize=(8, 8)) sc = ax.scatter( z[:, 0], z[:, 1], c=Y_train.astype(int)[:len(z)], cmap="tab10", s=0.1, alpha=0.5, rasterized=True, ) ax.axis('equal') ax.set_title("UMAP in Tensorflow embedding", fontsize=20) plt.colorbar(sc, ax=ax); ``` ### View loss ``` from tfumap.umap import retrieve_tensors import seaborn as sns ``` ### Save output ``` from tfumap.paths import ensure_dir, MODEL_DIR output_dir = MODEL_DIR/'projections'/ dataset / '64' /'ae_only' ensure_dir(output_dir) encoder.save(output_dir / 'encoder') decoder.save(output_dir / 'encoder') 
#loss_df.to_pickle(output_dir / 'loss_df.pickle') np.save(output_dir / 'z.npy', z) ``` ### compute metrics ``` X_test.shape z_test = encoder.predict(X_test.reshape((len(X_test), 28,28,1))) ``` #### silhouette ``` from tfumap.silhouette import silhouette_score_block ss, sil_samp = silhouette_score_block(z, Y_train, n_jobs = -1) ss ss_test, sil_samp_test = silhouette_score_block(z_test, Y_test, n_jobs = -1) ss_test fig, axs = plt.subplots(ncols = 2, figsize=(10, 5)) axs[0].scatter(z[:, 0], z[:, 1], s=0.1, alpha=0.5, c=sil_samp, cmap=plt.cm.viridis) axs[1].scatter(z_test[:, 0], z_test[:, 1], s=1, alpha=0.5, c=sil_samp_test, cmap=plt.cm.viridis) ``` #### KNN ``` from sklearn.neighbors import KNeighborsClassifier neigh5 = KNeighborsClassifier(n_neighbors=5) neigh5.fit(z, Y_train) score_5nn = neigh5.score(z_test, Y_test) score_5nn neigh1 = KNeighborsClassifier(n_neighbors=1) neigh1.fit(z, Y_train) score_1nn = neigh1.score(z_test, Y_test) score_1nn ``` #### Trustworthiness ``` from sklearn.manifold import trustworthiness tw = trustworthiness(X_train_flat[:10000], z[:10000]) tw_test = trustworthiness(X_test_flat[:10000], z_test[:10000]) tw, tw_test ``` ### Save output metrics ``` from tfumap.paths import ensure_dir, MODEL_DIR, DATA_DIR ``` #### train ``` metrics_df = pd.DataFrame( columns=[ "dataset", "class_", "dim", "trustworthiness", "silhouette_score", "silhouette_samples", ] ) metrics_df.loc[len(metrics_df)] = [dataset, 'ae_only', n_components, tw, ss, sil_samp] metrics_df save_loc = DATA_DIR / 'projection_metrics' / 'ae_only' / 'train' / str(n_components) / (dataset + '.pickle') ensure_dir(save_loc) metrics_df.to_pickle(save_loc) ``` #### test ``` metrics_df_test = pd.DataFrame( columns=[ "dataset", "class_", "dim", "trustworthiness", "silhouette_score", "silhouette_samples", ] ) metrics_df_test.loc[len(metrics_df)] = [dataset, 'ae_only', n_components, tw_test, ss_test, sil_samp_test] metrics_df_test save_loc = DATA_DIR / 'projection_metrics' / 'ae' / 'test' / str(n_components) / (dataset + '.pickle') ensure_dir(save_loc) metrics_df.to_pickle(save_loc) ``` #### knn ``` nn_acc_df = pd.DataFrame(columns = ["method_","dimensions","dataset","1NN_acc","5NN_acc"]) nn_acc_df.loc[len(nn_acc_df)] = ['ae_only', n_components, dataset, score_1nn, score_5nn] nn_acc_df save_loc = DATA_DIR / 'knn_classifier' / 'ae_only' / 'train' / str(n_components) / (dataset + '.pickle') ensure_dir(save_loc) nn_acc_df.to_pickle(save_loc) ``` ### Reconstruction ``` from sklearn.metrics import mean_squared_error, mean_absolute_error, median_absolute_error, r2_score X_recon = decoder.predict(encoder.predict(X_test.reshape((len(X_test), 28, 28, 1)))) X_real = X_test.reshape((len(X_test), 28, 28, 1)) x_real = X_test.reshape((len(X_test), np.product(np.shape(X_test)[1:]))) x_recon = X_recon.reshape((len(X_test), np.product(np.shape(X_test)[1:]))) reconstruction_acc_df = pd.DataFrame( columns=["method_", "dimensions", "dataset", "MSE", "MAE", "MedAE", "R2"] ) MSE = mean_squared_error( x_real, x_recon ) MAE = mean_absolute_error( x_real, x_recon ) MedAE = median_absolute_error( x_real, x_recon ) R2 = r2_score( x_real, x_recon ) reconstruction_acc_df.loc[len(reconstruction_acc_df)] = ['ae_only', n_components, dataset, MSE, MAE, MedAE, R2] reconstruction_acc_df save_loc = DATA_DIR / 'reconstruction_acc' / 'ae_only' / str(n_components) / (dataset + '.pickle') ensure_dir(save_loc) reconstruction_acc_df.to_pickle(save_loc) ``` ### Compute clustering quality ``` from sklearn.cluster import KMeans from sklearn.metrics import 
homogeneity_completeness_v_measure def get_cluster_metrics(row, n_init=5): # load cluster information save_loc = DATA_DIR / 'clustering_metric_df'/ ('_'.join([row.class_, str(row.dim), row.dataset]) + '.pickle') print(save_loc) if save_loc.exists() and save_loc.is_file(): cluster_df = pd.read_pickle(save_loc) return cluster_df # make cluster metric dataframe cluster_df = pd.DataFrame( columns=[ "dataset", "class_", "dim", "silhouette", "homogeneity", "completeness", "v_measure", "init_", "n_clusters", "model", ] ) y = row.train_label z = row.train_z n_labels = len(np.unique(y)) for n_clusters in tqdm(np.arange(n_labels - int(n_labels / 2), n_labels + int(n_labels / 2)), leave=False, desc = 'n_clusters'): for init_ in tqdm(range(n_init), leave=False, desc='init'): kmeans = KMeans(n_clusters=n_clusters, random_state=init_).fit(z) clustered_y = kmeans.labels_ homogeneity, completeness, v_measure = homogeneity_completeness_v_measure( y, clustered_y ) ss, _ = silhouette_score_block(z, clustered_y) cluster_df.loc[len(cluster_df)] = [ row.dataset, row.class_, row.dim, ss, homogeneity, completeness, v_measure, init_, n_clusters, kmeans, ] # save cluster df in case this fails somewhere ensure_dir(save_loc) cluster_df.to_pickle(save_loc) return cluster_df projection_df = pd.DataFrame(columns = ['dataset', 'class_', 'train_z', 'train_label', 'dim']) projection_df.loc[len(projection_df)] = [dataset, 'ae_only', z, Y_train, n_components] projection_df get_cluster_metrics(projection_df.iloc[0], n_init=5) ```
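`silhouette_score_block` is a project-specific helper from `tfumap.silhouette`, so it can be useful to compare it against scikit-learn's `silhouette_score` as a sanity check, on a subsample so the pairwise-distance computation over the 50,000-point training set stays cheap. A small sketch, assuming `z` and `Y_train` are still in memory:

```python
import numpy as np
from sklearn.metrics import silhouette_score

rng = np.random.default_rng(0)
idx = rng.choice(len(z), size=5000, replace=False)
print(silhouette_score(z[idx], Y_train[idx]))  # should land close to ss above
```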
github_jupyter
# **Space X Falcon 9 First Stage Landing Prediction** ## Web scraping Falcon 9 and Falcon Heavy Launches Records from Wikipedia We will be performing web scraping to collect Falcon 9 historical launch records from a Wikipedia page titled `List of Falcon 9 and Falcon Heavy launches` [https://en.wikipedia.org/wiki/List_of_Falcon\_9\_and_Falcon_Heavy_launches](https://en.wikipedia.org/wiki/List_of_Falcon\_9\_and_Falcon_Heavy_launches?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDS0321ENSkillsNetwork26802033-2021-01-01) More specifically, the launch records are stored in a HTML table. First let's import required packages for this lab ``` !pip3 install beautifulsoup4 !pip3 install requests import sys import requests from bs4 import BeautifulSoup import re import unicodedata import pandas as pd ``` Some helper functions for processing web scraped HTML table ``` def date_time(table_cells): """ This function returns the data and time from the HTML table cell Input: the element of a table data cell extracts extra row """ return [data_time.strip() for data_time in list(table_cells.strings)][0:2] def booster_version(table_cells): """ This function returns the booster version from the HTML table cell Input: the element of a table data cell extracts extra row """ out=''.join([booster_version for i,booster_version in enumerate( table_cells.strings) if i%2==0][0:-1]) return out def landing_status(table_cells): """ This function returns the landing status from the HTML table cell Input: the element of a table data cell extracts extra row """ out=[i for i in table_cells.strings][0] return out def get_mass(table_cells): mass=unicodedata.normalize("NFKD", table_cells.text).strip() if mass: mass.find("kg") new_mass=mass[0:mass.find("kg")+2] else: new_mass=0 return new_mass def extract_column_from_header(row): """ This function returns the landing status from the HTML table cell Input: the element of a table data cell extracts extra row """ if (row.br): row.br.extract() if row.a: row.a.extract() if row.sup: row.sup.extract() colunm_name = ' '.join(row.contents) # Filter the digit and empty names if not(colunm_name.strip().isdigit()): colunm_name = colunm_name.strip() return colunm_name ``` To keep the lab tasks consistent, we will be asked to scrape the data from a snapshot of the `List of Falcon 9 and Falcon Heavy launches` Wikipage updated on `9th June 2021` ``` static_url = "https://en.wikipedia.org/w/index.php?title=List_of_Falcon_9_and_Falcon_Heavy_launches&oldid=1027686922" ``` Next, request the HTML page from the above URL and get a `response` object ### TASK 1: Request the Falcon9 Launch Wiki page from its URL First, let's perform an HTTP GET method to request the Falcon9 Launch HTML page, as an HTTP response. ``` # use requests.get() method with the provided static_url # assign the response to a object page=requests.get(static_url) ``` Create a `BeautifulSoup` object from the HTML `response` ``` # Use BeautifulSoup() to create a BeautifulSoup object from a response text content soup = BeautifulSoup(page.text, 'html.parser') ``` Print the page title to verify if the `BeautifulSoup` object was created properly ``` # Use soup.title attribute soup.title ``` ### TASK 2: Extract all column/variable names from the HTML table header Next, we want to collect all relevant column names from the HTML table header Let's try to find all tables on the wiki page first. 
``` # Use the find_all function in the BeautifulSoup object, with element type `table` # Assign the result to a list called `html_tables` html_tables=soup.find_all('table') ``` Starting from the third table is our target table contains the actual launch records. ``` # Let's print the third table and check its content first_launch_table = html_tables[2] print(first_launch_table) ``` We can see the columns names embedded in the table header elements `<th>` as follows: ``` <tr> <th scope="col">Flight No. </th> <th scope="col">Date and<br/>time (<a href="/wiki/Coordinated_Universal_Time" title="Coordinated Universal Time">UTC</a>) </th> <th scope="col"><a href="/wiki/List_of_Falcon_9_first-stage_boosters" title="List of Falcon 9 first-stage boosters">Version,<br/>Booster</a> <sup class="reference" id="cite_ref-booster_11-0"><a href="#cite_note-booster-11">[b]</a></sup> </th> <th scope="col">Launch site </th> <th scope="col">Payload<sup class="reference" id="cite_ref-Dragon_12-0"><a href="#cite_note-Dragon-12">[c]</a></sup> </th> <th scope="col">Payload mass </th> <th scope="col">Orbit </th> <th scope="col">Customer </th> <th scope="col">Launch<br/>outcome </th> <th scope="col"><a href="/wiki/Falcon_9_first-stage_landing_tests" title="Falcon 9 first-stage landing tests">Booster<br/>landing</a> </th></tr> ``` Next, we just need to iterate through the `<th>` elements and apply the provided `extract_column_from_header()` to extract column name one by one ``` column_names = [] # Apply find_all() function with `th` element on first_launch_table # Iterate each th element and apply the provided extract_column_from_header() to get a column name # Append the Non-empty column name (`if name is not None and len(name) > 0`) into a list called column_names for i in first_launch_table.find_all('th'): if extract_column_from_header(i)!=None: if len(extract_column_from_header(i))>0: column_names.append(extract_column_from_header(i)) ``` Check the extracted column names ``` print(column_names) ``` ## TASK 3: Create a data frame by parsing the launch HTML tables We will create an empty dictionary with keys from the extracted column names in the previous task. Later, this dictionary will be converted into a Pandas dataframe ``` launch_dict= dict.fromkeys(column_names) # Remove an irrelvant column del launch_dict['Date and time ( )'] # Let's initial the launch_dict with each value to be an empty list launch_dict['Flight No.'] = [] launch_dict['Launch site'] = [] launch_dict['Payload'] = [] launch_dict['Payload mass'] = [] launch_dict['Orbit'] = [] launch_dict['Customer'] = [] launch_dict['Launch outcome'] = [] # Added some new columns launch_dict['Version Booster']=[] launch_dict['Booster landing']=[] launch_dict['Date']=[] launch_dict['Time']=[] ``` Next, we just need to fill up the `launch_dict` with launch records extracted from table rows. Usually, HTML tables in Wiki pages are likely to contain unexpected annotations and other types of noises, such as reference links `B0004.1[8]`, missing values `N/A [e]`, inconsistent formatting, etc. 
``` extracted_row = 0 #Extract each table for table_number,table in enumerate(soup.find_all('table',"wikitable plainrowheaders collapsible")): # get table row for rows in table.find_all("tr"): #check to see if first table heading is as number corresponding to launch a number if rows.th: if rows.th.string: flight_number=rows.th.string.strip() flag=flight_number.isdigit() else: flag=False #get table element row=rows.find_all('td') #if it is number save cells in a dictonary if flag: extracted_row += 1 # Flight Number value # TODO: Append the flight_number into launch_dict with key `Flight No.` launch_dict['Flight No.'].append(flight_number) print(flight_number) datatimelist=date_time(row[0]) # Date value # TODO: Append the date into launch_dict with key `Date` date = datatimelist[0].strip(',') launch_dict['Date'].append(date) print(date) # Time value # TODO: Append the time into launch_dict with key `Time` time = datatimelist[1] launch_dict['Time'].append(time) print(time) # Booster version # TODO: Append the bv into launch_dict with key `Version Booster` bv=booster_version(row[1]) if not(bv): bv=row[1].a.string launch_dict['Version Booster'].append(bv) print(bv) # Launch Site # TODO: Append the bv into launch_dict with key `Launch Site` launch_site = row[2].a.string launch_dict['Launch site'].append(launch_site) print(launch_site) # Payload # TODO: Append the payload into launch_dict with key `Payload` payload = row[3].a.string launch_dict['Payload'].append(payload) print(payload) # Payload Mass # TODO: Append the payload_mass into launch_dict with key `Payload mass` payload_mass = get_mass(row[4]) launch_dict['Payload mass'].append(payload_mass) print(payload_mass) # Orbit # TODO: Append the orbit into launch_dict with key `Orbit` orbit = row[5].a.string launch_dict['Orbit'].append(orbit) print(orbit) # Customer # TODO: Append the customer into launch_dict with key `Customer` if row[6].a!=None: customer = row[6].a.string else: customer='None' launch_dict['Customer'].append(customer) print(customer) # Launch outcome # TODO: Append the launch_outcome into launch_dict with key `Launch outcome` launch_outcome = list(row[7].strings)[0] launch_dict['Launch outcome'].append(launch_outcome) print(launch_outcome) # Booster landing # TODO: Append the launch_outcome into launch_dict with key `Booster landing` booster_landing = landing_status(row[8]) launch_dict['Booster landing'].append(booster_landing) print(booster_landing) print("******") ``` After we have filled in the parsed launch record values into `launch_dict`, we can create a dataframe from it. ``` df=pd.DataFrame(launch_dict) df.head() ```
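The scraped columns are still raw strings at this point (payload masses carry a `kg` suffix, dates and flight numbers are text). A small, illustrative cleanup sketch is shown below; the exact values depend on the wiki snapshot, and the output filename is just a placeholder.

```python
# convert flight numbers and dates; errors='coerce' keeps unparsable rows as NaN/NaT
df['Flight No.'] = pd.to_numeric(df['Flight No.'], errors='coerce')
df['Date'] = pd.to_datetime(df['Date'], errors='coerce')

# strip thousands separators and the "kg" suffix from the payload mass
df['Payload mass'] = (
    df['Payload mass']
    .astype(str)
    .str.replace(',', '', regex=False)
    .str.extract(r'([\d.]+)', expand=False)
    .astype(float)
)

df.to_csv('spacex_web_scraped.csv', index=False)  # placeholder filename
df.dtypes
```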
# Drawing Multiple Plots > Drawing multiple plots, Anscombe's quartet - toc: true - branch: master - badges: true - comments: true - author: dinonene - categories: [python] `-` (1/2) Drawing multiple plots `-` (2/2) Anscombe's quartet ### Drawing multiple plots #### (1) Overlaid plots ``` import numpy as np import matplotlib.pyplot as plt x=np.arange(-5,5,0.1) y=2*x+np.random.normal(loc=0,scale=1,size=100) plt.plot(x,y,'.b') plt.plot(x,2*x,'--r') ``` #### (2) Separate plots - subplots ``` x=[1,2,3,4] y=[1,2,4,3] _, axs = plt.subplots(2,2) axs[0,0].plot(x,y,'o:r') axs[0,1].plot(x,y,'xb') axs[1,0].plot(x,y,'xm') axs[1,1].plot(x,y,'.--k') ``` > note: fmt = `[marker][line][color]` ```python plt.subplots?? # using the variable ax for single a Axes fig, ax = plt.subplots() # using the variable axs for multiple Axes fig, axs = plt.subplots(2, 2) # using tuple unpacking for multiple Axes fig, (ax1, ax2) = plt.subplots(1, 2) fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2) fig = figure(**fig_kw) axs = fig.subplots(nrows=nrows, ncols=ncols, sharex=sharex, sharey=sharey, squeeze=squeeze, subplot_kw=subplot_kw, gridspec_kw=gridspec_kw) return fig, axs ``` - `subplots` returns the tuple $\tt{(fig,axs)}$. We only care about the trailing axs, so we assign the leading fig to `_`. ### Anscombe's quartet `-` Lesson: always visualize the data before analyzing it. ``` x = [10, 8, 13, 9, 11, 14, 6, 4, 12, 7, 5] y1 = [8.04, 6.95, 7.58, 8.81, 8.33, 9.96, 7.24, 4.26, 10.84, 4.82, 5.68] y2 = [9.14, 8.14, 8.74, 8.77, 9.26, 8.10, 6.13, 3.10, 9.13, 7.26, 4.74] y3 = [7.46, 6.77, 12.74, 7.11, 7.81, 8.84, 6.08, 5.39, 8.15, 6.42, 5.73] x4 = [8, 8, 8, 8, 8, 8, 8, 19, 8, 8, 8] y4 = [6.58, 5.76, 7.71, 8.84, 8.47, 7.04, 5.25, 12.50, 5.56, 7.91, 6.89] _, axs = plt.subplots(2,2) axs[0,0].plot(x,y1,'o', color='orange') axs[0,1].plot(x,y2,'o', color='orange') axs[1,0].plot(x,y3,'o', color='orange') axs[1,1].plot(x4,y4,'o', color='orange') ``` `-` Let's briefly review the correlation coefficient. - The correlation coefficient lies between -1 and 1 (this can be proved with the Cauchy-Schwarz inequality). - For a perfect straight line it is exactly 1 or -1. - A value close to 1 indicates a positive relationship; a value close to -1 indicates a negative relationship. `-` Question: if the data look roughly like a straight line, does that guarantee a large correlation coefficient? - Couldn't a single observation with large $x,y$ values inflate the correlation coefficient? `-` Is the correlation coefficient really trustworthy? (= Is it an adequate statistic for describing the relationship between two variables?) ``` n=len(x) xtilde = (x-np.mean(x)) / (np.std(x)*np.sqrt(n)) ## standardize x y1tilde = (y1-np.mean(y1)) / (np.std(y1)*np.sqrt(n)) ## standardize y1 sum(xtilde*y1tilde) np.corrcoef(x,y1) np.corrcoef([x,y1,y2,y3]) np.corrcoef([x4,y4]) ``` `-` The correlation coefficients of the four plots above are all the same (0.81652). `-` The correlation coefficient alone is inadequate for describing the relationship between two variables. - It is merely a statistic that measures the strength of a linear relationship, as in the first plot. - It can be computed for data that do not look linear at all, but then it carries no meaning. `-` Lesson 2: basic summary statistics can be inadequate for analyzing real data (= a statistic is meaningful only when accompanied by appropriate assumptions). > note: a statistician should (1) state the appropriate assumptions in mathematical language, (2) prove that the statistic is meaningful under those assumptions, and (3) visualize the results to persuade others.
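To drive the lesson home, one can overlay the least-squares line on each panel: for Anscombe's quartet all four fits come out essentially identical (slope ≈ 0.5, intercept ≈ 3), even though only the first panel is genuinely linear. A small sketch reusing the lists defined above:

```
# Overlay the fitted line on each panel; the four fits are nearly identical
datasets = [(x, y1), (x, y2), (x, y3), (x4, y4)]
_, axs = plt.subplots(2, 2)
for ax, (xi, yi) in zip(axs.flat, datasets):
    slope, intercept = np.polyfit(xi, yi, 1)        # least-squares line
    xs = np.linspace(min(xi), max(xi), 100)
    ax.plot(xi, yi, 'o', color='orange')
    ax.plot(xs, slope * xs + intercept, '--k')
    ax.set_title(f"r={np.corrcoef(xi, yi)[0, 1]:.3f}, slope={slope:.2f}")
```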
``` # Basic Modules for data and text processing import pandas as pd import numpy as np import string import nltk import re from nltk.corpus import stopwords from nltk.stem.snowball import SnowballStemmer from sklearn.model_selection import train_test_split # Keras Modules from keras.preprocessing.text import Tokenizer # This tokenizes the text from keras.preprocessing.sequence import pad_sequences # This equalises the input we want to give from keras.models import Sequential # We will build sequential models only from keras.layers import Dense, Flatten, LSTM, Conv1D, MaxPooling1D, Dropout, Activation # All of the layers of our model from keras.layers.embeddings import Embedding # How to build our word vectors # Plotly import plotly.offline as py import plotly.graph_objs as go py.init_notebook_mode(connected=True) ``` ## Data Processing Stuff ``` df = pd.read_csv('D:\Data\processed.csv',delimiter = "\t",error_bad_lines=False) print(df.head(1)) # Clearning the Dataframe a bit df = df.drop(['Unnamed: 0','Unnamed: 0.1', 'Subject','user','labeled','To'], axis=1) df['label'] = df['rep'] df = df.drop(['rep'],axis=1) #df = df[df.label != 'unsup'] print(df.shape) # Text Normalising Function def clean_text(text): ## Remove puncuation text = text.translate(string.punctuation) ## Convert Words to lower case and split them text = text.lower().split() ## Remove stop words (commonly used stuff eg, is and was) stops = set(stopwords.words("english")) text = [w for w in text if not w in stops and len(w) >= 3] text = " ".join(text) # Common Dictionary Corpus text = re.sub(r"[^A-Za-z0-9^,!.\/'+-=]", " ", text) text = re.sub(r"what's", "what is ", text) text = re.sub(r"\'s", " ", text) text = re.sub(r"\'ve", " have ", text) text = re.sub(r"n't", " not ", text) text = re.sub(r"i'm", "i am ", text) text = re.sub(r"\'re", " are ", text) text = re.sub(r"\'d", " would ", text) text = re.sub(r"\'ll", " will ", text) text = re.sub(r",", " ", text) text = re.sub(r"\.", " ", text) text = re.sub(r"!", " ! 
", text) text = re.sub(r"\/", " ", text) text = re.sub(r"\^", " ^ ", text) text = re.sub(r"\+", " + ", text) text = re.sub(r"\-", " - ", text) text = re.sub(r"\=", " = ", text) text = re.sub(r"'", " ", text) text = re.sub(r"(\d+)(k)", r"\g<1>000", text) text = re.sub(r":", " : ", text) text = re.sub(r" e g ", " eg ", text) text = re.sub(r" b g ", " bg ", text) text = re.sub(r" u s ", " american ", text) text = re.sub(r"\0s", "0", text) text = re.sub(r" 9 11 ", "911", text) text = re.sub(r"e - mail", "email", text) text = re.sub(r"j k", "jk", text) text = re.sub(r"\s{2,}", " ", text) ## Stemming text = text.split() stemmer = SnowballStemmer('english') stemmed_words = [stemmer.stem(word) for word in text] text = " ".join(stemmed_words) return text # Clear some text print(df.shape) # Drop empty rows (NaN) df = df.dropna() # Using the text cleaning function df['content'] = df['content'].map(lambda x: clean_text(x)) print(df.shape) ## Creating Sequences vocabulary_size = 25000 tokenizer = Tokenizer(num_words = vocabulary_size) tokenizer.fit_on_texts(df['content']) sequences = tokenizer.texts_to_sequences(df['content']) data = pad_sequences(sequences,maxlen=70) # Now splitting training and testing data print("Reached here") X_train, X_test, y_train, y_test = train_test_split(data, df['label'], test_size=.25) print("X_train {} \n y_train {}".format(X_train.shape,y_train.shape)) #import csv #csv.field_size_limit() #csv.field_size_limit(10000000) ``` ## Neural Architecture begins here ``` # Defining the Model model = Sequential() model.add(Embedding(vocabulary_size,140,input_length=70)) model.add(LSTM(140, dropout =0.2, recurrent_dropout=0.3)) model.add(Activation('softmax')) model.add(Dense(1, activation='sigmoid')) print(model.summary()) # Compile the model model.compile(loss='binary_crossentropy', optimizer='adam',metrics=['accuracy']) # Fit and Train the Model ## HyperParameters batch_size = 7000 num_epochs = 10 # Making validation set X_valid, y_valid = X_train[:batch_size], y_train[:batch_size] X_train2, y_train2 = X_train[batch_size:], y_train[batch_size:] model.fit(X_train2,y_train2, validation_data=(X_valid,y_valid), batch_size=batch_size,epochs=num_epochs) # Finding the accuracy scores = model.evaluate(X_test, y_test, verbose=0) print("Test Accuracy: ",scores[1]) ```
# Importing Important File regarding Analysis ``` import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns %matplotlib inline import datetime as dt import time import ipywidgets #from ipython.display import display data1 = pd.read_csv("covid_19_india (1).csv",dayfirst=True) data1 ``` # Data cleaning of Covid 19 india ``` missing_values = ["NaN","na","Na","-"] data1 = pd.read_csv("covid_19_india.csv",na_values= missing_values) data1.isnull().sum() #Now we have seen that "-" means empty values. Now we gonna fill this empty value with Zero because in terms of corona cases # it does not effect on our data data1 = data1.fillna(0) data1.loc[445:446] data1.rename(columns = {"State/UnionTerritory":"state","ConfirmedIndianNational":"confirmed_indian","ConfirmedForeignNational":"confirmed_foreign"},inplace = True) #data1.isnull().sum() #chaging date as date type data1.head() data1["Date"] = pd.to_datetime(data1["Date"]) data1["confirmed_indian"] = pd.to_numeric(data1["confirmed_indian"]) data1.dtypes # Now we are going to analyise state wise data of covid data1 = data1[["Date","state","Cured","Deaths","Confirmed"]] data1.columns = ["date","state","cured","death","confirmed"] data1.head() data1.tail() #here we are inserting rows of entire india cases temp_data = data1 temp_new = data1 temp_local = data1.date.unique() n = len(temp_local) for i in range(n): temp_data = data1[data1.date == temp_local[i]] #print(temp_local[50],temp_data) x = temp_data.cured.sum() y = temp_data.death.sum() z = temp_data.confirmed.sum() #print(x,y,z) #temp_df = {"state":"india",} temp_new.loc[i] = [temp_local[i],"india",x,y,z] # As we can clearly see that the latest dates of data we have is of 8 may 2021 so we are going to till that day f = len(data1.index) #print(f) z = data1.date.values[f-1] #print(z) latest_date = data1[data1.date == z ] latest_date.head() latest_date = latest_date.sort_values(by='confirmed',ascending=False) latest_date.head(10) #Here above we have top most confirmed cases according to state #Now we are going to see graphical representation of confirmed cases x = 0 y = 0 state = latest_date.state ``` # State v/s Confirmed cases in India by Graph ``` for i in range(len(state)): new_date = latest_date[x:y+6] x+=6 y+=6 sns.set(rc = {'figure.figsize':(20,10)}) sns.barplot(x = "state", y = "confirmed", data = new_date, hue = "state") plt.show() #time.sleep(1) #Start date of cases is from 30-01-2020 and end date is 08-05-2021 of our data ``` # Visualising the statical data of covid 19 India # Statewise Visualisation of (Confirmed vs Cured vs Death data of covid) ``` #Here we are making Daily cases column in our dataset and returning new dataframe named as result_data list1 = [] for i in range(len(state)): new_data = data1[data1.state == state.values[i]] x = 0 list = [] new_data = new_data.reset_index() new_data4 = new_data.confirmed.values[0] for j in range(len(new_data.confirmed)): #print("hello",j) new_data1 = new_data.confirmed.values[j] if new_data1 == new_data4: new_data4 -= 10 #print("fuck",new_data1) list.append(new_data1) if new_data.index.stop != j+1: new_data3 = new_data.confirmed.values[j+1] - new_data1 list.append(new_data3) #print(list) #print("here",len(list)) new_data_state = new_data new_data_state.insert(6,"daily_cases",list) list1.append(new_data_state) result_data = pd.concat(list1) result_data = result_data.reset_index() #Here we are using the ipywidgets of library to make our data more handy to use for users , #so they can select the state and look for 
the data temp_state = latest_date.state new_temp_state = ipywidgets.Dropdown( options=temp_state, #options = temp_value value=temp_state.values[0], description='Select state:', disabled=False, ) #print(new_temp_state.value) #print(data_select2.value) #display(new_temp_state) def select (select): temp2_state = result_data[result_data.state == new_temp_state.value] temp_value = ["confirmed","death","cured","daily_cases"] data_select2 = ipywidgets.Dropdown( options= temp_value, value=temp_value[0], description='Select option:', disabled=False, ) #print("first",new_temp_state) #print("second",temp2_state) def data_select(data_select): sns.set(rc = {'figure.figsize':(15,10)}) if data_select2.value == "confirmed": plot1 = sns.lineplot(x = "date",y = "confirmed", data = temp2_state,color = "g") elif data_select2.value == "cured": plot2 = sns.lineplot(x = "date",y = "cured", data = temp2_state, color = "b") elif data_select2.value =="death": plot3 = sns.lineplot(x = "date",y = "death", data = temp2_state, color = "r") elif data_select2.value =="daily_cases": plot3 = sns.lineplot(x = "date",y = "daily_cases", data = temp2_state, color = "y") #plt.title(new_temp_state) data_select(data_select2) ipywidgets.interact(data_select, data_select = data_select2) # Here Green Line indicating Confirmed Cases of Covid in that state # Here Blue Line indicating Cured Cases of Covid in that state # Here red indicating death Cases of Covid in that state ``` # please select the state and different cases option to visualise your data ``` ipywidgets.interact(select, select = new_temp_state) ``` # Here we are making our prediction based on each state ``` #using ipywidgets library to create dropdown option for user to easily select the query temp_value = ["confirmed","death","cured","daily_cases"] data_select3 = ipywidgets.Dropdown( options= temp_value, value=temp_value[0], description='Select option:', disabled=False, ) #making our ultimate state predictor function to predict our data statewise def ultimate_prediction(predict): #creating datepicker for user select_date = ipywidgets.DatePicker( description='Pick a Date', disabled=False ) #creating predict_data function for returning the future cases depend upon the user selection def predict_data(prediction): from sklearn.model_selection import train_test_split #print("first",new_temp_state) temp_state_value = result_data[result_data.state == new_temp_state.value] temp_state_value['date'] = temp_state_value['date'].map(dt.datetime.toordinal) #print("second",temp_state_value['date']) #temp_state_value.head() x = temp_state_value['date'] temp = data_select3.value #print(temp) y = temp_state_value[temp] x_train,x_test,y_train,y_test = train_test_split(x,y,test_size=0.3) from sklearn.ensemble import RandomForestRegressor from sklearn.linear_model import LinearRegression rf = RandomForestRegressor() lr = LinearRegression() rf.fit(np.array(x_train).reshape(-1,1),np.array(y_train).reshape(-1,1)) lr.fit(np.array(x_train).reshape(-1,1),np.array(y_train).reshape(-1,1)) choose_date = select_date.value h = None #print(choose_date) choose_date2 = dt.date.today() choose_date2 = choose_date2.toordinal() if choose_date == h: result1 = rf.predict([[choose_date2]]) result2 = lr.predict([[choose_date2]]) if choose_date != h: choose_date = choose_date.toordinal() #print(choose_date) result1 = rf.predict([[choose_date]]) result2 = lr.predict([[choose_date]]) result1 = result1.astype(int) result2 = result2.astype(int) print('Output from Random Forest Regressor is :',result1) print('Output 
from Linear Regression Model is :',result2) return None ipywidgets.interact(predict_data, prediction = select_date) new_temp_state ``` # *Please select an option to avoid errors # *Please note the following: ### Confirmed cases is the total number of confirmed cases up to the selected day. ### Death cases is the total number of death cases up to the selected day. ### Cured cases is the total number of cured cases up to the selected day. ### Daily cases are only the cases of the day you select. ``` ipywidgets.interact(ultimate_prediction, predict = data_select3) ```
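Since the widget above reports point predictions from two models without any accuracy check, a quick hold-out evaluation can show which regressor tracks a given curve better. The sketch below assumes the `result_data` frame built earlier and uses the aggregate 'india' rows; the choice of state and of mean absolute error as the metric are ours, not part of the original notebook.

```
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error

state_df = result_data[result_data.state == "india"].copy()
state_df["date_ordinal"] = state_df["date"].map(dt.datetime.toordinal)  # same date encoding as the predictor above

X = state_df[["date_ordinal"]].values
y = state_df["confirmed"].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)

for name, model in [("Random Forest", RandomForestRegressor(random_state=0)),
                    ("Linear Regression", LinearRegression())]:
    model.fit(X_train, y_train)
    mae = mean_absolute_error(y_test, model.predict(X_test))
    print(f"{name}: hold-out MAE = {mae:,.0f} cases")
```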
``` import pandas as pd import numpy as np import sys from sklearn.preprocessing import LabelEncoder,OneHotEncoder from sklearn.feature_selection import RFE from sklearn.tree import DecisionTreeClassifier from sklearn import preprocessing col_names = ["duration","protocol_type","service","flag","src_bytes", "dst_bytes","land","wrong_fragment","urgent","hot","num_failed_logins", "logged_in","num_compromised","root_shell","su_attempted","num_root", "num_file_creations","num_shells","num_access_files","num_outbound_cmds", "is_host_login","is_guest_login","count","srv_count","serror_rate", "srv_serror_rate","rerror_rate","srv_rerror_rate","same_srv_rate", "diff_srv_rate","srv_diff_host_rate","dst_host_count","dst_host_srv_count", "dst_host_same_srv_rate","dst_host_diff_srv_rate","dst_host_same_src_port_rate", "dst_host_srv_diff_host_rate","dst_host_serror_rate","dst_host_srv_serror_rate", "dst_host_rerror_rate","dst_host_srv_rerror_rate","label"] df = pd.read_csv("KDDTrain+_2.csv", header=None, names = col_names) df_test = pd.read_csv("KDDTest+_2.csv", header=None, names = col_names) categorical_columns=['protocol_type', 'service', 'flag'] # insert code to get a list of categorical columns into a variable, categorical_columns categorical_columns=['protocol_type', 'service', 'flag'] # Get the categorical values into a 2D numpy array df_categorical_values = df[categorical_columns] testdf_categorical_values = df_test[categorical_columns] df_categorical_values.head() # protocol type unique_protocol=sorted(df.protocol_type.unique()) string1 = 'Protocol_type_' unique_protocol2=[string1 + x for x in unique_protocol] # service unique_service=sorted(df.service.unique()) string2 = 'service_' unique_service2=[string2 + x for x in unique_service] # flag unique_flag=sorted(df.flag.unique()) string3 = 'flag_' unique_flag2=[string3 + x for x in unique_flag] # put together dumcols=unique_protocol2 + unique_service2 + unique_flag2 print(dumcols) #do same for test set unique_service_test=sorted(df_test.service.unique()) unique_service2_test=[string2 + x for x in unique_service_test] testdumcols=unique_protocol2 + unique_service2_test + unique_flag2 df_categorical_values_enc=df_categorical_values.apply(LabelEncoder().fit_transform) print(df_categorical_values_enc.head()) # test set testdf_categorical_values_enc=testdf_categorical_values.apply(LabelEncoder().fit_transform) enc = OneHotEncoder() df_categorical_values_encenc = enc.fit_transform(df_categorical_values_enc) df_cat_data = pd.DataFrame(df_categorical_values_encenc.toarray(),columns=dumcols) # test set testdf_categorical_values_encenc = enc.fit_transform(testdf_categorical_values_enc) testdf_cat_data = pd.DataFrame(testdf_categorical_values_encenc.toarray(),columns=testdumcols) df_cat_data.head() trainservice=df['service'].tolist() testservice= df_test['service'].tolist() difference=list(set(trainservice) - set(testservice)) string = 'service_' difference=[string + x for x in difference] for col in difference: testdf_cat_data[col] = 0 testdf_cat_data.shape newdf=df.join(df_cat_data) newdf.drop('flag', axis=1, inplace=True) newdf.drop('protocol_type', axis=1, inplace=True) newdf.drop('service', axis=1, inplace=True) # test data newdf_test=df_test.join(testdf_cat_data) newdf_test.drop('flag', axis=1, inplace=True) newdf_test.drop('protocol_type', axis=1, inplace=True) newdf_test.drop('service', axis=1, inplace=True) print(newdf.shape) print(newdf_test.shape) # take label column labeldf=newdf['label'] labeldf_test=newdf_test['label'] # change the label 
column newlabeldf=labeldf.replace({ 'normal' : 0, 'neptune' : 1 ,'back': 1, 'land': 1, 'pod': 1, 'smurf': 1, 'teardrop': 1,'mailbomb': 1, 'apache2': 1, 'processtable': 1, 'udpstorm': 1, 'worm': 1, 'ipsweep' : 2,'nmap' : 2,'portsweep' : 2,'satan' : 2,'mscan' : 2,'saint' : 2 ,'ftp_write': 3,'guess_passwd': 3,'imap': 3,'multihop': 3,'phf': 3,'spy': 3,'warezclient': 3,'warezmaster': 3,'sendmail': 3,'named': 3,'snmpgetattack': 3,'snmpguess': 3,'xlock': 3,'xsnoop': 3,'httptunnel': 3, 'buffer_overflow': 4,'loadmodule': 4,'perl': 4,'rootkit': 4,'ps': 4,'sqlattack': 4,'xterm': 4}) newlabeldf_test=labeldf_test.replace({ 'normal' : 0, 'neptune' : 1 ,'back': 1, 'land': 1, 'pod': 1, 'smurf': 1, 'teardrop': 1,'mailbomb': 1, 'apache2': 1, 'processtable': 1, 'udpstorm': 1, 'worm': 1, 'ipsweep' : 2,'nmap' : 2,'portsweep' : 2,'satan' : 2,'mscan' : 2,'saint' : 2 ,'ftp_write': 3,'guess_passwd': 3,'imap': 3,'multihop': 3,'phf': 3,'spy': 3,'warezclient': 3,'warezmaster': 3,'sendmail': 3,'named': 3,'snmpgetattack': 3,'snmpguess': 3,'xlock': 3,'xsnoop': 3,'httptunnel': 3, 'buffer_overflow': 4,'loadmodule': 4,'perl': 4,'rootkit': 4,'ps': 4,'sqlattack': 4,'xterm': 4}) # put the new label column back newdf['label'] = newlabeldf newdf_test['label'] = newlabeldf_test print(newdf['label'].head()) to_drop_DoS = [2,3,4] DoS_df=newdf[~newdf['label'].isin(to_drop_DoS)]; #test DoS_df_test=newdf_test[~newdf_test['label'].isin(to_drop_DoS)]; print('Train:') print('Dimensions of DoS:' ,DoS_df.shape) print('Test:') print('Dimensions of DoS:' ,DoS_df_test.shape) X_DoS = DoS_df.drop('label',1) Y_DoS = DoS_df.label Y_DoS=Y_DoS.astype('int') X_DoS_test = DoS_df_test.drop('label',1) colNames=list(X_DoS) colNames_test=list(X_DoS_test) # scaler1 = preprocessing.StandardScaler().fit(X_DoS) # X_DoS=scaler1.transform(X_DoS) # scaler5 = preprocessing.StandardScaler().fit(X_DoS_test) # X_DoS_test=scaler5.transform(X_DoS_test) from sklearn.feature_selection import SelectPercentile, f_classif np.seterr(divide='ignore', invalid='ignore'); selector=SelectPercentile(f_classif, percentile=10) X_newDoS = selector.fit_transform(X_DoS,Y_DoS) X_newDoS.shape true=selector.get_support() newcolindex_DoS=[i for i, x in enumerate(true) if x] print(newcolindex_DoS) newcolname_DoS=list( colNames[i] for i in newcolindex_DoS ) newcolname_DoS # newcolname_DoS_test=list( colNames_test[i] for i in newcolindex_DoS ) # newcolname_DoS_test from sklearn.feature_selection import RFE from sklearn.tree import DecisionTreeClassifier # Create a decision tree classifier. By convention, clf means 'classifier' clf = DecisionTreeClassifier(random_state=0) #rank all features, i.e continue the elimination until the last one # rfe = RFE(clf, n_features_to_select=1) clf.fit(X_newDoS, Y_DoS) X_DoS_test=(X_DoS_test[['logged_in', 'count', 'serror_rate', 'srv_serror_rate', 'same_srv_rate', 'dst_host_count', 'dst_host_srv_count', 'dst_host_same_srv_rate', 'dst_host_serror_rate', 'dst_host_srv_serror_rate', 'service_http', 'flag_S0', 'flag_SF']]) clf.predict(X_D) ```
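The final `clf.predict(X_D)` call refers to a name that is not defined in the preceding cells; presumably the intended argument is the column-reduced `X_DoS_test`. Below is a hedged evaluation sketch under that assumption. It also assumes the hand-listed columns of `X_DoS_test` match the 13 features chosen by `SelectPercentile` (otherwise `predict` will fail on the shape mismatch), with ground-truth labels taken from `DoS_df_test`.

```
from sklearn.metrics import accuracy_score, confusion_matrix

Y_DoS_test = DoS_df_test['label'].astype('int')   # 0 = normal, 1 = DoS

Y_pred = clf.predict(X_DoS_test)                  # clf was fit on the 13 selected features
print("Accuracy:", accuracy_score(Y_DoS_test, Y_pred))
print("Confusion matrix:\n", confusion_matrix(Y_DoS_test, Y_pred))
```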
# FINAL PROJECT In the final project, you will create a closed loop system for an SBML model. Start by selecting a model from the [BioModels Curated branch](https://www.ebi.ac.uk/biomodels/search?query=*%3A*+AND+curationstatus%3A%22Manually+curated%22&domain=biomodels). (You don't have to restrict yourself to those models, but you'll find that the curated BioModels are fairly easy to reproduce.) You'll find a paper associated with each model. 1. **Specify the requirements for the control system.** (10 pt) After reading the paper for the model, propose a chemical species for the control output and an operating point for it. You should also indicate the desired operating characteristics such as settling time and overshoot. Justify this in terms of the potential value of controlling the chemical species and why the particular operating point makes sense. 1. **Determine the control input you will use**. (10 pt) This will require evaluating the DC gain of candidate control inputs on chemical species that are the control outputs. 1. **Design the control system**. (15 pt) You will specify a controller and possibly a filter. You will select the parameters of the elements based on the poles and DC gains implied by the operating characteristics in (1). 1. **Evaluate your design.** (25 pt) You will construct a simulation testbed in which the SBML model is regulated by your control architecture. You should evaluate the effects of disturbances and noise. 1. **Discuss trade-offs.** (10 pt) Discuss trade-offs in the selection of parameter values in your design. This should consider the impact of parameter values on the closed loop transfer functions. You will receive 5 extra points if you use LaTex for your mathematical analyses. **Students will do a 15-minute presentation of their project during the last week of class.** The presentation is ungraded. The purpose is to provide early feedback on the project. # Preliminaries ``` !pip install -q controlSBML import control import pandas as pd import matplotlib.pyplot as plt import numpy as np import tellurium as te ``` # Helpful Hints ## LaTex Basics 1. All LaTex appears between dollar signs (``$``) 1. Subscripts: $x_2$ 1. Superscript: $x^2$ 1. Summation: $\sum_{n=0}^{\infty} e^{nt}$ 1. Derivatives: $\dot{x}(t)$ 1. Bold: ${\bf A}$ 1. Fractions: $\frac{a}{b}$ ## Closed Loop Transfer Functions <img src="https://github.com/joseph-hellerstein/advanced-controls-lectures/blob/main/Lecture_13_14-Closed-Loop-Systems/Closed-Loop-System.png?raw=true" alt="Markdown Monster icon" width=600pt style="float: left; margin-right: 10px;" /> **Transfer Functions** \begin{eqnarray} H_{RY}(s) & = & \frac{Y(s)}{R(s)} & = & \frac{C(s) G(s)}{1 + C(s) G(s) F(s)} \\ H_{RE}(s) & = & \frac{E(s)}{R(s)} & = & \frac{1}{1 + C(s) G(s) F(s)} \\ H_{NY}(s) & = & \frac{Y(s)}{N(s)} & = & -\frac{ F(s)}{1 + C(s) G(s) F(s)} \\ H_{DY}(s) & = & \frac{Y(s)}{D(s)} & = & \frac{ C(s)}{1 + C(s) G(s) F(s)} \\ \end{eqnarray} A small numerical sketch of these transfer functions for a toy loop appears after the section outline below. # 1. Specify Requirements # 2. Determine the Control Input # 3. Design the Control System # 4. Evaluate the Design # 5. Discuss Trade-offs
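As a warm-up for the sections above, the `control` package imported in the Preliminaries can assemble these closed-loop transfer functions numerically and report the DC gain, settling time, and overshoot that the requirements and trade-off discussion revolve around. This is only a sketch with a made-up first-order plant $G(s) = 2/(s+1)$, a PI controller $C(s) = k_p + k_i/s$, and a unity filter $F(s) = 1$; in the project, $G(s)$ comes from the SBML model you select.

```
import control

# Toy loop: plant G(s) = 2/(s + 1), PI controller C(s) = kp + ki/s, unity filter F(s) = 1
G = control.TransferFunction([2], [1, 1])        # 2 / (s + 1)
kp, ki = 1.0, 0.5
C = control.TransferFunction([kp, ki], [1, 0])   # (kp*s + ki) / s

H_RY = control.feedback(C * G, 1)                # Y(s)/R(s) = CG / (1 + C G F) with F = 1
print(H_RY)
print("DC gain of H_RY:", control.dcgain(H_RY))

info = control.step_info(H_RY)                   # step-response characteristics
print("Settling time:", info["SettlingTime"])
print("Overshoot (%):", info["Overshoot"])
```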
# Movielens EDA and Modeling > EDA and modeling on movielens dataset - toc: true - badges: true - comments: true - categories: [EDA, Movie, Visualization] - image: ## Setup ``` # download dataset !wget http://files.grouplens.org/datasets/movielens/ml-100k.zip && unzip ml-100k.zip !wget http://files.grouplens.org/datasets/movielens/ml-latest.zip && unzip ml-latest.zip import numpy as np import pandas as pd from datetime import datetime from collections import OrderedDict import gc import sys from os.path import join from os.path import exists from functools import reduce from sklearn.metrics import mean_absolute_error from sklearn.model_selection import KFold from sklearn.model_selection import StratifiedKFold from sklearn.metrics.pairwise import cosine_similarity import matplotlib.pyplot as plt import seaborn as sns from IPython.display import Markdown import json import requests from IPython.display import HTML import sys sys.path.append("/content/drive/MyDrive") import mykeys api_key = mykeys.moviedb_key headers = {'Accept': 'application/json'} payload = {'api_key': api_key} try: response = requests.get( "http://api.themoviedb.org/3/configuration", params=payload, headers=headers, ) response = json.loads(response.text) base_url = response['images']['base_url'] + 'w185' except: raise ValueError("Your API key might be invalid.") ``` ## Loading ``` # loading ratings data names = ['user_id', 'item_id', 'rating', 'timestamp'] ratings_df = pd.read_csv('ml-100k/u.data', sep='\t', names=names) print('First 5:') display(ratings_df.head()) print() print('Last 5:') display(ratings_df.tail()) names = ["user_id", "age" , "gender","occupation", "zip_code"] user_df = pd.read_csv('ml-100k/u.user', sep='|', names=names) print('First 5:') display(user_df.head()) print() print('Last 5:') display(user_df.tail()) # loading ratings data names = ["genre", "id"] genre_df = pd.read_csv('ml-100k/u.genre', sep='|', names=names) print('First 5:') display(genre_df.head()) print() print('Last 5:') display(genre_df.tail()) # loading ratings data names = ["item_id", "movie_title", "release_date", "video_release_date", "IMDb_URL"] items_df = pd.read_csv('ml-100k/u.item', sep='|', encoding="iso-8859-1", header=None) print('First 5:') display(items_df.head()) print() print('Last 5:') display(items_df.tail()) # loading movies info # item_info = pd.read_csv('ml-100k/u.item', sep='|', encoding="iso-8859-1", # header=None) # item_info.columns = ['title'] # item_info.head() links_df = pd.read_csv('ml-latest/links.csv', dtype=str).set_index('movieId', drop=True) links_df.head() ``` Processed Data Loading ``` pratings = pd.read_csv(dpath+'ratings.csv', sep=',', encoding='latin-1') print('First 5:') display(pratings.head()) print() print('Last 5:') display(pratings.tail()) pmovies = pd.read_csv(dpath+'items.csv', sep=',', encoding='latin-1') print('First 5:') display(pmovies.head()) print() print('Last 5:') display(pmovies.tail()) ``` ## EDA How are the ratings distributed? ``` norm_counts = ( ratings_df['rating'] .value_counts(normalize=True, sort=False) .multiply(100) .reset_index() .rename(columns={'rating': 'percent', 'index': 'rating'}) ) ax = sns.barplot(x='rating', y='percent', data=norm_counts) ax.set_title('Rating Frequencies') plt.show() ``` How many ratings were submitted per month? 
``` def _process_ratings(ratings_df): ratings_df = ratings_df.copy() ratings_df['timestamp'] = ratings_df['timestamp'].apply(datetime.fromtimestamp) ratings_df['year'] = ratings_df['timestamp'].dt.year ratings_df['month'] = ratings_df['timestamp'].dt.month ratings_df = ratings_df.sort_values('timestamp') return ratings_df _ratings_df = _process_ratings(ratings_df) month_counts = _ratings_df[['year', 'month', 'rating']].groupby(['year', 'month']).count() month_counts = month_counts.rename(index=str, columns={'rating': '# of Ratings'}) month_counts = month_counts.reset_index() month_counts['Date'] = month_counts[['year', 'month']].apply( lambda x: datetime(year=int(x[0]), month=int(x[1]), day=1), axis=1 ) month_counts = month_counts.set_index('Date', drop=True) month_counts['# of Ratings'].plot(style='o-') plt.ylabel('# of Ratings') plt.title('# of Ratings per Month') plt.ylim([0, 25000]) plt.gca().grid(which='minor') plt.show() ``` How consistent are the average ratings over time? ``` month_counts = _ratings_df.groupby(['year', 'month'])['rating'].agg([np.mean, np.std]) month_counts = month_counts.rename(index=str, columns={'mean': 'Rating'}) month_counts = month_counts.reset_index() month_counts['Date'] = month_counts[['year', 'month']].apply( lambda x: datetime(year=int(x[0]), month=int(x[1]), day=1), axis=1 ) month_counts = month_counts.set_index('Date', drop=True) month_counts['Rating'].plot(style='o-') plt.fill_between(month_counts.index, month_counts['Rating'] - month_counts['std'], month_counts['Rating'] + month_counts['std'], alpha=0.3, ) plt.ylim([0, 5]) plt.ylabel('Rating') plt.gca().grid(which='minor') plt.title('Rating Consistency over Time') plt.show() ``` How quickly do the movie and user bases grow over time? *(assume that a user has joined on her first rating, and that she remains a user from then on.)* ``` _ratings_df['Date'] = _ratings_df[['year', 'month']].apply( lambda x: datetime(year=int(x[0]), month=int(x[1]), day=1), axis=1 ) n_users = [] n_movies = [] dates = np.unique(_ratings_df['Date']) for date in dates: n_users.append(_ratings_df[_ratings_df['Date'] <= date]['user_id'].nunique()) n_movies.append(_ratings_df[_ratings_df['Date'] <= date]['item_id'].nunique()) df_users = pd.DataFrame({'Date': dates, '# of Users': n_users}).set_index('Date') df_movies = pd.DataFrame({'Date': dates, '# of Movies': n_movies}).set_index('Date') fig, ax = plt.subplots() df_movies['# of Movies'].plot(style='o-', ax=ax) df_users['# of Users'].plot(style='o-', ax=ax) plt.ylabel('Count') plt.ylim([0, 2000]) ax.grid(which='minor') plt.tight_layout() plt.legend() plt.show() ``` How sparse is the user/movies matrix we'll be dealing with? ``` def get_rating_matrix(X): """Function to generate a ratings matrix and mappings for the user and item ids to the row and column indices Parameters ---------- X : pandas.DataFrame, shape=(n_ratings,>=3) First 3 columns must be in order of user, item, rating. 
Returns ------- rating_matrix : 2d numpy array, shape=(n_users, n_items) user_map : pandas Series, shape=(n_users,) Mapping from the original user id to an integer in the range [0,n_users) item_map : pandas Series, shape=(n_items,) Mapping from the original item id to an integer in the range [0,n_items) """ user_col, item_col, rating_col = X.columns[:3] rating = X[rating_col] user_map = pd.Series( index=np.unique(X[user_col]), data=np.arange(X[user_col].nunique()), name='user_map', ) item_map = pd.Series( index=np.unique(X[item_col]), data=np.arange(X[item_col].nunique()), name='columns_map', ) user_inds = X[user_col].map(user_map) item_inds = X[item_col].map(item_map) rating_matrix = ( pd.pivot_table( data=X, values=rating_col, index=user_inds, columns=item_inds, ) .fillna(0) .values ) return rating_matrix, user_map, item_map rating_matrix, user_map, item_map = get_rating_matrix(ratings_df) with plt.style.context('seaborn-white'): rating_matrix_binary = rating_matrix > 0 plt.imshow(rating_matrix_binary) plt.xlabel('Movie') plt.ylabel('User') plt.show() rating_matrix Markdown( r"The matrix density is $n_{{ratings}}/(n_{{users}} \times n_{{movies}}) = {:0.3f}$" .format(np.sum(rating_matrix_binary) / np.prod(rating_matrix.shape)) ) user_counts = ratings_df['user_id'].value_counts(ascending=True) user_counts.index = np.arange(len(user_counts)) / len(user_counts) plt.plot(user_counts, user_counts.index, '.', label='Users') movie_counts = ratings_df['item_id'].value_counts(ascending=True) movie_counts.index = np.arange(len(movie_counts)) / len(movie_counts) plt.plot(movie_counts, movie_counts.index, '.', label='Movies') plt.xlabel('Number of Ratings') plt.ylabel('ECDF') plt.legend() plt.show() ``` ## Preprocessing ``` display(ratings_df.head()) ``` ## Baseline Models ### Baseline - Simple Average Model The first model we'll test is about the simplest one possible. We'll just average all the training set ratings and use that average for the prediction for all test set examples. ``` class SimpleAverageModel(): """A very simple model that just uses the average of the ratings in the training set as the prediction for the test set. Attributes ---------- mean : float Average of the training set ratings """ def __init__(self): pass def fit(self, X): """Given a ratings dataframe X, compute the mean rating Parameters ---------- X : pandas dataframe, shape = (n_ratings, >=3) User, item, rating dataframe. Only the 3rd column is used. Returns ------- self """ self.mean = X.iloc[:, 2].mean() return self def predict(self, X): return np.ones(len(X)) * self.mean ``` ### Baseline - Average by ID Model We can probably do a little better by using the user or item (movie) average. Here we'll set up a baseline model class that allows you to pass either a list of userIds or movieIds as X. The prediction for a given ID will just be the average of ratings from that ID, or the overall average if that ID wasn't seen in the training set. ``` class AverageByIdModel(): """Simple model that predicts based on average ratings for a given Id (movieId or userId) from training data Parameters ---------- id_column : string Name of id column (i.e. 'itemId', 'userId') to average by in dataframe that will be fitted to Attributes ---------- averages_by_id : pandas Series, shape = [n_ids] Pandas series of rating averages by id overall_average : float Average rating over all training samples """ def __init__(self, id_column): self.id_column = id_column def fit(self, X): """Fit training data. 
Parameters ---------- X : pandas dataframe, shape = (n_ratings, >=3) User, item, rating dataframe. Columns beyond 3 are ignored Returns ------- self : object """ rating_column = X.columns[2] X = X[[self.id_column, rating_column]].copy() X.columns = ['id', 'rating'] self.averages_by_id = ( X .groupby('id')['rating'] .mean() .rename('average_rating') ) self.overall_average = X['rating'].mean() return self def predict(self, X): """Return rating predictions Parameters ---------- X : pandas dataframe, shape = (n_ratings, >=3) Array of n_ratings movieIds or userIds Returns ------- y_pred : numpy array, shape = (n_ratings,) Array of n_samples rating predictions """ rating_column = X.columns[2] X = X[[self.id_column, rating_column]].copy() X.columns = ['id', 'rating'] X = X.join(self.averages_by_id, on='id') X['average_rating'].fillna(self.overall_average, inplace=True) return X['average_rating'].values ``` ### Baseline - Damped Baseline with User + Movie Data This baseline model takes into account the average ratings of both the user and the movie, as well as a damping factor that brings the baseline prediction closer to the overall mean. The damping factor has been shown empirically to improve the perfomance. This model follows equation 2.1 from a [collaborative filtering paper](http://files.grouplens.org/papers/FnT%20CF%20Recsys%20Survey.pdf) from [GroupLens](https://grouplens.org/), the same group that published the MovieLens data. This equation defines rhe baseline rating for user $u$ and item $i$ as $$b_{u,i} = \mu + b_u + b_i$$ where $$b_u = \frac{1}{|I_u| + \beta_u}\sum_{i \in I_u} (r_{u,i} - \mu)$$ and $$b_i = \frac{1}{|U_i| + \beta_i}\sum_{u \in U_i} (r_{u,i} - b_u - \mu).$$ (See equations 2.4 and 2.5). Here, $\beta_u$ and $\beta_i$ are damping factors, for which the paper reported 25 is a good number for this dataset. For now we'll just leave these values equal ($\beta=\beta_u=\beta_i$). Here's a summary of the meanings of all the variables here: | Variable | Meaning | | --------------- | ----------------------------------------------------- | | $b_{u,i}$ | Baseline rating for user $u$ on item (movie) $i$ | | $\mu$ | The mean of all ratings | | $b_u$ | The deviation from $\mu$ associated with user $u$ | | $b_i$ | The deviation from $\mu+b_u$ associated with user $i$ | | $I_u$ | The set of all items rated by user $u$ | | $\mid I_u \mid$ | The number of items rated by user $u$ | | $\beta_u$ | Damping factor for the users ($=\beta$) | | $r_{u,i}$ | Observed rating for user $u$ on item $i$ | | $U_i$ | The set of all users who rated item $i$ | | $\mid U_i \mid$ | The number of users who rated item $i$ | | $\beta_i$ | Damping factor for the items ($=\beta$) | ``` class DampedUserMovieBaselineModel(): """Baseline model that of the form mu + b_u + b_i, where mu is the overall average, b_u is a damped user average rating residual, and b_i is a damped item (movie) average rating residual. See eqn 2.1 of http://files.grouplens.org/papers/FnT%20CF%20Recsys%20Survey.pdf Parameters ---------- damping_factor : float, default=0 Factor to bring residuals closer to 0. Must be positive. Attributes ---------- mu : float Average rating over all training samples b_u : pandas Series, shape = [n_users] User residuals b_i : pandas Series, shape = [n_movies] Movie residuals damping_factor : float, default=0 Factor to bring residuals closer to 0. Must be >= 0. """ def __init__(self, damping_factor=0): self.damping_factor = damping_factor def fit(self, X): """Fit training data. 
Parameters ---------- X : DataFrame, shape = [n_samples, >=3] User, movie, rating dataFrame. Columns beyond 3 are ignored Returns ------- self : object """ X = X.iloc[:, :3].copy() X.columns = ['user', 'item', 'rating'] self.mu = np.mean(X['rating']) user_counts = X['user'].value_counts() movie_counts = X['item'].value_counts() b_u = ( X[['user', 'rating']] .groupby('user')['rating'] .sum() .subtract(user_counts * self.mu) .divide(user_counts + self.damping_factor) .rename('b_u') ) X = X.join(b_u, on='user') X['item_residual'] = X['rating'] - X['b_u'] - self.mu b_i = ( X[['item', 'item_residual']] .groupby('item')['item_residual'] .sum() .divide(movie_counts + self.damping_factor) .rename('b_i') ) self.b_u = b_u self.b_i = b_i return self def predict(self, X): """Return rating predictions Parameters ---------- X : DataFrame, shape = (n_ratings, 2) User, item dataframe Returns ------- y_pred : numpy array, shape = (n_ratings,) Array of n_samples rating predictions """ X = X.iloc[:, :2].copy() X.columns = ['user', 'item'] X = X.join(self.b_u, on='user').fillna(0) X = X.join(self.b_i, on='item').fillna(0) return (self.mu + X['b_u'] + X['b_i']).values def get_xval_errs_and_res(df, model, n_splits=5, random_state=0, rating_col='rating'): kf = KFold(n_splits=n_splits, random_state=random_state, shuffle=True) errs, stds = [], [] residuals = np.zeros(len(df)) for train_inds, test_inds in kf.split(df): train_df, test_df = df.iloc[train_inds], df.iloc[test_inds] pred = model.fit(train_df).predict(test_df) residuals[test_inds] = pred - test_df[rating_col] mae = mean_absolute_error(pred, test_df[rating_col]) errs.append(mae) return errs, residuals errs_1, res_1 = get_xval_errs_and_res(ratings_df, SimpleAverageModel()) errs_2, res_2 = get_xval_errs_and_res(ratings_df, AverageByIdModel('item_id')) errs_3, res_3 = get_xval_errs_and_res(ratings_df, AverageByIdModel('user_id')) errs_4, res_4 = get_xval_errs_and_res(ratings_df, DampedUserMovieBaselineModel(0)) errs_5, res_5 = get_xval_errs_and_res(ratings_df, DampedUserMovieBaselineModel(10)) errs_6, res_6 = get_xval_errs_and_res(ratings_df, DampedUserMovieBaselineModel(25)) errs_7, res_7 = get_xval_errs_and_res(ratings_df, DampedUserMovieBaselineModel(50)) df_errs = pd.DataFrame( OrderedDict( ( ('Average', errs_1), ('Item Average', errs_2), ('User Average', errs_3), ('Combined 0', errs_4), ('Combined 10', errs_5), ('Combined 25', errs_6), ('Combined 50', errs_7), ) ) ) display(df_errs) df_errs = ( pd.melt(df_errs, value_vars=df_errs.columns) .rename({'variable': 'Baseline Model', 'value': 'MAE'}, axis=1) ) df_res = pd.DataFrame( OrderedDict( ( ('Average', res_1), ('Item Average', res_2), ('User Average', res_3), ('Combined 0', res_4), ('Combined 10', res_5), ('Combined 25', res_6), ('Combined 50', res_7), ) ) ) display(df_res.tail()) df_res = ( pd.melt(df_res, value_vars=df_res.columns) .rename({'variable': 'Baseline Model', 'value': 'Residual'}, axis=1) ) fig, (ax0, ax1) = plt.subplots(2, 1, figsize=(12,8)) sns.swarmplot(data=df_errs, x='Baseline Model', y='MAE', ax=ax0) sns.violinplot(data=df_res, x='Baseline Model', y='Residual', ax=ax1) ax0.xaxis.set_visible(False) plt.tight_layout() plt.show() ``` The MAE plots above show that the combined model with a damping factor of 0 or 10 performs the best, followed by the item average, then the user average. It makes sense that taking into account deviations from the mean due to both user and item would perform the best: there is simply more data being taken into account for each baseline prediction. 
The same idea explains why the item average performs better than the user average: there are more items than users in this dataset, so averaging over items takes into account more data per baseline prediction than averaging over users. The residual plots underneath the MAE plot illustrate that taking into account more data pulls the density of the residuals closer to 0. **Selecting the baseline**: Both the Combined 0 and Combined 10 models performed equally, but we'll choose the Combined 10 model, because a higher damping factor is effectively stronger regularization, which will prevent overfitting better than a damping factor of 0. ## KNN Collaborative Filtering ``` # <!-- collapse=True --> class KNNRecommender(): """User-based or Item-based collaborative filtering model that operates on dataframes with at least a user-like, item-like, and a rating-like column Parameters ---------- mode : str, ['item | 'user'], default='item' Tells model whether to use item-based or user-based collaborative filtering k : int, default=20 Number of most similar items or users to average for prediction basline_algo : object, optional Algorithm used to predict baseline scores for each rating. If not provided, the mean of all training ratings is used as the baseline. If provided, the object must have a fit(X) method and a predict(X) method similarity_func : function, default=cosine_similarity Function must take a numpy array M of shape (m,n) and return a numpy array of shape (m,m) where each element i,j represents the similarity between row i and row j of M. loop_predict : boolean, default=True If True, the model will loop over all user-item pairs in test set and compute prediction individually. If False, the model will compute all ratings simultaneously. With sparse matrices, looping is typically faster. Attributes ---------- train_mean : float Mean of the training data ratings. Used if baseline_algo is None. rating_matrix : 2d numpy array, shape=(n_users, n_items) Rating matrix minus baselines user_map : pandas Series, shape=(n_users,) Mapping from the original user id to an integer in the range [0,n_users) item_map : pandas Series, shape=(n_items,) Mapping from the original item id to an integer in the range [0,n_items) knn_indices : 2d numpy array, shape=([n_users|n_items], k) Element i,j represents the index of the jth closet [user|item] to i knn_similarities : 2d numpy array, shape=([n_users|n_items], k) Element i,j represents the similarity between the jth closest [user|item] to i """ def __init__(self, mode='item', k=20, baseline_algo=None, similarity_func=cosine_similarity, loop_predict=True): if not mode in ['user', 'item']: raise ValueError("'mode' must be either 'user' or 'item', not '{}'!".format(mode)) self.mode = mode self.k = k self.baseline_algo = baseline_algo self.similarity_func = similarity_func self.loop_predict = loop_predict self.train_mean = None self.rating_matrix = None self.user_map = None self.item_map = None self.knn_indices = None self.knn_similarities = None def _get_rating_matrix(self, X): """Private function to generate a ratings matrx and mappings for the user and item ids to the row and column indices Parameters ---------- X : pandas.DataFrame, shape=(n_ratings,>=3) First 3 columns must be in order of user, item, rating. 
Returns ------- rating_matrix : 2d numpy array, shape=(n_users, n_items) user_map : pandas Series, shape=(n_users,) Mapping from the original user id to an integer in the range [0,n_users) item_map : pandas Series, shape=(n_items,) Mapping from the original item id to an integer in the range [0,n_items) """ user_col, item_col, rating_col = X.columns[:3] rating = X[rating_col] user_map = pd.Series( index=np.unique(X[user_col]), data=np.arange(X[user_col].nunique()), name='user_map', ) item_map = pd.Series( index=np.unique(X[item_col]), data=np.arange(X[item_col].nunique()), name='columns_map', ) user_inds = X[user_col].map(user_map) item_inds = X[item_col].map(item_map) rating_matrix = ( pd.pivot_table( data=X, values=rating_col, index=user_inds, columns=item_inds, ) .fillna(0) .values ) return rating_matrix, user_map, item_map def _get_knn_indices_and_similarities(self, rating_matrix): """Private function to find indices and similarities of k nearest neighbors for each user or item Parameters ---------- rating_matrix : 2d numpy array, shape=(n_users, n_items) Matrix of ratings minus baselines Returns ------- knn_indices : 2d numpy array, shape=([n_users|n_items], k) Element i,j represents the index of the jth closet [user|item] to i knn_similarities : 2d numpy array, shape=([n_users|n_items], k) Element i,j represents the similarity between the jth closest [user|item] to i """ if self.mode == 'item': n_users_or_items = rating_matrix.shape[1] else: n_users_or_items = rating_matrix.shape[0] if self.k > n_users_or_items: new_k = n_users_or_items - 1 print( "Warning: k = {} > # {}s = {}! Setting k to {}" .format(self.k, n_users_or_items, self.mode, new_k) ) self.k = new_k if self.mode == 'item': similarity_matrix = self.similarity_func(rating_matrix.T) else: similarity_matrix = self.similarity_func(rating_matrix) np.fill_diagonal(similarity_matrix, -1) knn_indices = np.argsort(similarity_matrix, axis=1)[:, ::-1][:, :self.k] # https://github.com/scikit-learn/scikit-learn/blob/a24c8b46/sklearn/neighbors/base.py#L373 sample_range = np.arange(len(knn_indices))[:, None] knn_similarities = similarity_matrix[sample_range, knn_indices] return knn_indices, knn_similarities def fit(self, X): """Fit model to training data X. Sets the knn_indices, knn_similarities, rating_matrix, user_map, and item map variables. Parameters ---------- X : pandas DataFrame, shape=(n_ratings, >=3) First 3 columns must correspond to user, item, and rating in that order Returns ------- self This allows chaining like `KNNRecommender().fit(X_train).predict(X_test)` """ if not isinstance(X, pd.DataFrame): raise ValueError("X must be a DataFrame") X = X.copy() user_col, item_col, rating_col = X.columns[:3] if self.baseline_algo is None: self.train_mean = X[rating_col].mean() X['rating_baseline'] = self.train_mean else: self.baseline_algo.fit(X.iloc[:, :3]) X['rating_baseline'] = self.baseline_algo.predict(X[[user_col, item_col]]) X['rating_diff'] = X[rating_col] - X['rating_baseline'] nodiff_rating_matrix, _, _ = self._get_rating_matrix(X[[user_col, item_col, rating_col]]) self.knn_indices, self.knn_similarities = self._get_knn_indices_and_similarities( nodiff_rating_matrix ) gc.collect() self.rating_matrix, self.user_map, self.item_map = self._get_rating_matrix( X[[user_col, item_col, 'rating_diff']] ) return self def _predict_1_ui_pair(self, user, item): """Predict rating (minus baseline) for 1 user-item pair. Must add baseline to get the rating in the original rating scale. 
Parameters ---------- user : int Must be in range [0, n_users) item : int Must be in range [0, n_items) Returns ------- rating_pred : float Predicted ratings """ if self.mode == 'item': inds_i = self.knn_indices[item, :] sims_i = self.knn_similarities[item, :] # https://stackoverflow.com/a/35696047/2680824 numerator = np.sum(self.rating_matrix[user, inds_i] * sims_i) denominator = np.sum(np.abs(sims_i)) with np.errstate(divide='ignore', invalid='ignore'): rating_pred = numerator / denominator else: inds_u = self.knn_indices[user, :] sims_u = self.knn_similarities[user, :] # https://stackoverflow.com/a/35696047/2680824 numerator = np.sum(self.rating_matrix[inds_u, item] * sims_u) denominator = np.sum(np.abs(sims_u)) with np.errstate(divide='ignore', invalid='ignore'): rating_pred = numerator / denominator return rating_pred def predict(self, X): """Predict ratings for each user-item pair in X Parameters ---------- X : pandas DataFrame, shape=(n_ratings, >=2) First 2 columns of X must correspond to user and item. Returns ------- pandas Series, shape=(n_ratings,) Ratings for each user-item pair in X. No restriction on the data type for the user and item ids, other than they must match the training indices. """ if not isinstance(X, pd.DataFrame): raise ValueError("X must be a DataFrame") X = X.copy() user_col, item_col = X.columns[:2] if self.baseline_algo is None: X['rating_baseline'] = self.train_mean else: X['rating_baseline'] = self.baseline_algo.predict(X) X['rating'] = 0 known_user_and_item_mask = ( X[user_col].isin(self.user_map.index) & X[item_col].isin(self.item_map.index) ) X_known = X[known_user_and_item_mask] user_inds = X_known[user_col].map(self.user_map) item_inds = X_known[item_col].map(self.item_map) if self.loop_predict: rating_pred = np.array([ self._predict_1_ui_pair(u_ind, i_ind) for u_ind, i_ind in zip(user_inds, item_inds) ]) else: stacked_ratings = self.rating_matrix[ self.knn_indices[:, :, None], np.arange(self.rating_matrix.shape[1])[None, None, :] ] numerator_matrix = np.sum( stacked_ratings * self.knn_similarities[:, :, None], axis=1 ) denominator_matrix = np.sum( (stacked_ratings != 0) * self.knn_similarities[:, :, None], axis=1 ) # https://stackoverflow.com/a/35696047/2680824 with np.errstate(divide='ignore', invalid='ignore'): rating_pred_matrix = numerator_matrix / denominator_matrix rating_pred = rating_pred_matrix[user_inds, item_inds] rating_pred[np.isnan(rating_pred)] = 0 X.loc[known_user_and_item_mask, 'rating'] = rating_pred return X['rating'] + X['rating_baseline'] ``` Determine Optimal k Values ``` def cart_prod(df_1, df_2): df_1['_dummy_'], df_2['_dummy_'] = 1, 1 return pd.merge(df_1, df_2, on='_dummy_').drop('_dummy_', axis=1) n_splits = 5 k_list = [1, 2, 5, 10, 20, 50, 100, 200] mode_list = ['user', 'item'] i_fold_list = np.arange(n_splits) df_1 = pd.DataFrame({'k': k_list}) df_2 = pd.DataFrame({'mode': mode_list}) df_3 = pd.DataFrame({'i_fold': i_fold_list}) results_df = reduce(cart_prod, [df_1, df_2, df_3]) results_df.head(10) kf = KFold(n_splits=n_splits, random_state=0, shuffle=True) for (k, mode), group in results_df.groupby(['k', 'mode']): for (index, row), (train_inds, test_inds) in zip(group.iterrows(), kf.split(ratings_df)): print("k={}, mode={}: i_fold= ".format(row['k'], row['mode']), end='') print("{}, ".format(row['i_fold']), end='') train_df, test_df = ratings_df.iloc[train_inds], ratings_df.iloc[test_inds] baseline_algo = DampedUserMovieBaselineModel(damping_factor=10) t1 = datetime.now() rec = KNNRecommender(mode=row['mode'], 
k=row['k'], baseline_algo=baseline_algo) rec.fit(train_df) preds = rec.predict(test_df[['user_id', 'item_id']]) mae = mean_absolute_error(preds, test_df['rating']) results_df.loc[index, 'MAE'] = mae dt = (datetime.now() - t1).total_seconds() print("{:5.3f} dt={:.2f} seconds".format(mae, dt)) results_df.loc[index, 'time'] = dt baseline_df = pd.DataFrame({'i_fold': i_fold_list}) for (index, row), (train_inds, test_inds) in zip(baseline_df.iterrows(), kf.split(ratings_df)): print("i_fold={}: MAE=".format(row['i_fold']), end='') train_df, test_df = ratings_df.iloc[train_inds], ratings_df.iloc[test_inds] baseline_algo = DampedUserMovieBaselineModel(damping_factor=10) baseline_algo.fit(train_df) preds = baseline_algo.predict(test_df[['user_id', 'item_id']]) mae = mean_absolute_error(preds, test_df['rating']) baseline_df.loc[index, 'MAE'] = mae print("{:5.3f}".format(mae)) base_avg = baseline_df['MAE'].mean() base_std = baseline_df['MAE'].std() sns.pointplot(data=results_df, x='k', hue='mode', y='MAE') nk = results_df['k'].nunique() plt.plot([-1, nk], [base_avg, base_avg], label='baseline', color='C2') plt.fill_between([-1, nk], [base_avg - base_std]*2, [base_avg+base_std]*2, color='C2', alpha=0.2) plt.legend() plt.show() ``` ## ALS & SGD ``` class ALSRecommender(): """Recommender based on Alternating Least Squares algorithm. Parameters ---------- k : int, default=5 Number of latent features lmbda : float, default=0.1 Regularization parameter max_epochs : int, default=15 Max number of iterations to run baseline_algo : object Object with fit(X) and """ def __init__(self, k=5, lmbda=0.1, max_epochs=15, baseline_algo=None, error_metric='mae', verbose=True): # Force integer in case it comes in as float self.k = int(np.round(k)) self.lmbda = lmbda self.max_epochs = max_epochs self.baseline_algo = baseline_algo self.error_metric = error_metric self.verbose = verbose self.U = None self.I = None self.initialized = False def _calc_train_error(self, U, I, R, R_selector=None, error_metric='mae'): if R_selector is None: R_selector = (R > 0) R_hat = np.dot(U.T, I) if error_metric == 'mae': error = np.sum(R_selector * np.abs(R_hat - R)) / np.sum(R_selector) else: raise ValueError("{} is an unsupported error metric".format(metric)) return error def _fit_init(self, X): if not isinstance(X, pd.DataFrame): raise ValueError("X must be a DataFrame") X = X.copy() user_col, item_col, rating_col = X.columns[:3] if self.baseline_algo is None: self.train_mean = X[rating_col].mean() else: self.baseline_algo.fit(X) self.R, self.user_map, self.item_map = get_rating_matrix(X) n_users, n_items = self.R.shape self.U = 3 * np.random.rand(self.k, n_users) self.I = 3 * np.random.rand(self.k, n_items) self.I[0, :] = self.R[self.R != 0].mean(axis=0) # Avg. rating for each movie self.E = np.eye(self.k) # (k x k)-dimensional idendity matrix self.epoch = 0 self.train_errors = [] self.initialized = True def fit(self, X, n_epochs=None): """Fit model to training data X. If at least one iteration has already been run, then the model will continue from its most recent state. Parameters ---------- X : pandas DataFrame, shape=(n_ratings, >=3) First 3 columns must correspond to user, item, and rating in that order n_epochs : int, optional Number of iterations to run. If not provided, will run for self.max_epochs Returns ------- self This allows chaining like `ALSRecommender().fit(X_train).predict(X_test)` """ # Allow continuation from previous state if n_epochs is given. Otherwise start from scratch. 
if n_epochs is None: self.initialized = False if not self.initialized: self._fit_init(X) epoch_0 = self.epoch if n_epochs is None: n_epochs = self.max_epochs - epoch_0 n_users, n_items = self.R.shape # Run n_epochs iterations for i_epoch in range(n_epochs): if self.epoch >= self.max_epochs: print("max_epochs = {}".format(self.max_epochs)) break # Fix I and estimate U for i, Ri in enumerate(self.R): nui = np.count_nonzero(Ri) # Number of items user i has rated if (nui == 0): nui = 1 # Be aware of zero counts! # Get array of nonzero indices in row Ii Ri_nonzero_selector = np.nonzero(Ri)[0] # Select subset of I associated with movies reviewed by user i I_Ri = self.I[:, Ri_nonzero_selector] # Select subset of row R_i associated with movies reviewed by user i Ri_nonzero = self.R[i, Ri_nonzero_selector] Ai = np.dot(I_Ri, I_Ri.T) + self.lmbda * nui * self.E Vi = np.dot(I_Ri, Ri_nonzero.T) self.U[:, i] = np.linalg.solve(Ai, Vi) # Fix U and estimate I for j, Rj in enumerate(self.R.T): nmj = np.count_nonzero(Rj) # Number of users that rated item j if (nmj == 0): nmj = 1 # Be aware of zero counts! # Get array of nonzero indices in row Ij Rj_nonzero_selector = np.nonzero(Rj)[0] # Select subset of P associated with users who reviewed movie j U_Rj = self.U[:, Rj_nonzero_selector] # Select subset of column R_j associated with users who reviewed movie j Rj_nonzero = self.R[Rj_nonzero_selector, j] Aj = np.dot(U_Rj, U_Rj.T) + self.lmbda * nmj * self.E Vj = np.dot(U_Rj, Rj_nonzero) self.I[:, j] = np.linalg.solve(Aj, Vj) error = self._calc_train_error(self.U, self.I, self.R) self.train_errors.append(error) if self.verbose: print("[Epoch {}/{}] train error: {}".format(self.epoch, self.max_epochs, error)) self.epoch += 1 return self def predict(self, X): """Generate predictions for user/item pairs Parameters ---------- X : pandas dataframe, shape = (n_pairs, 2) User, item dataframe Returns ------- rating_pred : 1d numpy array, shape = (n_pairs,) Array of rating predictions for each user/item pair """ if not isinstance(X, pd.DataFrame): raise ValueError("X must be a DataFrame") X = X.copy() user_col, item_col = X.columns[:2] if self.baseline_algo is None: X['rating_baseline'] = self.train_mean else: X['rating_baseline'] = self.baseline_algo.predict(X) X['rating'] = 0 known_user_and_item_mask = ( X[user_col].isin(self.user_map.index) & X[item_col].isin(self.item_map.index) ) X_known, X_unknown = X[known_user_and_item_mask], X[~known_user_and_item_mask] user_inds = X_known[user_col].map(self.user_map) item_inds = X_known[item_col].map(self.item_map) rating_pred = np.array([ np.sum(self.U[:, u_ind] * self.I[:, i_ind]) for u_ind, i_ind in zip(user_inds, item_inds) ]) X.loc[known_user_and_item_mask, 'rating'] = rating_pred X.loc[~known_user_and_item_mask, 'rating'] = self.baseline_algo.predict(X_unknown) min_rating = np.min(self.R[np.nonzero(self.R)]) max_rating = np.max(self.R) X.loc[X['rating'] < min_rating, 'rating'] = min_rating X.loc[X['rating'] > max_rating, 'rating'] = max_rating return X['rating'].values class SGDRecommender(): """Stochastic Gradient Descent recommender. 
Parameters ---------- k : int, default=5 Number of latent features learning_rate : float, default=0.1 Speed at which to descend down gradient max_epochs : int, default=15 Max number of iterations to run error_metric : string, default='mae' Error metric to use user_reg : float, default=0.0 Regularization parameter for the latent feature weights in U, >=0 item_reg : float, default=0.0 Regularization parameter for the latent feature weights in I, >=0 user_bias_reg : float, default=0.0 Regularization parameter for the b_u terms, >=0 item_bias_reg : float, default=0.0 Regularization parameter for the b_i terms, >=0 damping_factor : float, default=25 Damping factor to be used in the baseline algorithm minibatch_size : int, default=1 Number of user/item pairs to evaluate at a time during training verbose : boolean, default=True If True, print progress. """ def __init__(self, k=5, learning_rate=0.1, max_epochs=15, error_metric='mae', user_reg=0.0, item_reg=0.0, user_bias_reg=0.0, item_bias_reg=0.0, damping_factor=25, minibatch_size=1, verbose=True): self.k = k self.learning_rate = learning_rate self.max_epochs = max_epochs self.error_metric = error_metric self.user_reg = user_reg self.item_reg = item_reg self.user_bias_reg = user_bias_reg self.item_bias_reg = item_bias_reg self.damping_factor = damping_factor self.minibatch_size = minibatch_size self.verbose = verbose self.U = None self.I = None self.initialized = False def _calc_train_error(self, U, I, mu, b_u, b_i, R, R_selector=None): if R_selector is None: R_selector = (R > 0) R_hat = np.dot(U, I.T) + mu + b_u[:, None] + b_i[None, :] if self.error_metric == 'mae': error = np.sum(R_selector * np.abs(R_hat - R)) / np.sum(R_selector) else: raise ValueError("{} is an unsupported error metric".format(metric)) return error def _fit_init(self, X): if not isinstance(X, pd.DataFrame): raise ValueError("X must be a DataFrame") user_col, item_col, rating_col = X.columns[:3] self.baseline_algo = DampedUserMovieBaselineModel(damping_factor=self.damping_factor) self.baseline_algo.fit(X) self.mu = X[rating_col].mean() self.b_u, self.b_i = self.baseline_algo.b_u.values, self.baseline_algo.b_i.values self.R, self.user_map, self.item_map = get_rating_matrix(X) n_users, n_items = self.R.shape self.U = np.random.normal(scale=1.0/self.k, size=(n_users, self.k)) self.I = np.random.normal(scale=1.0/self.k, size=(n_items, self.k)) self.epoch = 0 self.train_errors = [] self.initialized = True def fit(self, X, n_epochs=None): """Fit model to training data X. If at least one iteration has already been run, then the model will continue from its most recent state. Parameters ---------- X : pandas DataFrame, shape=(n_ratings, >=3) First 3 columns must correspond to user, item, and rating in that order n_epochs : int, optional Number of iterations to run. If not provided, will run for self.max_epochs Returns ------- self This allows chaining like `SGDRecommender().fit(X_train).predict(X_test)` """ X = X.copy() # Allow continuation from previous state if n_epochs is given. Otherwise start from scratch. 
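        # Note: _fit_init builds user_map/item_map, and the two .map() calls just
        # below replace the raw user/item ids in X with row/column indices of the
        # rating matrix, so the itertuples() loop can index U, I, b_u and b_i directly.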
if n_epochs is None: self.initialized = False if not self.initialized: self._fit_init(X) X.iloc[:, 0] = X.iloc[:, 0].map(self.user_map) X.iloc[:, 1] = X.iloc[:, 1].map(self.item_map) epoch_0 = self.epoch if n_epochs is None: n_epochs = self.max_epochs - epoch_0 n_users, n_items = self.R.shape # Repeat until convergence for i_epoch in range(n_epochs): if self.epoch >= self.max_epochs: print("max_epochs = {}".format(self.max_epochs)) break # Shuffle X X = X.sample(frac=1) if self.minibatch_size == 1: for row in X.itertuples(): index, user, item, rating = row[:4] pred = self.predict_1_train(user, item) err = pred - self.R[user, item] self.b_u[user] -= self.learning_rate * (err + self.user_bias_reg * self.b_u[user]) self.b_i[item] -= self.learning_rate * (err + self.item_bias_reg * self.b_i[item]) self.U[user, :] -= self.learning_rate * ( err * self.I[item, :] + self.user_reg * self.U[user, :] ) self.I[item, :] -= self.learning_rate * ( err * self.U[user, :] + self.item_reg * self.I[item, :] ) else: raise ValueError("Minibatch size greater than 1 not supported yet.") error = self._calc_train_error(self.U, self.I, self.mu, self.b_u, self.b_i, self.R) self.train_errors.append(error) if self.verbose: print("[Epoch {}/{}] train error: {}".format(self.epoch, self.max_epochs, error)) self.epoch += 1 return self def predict_1_train(self, user, item): pred = self.mu + self.b_u[user] + self.b_i[item] pred += np.dot(self.U[user, :], self.I[item, :]) return pred def predict(self, X): """Generate predictions for user/item pairs Parameters ---------- X : pandas dataframe, shape = (n_pairs, 2) User, item dataframe Returns ------- rating_pred : 1d numpy array, shape = (n_pairs,) Array of rating predictions for each user/item pair """ if not isinstance(X, pd.DataFrame): raise ValueError("X must be a DataFrame") X = X.copy() user_col, item_col = X.columns[:2] known_user_and_item_mask = ( X[user_col].isin(self.user_map.index) & X[item_col].isin(self.item_map.index) ) X_known, X_unknown = X[known_user_and_item_mask], X[~known_user_and_item_mask] user_inds = X_known[user_col].map(self.user_map) item_inds = X_known[item_col].map(self.item_map) rating_pred = np.array([ self.predict_1_train(u_ind, i_ind) for u_ind, i_ind in zip(user_inds, item_inds) ]) X.loc[known_user_and_item_mask, 'rating'] = rating_pred X.loc[~known_user_and_item_mask, 'rating'] = self.baseline_algo.predict(X_unknown) return X['rating'].values n_splits = 3 skf = StratifiedKFold(n_splits=n_splits, random_state=0) splits = [ (train_inds, test_inds) for train_inds, test_inds in skf.split(ratings_df, ratings_df['user_id']) ] for i_fold, (train_inds, test_inds) in enumerate(splits): train_df, test_df = ratings_df.iloc[train_inds], ratings_df.iloc[test_inds] train_movie_counts = train_df.groupby('user_id').item_id.count() test_movie_counts = test_df.groupby('user_id').item_id.count() print("Fold {}:".format(i_fold)) train_min, train_max = train_movie_counts.min(), train_movie_counts.max() test_min, test_max = test_movie_counts.min(), test_movie_counts.max() print(" Train: between {} and {} movies per user".format(train_min, train_max)) print(" Test: between {} and {} movies per user".format(test_min, test_max)) def cart_prod(df_1, df_2): df_1['_dummy_'], df_2['_dummy_'] = 1, 1 return pd.merge(df_1, df_2, on='_dummy_').drop('_dummy_', axis=1) def prep_results_df(lists_dict): df = pd.DataFrame({'_dummy_': [1]}) for name, list in lists_dict.items(): df = cart_prod(df, pd.DataFrame({name: list})) return df ``` Evaluation functions ``` def 
dcg(top_k_matrix): """Compute discounted cumulative gain (DCG) for each row (user) in matrix. This measures how good the k recommendations for each user are, with decreasing weight placed on items farther down the list. DCG needs to be normalized before comparing between users (see normalized discounted cumulative gain, or NDCG). Links: https://link.springer.com/article/10.1007/s11704-015-4584-1 https://gist.github.com/bwhite/3726239 https://opensourceconnections.com/blog/2018/02/26/ndcg-scorer-in-quepid #cg-dcg-idcg-and-ndcg Parameters ---------- top_k_matrix : 2d numpy array, shape = (n_users, k) Each row should have the top k ratings for each user from a rating matrix in descending order. Returns ------- 1d numpy array, shape=(n_users,) Array of DCG values for each user """ return np.sum( top_k_matrix / np.log2(np.arange(2, top_k_matrix.shape[1]+2))[None, :], axis=1 ) def ndcg(pred_k_matrix, actual_k_matrix): """Calculate normalized discounted cumulative gain (NDCG) for each user (each row). This is simply the DCG divided by the maximum possible DCG for each user. NDCG ranges from 0 to 1, where 1 means movies were chosen that actually received the highest k ratings. Parameters ---------- pred_k_matrix : 2d numpy array, shape = (n_users, k) A matrix of the *actual* ratings of the k movies chosen by the recommender system for each user actual_k_matrix : 2d numpy array, shape = (n_users, k) A matrix of the *actual* ratings of the k movies from the test set which the user gave the highest ratings to. Returns ------- ndcg_array : 1d numpy array, shape = (n_users,) Array of NDCG values for each user """ max_dcg_array = dcg(actual_k_matrix) dcg_array = dcg(pred_k_matrix) return dcg_array / max_dcg_array def ndcg_from_df(df, pred, k): """Calculate NDCG for each user in the passed dataframe given predicted scores and a number of movies to recommend Parameters ---------- df : pandas dataframe, shape = (n_ratings, >=3) User, item, rating dataframe. 
All columns after first 3 are ignored pred : 1d array-like, shape = (n_ratings,) List/array/series of predicted ratings for each user/item pair in df k : int Number of movies per user to recommend Returns ------- user_map : pandas series, shape = (n_users,) Index = original user ids, value = mapped integer corresponding to position in ndcg_array for that user ndcg_array : 1d numpy array, shape = (n_users) Array of NDCG scores in range (0, 1] """ df = df.iloc[:, :3].copy() df.columns = ['user', 'item', 'rating'] df['pred'] = pred pred_matrix, user_map, item_map = get_rating_matrix(df[['user', 'item', 'pred']]) n_items = len(item_map) inds = pred_matrix.argsort(axis=1)[:, :n_items-1-k:-1] del pred_matrix gc.collect() actual_matrix, _, _ = get_rating_matrix(df[['user', 'item', 'rating']]) pred_k_matrix = actual_matrix[np.arange(len(actual_matrix))[:, None], inds] inds = actual_matrix.argsort(axis=1)[:, :n_items-1-k:-1] actual_k_matrix = actual_matrix[np.arange(len(actual_matrix))[:, None], inds] ndcg_array = ndcg(pred_k_matrix, actual_k_matrix) return user_map, ndcg_array ``` ### Choose the best user-based model *Let's use cross-validation to examine MAE and NDCG@3 on out-of-sample data and choose the "best" user-based model* ``` lists_dict = { 'i_fold': np.arange(n_splits), 'k': [1, 2, 5, 10, 20, 50, 100], } k_recs = 3 baseline_algo = DampedUserMovieBaselineModel(damping_factor=10) user_results_df = prep_results_df(lists_dict) cols = ['test_err', 'ndcg_mean', 'ndcg_std', 'dt'] for c in cols: user_results_df[c] = np.nan for i_fold, (train_inds, test_inds) in enumerate(splits): train_df, test_df = ratings_df.iloc[train_inds], ratings_df.iloc[test_inds] user_results_i = user_results_df[user_results_df['i_fold']==i_fold] for index, row in user_results_i[['i_fold', 'k']].iterrows(): t1 = datetime.now() model = KNNRecommender(mode='user', k=row['k'], baseline_algo=baseline_algo) preds = model.fit(train_df).predict(test_df[['user_id', 'item_id']]) dt = (datetime.now() - t1).total_seconds() test_err = mean_absolute_error(test_df['rating'], preds) user_map, ndcg_array = ndcg_from_df(test_df, preds, k=k_recs) ndcg_mean, ndcg_std = np.mean(ndcg_array), np.std(ndcg_array) print("k={}, i_fold={}: MAE={}, NDCG={}".format(row['k'], row['i_fold'], test_err, ndcg_mean)) cols = ['test_err', 'ndcg_mean', 'ndcg_std', 'dt'] user_results_df.loc[index, cols] = test_err, ndcg_mean, ndcg_std, dt fig, (ax0, ax1) = plt.subplots(2, 1, sharex=True) sns.pointplot(data=user_results_df, x='k', y='test_err', ax=ax0) ax0.set_ylabel('MAE') ax0.set_xlabel('') sns.pointplot(data=user_results_df, x='k', y='ndcg_mean', ax=ax1) ax1.set_ylabel('NDCG@{}'.format(k_recs)) fig.subplots_adjust(hspace=0.1) plt.show() ``` NDCG@3 peaks at $k=50$, and MAE is pretty similar between $k=20$ to $100$, so $k=50$ is the winner. 
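As a quick, illustrative sanity check of the `dcg`/`ndcg` helpers defined above (a toy example added here for intuition, not part of the original analysis; it assumes the cell defining those functions has been run), consider a single user whose three recommended items received actual ratings of 3, 5 and 4, while the best possible top-3 would have been 5, 4 and 3:
```
import numpy as np

pred_k = np.array([[3., 5., 4.]])   # actual ratings of the 3 recommended items
ideal_k = np.array([[5., 4., 3.]])  # actual ratings of the user's true top-3 items

print(dcg(pred_k))             # ~[8.15]: 3/log2(2) + 5/log2(3) + 4/log2(4)
print(dcg(ideal_k))            # ~[9.02]: the best achievable ordering
print(ndcg(pred_k, ideal_k))   # ~[0.90]: close to, but short of, the ideal ranking
```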
``` baseline_algo = DampedUserMovieBaselineModel(damping_factor=10) best_user_model = KNNRecommender(mode='user', k=50, baseline_algo=baseline_algo) ``` ### Choose the best item-based model *Let's use cross-validation to examine MAE and NDCG@3 on out-of-sample data and choose the "best" item-based model.* ``` lists_dict = { 'i_fold': np.arange(n_splits), 'k': [1, 2, 5, 10, 20, 50, 100], } k_recs = 3 baseline_algo = DampedUserMovieBaselineModel(damping_factor=10) item_results_df = prep_results_df(lists_dict) cols = ['test_err', 'ndcg_mean', 'ndcg_std', 'dt'] for c in cols: item_results_df[c] = np.nan for i_fold, (train_inds, test_inds) in enumerate(splits): train_df, test_df = ratings_df.iloc[train_inds], ratings_df.iloc[test_inds] item_results_i = item_results_df[item_results_df['i_fold']==i_fold] print("i_fold={}: ".format(i_fold), end='') for index, row in item_results_i[['i_fold', 'k']].iterrows(): t1 = datetime.now() model = KNNRecommender(mode='item', k=row['k'], baseline_algo=baseline_algo) preds = model.fit(train_df).predict(test_df[['user_id', 'item_id']]) dt = (datetime.now() - t1).total_seconds() test_err = mean_absolute_error(test_df['rating'], preds) user_map, ndcg_array = ndcg_from_df(test_df, preds, k=k_recs) ndcg_mean, ndcg_std = np.mean(ndcg_array), np.std(ndcg_array) print("k={}, ".format(row['k']), end='') cols = ['test_err', 'ndcg_mean', 'ndcg_std', 'dt'] item_results_df.loc[index, cols] = test_err, ndcg_mean, ndcg_std, dt print() fig, (ax0, ax1) = plt.subplots(2, 1, sharex=True) sns.pointplot(data=item_results_df, x='k', y='test_err', ax=ax0) ax0.set_ylabel('MAE') ax0.set_xlabel('') sns.pointplot(data=item_results_df, x='k', y='ndcg_mean', ax=ax1) ax1.set_ylabel('NDCG@{}'.format(k_recs)) fig.subplots_adjust(hspace=0.1) plt.show() ``` Here, $k=10$ and $k=20$ have similar MAE and NDCG@3, we'll favor higher $k$ in nearest neigbor methods because higher $k$ is less prone to overfitting. $k=20$ is the winner of the item-based models. ``` baseline_algo = DampedUserMovieBaselineModel(damping_factor=10) best_item_model = KNNRecommender(mode='item', k=20, baseline_algo=baseline_algo) ``` ### Choose the best ALS model Let's use cross-validation to examine MAE and NDCG@3 on out-of-sample data and choose the "best" ALS model. 
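Before sweeping the hyperparameters, it is worth recalling the objective that the `ALSRecommender` above is minimizing. The following is a sketch of the standard weighted-$\lambda$-regularization formulation that matches the updates implemented in its `fit` loop (written here for reference, not taken verbatim from the original notebook):

$$
\min_{U, I} \sum_{(u,i):\,R_{ui}>0} \left(R_{ui} - \mathbf{u}_u^\top \mathbf{i}_i\right)^2
\;+\; \lambda \left( \sum_u n_u \lVert \mathbf{u}_u \rVert^2 + \sum_i n_i \lVert \mathbf{i}_i \rVert^2 \right)
$$

With $I$ held fixed, each user vector has the closed-form solution

$$
\mathbf{u}_u = \left( I_{\Omega_u} I_{\Omega_u}^\top + \lambda\, n_u E \right)^{-1} I_{\Omega_u} \mathbf{r}_u ,
$$

where $\Omega_u$ is the set of items rated by user $u$, $n_u = |\Omega_u|$, and $E$ is the $k \times k$ identity; the item update is symmetric. This is what the `np.linalg.solve(Ai, Vi)` and `np.linalg.solve(Aj, Vj)` calls compute, alternating between the two until `max_epochs` is reached.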
``` max_epochs = 15 lists_dict = { 'i_fold': np.arange(n_splits), 'i_epoch': np.arange(max_epochs), 'k': [5, 10, 50], } k_recs = 3 baseline_algo = DampedUserMovieBaselineModel(damping_factor=10) als_epoch_results_df = prep_results_df(lists_dict) cols = ['test_err', 'ndcg_mean', 'ndcg_std', 'dt'] for c in cols: als_epoch_results_df[c] = np.nan for i_fold, (train_inds, test_inds) in enumerate(splits): train_df, test_df = ratings_df.iloc[train_inds], ratings_df.iloc[test_inds] als_epoch_results_i = als_epoch_results_df[als_epoch_results_df['i_fold']==i_fold] for k, group in als_epoch_results_i.groupby('k'): model = ALSRecommender(k=k, lmbda=0.1, max_epochs=max_epochs, baseline_algo=baseline_algo, verbose=False) print('i_fold={}, k={}: i_epoch='.format(i_fold, k), end='') for index, row in group[['i_fold', 'i_epoch']].iterrows(): t1 = datetime.now() preds = model.fit(train_df, n_epochs=1).predict(test_df[['user_id', 'item_id']]) dt = (datetime.now() - t1).total_seconds() test_err = mean_absolute_error(test_df['rating'], preds) user_map, ndcg_array = ndcg_from_df(test_df, preds, k=k_recs) ndcg_mean, ndcg_std = np.mean(ndcg_array), np.std(ndcg_array) print('{}, '.format(row['i_epoch']), end='') cols = ['test_err', 'ndcg_mean', 'ndcg_std', 'dt'] als_epoch_results_df.loc[index, cols] = test_err, ndcg_mean, ndcg_std, dt print() fig, (ax0, ax1) = plt.subplots(2, 1, sharex=True) sns.pointplot(data=als_epoch_results_df, x='i_epoch', y='test_err', hue='k', ax=ax0) ax0.set_ylabel('MAE') ax0.set_xlabel('') ax0.legend(loc='upper left', bbox_to_anchor=(1.02, 1.0), title='k =') sns.pointplot(data=als_epoch_results_df, x='i_epoch', y='ndcg_mean', hue='k', ax=ax1) ax1.set_ylabel('NDCG@{}'.format(k_recs)) ax1.set_xlabel('Epoch') ax1.legend_.remove() fig.subplots_adjust(hspace=0.1) plt.show() max_epochs = 15 lists_dict = { 'i_fold': np.arange(n_splits), 'k': [20, 50, 100, 200], 'lmbda': [0.05, 0.1, 0.2] } k_recs = 3 baseline_algo = DampedUserMovieBaselineModel(damping_factor=10) als_results_df = prep_results_df(lists_dict) cols = ['test_err', 'ndcg_mean', 'ndcg_std', 'dt'] for c in cols: als_results_df[c] = np.nan for i_fold, (train_inds, test_inds) in enumerate(splits): train_df, test_df = ratings_df.iloc[train_inds], ratings_df.iloc[test_inds] als_results_i = als_results_df[als_results_df['i_fold']==i_fold] for index, row in als_results_i[['k', 'lmbda']].iterrows(): model = ALSRecommender(k=row['k'], lmbda=row['lmbda'], max_epochs=max_epochs, baseline_algo=baseline_algo, verbose=False) print('i_fold={}, k={}: lmbda={}'.format(i_fold, row['k'], row['lmbda'])) t1 = datetime.now() preds = model.fit(train_df).predict(test_df[['user_id', 'item_id']]) dt = (datetime.now() - t1).total_seconds() test_err = mean_absolute_error(test_df['rating'], preds) user_map, ndcg_array = ndcg_from_df(test_df, preds, k=k_recs) ndcg_mean, ndcg_std = np.mean(ndcg_array), np.std(ndcg_array) cols = ['test_err', 'ndcg_mean', 'ndcg_std', 'dt'] als_results_df.loc[index, cols] = test_err, ndcg_mean, ndcg_std, dt fig, (ax0, ax1) = plt.subplots(2, 1, sharex=True) sns.pointplot(data=als_results_df, x='k', y='test_err', hue='lmbda', ax=ax0) ax0.set_ylabel('MAE') ax0.set_xlabel('') ax0.legend(loc='upper left', bbox_to_anchor=(1.02, 1.0), title=r'$\lambda =$') sns.pointplot(data=als_results_df, x='k', y='ndcg_mean', hue='lmbda', ax=ax1) ax1.set_ylabel('NDCG@{}'.format(k_recs)) ax1.legend_.remove() fig.subplots_adjust(hspace=0.1) plt.show() ``` Here, it looks like MAE is pretty flat with respect to the learning rate $\lambda$, but NDCG@3 shows 
some interesting variations. The highest NDCG@3 comes from $\lambda=0.1$ and $k>=50$. With matrix factorization methods like ALS, we want to favor lower $k$ for better generalizability, so $\lambda=0.1$ and $k=50$ is the winner of the ALS category. ``` baseline_algo = DampedUserMovieBaselineModel(damping_factor=10) best_als_model = ALSRecommender(k=50, lmbda=0.1, max_epochs=30, baseline_algo=baseline_algo) ``` ### Choose the best SGD model *Let's use cross-validation to examine MAE and NDCG@3 on out-of-sample data and choose the "best" SGD model.* ``` max_epochs = 15 lists_dict = { 'i_fold': np.arange(n_splits), 'i_epoch': np.arange(max_epochs), 'k': [5, 10, 50], } k_recs = 3 baseline_algo = DampedUserMovieBaselineModel(damping_factor=10) sgd_epoch_results_df = prep_results_df(lists_dict) cols = ['test_err', 'ndcg_mean', 'ndcg_std', 'dt'] for c in cols: sgd_epoch_results_df[c] = np.nan for i_fold, (train_inds, test_inds) in enumerate(splits): train_df, test_df = ratings_df.iloc[train_inds], ratings_df.iloc[test_inds] sgd_epoch_results_i = sgd_epoch_results_df[sgd_epoch_results_df['i_fold']==i_fold] for k, group in sgd_epoch_results_i.groupby('k'): model = SGDRecommender(k=k, learning_rate=0.01, max_epochs=max_epochs, damping_factor=10, verbose=False) print('i_fold={}, k={}: i_epoch='.format(i_fold, k), end='') for index, row in group[['i_fold', 'i_epoch']].iterrows(): t1 = datetime.now() preds = model.fit(train_df, n_epochs=1).predict(test_df[['user_id', 'item_id']]) dt = (datetime.now() - t1).total_seconds() test_err = mean_absolute_error(test_df['rating'], preds) user_map, ndcg_array = ndcg_from_df(test_df, preds, k=k_recs) ndcg_mean, ndcg_std = np.mean(ndcg_array), np.std(ndcg_array) print('{}, '.format(row['i_epoch']), end='') cols = ['test_err', 'ndcg_mean', 'ndcg_std', 'dt'] sgd_epoch_results_df.loc[index, cols] = test_err, ndcg_mean, ndcg_std, dt print() fig, (ax0, ax1) = plt.subplots(2, 1, sharex=True) sns.pointplot(data=sgd_epoch_results_df, x='i_epoch', y='test_err', hue='k', ax=ax0) ax0.set_ylabel('MAE') ax0.set_xlabel('') ax0.legend(loc='upper left', bbox_to_anchor=(1.02, 1.0), title='k =') sns.pointplot(data=sgd_epoch_results_df, x='i_epoch', y='ndcg_mean', hue='k', ax=ax1) ax1.set_ylabel('NDCG@{}'.format(k_recs)) ax1.set_xlabel('Epoch') ax1.legend_.remove() fig.subplots_adjust(hspace=0.1) plt.show() lists_dict = { 'i_fold': np.arange(n_splits), 'learning_rate': [0.001, 0.01], 'reg': [0.0, 0.001, 0.01], } k_recs = 3 k = 50 max_epochs = 30 sgd_results_df = prep_results_df(lists_dict) cols = ['test_err', 'ndcg_mean', 'ndcg_std', 'dt'] for c in cols: sgd_results_df[c] = np.nan for i_fold, (train_inds, test_inds) in enumerate(splits): train_df, test_df = ratings_df.iloc[train_inds], ratings_df.iloc[test_inds] sgd_results_i = sgd_results_df[sgd_results_df['i_fold']==i_fold] for index, row in sgd_results_i[['learning_rate', 'reg']].iterrows(): learning_rate, reg = row['learning_rate'], row['reg'] model = SGDRecommender(k=k, learning_rate=learning_rate, max_epochs=max_epochs, damping_factor=10, verbose=False, user_reg=reg, item_reg=reg, user_bias_reg=reg, item_bias_reg=reg) print('i_fold={}, learning_rate={}, reg={}'.format(i_fold, learning_rate, reg)) t1 = datetime.now() preds = model.fit(train_df).predict(test_df[['user_id', 'item_id']]) dt = (datetime.now() - t1).total_seconds() test_err = mean_absolute_error(test_df['rating'], preds) user_map, ndcg_array = ndcg_from_df(test_df, preds, k=k_recs) ndcg_mean, ndcg_std = np.mean(ndcg_array), np.std(ndcg_array) cols = ['test_err', 
'ndcg_mean', 'ndcg_std', 'dt'] sgd_results_df.loc[index, cols] = test_err, ndcg_mean, ndcg_std, dt fig, (ax0, ax1) = plt.subplots(2, 1, sharex=True) sns.pointplot(data=sgd_results_df, x='reg', y='test_err', hue='learning_rate', ax=ax0) ax0.set_ylabel('MAE') ax0.set_xlabel('') ax0.legend(loc='upper left', bbox_to_anchor=(1.02, 1.0), title='Learning Rate') sns.pointplot(data=sgd_results_df, x='reg', y='ndcg_mean', hue='learning_rate', ax=ax1) ax1.set_ylabel('NDCG@{}'.format(k_recs)) ax1.set_xlabel('Regularization Parameter') ax1.legend_.remove() fig.subplots_adjust(hspace=0.1) plt.show() reg = 0.01 best_sgd_model = SGDRecommender(k=50, learning_rate=0.01, max_epochs=30, damping_factor=10, user_reg=reg, item_reg=reg, user_bias_reg=reg, item_bias_reg=reg) ``` ### Compare the top methods of each category ``` final_models = [best_user_model, best_item_model, best_als_model, best_sgd_model] final_model_names = ['user', 'item', 'als', 'sgd'] final_results = pd.DataFrame(columns=['model', 'i_fold', 'test_err', 'ndcg_mean', 'ndcg_std', 'dt']) for model, name in zip(final_models, final_model_names): for i_fold, (train_inds, test_inds) in enumerate(splits): print("i_fold={}, model={}".format(i_fold, name)) train_df, test_df = ratings_df.iloc[train_inds], ratings_df.iloc[test_inds] if name in ['als', 'sgd']: model.verbose = False t1 = datetime.now() preds = model.fit(train_df).predict(test_df[['user_id', 'item_id']]) dt = (datetime.now() - t1).total_seconds() test_err = mean_absolute_error(test_df['rating'], preds) user_map, ndcg_array = ndcg_from_df(test_df, preds, k=3) ndcg_mean, ndcg_std = np.mean(ndcg_array), np.std(ndcg_array) final_results.loc[len(final_results), :] = name, i_fold, test_err, ndcg_mean, ndcg_std, dt fig, (ax0, ax1, ax2) = plt.subplots(3, 1, sharex=True) sns.stripplot(data=final_results, x='model', y='test_err', ax=ax0, jitter=True) sns.stripplot(data=final_results, x='model', y='ndcg_mean', ax=ax1, jitter=True) sns.stripplot(data=final_results, x='model', y='dt', ax=ax2, jitter=True) ax0.set_ylabel('MAE') ax0.set_xlabel('') ax1.set_ylabel('NDCG@3') ax1.set_xlabel('') ax2.set_ylabel(r'time [$s$]') ax2.set(yscale='log') ax2.set_yticks([1, 10, 100]) plt.setp(ax0.collections, sizes=[50]) plt.setp(ax1.collections, sizes=[50]) plt.setp(ax2.collections, sizes=[50]) plt.show() ``` ### Fetching posters ``` n_splits = 3 skf = StratifiedKFold(n_splits=n_splits, random_state=0) splits = [ (train_inds, test_inds) for train_inds, test_inds in skf.split(ratings_df, ratings_df['user_id']) ] baseline_algo = DampedUserMovieBaselineModel(damping_factor=10) reg = 0.0 models_dict = { 'user': KNNRecommender(mode='user', k=50, baseline_algo=baseline_algo), 'item': KNNRecommender(mode='item', k=20, baseline_algo=baseline_algo), 'als': ALSRecommender(k=50, lmbda=0.1, max_epochs=15, baseline_algo=baseline_algo, verbose=False), 'sgd': SGDRecommender(k=50, learning_rate=0.01, max_epochs=30, damping_factor=10, user_reg=reg, item_reg=reg, user_bias_reg=reg, item_bias_reg=reg, verbose=False) } def get_poster_url(movieId, base_url, links_df, api_key): movieId = str(int(movieId)) # Get IMDB movie ID tmdbId = links_df.loc[movieId, 'tmdbId'] # Query themoviedb.org API for movie poster path. 
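    # NOTE: the request below hits the live themoviedb.org API; it assumes a valid
    # api_key and that the movie has at least one poster (posters[0] is used).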
movie_url = 'http://api.themoviedb.org/3/movie/{:}/images'.format(tmdbId) headers = {'Accept': 'application/json'} payload = {'api_key': api_key} response = requests.get(movie_url, params=payload, headers=headers) file_path = json.loads(response.text)['posters'][0]['file_path'] return base_url + file_path def display_posters(movieIds, base_url, links_df, api_key): poster_urls = [get_poster_url(movieId, base_url, links_df, api_key) for movieId in movieIds] TABLE = "<table style='width: 100%; align: center;'><tr>{}</tr></table>" CELL = "<td align='center'><img style='float: left; width: 120px' src={}></td>" table = TABLE.format(''.join([CELL.format(url) for url in poster_urls])) display(HTML(table)) def recommend(model, train_df, user, pretrained=False, k=3): train_df = train_df.iloc[:, :3].copy() train_df.columns = ['user', 'item', 'rating'] if not pretrained: model.fit(train_df) seen_movies = train_df[train_df['user'] == user]['item'].unique() unseen_movies = list(set(train_df['item'].unique()) - set(seen_movies)) user_movie_df = pd.DataFrame({'user': [user]*len(unseen_movies), 'item': unseen_movies}) user_movie_df = user_movie_df[['user', 'item']] user_movie_df['pred'] = model.predict(user_movie_df) user_movie_df = user_movie_df.sort_values('pred', ascending=False) movies, preds = user_movie_df[['item', 'pred']].values[:k, :].T return movies, preds ``` Movies this user likes ``` user = 10 train_inds, test_inds = splits[0] train_df, test_df = ratings_df.iloc[train_inds], ratings_df.iloc[test_inds] favorite_movies = ( train_df[train_df['user_id']==user] .sort_values('rating', ascending=False) .iloc[:5, 1] .values ) display_posters(favorite_movies, base_url, links_df, api_key) ``` Recommended movies ``` model = models_dict['als'] movies, preds = recommend(model, train_df, user, pretrained=True, k=5) display_posters(movies, base_url, links_df, api_key) ``` ## Non-Negative Matrix Factorization (NMF, scikit-learn package) Find two non-negative matrices (W, H) whose product approximates the non-negative matrix R. NOTE: since the values of matrix R MUST be all positive, we CAN'T do mean-centering normalization here (although this would improve the accuracy). ``` from sklearn.decomposition import NMF nmf_model = NMF(n_components=20) # starts with 20 latents factors # Matrix factorization # V ~ W.H (Find two non-negative matrices (W, H) whose product approximates the non- negative matrix X. 
)
nmf_model.fit(rating_matrix) # R can be array-like or sparse, here it is array-like (dense)
Theta = nmf_model.transform(rating_matrix) # user latent factors (= W, called the features matrix)
M = nmf_model.components_.T # item latent factors (= H.T) (H is called the coefficient matrix)

# Making the predictions
R_pred = M.dot(Theta.T) # See http://stackoverflow.com/questions/24739121/nonnegative-matrix-factorization-in-sklearn
R_pred = R_pred.T # same dimensions as R

print('Item features - M:', M.shape)
print('User features - Theta:', Theta.shape)
print()
print('R ~ M * Theta.T:')
print(R_pred.round(2))
print(R_pred.shape)
```
Estimating the error (RMSE) before tuning the hyperparameters
```
from sklearn.metrics import mean_squared_error

def get_rmse(pred, actual):
    pred = pred[actual.nonzero()].flatten() # keep only the rated (nonzero) entries
    actual = actual[actual.nonzero()].flatten() # keep only the rated (nonzero) entries
    return np.sqrt(mean_squared_error(pred, actual))

get_rmse(R_pred, rating_matrix)
```
When the predictive model is satisfying, save it to a file
```
import pickle
with open('nnmf_sklearn.pickle', 'wb') as f:
    pickle.dump(nmf_model, f)
```
Item recommendation for an active user (given their rating history)
```
def make_recommendation_activeuser(R, prediction, user_idx, k=5):
    '''
    user_idx ...... select an active user
    k ............. number of movies to recommend
    '''
    rated_items_df_user = pd.DataFrame(R).iloc[user_idx, :] # actual ratings of user_idx (seen movies)
    user_prediction_df_user = pd.DataFrame(prediction).iloc[user_idx, :] # predicted ratings for user_idx
    reco_df = pd.concat([rated_items_df_user, user_prediction_df_user, item_info], axis=1) # merge both lists with the movie's title
    reco_df.columns = ['rating', 'prediction', 'title']
    print('Preferred movies for user #', user_idx)
    print(reco_df.sort_values(by='rating', ascending=False)[:k]) # the k seen movies with the best actual ratings
    print('Recommended movies for user #', user_idx)
    reco_df = reco_df[reco_df['rating'] == 0]
    print(reco_df.sort_values(by='prediction', ascending=False)[:k]) # the k unseen movies with the best predicted ratings
    print()
    print()

make_recommendation_activeuser(rating_matrix, R_pred, user_idx=50, k=5)
make_recommendation_activeuser(rating_matrix, R_pred, user_idx=130, k=5)
```
Item recommendation for a new user (with rating history)
```
# creating a new user profile:
my_ratings = np.zeros((1682, 1), dtype=int)
my_ratings[0] = 4
my_ratings[1] = 4
my_ratings[10] = 1
my_ratings[15] = 3
my_ratings[27] = 4
my_ratings[34] = 1
my_ratings[49] = 1
my_ratings[55] = 1
my_ratings[61] = 1
my_ratings[68] = 5
my_ratings[70] = 4
my_ratings[81] = 4
my_ratings[87] = 2
my_ratings[94] = 4
my_ratings[120] = 2
my_ratings[171] = 1
my_ratings[173] = 4
my_ratings[175] = 1
my_ratings[182] = 1
my_ratings[194] = 2
my_ratings[203] = 5
my_ratings[209] = 5
my_ratings[221] = 1
my_ratings[234] = 2
my_ratings[312] = 3
my_ratings[317] = 3
my_ratings[322] = 3
my_ratings[342] = 1
my_ratings[378] = 1
my_ratings[379] = 1
my_ratings[392] = 3
my_ratings[404] = 2
my_ratings[422] = 4
my_ratings[542] = 4

for i in range(len(my_ratings)):
    print(i, my_ratings[i], item_info.iloc[i]['title'])

# Adding a new user to the R matrix
newR = np.concatenate((rating_matrix, my_ratings.T))

# Recompute the matrix factorization (nmf_model was defined and fit above)
newTheta = nmf_model.fit_transform(newR)
newX = nmf_model.components_.T

# Making the predictions
newR_pred = newX.dot(newTheta.T).T

# clipping values
newR_pred[newR_pred > newR.max()] = newR.max() # clips ratings above 5
newR_pred[newR_pred < newR.min() + 1] = newR.min() + 1 # clips ratings below 1

# Making the recommendation
make_recommendation_activeuser(newR, newR_pred, user_idx=newR.shape[0]-1, k=10)
```
Item recommendation for a new user (without rating history)
```
def make_recommendation_newuser(item_sim, item_idx, k=5):
    '''
    item_idx ...... select an item
    k ............. number of movies to recommend
    '''
    reco_item_df = pd.DataFrame(item_sim).iloc[item_idx, :]
    reco_item_df = pd.concat([reco_item_df, item_info], axis=1) # merge list with the movie's title
    reco_item_df.columns = ['similarity', 'title']
    reco_item_df = reco_item_df.sort_values(by='similarity', ascending=False)
    print('Recommended movies for a new user (without rating history), currently looking at movie:',
          reco_item_df.iloc[0]['title'])
    print(reco_item_df[1:k+1]) # returns the k movies most similar to item_idx
    print()

from sklearn.metrics.pairwise import cosine_similarity
item_sim = cosine_similarity(M) # use the item features matrix to compute movie-to-movie similarities

make_recommendation_newuser(item_sim, item_idx=1, k=5)
make_recommendation_newuser(item_sim, item_idx=20, k=5)
make_recommendation_newuser(item_sim, item_idx=500, k=5)
```
## Hybrid (using processed dataset)
1. Run content-based filtering and determine the movies which we want to recommend to the user.
2. Filter and sort the recommendations of CF using SVD predicted ratings.

Setup
```
!git clone https://github.com/vivdalal/movie-recommender-system.git

from math import sqrt
import pandas as pd
import numpy as np
import seaborn as sns
from matplotlib import pyplot as plt
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import linear_kernel

# Reading ratings file
hratings = pd.read_csv('movie-recommender-system/ratings.csv', sep=',', encoding='latin-1',
                       usecols=['userId', 'movieId', 'rating', 'timestamp'])
display(hratings.head())

# Reading movies file
hmovies = pd.read_csv('movie-recommender-system/movies.csv', sep=',', encoding='latin-1',
                      usecols=['movieId', 'title', 'genres'])
display(hmovies.head())
```
Content-based model
```
tfihmovies_genres = TfidfVectorizer(token_pattern='[a-zA-Z0-9\-]+')
hmovies['genres'] = hmovies['genres'].replace(to_replace="(no genres listed)", value="")
tfihmovies_genres_matrix = tfihmovies_genres.fit_transform(hmovies['genres'])
cosine_sim_movies = linear_kernel(tfihmovies_genres_matrix, tfihmovies_genres_matrix)

def get_recommendations_based_on_genres(movie_title, cosine_sim_movies=cosine_sim_movies):
    """
    Calculates the top 2 movies to recommend based on the given movie title's genres.
:param movie_title: title of movie to be taken for base of recommendation :param cosine_sim_movies: cosine similarity between movies :return: Titles of movies recommended to user """ # Get the index of the movie that matches the title idx_movie = hmovies.loc[hmovies['title'].isin([movie_title])] idx_movie = idx_movie.index # Get the pairwsie similarity scores of all movies with that movie sim_scores_movies = list(enumerate(cosine_sim_movies[idx_movie][0])) # Sort the movies based on the similarity scores sim_scores_movies = sorted(sim_scores_movies, key=lambda x: x[1], reverse=True) # Get the scores of the 10 most similar movies sim_scores_movies = sim_scores_movies[1:3] # Get the movie indices movie_indices = [i[0] for i in sim_scores_movies] # Return the top 2 most similar movies return hmovies['title'].iloc[movie_indices] def get_recommendation_content_model(userId): """ Calculates top movies to be recommended to user based on movie user has watched. :param userId: userid of user :return: Titles of movies recommended to user """ recommended_movie_list = [] movie_list = [] df_rating_filtered = hratings[hratings["userId"]== userId] for key, row in df_rating_filtered.iterrows(): movie_list.append((hmovies["title"][row["movieId"]==hmovies["movieId"]]).values) for index, movie in enumerate(movie_list): for key, movie_recommended in get_recommendations_based_on_genres(movie[0]).iteritems(): recommended_movie_list.append(movie_recommended) # removing already watched movie from recommended list for movie_title in recommended_movie_list: if movie_title in movie_list: recommended_movie_list.remove(movie_title) return set(recommended_movie_list) list(get_recommendation_content_model(1))[:10] ``` SVD Collaborative model ``` # !pip install -q surprise from surprise import Reader, Dataset, SVD from surprise.model_selection import cross_validate reader = Reader() data = Dataset.load_from_df(ratings_df[['user_id', 'item_id', 'rating']], reader) svd = SVD() cross_validate(svd, data, measures=['RMSE', 'MAE'], cv=5, verbose=True) trainset = data.build_full_trainset() svd.fit(trainset) ``` Hybrid model ``` def hybrid_content_svd_model(userId): recommended_movies_by_content_model = get_recommendation_content_model(userId) recommended_movies_by_content_model = hmovies[hmovies.apply(lambda movie: movie["title"] in recommended_movies_by_content_model, axis=1)] for key, columns in recommended_movies_by_content_model.iterrows(): predict = svd.predict(userId, columns["movieId"]) recommended_movies_by_content_model.loc[key, "svd_rating"] = predict.est # if(predict.est < 2): # recommended_movies_by_content_model = recommended_movies_by_content_model.drop([key]) return recommended_movies_by_content_model.sort_values("svd_rating", ascending=False).iloc[0:11] user_id = 50 hybrid_content_svd_model(user_id) ``` ## LightFM - BPR & WARP ``` # !pip install -q lightfm import numpy as np from lightfm.datasets import fetch_movielens from lightfm import LightFM from lightfm.evaluation import precision_at_k from lightfm.evaluation import auc_score movielens = fetch_movielens() for key, value in movielens.items(): print(key, type(value), value.shape) lfm_train = movielens['train'] lfm_test = movielens['test'] model = LightFM(learning_rate=0.05, loss='bpr') model.fit(lfm_train, epochs=10) train_precision = precision_at_k(model, lfm_train, k=10).mean() test_precision = precision_at_k(model, lfm_test, k=10, train_interactions=lfm_train).mean() train_auc = auc_score(model, lfm_train).mean() test_auc = auc_score(model, lfm_test, 
train_interactions=lfm_train).mean() print('Precision: train %.2f, test %.2f.' % (train_precision, test_precision)) print('AUC: train %.2f, test %.2f.' % (train_auc, test_auc)) ``` BPR optimises for ROC. The WARP model, on the other hand, optimises for precision@k---we should expect its performance to be better on precision. ``` model = LightFM(learning_rate=0.05, loss='warp') model.fit_partial(lfm_train, epochs=10) train_precision = precision_at_k(model, lfm_train, k=10).mean() test_precision = precision_at_k(model, lfm_test, k=10, train_interactions=lfm_train).mean() train_auc = auc_score(model, lfm_train).mean() test_auc = auc_score(model, lfm_test, train_interactions=lfm_train).mean() print('Precision: train %.2f, test %.2f.' % (train_precision, test_precision)) print('AUC: train %.2f, test %.2f.' % (train_auc, test_auc)) ``` ## Microsoft Library - FastAI-Collab ``` %cd /content/4CED0278/4CED0278 import os import sys import time import itertools import numpy as np import pandas as pd import torch, fastai from fastai.collab import EmbeddingDotBias, collab_learner, CollabDataBunch, load_learner from reco_utils.dataset.python_splitters import python_stratified_split from reco_utils.recommender.fastai.fastai_utils import cartesian_product, score from reco_utils.evaluation.python_evaluation import map_at_k, ndcg_at_k, precision_at_k, recall_at_k from reco_utils.evaluation.python_evaluation import rmse, mae, rsquared, exp_var USER, ITEM, RATING, TIMESTAMP, PREDICTION, TITLE = 'UserId', 'MovieId', 'Rating', 'Timestamp', 'Prediction', 'Title' # top k items to recommend TOP_K = 10 # select movieLens data size: 100k, 1m, 10m, or 20m MOVIELENS_DATA_SIZE = '100k' # model parameters N_FACTORS = 40 EPOCHS = 5 ratings = pd.read_csv('./data/ml-100k/ratings.csv') ratings.head() # split the dataset train_valid_df, test_df = python_stratified_split(ratings, ratio=0.75, min_rating=5, filter_by="item", col_user=USER, col_item=ITEM ) data = CollabDataBunch.from_df(train_valid_df, user_name=USER, item_name=ITEM, rating_name=RATING, valid_pct=0) data.show_batch() ``` Now we will create a `collab_learner` for the data, which by default uses the `EmbeddingDotBias` model. We will be using 40 latent factors. This will create an embedding for the users and the items that will map each of these to 40 floats as can be seen below. Note that the embedding parameters are not predefined, but are learned by the model. Although ratings can only range from 1-5, we are setting the range of possible ratings to a range from 0 to 5.5 -- that will allow the model to predict values around 1 and 5, which improves accuracy. Lastly, we set a value for weight-decay for regularization. ``` learn = collab_learner(data, n_factors=N_FACTORS, y_range=[0,5.5], wd=1e-1) learn.model ``` Now train the model for 5 epochs setting the maximal learning rate. 
The learner will reduce the learning rate with each epoch using cosine annealing ``` learn.fit_one_cycle(EPOCHS, max_lr=5e-3) # save the learner learn.export('movielens_model.pkl') ``` Evaluation ``` # load the learner learner = load_learner(path=".", file='movielens_model.pkl') # get all users and items that the model knows total_users, total_items = learner.data.train_ds.x.classes.values() total_items = total_items[1:] total_users = total_users[1:] # get all users from the test set and remove any users that were now in the training set test_users = test_df[USER].unique() test_users = np.intersect1d(test_users, total_users) # build the cartesian product of test set users and all items known to the model users_items = cartesian_product(np.array(test_users),np.array(total_items)) users_items = pd.DataFrame(users_items, columns=[USER,ITEM]) # remove the user/items combinations that are in the training set # we don't want to propose a movie that the user has already watched training_removed = pd.merge(users_items, train_valid_df.astype(str), on=[USER, ITEM], how='left') training_removed = training_removed[training_removed[RATING].isna()][[USER, ITEM]] # score the model to find the top K recommendation top_k_scores = score(learner, test_df=training_removed, user_col=USER, item_col=ITEM, prediction_col=PREDICTION) # MAP eval_map = map_at_k(test_df, top_k_scores, col_user=USER, col_item=ITEM, col_rating=RATING, col_prediction=PREDICTION, relevancy_method="top_k", k=TOP_K) # NDCG eval_ndcg = ndcg_at_k(test_df, top_k_scores, col_user=USER, col_item=ITEM, col_rating=RATING, col_prediction=PREDICTION, relevancy_method="top_k", k=TOP_K) # Precision eval_precision = precision_at_k(test_df, top_k_scores, col_user=USER, col_item=ITEM, col_rating=RATING, col_prediction=PREDICTION, relevancy_method="top_k", k=TOP_K) # Recall eval_recall = recall_at_k(test_df, top_k_scores, col_user=USER, col_item=ITEM, col_rating=RATING, col_prediction=PREDICTION, relevancy_method="top_k", k=TOP_K) print("Model:\t" + learn.__class__.__name__, "Top K:\t%d" % TOP_K, "MAP:\t%f" % eval_map, "NDCG:\t%f" % eval_ndcg, "Precision@K:\t%f" % eval_precision, "Recall@K:\t%f" % eval_recall, sep='\n') # calculate scores for test user-item pairs scores = score(learner, test_df=test_df.copy(), user_col=USER, item_col=ITEM, prediction_col=PREDICTION) # calculate some regression metrics eval_r2 = rsquared(test_df, scores, col_user=USER, col_item=ITEM, col_rating=RATING, col_prediction=PREDICTION) eval_rmse = rmse(test_df, scores, col_user=USER, col_item=ITEM, col_rating=RATING, col_prediction=PREDICTION) eval_mae = mae(test_df, scores, col_user=USER, col_item=ITEM, col_rating=RATING, col_prediction=PREDICTION) eval_exp_var = exp_var(test_df, scores, col_user=USER, col_item=ITEM, col_rating=RATING, col_prediction=PREDICTION) print("Model:\t" + learn.__class__.__name__, "RMSE:\t%f" % eval_rmse, "MAE:\t%f" % eval_mae, "Explained variance:\t%f" % eval_exp_var, "R squared:\t%f" % eval_r2, sep='\n') ```
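For reference, the scoring rule behind `EmbeddingDotBias` (the model used by `collab_learner` above) can be written in a few lines. This is an illustrative NumPy sketch, assuming fastai v1's behaviour of squashing the dot-product-plus-biases score into `y_range` with a sigmoid; it is not a drop-in replacement for the trained learner:
```
import numpy as np

def dot_bias_score(u_emb, i_emb, u_bias, i_bias, y_range=(0, 5.5)):
    """Score one user/item pair the way an embedding dot-bias model does.

    u_emb, i_emb : 1d arrays of length n_factors (the learned embeddings)
    u_bias, i_bias : scalars (the learned user and item biases)
    y_range : the raw score is squashed into this interval with a sigmoid
    """
    raw = u_emb @ i_emb + u_bias + i_bias
    lo, hi = y_range
    return lo + (hi - lo) / (1.0 + np.exp(-raw))

# Example with random stand-in parameters and 40 factors (matching N_FACTORS above)
rng = np.random.default_rng(0)
print(dot_bias_score(0.1 * rng.normal(size=40), 0.1 * rng.normal(size=40), 0.2, -0.1))
```
Squashing into a range slightly wider than the true 1-5 scale is what lets the trained model produce predictions close to 1 and 5, as noted above.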
# "Covid-19, आपका समुदाय और आप - एक डेटा विज्ञान परिप्रेक्ष्य" > "लिखित: 09 मार्च 2020 जेरेमी हावर्ड और रेचल थॉमस द्वारा" - toc: false - badges: false - comments: true - categories: [ai-in-society] - image: images/coronavirus.jpg > हम डेटा वैज्ञानिक हैं - अर्थात, हमारा काम यह समझना है कि हम डेटा का विश्लेषण और व्याख्या कैसे करें। जब हम Covid-19 के डेटा का विश्लेषण करते हैं, तो हम बहुत चिंतित होते हैं। समाज के सबसे कमजोर हिस्सों, बुजुर्गों और गरीबों को सबसे अधिक खतरा है, लेकिन बीमारी के प्रसार और प्रभाव को नियंत्रित करने के लिए हम सभी को अपने व्यवहार को बदलने की आवश्यकता है। अपने हाथों को अच्छी तरह से और नियमित रूप से धोएं, समूहों और भीड़ से बचें, कार्यक्रमों को रद्द करें, और अपना चेहरा न छुएं। इस लेख में, हम बताते हैं कि हम क्यों चिंतित हैं, और आपको भी क्यों चिंतित होना चाहिए। आपको जो महत्वपूर्ण जानकारी जानने की आवश्यकता है, उसके एक उत्कृष्ट सारांश के लिए, ईथन एले (एक गैर-लाभकारी संस्था के अध्यक्ष जो महामारियों से जोखिम को कम करने के लिए प्रौद्योगिकियों का विकास करते है) द्वारा लिखित [Corona in Brief](https://docs.google.com/document/u/1/d/1vumYWoiV7NlVoc27rMQvmVkVu5cOAbnaW_RKkq2RMaQ/mobilebasic) पढ़ें। ### विषय-सूची * [हमें एक सुचारू चिकित्सा प्रणाली की आवश्यकता है](#हमें-एक-सुचारू-चिकित्सा-प्रणाली-की-आवश्यकता-है) * [यह फ्लू जैसा नहीं है](#यह-फ्लू-जैसा-नहीं-है) * [“घबराएँ नहीं। शान्ति बनाये रखें।" यह मददगार नहीं है](#घबराएँ-नहीं-शान्ति-बनाये-रखें-यह-मददगार-नहीं-है) * [यह केवल आपके बारे में नहीं है](#यह-केवल-आपके-बारे-में-नहीं-है) * [हमें वक्र को समतल करने की आवश्यकता है](#हमें-वक्र-को-समतल-करने-की-आवश्यकता-है) * [समाज की जवाबी कार्यवाही से ही सारा अंतर पड़ता है](#समाज-की-जवाबी-कार्यवाही-से-ही-सारा-अंतर-पड़ता-है) * [अमेरिका में हमारे पास पर्याप्त जानकारी नहीं है](#अमेरिका-में-हमारे-पास-पर्याप्त-जानकारी-नहीं-है) * [निष्कर्ष](#निष्कर्ष) ### हमें एक सुचारू चिकित्सा प्रणाली की आवश्यकता है अभी 2 वर्ष पहले हममें से एक (रेचल) को मस्तिष्क में संक्रमण हुआ था, जिसके एक चौथाई मरीज़ो की मृत्यु हो जाती है, और एक तिहाई मरीज़ो को स्थायी संज्ञानात्मक क्षति पहुँचती है। कई अन्य लोगों की दृष्टि और श्रवण-शक्ति स्थायी रूप से नष्ट हो जाती है। अस्पताल की पार्किंग में पहुँचने तक ही रेचल बेसुध सी हो जाती। रेचल भाग्यशाली रही की उसे तुरंत देखभाल, निदान और उपचार प्राप्त हुआ। इस घटना से कुछ समय पहले तक रेचल काफी स्वस्थ थी। आपातकालीन कक्ष तुरंत मिल पाने के कारण ही शायद उसकी जान बच पायी। अब ज़रा Covid-19 के बारे में बात करते हैं, और आने वाले हफ्तों और महीनों में रेचल जैसी स्थिति में लोगों के साथ क्या हो सकता है। Covid-19 से संक्रमित पाए जाने वाले लोगों की संख्या हर 3 से 6 दिनों में दोगुनी हो जाती है। तीन दिनों में दोगुनी दर, इसका अर्थ यह है कि संक्रमित पाए जाने वाले लोगों की संख्या तीन सप्ताह में 100 गुना तक बढ़ सकती है (यह वास्तव में इतना सरल नहीं है, लेकिन तकनीकी विवरणों से भटकने से बचते हैं)। [10 संक्रमित लोगों में से एक](https://www.who.int/docs/default-source/coronaviruse/who-china-joint-mission-on-covid-19-final-report.pdf) को कई हफ्तों तक अस्पताल में भर्ती रहने की आवश्यकता होती है, और इनमें से अधिकांश को ऑक्सीजन की भी आवश्यकता होती है। हालांकि इस वायरस के अभी बहुत शुरुआती दिन हैं, अभी से ही ऐसे क्षेत्र हैं जहां अस्पताल पूरी तरह से खत्म हो चुके हैं, और लोग अब उस उपचार को प्राप्त करने में सक्षम नहीं हैं जिनकी उन्हें आवश्यकता है (न केवल Covid-19 के लिए, बल्कि किसी और चीज के लिए भी, जैसे कि रेचल की जरूरत की जीवन रक्षक देखभाल)। उदाहरण के लिए, इटली में, जहां सिर्फ एक हफ्ते पहले अधिकारी कह रहे थे कि सब कुछ ठीक है, अब सोलह मिलियन लोगों को लॉक-डाउन पर रखा गया है (अपडेट: इसे पोस्ट करने के 6 घंटे बाद, इटली ने पूरे देश को लॉक-डाउन में डाल दिया), और इस तरह के तम्बू मरीजों की आमद को संभालने में मदद के लिए लगाए जा रहे हैं: 
![](my_icons/italymedicaltent.jpg "इटली में प्रयोग किया जा रहा एक चिकित्सा तम्बू") डॉ एंटोनियो पेसेन्ती, इटली के एक अत्यंत-प्रभावित क्षेत्र में क्षेत्रीय संकट पजवाबी कार्यवाही इकाई के प्रमुख ने [कहा](https://www.reuters.com/article/us-health-coronavirus-italy/alarmed-italy-locks-down-north-to-prevent-spread-of-coronavirus-idUSKBN20V06R) है, "अब हम कॉरिडोर में गहन देखभाल उपचार स्थापित करने के लिए मजबूर हो रहे हैं, ऑपरेटिंग थिएटरों में, रिकवरी रूम में ... दुनिया की एक सबसे अच्छी स्वास्थ्य प्रणाली, लोम्बार्डी में पतन से एक कदम दूर है।” ### यह फ्लू जैसा नहीं है फ्लू से संक्रमित लोगों की मृत्यु दर लगभग 0.1% है। हार्वर्ड में सेंटर फॉर कम्युनिकेबल डिजीज डायनेमिक्स के निदेशक मार्क लिप्सिच का [अनुमान है](https://www.washingtonpost.com/opinions/2020/03/06/why-its-so-hard-pin-down-risk-dying-coronavirus/) कि Covid-19 के लिए यह दर 1-2% है। [नवीनतम महामारी विज्ञान प्रतिरूपण](https://www.medrxiv.org/content/10.1101/2020.03.04.20031104v1.full.pdf) ने फरवरी में चीन में इस दर को 1.6% पाया, जो कि फ्लू की तुलना में सोलह गुना अधिक है (हालांकि यह काफी अनुदार संख्या हो सकती है, क्योंकि जब चिकिस्ता प्रणाली हालात का सामना नहीं कर पाती तब यह दर तेज़ी से बढ़ती है)। वर्तमान के सबसे अच्छे अनुमानों की उम्मीद है कि Covid-19 इस साल फ्लू की तुलना में इस साल 10 गुना अधिक लोगों की मृत्यु का कारण बनेगा (और एयरबीएनबी में डेटा विज्ञान के पूर्व निदेशक, [एलेना ग्रेवाल का प्रतिरूपण](https://docs.google.com/spreadsheets/d/1ktSfdDrX_uJsdM08azBflVOm4Z5ZVE75nA0lGygNgaA/edit#gid=0) दर्शाता है कि यह सबसे खराब स्थिति में 100 गुना अधिक हो सकता है)। यह चिकित्सा प्रणाली पर भारी प्रभाव को नज़रअंदांज़ करते हुए है, जैसे कि ऊपर वर्णित है। ज़ाहिर है कि कुछ लोग खुद को समझाने की कोशिश कर रहे हैं कि यह कोई नई बात नहीं है, फ्लू जैसी बीमारी है, क्योंकि वास्तविकता को स्वीकार करना अत्यंत कठिन है और ये हालात काफी असामान्य हैं। हमारे मस्तिष्क की संरचना इस तरह की नहीं है कि हम घातांकीय रूप से बढ़ने वाली इस संक्रमित लोगों की संख्या को सहज रूप से समझ सकें। इसलिए हमें वैज्ञानिकों के रूप में इसका विश्लेषण करना होगा, न कि अपने सहज-ज्ञान का उपयोग करना होगा। ![](my_icons/coronachart.png "यह दो सप्ताह में कहाँ होगा? दो महीनों में?") प्रत्येक व्यक्ति जिसे फ्लू है, औसतन, वह 1.3 अन्य लोगों को संक्रमित करता है। जिसे फ्लू के लिए "R0" कहा जाता है। यदि R0 1.0 से कम है, तो संक्रमण फैलना बंद हो जाता है और मर जाता है। यदि यह 1.0 से अधिक है, तो यह फैलता है। वर्तमान में चीन के बाहर Covid-19 के लिए R0 2-3 है। अंतर सुनने में छोटा लग सकता है, लेकिन संक्रमित लोगों की 20 "पीढ़ियां" जब संक्रमण आगे बढ़ाती हैं, 1.3 के R0 का परिणाम 146 संक्रमण होगा, लेकिन 2.5 के R0 का परिणाम होगा 3.6 करोड़! (यह निश्चित रूप से अधूरा है और कई वास्तविक दुनिया के प्रभावों को नजरअंदाज करता है, लेकिन यह Covid-19 और फ़्लू के बीच सापेक्ष अंतर का एक उचित चित्रण है, अन्य सभी तथ्यों को समान मानते हुए)। ध्यान दें कि R0 किसी बीमारी का एक मौलिक लक्षण नहीं है। यह जवाबी कार्यवाही पर बहुत निर्भर करता है, और यह समय अनुसार बदल सकता है। अतिविशेष रूप से, चीन में Covid-19 के लिए R0 में काफी कमी आई है, और अब यह 1.0 आ रहा है! आप पूछेंगे, कैसे? 
ऐसे बड़े पैमानों पर कदम उठाना जिनकी अमेरिका जैसे देश में कल्पना करना भी मुश्किल है- उदाहरण के लिए, कई विशाल शहरों को पूरी तरह से बंद कर देना, और एक परीक्षण प्रक्रिया विकसित करना जो एक सप्ताह में दस लाख से अधिक लोगों का परीक्षण मुमकिन बनाता है। एक चीज जो सोशल मीडिया पर बहुत अधिक उभरती है (एलोन मस्क जैसे अत्यधिक फ़ॉलो किए गए खातों से भी) कि लोगों में लॉजिस्टिक और घातीय वृद्धि के बीच अंतर की गलतफहमी है। "लॉजिस्टिक" वृद्धि महामारी के "s-आकार" विकास के स्वरुप को संदर्भित करता है। यकीनन घातीय वृद्धि हमेशा के लिए नहीं हो सकती, क्योंकि अन्यथा दुनिया में लोगों की संख्या से अधिक संख्या संक्रमित लोगों की हो जाएगी! इसलिए, अंत में, संक्रमण दर हमेशा कम होनी चाहिए, जिसके परिणामस्वरूप समय के साथ एक s-आकार (जो सिग्मॉइड के रूप में जाना जाता है) की वृद्धि दर बनती है। हालाँकि, घटती वृद्धि दर केवल एक कारण से होती है-यह कोई जादू नहीं है। मुख्य कारण हैं: * बड़े पैमाने पर और प्रभावी सामुदायिक जवाबी कार्यवाही, या * इतने बड़े प्रतिशत में लोग संक्रमित हैं कि फैलने के लिए कम असंक्रमित लोग बचे हैं। इसलिए, महामारी के "नियंत्रण" के लिए लॉजिस्टिक वृद्धि के स्वरुप पर निर्भर होने में कोई तार्किक समझदारी नहीं है। एक और बात जो आपके स्थानीय समुदाय में Covid-19 के प्रभाव को सहज रूप से समझना कठिन बना देती है, वह यह है कि संक्रमण और अस्पताल में भर्ती होने के बीच बहुत महत्वपूर्ण देरी होती है - आम तौर पर लगभग 11 दिन। यह शायद एक लम्बा समय न प्रतीत हो, लेकिन जब आप इसकी तुलना तब तक संक्रमित लोगों की संख्या से करते हैं, तो इसका मतलब है कि जब तक आप ध्यान देंगे कि अस्पताल में बिस्तर भर चुके हैं, तब तक सामुदायिक संक्रमण इस स्तर पर पहुंच जायेगा कि निपटने के लिए 5-10 गुना अधिक लोग होंगे। ध्यान दें कि कुछ शुरुआती संकेत ऐसे भी हैं कि आपके स्थानीय क्षेत्र में प्रभाव कम से कम कुछ हद तक जलवायु पर निर्भर हो सकता है। [Covid-19 के संभावित प्रसार और मौसम-तत्व के पूर्वानुमान के लिए तापमान और अक्षांश विश्लेषण](https://poseidon01.ssrn.com/delivery.php?ID=091071099092098096101097074089104068104013035023062021010031112088025099126064001093097030102106046016114116082016095089113023126034089078012119081090111118122007110026000085123071022022127025026080005029001020025126022000066075021086079031101116126112&EXT=pdf) बताता है कि यह बीमारी अब तक सुहावने जलवायु में फैल रही है (दुर्भाग्य से हमारे लिए, सैन फ्रांसिस्को में तापमान सीमा, जहां हम रहते हैं, उस सीमा के लिए सही है; यह लंदन सहित यूरोप के मुख्य जनसंख्या केंद्रों को भी शामिल करता है।) ### “घबराएँ नहीं। शान्ति बनाये रखें।" यह मददगार नहीं है एक आम प्रतिक्रिया जो हमने सोशल मीडिया पर उन लोगों के लिए देखी है जो चिंतित होने के कारणों की ओर इशारा कर रहे हैं, "घबराओ मत" या "शांत रहो"। कम से कम कहने में तो यह मददगार नहीं है। कोई यह नहीं कह रहा है कि आतंकित होना एक उपयुक्त प्रतिक्रिया है। किसी कारणवष, हालांकि, "शांत रहें" कुछ मंडलियों में बहुत लोकप्रिय प्रतिक्रिया है (लेकिन महामारी विज्ञानियों के बीच नहीं, जिनका काम इन चीजों पर नज़र रखना करना है)। शायद "शांत रहें" कुछ लोगों को अपनी निष्क्रियता के बारे में बेहतर महसूस करने में मदद करता है, या उन्हें ऐसे लोगों से बेहतर महसूस कराता है जिनकी वे कल्पना करते हैं कि वे बिना सिर की मुर्गी की तरह इधर-उधर भाग रहे हैं। लेकिन "शांत रहें" काफी आसानी से आपकी तैयारी और जवाबी कार्यवाही की विफलता का कारण बन सकता है। चीन में, दसियों लाख को लॉक-डाउन पर रखा गया था और दो नए अस्पतालों का निर्माण उस समय तक कर लिया गया था जब उनकी स्तिथि वैसी थी जैसी अमेरिका की अब है। इटली ने बहुत लंबा इंतजार किया, और आज (रविवार 8 मार्च को) उन्होंने 1692 लोगों को बंद करने के बावजूद 1492 नए मामले और 133 नई मौतें दर्ज कीं। इस समय सबसे अच्छी जानकारी के आधार पर हम यह पता लगा सकते हैं कि 2-3 सप्ताह पहले इटली भी उसी स्थिति में था,जो अमेरिका और ब्रिटेन में आज (संक्रमण के आंकड़ों के संदर्भ में) है। ध्यान दें कि 
इस समय पर Covid-19 के बारे में लगभग सब कुछ हवा में है। हम वास्तव में इसकी संक्रमण की गति या मृत्यु दर नहीं जानते हैं, हम यह नहीं जानते हैं कि यह सतहों पर कितनी देर तक सक्रिय रहता है, हम नहीं जानते कि यह गर्म परिस्थितियों में जीवित रह सकता है, फैल सकता है या नहीं। हमारे पास जो कुछ भी है वो इस समय तक लोगों द्वारा जुटाई गयी जानकारियों के आधार पर हमारे सर्वोत्तम अनुमान हैं। और याद रखें, इस जानकारी का अधिकांश हिस्सा चीन में, चीनी में है। वर्तमान में, चीन के अनुभव को समझने का सबसे अच्छा तरीका है कि चीन, जर्मनी, जापान, कोरिया, नाइजीरिया, रूस, सिंगापुर, अमेरिका और विश्व स्वास्थ्य संगठन (WHO) के 25 राष्ट्रीय और अंतर्राष्ट्रीय विशेषज्ञों के संयुक्त मिशन के आधार पर, [WHO-चीन संयुक्त मिशन की उत्कृष्ट रिपोर्ट को कोरोनावायरस रोग 2019 पर](https://www.who.int/docs/default-source/coronaviruse/who-china-joint-mission-on-covid-19-final-report.pdf) पढ़ें। जब यह अनिश्चित है कि, यह वैश्विक महामारी बनेगा या नहीं, और शायद सब कुछ *बस* ठीक से गुज़र जाये बिना अस्पताल तंत्र के ढहे हुए, तब इसका अर्थ ये बिलकुल नहीं है कि कुछ न करना ही उचित जवाबी कार्यवाही है। यह जूए के समान होगा और किसी भी खतरे के प्रतिरूप के परिदृश्य में यह हमारी ईष्टतम प्रतिक्रिया नहीं होगी। यह भी काफी असंभव सा है कि इटली और चीन जैसे देश बिना किसी अच्छे कारण के अपनी अर्थव्यवस्था के बड़े हिस्से को प्रभावी ढंग से बंद कर देंगे। यह उन वास्तविक प्रभावों के साथ भी तर्कसंगत नहीं है जो हम संक्रमित क्षेत्रों में जमीन पर देख रहे हैं, जहां चिकित्सा प्रणाली सामना करने में असमर्थ है (उदाहरण के लिए, इटली "प्री-ट्राइएज" के लिए 462 टेंट का उपयोग कर रहा है, और अभी भी [आईसीयू रोगियों को स्थानांतरित करना है संक्रमित क्षेत्रों से](https://www.repubblica.it/cronaca/2020/03/08/news/coronavirus_situazione_italia-250665818/?ref=RHPPTP-BH-I250661466-C12-P5-S1.12-T1))। इसके बजाय, विचारशील, उचित प्रतिक्रिया उन चरणों का पालन करना है जो विशेषज्ञों द्वारा संक्रमण फैलाने से बचने के लिए अनुशंसित हैं: * बड़े समूहों और भीड़ से बचें * कार्यक्रमों को रद्द करें * यदि संभव हो तो घर से काम करें * घर से आते और जाते समय और बाहर आने पर बार-बार हाथ धोएं * अपने चेहरे को छूने से बचें, खासकर जब आपके घर के बाहर (आसान नहीं!) 
* सतहों और सामान को कीटाणु रहित करें (यह संभव है कि वायरस सतहों पर 9 दिनों तक सक्रिय रह सकता है, हालांकि यह अभी भी निश्चित रूप से ज्ञात नहीं है)। ### यह केवल आपके बारे में नहीं है यदि आप 50 वर्ष से कम उम्र के हैं, और जोखिम कारक नहीं हैं, जैसे कि कमज़ोर प्रतिरक्षा प्रणाली, हृदय रोग, धूम्रपान का इतिहास, या अन्य पुरानी बीमारियां, तो आपको कुछ आराम हो सकता है कि Covid-19 की आपको मारने की संभावना बहुत कम है। लेकिन इसके प्रति आपकी प्रतिकिरिया अभी भी बहुत मायने रखती है। आपके पास अभी भी संक्रमित होने की अधिक संभावना है, और यदि होते हैं, तो बस दूसरों को संक्रमित करने की भी उतनी ही सम्भावना है। औसतन, प्रत्येक संक्रमित व्यक्ति दो से अधिक लोगों को संक्रमित कर रहा है, और वे लक्षण दिखाने से पहले संक्रामक हो जाते हैं। यदि आपके पास ऐसे माता-पिता हैं जिनकी आप परवाह करते हैं, या दादा-दादी, और उनके साथ समय बिताने की योजना बनाते हैं, और बाद में पता चलता है कि आप उन्हें Covid-19 के साथ संक्रमित करने के लिए जिम्मेदार हैं, तो आपको इस बोझ के साथ जीना पड़ेगा। यहां तक कि अगर आप 50 से अधिक की उम्र के लोगों के संपर्क में नहीं हैं, तो यह संभावना है कि आपके पास ऐसे सहकर्मी और परिचित हैं जिनकी गंभीर बीमारियों का आपको अनुमान भी नहीं है। [अनुसंधान से पता चलता है](https://www.talentinnovation.org/_private/assets/DisabilitiesInclusion_KeyFindings-CTI.pdf) कि कुछ लोग कार्यस्थल में अपने स्वास्थ्य की स्थिति का खुलासा करने से बचते हैं, यदि वो बच सकते हैं, क्योंकि उन्हें [भेदभाव का डर](https://medium.com/@racheltho/the-tech-industry-is-failing-people-with-disabilities-and-chronic-illnesses-8e8aa17937f3) होता है। हम दोनों ही उच्च जोखिम की श्रेणियों में हैं, लेकिन कई लोग जिनसे हम नियमित रूप से बातचीत करते हैं, वे शायद यह नहीं जानते होंगे। और हाँ, यह केवल आपके आस पास के लोगो की बात नहीं है। यह एक अत्यधिक महत्वपूर्ण नैतिक मुद्दा है। प्रत्येक व्यक्ति जो वायरस के प्रसार को नियंत्रित करने में योगदान करने की पूरी कोशिश करता है, वह संक्रमण की दर को धीमा करने में अपने पूरे समुदाय की मदद कर रहा है। जैसा कि ज़ीनेप तुफैकी ने [साइंटिफिक अमेरिकन में लिखा है](https://blogs.scientificamerican.com/observations/preparing-for-coronavirus-to-strike-the-u-s/): "इस वायरस के लगभग अवश्यंभावी वैश्विक प्रसार के लिए तैयारी ... समाज के लिए सबसे अधिक परोपकारी चीजों में से एक है जो आप कर सकते हैं"। वह लिखती हैं: > हमें तैयार होना चाहिए, इसलिए नहीं कि हम व्यक्तिगत रूप से जोखिम महसूस कर सकते हैं, बल्कि इसलिए ताकि हम सभी के लिए जोखिम कम करने में मदद कर सकें। हमें तैयार नहीं होना चाहिए क्योंकि हम अपने नियंत्रण से बाहर एक कयामत के दिन का सामना कर रहे हैं, लेकिन क्योंकि हम इस जोखिम के हर पहलू को बदल सकते हैं जो हम एक समाज के रूप में सामना करते हैं। यह सही है, आपको तैयार रहना चाहिए क्योंकि आपके पड़ोसियों को आपको तैयार करने की आवश्यकता है - विशेष रूप से आपके बुजुर्ग पड़ोसी, आपके पड़ोसी जो अस्पतालों में काम करते हैं, आपके पड़ोसी पुरानी बीमारियों के साथ, और आपके पड़ोसी जिनके पास साधन या समय नहीं हैं तैयारी का जिसका कारण संसाधनों की कमी हो या समय की। इसने हमें व्यक्तिगत रूप से प्रभावित किया है। हमारा सबसे बड़ा और सबसे महत्वपूर्ण कोर्स फ़ास्ट.एआई, जो हमारे लिए वर्षों के काम की परिणति का प्रतिनिधित्व करता है, एक सप्ताह में सैन फ्रांसिस्को विश्वविद्यालय में शुरू होने वाला था। पिछले बुधवार (4 मार्च) को, हमने पूरी बात ऑनलाइन स्थानांतरित करने का निर्णय लिया। हम ऑनलाइन स्थानांतरित करने वाले पहले बड़े पाठ्यक्रमों में से एक थे। हमने ऐसा क्यों किया? 
Because we realised early last week that if we ran the course, we would, predictably, be encouraging hundreds of people to gather in an enclosed space, multiple times over a multi-week period. Bringing groups together in enclosed spaces is about the worst thing that can be done right now. We felt ethically obliged to make sure that, at least in this case, that did not happen. It was a heartbreaking decision. Our time working directly with our students is one of the great joys and most productive periods of every year. And we had students planning to fly in from all over the world, whom we really did not want to let down. But we knew it was the right thing to do, because otherwise we could be increasing the spread of the disease in our community.

### We need to flatten the curve

This is extremely important, because if we can slow the rate of infection in a community, we give that community's hospitals a chance to cope both with the infected patients and with the regular patient load they need to handle. This has been described as "flattening the curve", and is shown clearly in this illustrative chart:

![](my_icons/chart.jpeg "Staying below the dotted line is everything")

Farzad Mostashari, the former National Coordinator for Health IT, explained: "New cases are being identified every day that have no travel history or connection to a known case, and we know that these are just a small glimpse of a much larger problem, because of delays in testing. That means the number of diagnosed cases will explode over the next two weeks... Trying to contain it when there is exponential spread in the community is like focusing on the sparks in a burning house. When that happens, we need to switch strategies to mitigation: taking protective measures to slow and reduce the peak impact on health care."

If we can keep the spread of the disease low enough that our hospitals can handle the load, then people can get treatment. But if the cases come too quickly, those who need hospitalisation will not get it. According to [Liz Specht](https://twitter.com/LizSpecht/status/1236095186737852416), the math could look something like this:

> The US has about 2.8 hospital beds per 1000 people. With a population of 330M, that is roughly 1M beds. At any given time, 65% of those beds are already occupied. That leaves about 330k beds available nationwide (this figure is probably a bit lower right now because of the regular flu season). Let's rely on Italy's numbers and assume that about 10% of cases are serious enough to need hospitalisation. (Keep in mind that many patients have to stay in hospital for weeks; in other words, turnover will be very slow as beds fill up with COVID19 patients.) By this estimate, by about May 8th, all open hospital beds in the US will be filled. (This says nothing about whether those beds are even suitable for isolating patients with a highly infectious disease.) If we are off by a factor of two about the fraction of severe cases, that only shifts the timeline for running out of beds by 6 days in either direction. If 20% of cases require hospitalisation, we run out of beds by around May 2nd; if only 5% of cases require it, we make it to around May 14th; 2.5% takes us to May 20th. This of course assumes no surge in demand for beds from non-Covid19 cases, which seems rather doubtful. As the healthcare system becomes increasingly burdened and Rx shortages and the like grow, people with chronic conditions that are normally well managed may be tipped into medical crises that require intensive care and hospitalisation.
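To make the arithmetic in that projection concrete, here is a rough back-of-the-envelope sketch in Python (the language used throughout this collection of notebooks). The starting infection count, the doubling time and the hospitalisation fraction below are illustrative assumptions, not figures taken from the thread above.

```
from datetime import date, timedelta

# All numbers below are assumptions for illustration only.
free_beds = 330_000            # ~1M US beds, ~65% already occupied
true_cases = 20_000            # assumed current infections (confirmed cases x an under-testing factor)
doubling_days = 6              # assumed doubling time of the outbreak
hospitalised_fraction = 0.10   # assumed share of cases needing a hospital bed
start = date(2020, 3, 8)

days = 0
while true_cases * hospitalised_fraction < free_beds:
    days += doubling_days
    true_cases *= 2

print("Open beds exhausted around:", start + timedelta(days=days))
```

Halving or doubling the assumed hospitalisation fraction moves that date by roughly one doubling time, which is the point of the quoted estimate: under exponential growth the conclusion is not very sensitive to the details.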
### The response of society makes all the difference

As we have discussed, this math is not a certainty: China has already shown that it is possible to reduce the spread by taking decisive steps. Another great example of a successful response is Vietnam, where, among other things, a nationwide advertising campaign (including a catchy song) quickly mobilised society towards an appropriate response and made sure that people adjusted their behaviour accordingly.

This is not just a hypothetical situation; it was clearly demonstrated in the 1918 flu pandemic. Two cities in the United States responded to the pandemic very differently. Philadelphia went ahead with a giant parade of 200,000 people to raise money for the war. But St. Louis put carefully designed processes in place to minimise social contact, along with cancelling large gatherings, in order to reduce the spread of the virus. Here is what the number of deaths in each city looked like, as shown in the [Proceedings of the National Academy of Sciences](https://www.pnas.org/content/104/18/7582):

![](my_icons/fluchart.jpeg "The impact of differing responses in the 1918 flu pandemic")

The situation in Philadelphia became extremely dire, to the point where at one stage there were not [enough coffins or morgues](https://www.history.com/news/spanish-flu-pandemic-dead) to handle the enormous number of flu deaths.

Richard Besser, who was acting director of the Centers for Disease Control and Prevention during the 2009 H1N1 pandemic, [has said](https://www.washingtonpost.com/opinions/as-coronavirus-spreads-the-bill-for-our-public-health-failures-is-due/2020/03/05/9da09ed6-5f10-11ea-b29b-9db42f7803a7_story.html?utm_campaign=wp_week_in_ideas&utm_medium=email&utm_source=newsletter&wpisrc=nl_ideas) that in the US "the ability to avoid exposure to the disease and to protect one's family depends on income, access to health care, and immigration status, among other factors." He points out:

> The elderly and disabled are at particular risk when their daily lives and support systems are disrupted. Those without easy access to health care, including rural and Native communities, may face daunting distances in times of need. People living in close quarters, whether in public housing, nursing homes, jails, shelters or even homeless on the streets, may suffer in waves, as we have already seen in Washington State. And the vulnerabilities of the low-wage gig economy, with its non-salaried workers and precarious work schedules, will be exposed to everyone during this crisis. Ask the 60 percent of the US labour force that is paid an hourly wage how easy it is to take time off when they need it.

The US Bureau of Labor Statistics shows that [less than a third](https://www.bls.gov/opub/ted/2018/higher-wage-workers-more-likely-than-lower-wage-workers-to-have-paid-leave-benefits-in-2018.htm) of those in the lowest income band have access to paid sick leave:

![](my_icons/uspaidleave.png "Most poor Americans do not get paid sick leave, so they have to go to work")

### We don't have good information in the US

One of the big issues in the US is that very little testing is being done, and the test results are not being properly shared, which means we do not know what is actually happening. Scott Gottlieb, the previous FDA commissioner, explained that better testing has been done in Seattle, and that is where we are seeing infections: "The reason we knew early about the Seattle outbreak was the sentinel surveillance work of independent scientists. Such surveillance never fully got going in other cities. So other US hot spots may not yet be fully detected."
According to [The Atlantic](https://www.theatlantic.com/health/archive/2020/03/how-many-americans-have-been-tested-coronavirus/607597/), Vice President Mike Pence promised that "roughly 1.5 million tests" would be available this week, but at this point fewer than 2,000 people have been tested across the whole of the US. Based on the [COVID Tracking Project](https://docs.google.com/spreadsheets/u/1/d/e/2PACX-1vRwAqp96T9sYYq2-i7Tj0pvTf6XVHjDSMIKBdZHXiCGGdNC0ypEU9NbngS8mxea55JuCFuua1MUeOj5/pubhtml), The Atlantic's Robinson Meyer and Alexis Madrigal said:

> The figures we have gathered suggest that the American response to the coronavirus and the disease it causes, COVID-19, has been remarkably sluggish, especially compared with that of other developed countries. The CDC confirmed eight days ago that the virus was in community transmission in the United States, that it was infecting Americans who had neither travelled abroad nor been in contact with others who had. In South Korea, more than 66,650 people were tested within a week of its first case of community transmission, and it quickly became able to test 10,000 people a day.

Part of the problem is that this has become a political issue. In particular, President Donald Trump has made it clear that he wants "the numbers" (that is, the count of people infected in the US) kept low. This is an example of where optimising for a metric gets in the way of actually achieving good outcomes. (For more on this issue, see the data science ethics paper on how [the problem with metrics](https://arxiv.org/abs/2002.08512) is a fundamental problem for AI.) Jeff Dean, the head of Google AI, [tweeted](https://twitter.com/JeffDean/status/1236489084870119427) his concern about the problems of politicised disinformation:

> When I worked at WHO, I was part of the Global Programme on AIDS (now UNAIDS), created to help the world tackle the HIV/AIDS pandemic. The staff there were dedicated doctors and scientists, intensely focused on helping to address that crisis. In times of crisis, clear and accurate information is vital to helping everyone make proper and informed decisions (national, state and local governments, companies, NGOs, schools, families and individuals). With the right information and policies in place for listening to the best medical and scientific experts, we will all come through the challenges presented by HIV/AIDS or Covid-19.

The real risk of disinformation driven by political interests is that, by not acting quickly, we make things worse rather than better, and that, while struggling with the pandemic, we end up encouraging exactly the behaviour that spreads the disease further. Watching such a situation take shape is extremely painful. The political will to change course on transparency does not appear to be there. According to [Wired](https://www.wired.com/story/trumps-coronavirus-press-event-was-even-worse-than-it-looked/), Health and Human Services Secretary Alex Azar "started talking about the tests health care workers use to determine whether someone is infected with the new coronavirus. The shortage of those kits has meant a dangerous lack of epidemiological information about the spread and severity of the disease in the US, exacerbated by opacity on the part of the government. Azar tried to say that more tests would be done, pending quality control." But, the piece continues:
> Then Trump cut Azar off: "But I think, importantly, anybody, right now and yesterday, that has needed a test has gotten a test. They're there, they have had the tests, and the tests are very good." Trump said that whoever needs a test will get a test. That is not true. Vice President Pence told reporters on Thursday that the US does not have enough test kits to meet demand.

Other countries are responding much faster and far more substantially than the US. Several countries in South-East Asia are showing excellent results, including Taiwan, where R0 is now down to below 0.3, and Singapore, which is being held up as the model for Covid-19 response. It is not just Asia, either; in France, for example, gatherings of more than 1000 people are banned, and schools are now closed in three districts.

### Conclusion

Covid-19 is a significant societal issue, and we should all work to reduce the spread of the disease. That means:

* Avoid large groups and crowds
* Cancel events
* Work from home, if at all possible
* Wash your hands frequently when leaving and returning home, and when out
* Avoid touching your face, especially when outside your home

Note: in our rush to get this out, we have not been as careful as we usually are about citing and crediting the sources of the information used here. Please let us know if we have missed anything.

Thanks to Sylvain Gugger and Alexis Gallagher for feedback and comments.

*This is a translation of an important English-language article; to read the original in English, [click here](https://www.fast.ai/2020/03/09/coronavirus/).*

<a href="https://www.buymeacoffee.com/alephthoughts" target="_blank"><img src="https://cdn.buymeacoffee.com/buttons/v2/default-yellow.png" alt="Buy Me A Coffee" style="height: 60px !important;width: 217px !important;" ></a>
github_jupyter
# Visuals This notebook contains visual output functions for Constitutive Models, more specifically for a bounding surface model by Borjas & Amies, 1994. The functions are developed and maintained by Justin Bonus (University of Washington). Use ``%run YOURPATH/'Bounding Surface'/Visuals.ipynb`` at the start of your notebook to import these functions. ``` def visBorjas3D(R_B, R_F, Stress0): #======================================================================== # Visualizing the Borjas constitutive model in 3D #======================================================================== #Base code for generic cylinder plotting: #Created on Sun Oct 2 18:33:10 2016 #Modified from https://stackoverflow.com/questions/38076682/how-to-add- #colors-to-each-individual-face-of-a-cylinder-using-matplotlib #Author: astrokeat # #Edited to produce multiple Von Mises cylinders, axis, kappa contours, #and interactivity #Jul 15 2019 #Author: Justin Bonus #======================================================================== import numpy as np from collections import namedtuple from scipy.linalg import norm # Inputs Su = 0.061 RR = R_B #Stress0 = np.array([0,0,0,.1,.07,.03]) #Last unloading point AKA \alpha proj_Stress0 = hydroProjector(Stress0,2) #Projected onto plane normal to the hydrostatic axis, centered on origin norm_proj_Stress0 = float(normS(proj_Stress0)) # Broken, currently scaling down dev_Stress0 outside of function if norm_proj_Stress0 >= float(R_B): reduction = float(0.99 * (float(R_B) / norm_projStress0)) adj_proj_Stress0 = reduction * proj_Stress0 adj_norm_proj_Stress0 = float(normS(adj_proj_Stress0)) else: adj_norm_proj_Stress0 = norm_proj_Stress0 adj_proj_Stress0 = proj_Stress0 #R_B = Su*(8/3)**0.5 #Radius of bounding surface, (8/3)^{1/2}Su p0 = np.array([-R_B, -R_B, -R_B]) #Point at one end, [\sigma_1, \sigma_2, \sigma_3] p1 = np.array([R_B, R_B, R_B]) #Point at other end, [\sigma_1, \sigma_2, \sigma_3] #R_F = (2/5)*R_B #Radius of yield surface AKA \zeta', MPa p0_F = np.array([p0[0] + proj_Stress0[2], p0[1] + proj_Stress0[1], p0[2] + proj_Stress0[0]]) p1_F = np.array([p1[0] + proj_Stress0[2], p1[1] + proj_Stress0[1], p1[2] + proj_Stress0[0]]) def kappaCon(kappa, proj_Stress0): norm_center = (kappa * norm_proj_Stress0)/(kappa + 1) small = 1e-10 if norm_proj_Stress0 < small: rat = 0 else: rat = norm_center / adj_norm_proj_Stress0 center = rat * adj_proj_Stress0 north = ((RR - kappa*adj_norm_proj_Stress0)/(kappa + 1)) south = -((RR + kappa*adj_norm_proj_Stress0)/(kappa + 1)) rad = np.abs((north - south)/2) return center, rad kappa_BKOne = 5 kappa_BKTwo = 2 kappa_BKThree = 1 kappa_BKFour = 0.3 center_BKOne, R_BKOne = kappaCon(kappa_BKOne, adj_proj_Stress0) center_BKTwo, R_BKTwo = kappaCon(kappa_BKTwo, adj_proj_Stress0) center_BKThree, R_BKThree = kappaCon(kappa_BKThree, adj_proj_Stress0) center_BKFour, R_BKFour = kappaCon(kappa_BKFour, adj_proj_Stress0) p0_BKOne = np.array([p0[0] + center_BKOne[2], p0[1] + center_BKOne[1], p0[2] + center_BKOne[0]]) p1_BKOne = np.array([p1[0] + center_BKOne[2], p1[1] + center_BKOne[1], p1[2] + center_BKOne[0]]) p0_BKTwo = np.array([p0[0] + center_BKTwo[2], p0[1] + center_BKTwo[1], p0[2] + center_BKTwo[0]]) p1_BKTwo = np.array([p1[0] + center_BKTwo[2], p1[1] + center_BKTwo[1], p1[2] + center_BKTwo[0]]) p0_BKThree = np.array([p0[0] + center_BKThree[2], p0[1] + center_BKThree[1], p0[2] + center_BKThree[0]]) p1_BKThree = np.array([p1[0] + center_BKThree[2], p1[1] + center_BKThree[1], p1[2] + center_BKThree[0]]) p0_BKFour = np.array([p0[0] + center_BKFour[2], 
p0[1] + center_BKFour[1], p0[2] + center_BKFour[0]]) p1_BKFour = np.array([p1[0] + center_BKFour[2], p1[1] + center_BKFour[1], p1[2] + center_BKFour[0]]) #Vector in direction of axis v = p1 - p0 v_F = p1_F - p0_F v_BKOne = p1_BKOne - p0_BKOne v_BKTwo = p1_BKTwo - p0_BKTwo v_BKThree = p1_BKThree - p0_BKThree v_BKFour = p1_BKFour - p0_BKFour #Unit vector in direction of axis mag = norm(v) v = v / mag mag_F = norm(v_F) v_F = v_F / mag_F mag_BKOne = norm(v_BKOne) v_BKOne = v_BKOne / mag_BKOne mag_BKTwo = norm(v_BKTwo) v_BKTwo = v_BKTwo / mag_BKTwo mag_BKThree = norm(v_BKThree) v_BKThree = v_BKThree / mag_BKThree mag_BKFour = norm(v_BKFour) v_BKFour = v_BKFour / mag_BKFour #Make some of the vectors not in the same direction as v not_v = np.array([1, 0, 0]) if (v == not_v).all(): not_v = np.array([0, 1, 0]) not_v_F = np.array([1, 0, 0]) if (v_F == not_v_F).all(): not_v_F = np.array([0, 1, 0]) not_v_BKOne = np.array([1,0,0]) if (v_BKOne == not_v_BKOne).all(): not_v_BKOne = np.array([0,1,0]) not_v_BKTwo = np.array([1,0,0]) if (v_BKTwo == not_v_BKTwo).all(): not_v_BKTwo = np.array([0,1,0]) not_v_BKThree = np.array([1,0,0]) if (v_BKThree == not_v_BKThree).all(): not_v_BKThree = np.array([0,1,0]) not_v_BKFour = np.array([1,0,0]) if (v_BKFour == not_v_BKFour).all(): not_v_BKFour = np.array([0,1,0]) #Make vector perpendicular to v, normalize n1 n1 = np.cross(v, not_v) n1 /= norm(n1) n1_F = np.cross(v_F, not_v_F) n1_F /= norm(n1_F) n1_BKOne = np.cross(v_BKOne, not_v_BKOne) n1_BKOne /= norm(n1_BKOne) n1_BKTwo = np.cross(v_BKTwo, not_v_BKTwo) n1_BKTwo /= norm(n1_BKTwo) n1_BKThree = np.cross(v_BKThree, not_v_BKThree) n1_BKThree /= norm(n1_BKThree) n1_BKFour = np.cross(v_BKFour, not_v_BKFour) n1_BKFour /= norm(n1_BKFour) #Make unit vector perpendicular to v and n1 n2 = np.cross(v, n1) n2_F = np.cross(v_F, n1_F) n2_BKOne = np.cross(v_BKOne, n1_BKOne) n2_BKTwo = np.cross(v_BKTwo, n1_BKTwo) n2_BKThree = np.cross(v_BKThree, n1_BKThree) n2_BKFour = np.cross(v_BKFour, n1_BKFour) #Surface ranges over t from 0 to length of axis and 0 to 2*pi t = np.linspace(0, mag, 2) theta = np.linspace(0, 2 * np.pi, 100) rsample = np.linspace(0, R_B, 2) t_F = np.linspace(0, mag_F, 2) theta_F = np.linspace(0, 2 * np.pi, 100) rsample_F = np.linspace(0, R_F, 2) t_BKOne = np.linspace(0, mag_BKOne, 2) theta_BKOne = np.linspace(0, 2 * np.pi, 100) rsample_BKOne = np.linspace(0, R_BKOne, 2) t_BKTwo = np.linspace(0, mag_BKTwo, 2) theta_BKTwo = np.linspace(0, 2 * np.pi, 100) rsample_BKTwo = np.linspace(0, R_BKTwo, 2) t_BKThree = np.linspace(0, mag_BKThree, 2) theta_BKThree = np.linspace(0, 2 * np.pi, 100) rsample_BKThree = np.linspace(0, R_BKThree, 2) t_BKFour = np.linspace(0, mag_BKFour, 2) theta_BKFour = np.linspace(0, 2 * np.pi, 100) rsample_BKFour = np.linspace(0, R_BKFour, 2) #Use meshgrid to make 2d arrays t, theta2 = np.meshgrid(t, theta) rsample,theta = np.meshgrid(rsample, theta) t_F, theta2_F = np.meshgrid(t_F, theta_F) rsample_F,theta_F = np.meshgrid(rsample_F, theta_F) t_BKOne, theta2_BKOne = np.meshgrid(t_BKOne, theta_BKOne) rsample_BKOne, theta_BKOne = np.meshgrid(rsample_BKOne, theta_BKOne) t_BKTwo, theta2_BKTwo = np.meshgrid(t_BKTwo, theta_BKTwo) rsample_BKTwo, theta_BKTwo = np.meshgrid(rsample_BKTwo, theta_BKTwo) t_BKThree, theta2_BKThree = np.meshgrid(t_BKThree, theta_BKThree) rsample_BKThree, theta_BKThree = np.meshgrid(rsample_BKThree, theta_BKThree) t_BKFour, theta2_BKFour = np.meshgrid(t_BKFour, theta_BKFour) rsample_BKFour, theta_BKFour = np.meshgrid(rsample_BKFour, theta_BKFour) #Generate coordinates for surface # 
Bounding and Yield Surfaces X, Y, Z = [p0[i] + v[i] * t + R_B * np.sin(theta2) * n1[i] + R_B * np.cos(theta2) * n2[i] for i in [0, 1, 2]] X_F, Y_F, Z_F = [p0_F[i] + v_F[i] * t_F + R_F * np.sin(theta2_F) * n1_F[i] + R_F * np.cos(theta2_F) * n2_F[i] for i in [0, 1, 2]] # Lines X_hydro, Y_hydro, Z_hydro = [p0[i] + v[i]*t for i in [0, 1, 2]] X_F0, Y_F0, Z_F0 = [p0_F[i] + v[i]*t for i in [0, 1, 2]] X_alpha = np.array([p1[0], p1_F[0]]); Y_alpha = np.array([p1[1], p1_F[1]]); Z_alpha = np.array([p1[2], p1_F[2]]).reshape(1,2) # \kappa Contour Surfaces X_BKOne, Y_BKOne, Z_BKOne = [p0_BKOne[i] + v_BKOne[i] * t_BKOne + R_BKOne * np.sin(theta2_BKOne) * n1_BKOne[i] + R_BKOne * np.cos(theta2_BKOne) * n2_BKOne[i] for i in [0, 1, 2]] X_BKTwo, Y_BKTwo, Z_BKTwo = [p0_BKTwo[i] + v_BKTwo[i] * t_BKTwo + R_BKTwo * np.sin(theta2_BKTwo) * n1_BKTwo[i] + R_BKTwo * np.cos(theta2_BKTwo) * n2_BKTwo[i] for i in [0, 1, 2]] X_BKThree, Y_BKThree, Z_BKThree = [p0_BKThree[i] + v_BKThree[i] * t_BKThree + R_BKThree * np.sin(theta2_BKThree) * n1_BKThree[i] + R_BKThree * np.cos(theta2_BKThree) * n2_BKThree[i] for i in [0, 1, 2]] X_BKFour, Y_BKFour, Z_BKFour = [p0_BKFour[i] + v_BKFour[i] * t_BKFour + R_BKFour * np.sin(theta2_BKFour) * n1_BKFour[i] + R_BKFour * np.cos(theta2_BKFour) * n2_BKFour[i] for i in [0, 1, 2]] # "Bottom" #X2, Y2, Z2 = [p0[i] + rsample[i] * np.sin(theta) * n1[i] + rsample[i] * np.cos(theta) * n2[i] for i in [0, 1, 2]] # "Top" #X3, Y3, Z3 = [p0[i] + v[i]*mag + rsample[i] * np.sin(theta) * n1[i] + rsample[i] * np.cos(theta) * n2[i] for i in [0, 1, 2]] #Factor fixes projection issue when viewing in 3D Mc = np.sqrt(3/2)*R_B refLine1x = np.array([0, Mc]); refLine1y = np.array([0, 0]); refLine1z = np.array([0, 0]).reshape(1,2); refLine2x = np.array([0, 0]); refLine2y = np.array([0, Mc]); refLine2z = np.array([0, 0]).reshape(1,2); refLine3x = np.array([0, 0]); refLine3y = np.array([0, 0]); refLine3z = np.array([0, Mc]).reshape(1,2); out = namedtuple('out',['refLine1x','refLine1y','refLine1z', 'refLine2x','refLine2y','refLine2z', 'refLine3x','refLine3y','refLine3z', 'X_hydro','Y_hydro','Z_hydro', 'X','Y','Z', 'X_F','Y_F','Z_F', 'X_F0','Y_F0','Z_F0', 'X_alpha','Y_alpha','Z_alpha', 'X_BKOne','Y_BKOne','Z_BKOne', 'X_BKTwo','Y_BKTwo','Z_BKTwo', 'X_BKThree','Y_BKThree','Z_BKThree', 'X_BKFour','Y_BKFour','Z_BKFour' ]) result = out(refLine1x,refLine1y,refLine1z, refLine2x,refLine2y,refLine2z, refLine3x,refLine3y,refLine3z, X_hydro,Y_hydro,Z_hydro, X,Y,Z, X_F,Y_F,Z_F, X_F0,Y_F0,Z_F0, X_alpha,Y_alpha,Z_alpha, X_BKOne,Y_BKOne,Z_BKOne, X_BKTwo,Y_BKTwo,Z_BKTwo, X_BKThree,Y_BKThree,Z_BKThree, X_BKFour,Y_BKFour,Z_BKFour) return result def borjasInteractive(): #=============================== # PLOTTING INTERACTIVE 3D MODEL # ----- %matplotlib notebook import matplotlib from matplotlib import pyplot as plt from mpl_toolkits.mplot3d import Axes3D from matplotlib.widgets import Slider, Button, RadioButtons # Basic Setup font = {'size': 14} matplotlib.rc('font', **font) fig_bound = plt.figure() fig_bound.set_figheight(6) fig_bound.set_figwidth(6) ax = fig_bound.gca(projection='3d', proj_type = 'persp') plt.ion() #---------------------------- # Initial Stress Settings Su = 0.061 R_B = Su*(8/3)**0.5 #Radius of bounding surface, (8/3)^{1/2}Su R_F = 0 #Radius of yield surface AKA \zeta', MPa Stress0 = np.array([.05,.05,.05]) #Last unloading point AKA \alpha #---------------------------- # Slider Settings B0 = R_B axB = plt.axes([0.3, 0.1, 0.4, 0.015]) sB = Slider(axB, '$R$', 0, 10*B0, valinit=B0) F0 = R_F axF = plt.axes([0.3, 0.125, 0.4, 
0.015]) sF = Slider(axF, '$r$', 0, 10*B0, valinit=F0) S1 = Stress0[0]; S2 = Stress0[1]; S3 = Stress0[2] axS1 = plt.axes([0.3, 0.15, 0.4, 0.015]) axS2 = plt.axes([0.3, 0.175, 0.4, 0.015]) axS3 = plt.axes([0.3, 0.2, 0.4, 0.015]) sS1 = Slider(axS1, '$\sigma_1$', -10*Stress0[0], 10*Stress0[0], valinit=S1) sS2 = Slider(axS2, '$\sigma_2$', -10*Stress0[1], 10*Stress0[1], valinit=S2) sS3 = Slider(axS3, '$\sigma_3$', -10*Stress0[2], 10*Stress0[2], valinit=S3) #---------------------------- def draw(R_B,R_F,Stress0): # Prevent Stress0 from leaving bounding surface dev_Stress0 = dev(Stress0.reshape(3,1)) norm_dev_Stress0 = normS(dev_Stress0) if R_B <= 0: R_B = 0.001 if normS(dev_Stress0) >= R_B: reduction = 0.99 * (R_B / normS(dev_Stress0)) dev_Stress0 = reduction * dev_Stress0 Stress0[0] = dev_Stress0[0] Stress0[1] = dev_Stress0[1] Stress0[2] = dev_Stress0[2] # Retrieve cylinders and axis for stress state vis = visBorjas3D(R_B, R_F, Stress0) # Plot reference lines ax.plot_wireframe(vis.refLine1x, vis.refLine1y, vis.refLine1z, color = 'black', label='$\sigma$ Axis') ax.plot_wireframe(vis.refLine2x, vis.refLine2y, vis.refLine2z, color = 'black') ax.plot_wireframe(vis.refLine3x, vis.refLine3y, vis.refLine3z, color = 'black') # Plot axis and lines ax.plot_wireframe(vis.X_hydro, vis.Y_hydro, vis.Z_hydro, color='red', label = 'Hydrostatic Axis') #Plots hydrostatic axis ax.plot_wireframe(vis.X_F0, vis.Y_F0, vis.Z_F0, color='orange', label = 'Unloading Axis') #Plots axes that goes through last unloading point, F0 ax.plot_wireframe(vis.X_alpha, vis.Y_alpha, vis.Z_alpha, color='grey', label = 'alpha') # Plot cylinders ax.plot_surface(vis.X_F, vis.Y_F, vis.Z_F, color='blue') #Plots yield surface, F ax.plot_surface(vis.X_BKOne, vis.Y_BKOne, vis.Z_BKOne, color='yellow') ax.plot_surface(vis.X_BKTwo, vis.Y_BKTwo, vis.Z_BKTwo, color='orange') ax.plot_surface(vis.X_BKThree, vis.Y_BKThree, vis.Z_BKThree, color='red') ax.plot_surface(vis.X_BKFour, vis.Y_BKFour, vis.Z_BKFour, color='purple') ax.plot_wireframe(vis.X, vis.Y, vis.Z, color='black', label = 'Bounding Surface') #Plots bounding surface, B #ax.plot_surface(X2, Y2, Z2, color='blue') #ax.plot_surface(X3, Y3, Z3, color='blue') #plt.xlabel('$\sigma_1$') #plt.ylabel('$\sigma_2$') #plt.zlabel('\sigma_3') #plt.legend(bbox_to_anchor=(.75,1), loc="lower right") # Legend outside plot #plt.legend() #plt.tight_layout() #Make the panes transparent ax.xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0)) ax.yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0)) ax.zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0)) #Make the grid lines transparent ax.set_axis_off() ax.xaxis._axinfo["grid"]['color'] = (1,1,1,0) ax.yaxis._axinfo["grid"]['color'] = (1,1,1,0) ax.zaxis._axinfo["grid"]['color'] = (1,1,1,0) ax.set_title('Model in $\sigma$ Space', fontdict=None, loc='center', pad=None) plt.tight_layout() plt.show() #---------------------------- # Initialize plot draw(R_B,R_F,Stress0) def update(val): R_B = sB.val R_F = sF.val Stress0 = np.array([sS1.val, sS2.val, sS3.val]) ax.clear() draw(R_B, R_F, Stress0) fig.canvas.draw_idle() return R_B, R_F, Stress0 def viewDev(self): #Set view to deviatoric space ax.clear() R_B = sB.val R_F = sF.val Stress0 = np.array([sS1.val, sS2.val, sS3.val]) draw(R_B, R_F, Stress0) ax.view_init(azim=45., elev=38) plt.show() def viewX(self): #Set view to deviatoric space ax.clear() R_B = sB.val R_F = sF.val Stress0 = np.array([sS1.val, sS2.val, sS3.val]) draw(R_B, R_F, Stress0) ax.view_init(azim=0, elev=0) plt.show() sB.on_changed(update) sF.on_changed(update) sS1.on_changed(update) 
sS2.on_changed(update) sS3.on_changed(update) #axprev = plt.axes([0.7, 0.05, 0.1, 0.075]) axDev = plt.axes([0.1, 0.1, 0.1, 0.05]) bDev = Button(axDev, '$\Pi$-plane') bDev.label.set_fontsize(10) bDev.on_clicked(viewDev) axX = plt.axes([0.1, 0.15, 0.1, 0.05]) bX = Button(axX, 'X-View') bX.label.set_fontsize(10) bX.on_clicked(viewX) #bprev = Button(axprev, 'Previous') #bprev.on_clicked(callback.prev) plt.show() def drawDeviatoricSurface_function(Stress0, CurStress, Su): import numpy as np from scipy.linalg import norm def GetSurfaceInfo(Mc, Me, princ_dev_Stress0, princ_dev_CurStress, SType): # Mc = max(q/p) in compresion # Me = max(q/p) in extension # SType = type of surface # aRatio = MMe/MMc # theta = Lode angle lode = np.linspace(0, 2*np.pi, 2*np.pi/0.02) c = Me / Mc aSurf = np.zeros((len(lode),3)) for i in range(0,len(lode)): lodeMap = GetLodeMapping(lode[i]) if SType == 'MC': q = MohrCoulomb(Mc, c, lodeMap) elif SType == 'MN': q = MatsuokaNakai(Mc, c, lodeMap) elif SType == 'WB': q = Mc * g(lodeMap, c) elif SType == 'VM': q = VonMises(Mc, c, lodeMap) elif SType == 'B': q = Borjas(Mc, c, lodeMap) elif SType == 'BK': q = BorjasKappa(Mc, c, VMSurf, kappa) else: q = 1.0 if SType != 'BK': # For MC, MN, WB, VM aSurf[i,0] = 2/3 * q * np.cos(lode[i]) aSurf[i,1] = 2/3 * q * np.cos(lode[i] - 2*np.pi/3) aSurf[i,2] = 2/3 * q * np.cos(lode[i] - 4*np.pi/3) if SType == 'B': # For B aSurf[i,0] = np.sqrt(3/2)*(aSurf[i,0]) + (princ_dev_Stress0[0]) aSurf[i,1] = np.sqrt(3/2)*(aSurf[i,1]) + (princ_dev_Stress0[1]) aSurf[i,2] = np.sqrt(3/2)*(aSurf[i,2]) + (princ_dev_Stress0[2]) elif SType == 'BK': # For BK small = 1e-10 norm_center = (kappa * norm_dev_Stress0)/(kappa + 1) if norm_dev_Stress0 < small: rat = 0 else: rat = norm_center / norm_dev_Stress0 center = rat * princ_dev_Stress0 aSurf[i,0] = 2/3 * q * np.cos(lode[i]) aSurf[i,1] = 2/3 * q * np.cos(lode[i] - 2*np.pi/3) aSurf[i,2] = 2/3 * q * np.cos(lode[i] - 4*np.pi/3) aSurf[i,0] = np.sqrt(3/2)*(aSurf[i,0]) + (center[0]) aSurf[i,1] = np.sqrt(3/2)*(aSurf[i,1]) + (center[1]) aSurf[i,2] = np.sqrt(3/2)*(aSurf[i,2]) + (center[2]) return aSurf #******************************************************************************************************** def GetLodeMapping(theta): if theta >=0 and theta < np.pi/3: theRegion = 1 elif theta >= np.pi/3 and theta < 2*np.pi/3: theRegion = 2 elif theta >=2*np.pi/3 and theta < np.pi: theRegion = 3 elif theta >= np.pi and theta < 4*np.pi/3: theRegion = 4 elif theta >=4*np.pi/3 and theta < 5*np.pi/3: theRegion = 5 elif theta >=5*np.pi/3 and theta <= 2*np.pi: theRegion = 6 else: theRegion = 7 #Pseudo-switch switch_region = { 1: theta, 2: (2*np.pi/3 - theta), 3: (theta - 2*np.pi/3), 4: (4*np.pi/3 - theta), 5: (theta - 4*np.pi/3), 6: (6*np.pi/3 -theta) } lodemap = switch_region.get(theRegion, 'Check GetLodeMapping function') return lodemap #******************************************************************************************************** def g(theta, c): term1 = 4.0 * (1.0 - c*c) * np.cos(theta - np.pi/3.0) * np.cos(theta - np.pi/3.0) + 5.0 * c * c - 4.0 * c aNumer = 2.0 * (1.0 - c*c) * np.cos(theta - np.pi/3.0) + (2.0*c - 1.0) * np.sqrt(term1) aDeno = 4.0 * (1.0 - c*c) * np.cos(theta - np.pi/3.0) * np.cos(theta - np.pi/3.0) + (1.0 - 2.0*c) * (1.0 - 2.0*c) result = aNumer / aDeno return result #******************************************************************************************************** def MohrCoulomb(MM, aRatio, theta): # MM = q/p slope in compresion # aRatio = MMe/MMc # theta = Lode angle c=0 p=1.0 # phi = 
asin(3.0*(1.0-aRatio)/(1+aRatio))*180.0/pi #phi = ((np.arcsin(( np.divide(np.multiply( np.sqrt(3)*MM, np.sin(0+np.pi/3) ), (np.multiply( 3*1+MM, (np.cos(0+np.pi/3) )) )) )))).max()*180/np.pi phi = ((np.arcsin((np.sqrt(3)*MM*np.sin(0+np.pi/3))/(3*1+MM*(np.cos(0+np.pi/3)))))).max()*180/np.pi # term1 = 0.5*(sqrt(3)*(1+sin(phi*pi/180.0))*sin(theta)+(3-sin(phi*pi/180.0))*cos(theta)) # result = 3.0*(c * cos(phi*pi/180.0)+p*sin(phi*pi/180.0))/term1*sqrt(3.0/2.0) result = 3*(p*np.sin(phi*np.pi/180)/(np.sqrt(3)*np.sin(theta+np.pi/3)-np.cos(theta+np.pi/3)*np.sin(phi*np.pi/180))) return result #******************************************************************************************************** def MatsuokaNakai(MM, aRatio, theta): # MM = q/p slope in compresion # aRatio = MMe/MMc # theta = Lode angle c=0 p=1.0 # phi = asin(3.0*(1.0-aRatio)/(1+aRatio))*180.0/pi; phi = ((np.arcsin((np.sqrt(3)*MM*np.sin(0+np.pi/3))/(3*1+MM*(np.cos(0+np.pi/3)))))).max()*180/np.pi eta=2*np.sin(phi*np.pi/180)/np.sqrt(4 - np.cos(phi*np.pi/180)**2) xi=np.sin(phi*np.pi/180)*(np.cos(phi*np.pi/180)**2 + 8)/np.sqrt((4 - np.cos(phi*np.pi/180)**2)**3) g=2*np.sqrt(3)*np.cos(np.arccos(xi*(-np.cos(3*theta)))/3) result = 3*np.sqrt(3)*eta*p/g return result #******************************************************************************************************** def M_MatsuokaNakai(MM, aRatio, lode): # MM = q/p slope in compresion # aRatio = MMe/MMc # theta = Lode angle c=0 p=1.0 # phi = asin(3.0*(1.0-aRatio)/(1+aRatio))*180.0/pi; phi = max((np.arcsin((np.sqrt(3)*MM*np.sin(0+np.pi/3))/(3*1+MM*(np.cos(0+np.pi/3))))))*180/np.pi eta=2*np.sin(phi*np.pi/180)/np.sqrt(4-np.cos(phi*np.pi/180)**2) xi=np.sin(phi*np.pi/180)*(np.cos(phi*np.pi/180)**2+8)/np.sqrt((4-np.cos(phi*np.pi/180)**2)**3) for i in range(0,len(lode)): lodeMap = GetLodeMapping(lode[i]) g=2*np.sqrt(3)*np.cos(np.arccos(xi*(-np.cos(3*lode[i])))/3) result = 3*np.sqrt(3)*eta/g return result #******************************************************************************************************** def VonMises(MM, aRatio, theta): #Creates circle of radius MM around the origin R = MM result = R return result #******************************************************************************************************** def Borjas(MM, aRatio, theta): #Determines the yield function radius and constructs a circle around point of last unloading result = eucli return result #******************************************************************************************************** # Output kappa contour def BorjasKappa(MM, aRatio, theta, frac): # kappa defined outside of function small = 1e-10 RR = MM norm_dev_Stress0 = float(normS(princ_dev_Stress0)) norm_center = (kappa * norm_dev_Stress0)/(kappa + 1) if norm_dev_Stress0 < small: rat = 0 else: rat = norm_center / norm_dev_Stress0 center = rat * princ_dev_Stress0 north = ((RR - kappa*norm_dev_Stress0)/(kappa + 1)) south = -((RR + kappa*norm_dev_Stress0)/(kappa + 1)) rad = np.abs((north - south)/2) return rad #******************************************************************************************************** #Mc being used as a proxy for R Mc = ((8/3)**0.5)*Su Me = Mc * 0.65 aRatio=Me/Mc theta0=0 phi = np.arcsin(3.0*(1.0-aRatio)/(1+aRatio))*180.0/np.pi phi2 = ((np.arcsin((np.sqrt(3)*Mc*np.sin(theta0+np.pi/3))/(3*1+Mc*(np.cos(theta0+np.pi/3))))))*180/np.pi #====================================== # Project vectors (6x1, 3x1) onto flattened deviatoric plane # U_{proj-plane} = U - ((dot(U,n))/||n||^2)n # U is arbitrary vector, n is vector normal to 
plane to project on # dev() will automatically perform this projection #====================================== if CurStress.shape == (6,1) or CurStress.shape == (6,) or CurStress.shape == (1,6): Stress0 = Stress0.reshape(6,1) CurStress = CurStress.reshape(6,1) dev_Stress0 = dev(Stress0) dev_CurStress = dev(CurStress) norm_dev_Stress0 = normS(dev_Stress0) norm_dev_CurStress = normS(dev_CurStress) #Solve eigenvalues for normal principal stress p1,p2,p3 = princVal(dev_Stress0,0) princ_dev_Stress0 = np.array([p1,p2,p3]) p1,p2,p3 = princVal(dev_CurStress,0) princ_dev_CurStress = np.array([p1,p2,p3]) elif CurStress.shape == (3,1) or CurStress.shape == (3,) or CurStress.shape == (1,3): Stress0 = Stress0.reshape(3,1) CurStress = CurStress.reshape(3,1) dev_Stress0 = dev(Stress0).reshape(3,1) dev_CurStress = dev(CurStress).reshape(3,1) norm_dev_Stress0 = normS(dev_Stress0) norm_dev_CurStress = normS(dev_CurStress) princ_dev_Stress0 = dev_Stress0 princ_dev_CurStress = dev_CurStress #Euclidian distance eucli = ((princ_dev_CurStress[2]-princ_dev_Stress0[2])**2 + (princ_dev_CurStress[1]-princ_dev_Stress0[1])**2 + (princ_dev_CurStress[0]-princ_dev_Stress0[0])**2)**0.5 #Get projected constitutive shapes (MC, MN , WB, VM, B) #MCSurf = GetSurfaceInfo(Mc, Me, proj_dev_Stress0, proj_dev_CurStress, 'MC') #MNSurf = GetSurfaceInfo(Mc, Me, proj_dev_Stress0, proj_dev_CurStress, 'MN') #K1=MNSurf[:,0]*3/2 #WBSurf = GetSurfaceInfo(Mc, Me, proj_dev_Stress0, proj_dev_CurStress, 'WB') #MCSurf5 = GetSurfaceInfo(Mc, Me, proj_Stress0, proj_CurStress, 'B') # Create bounding and yield surface BSurf = GetSurfaceInfo(Mc, Me, princ_dev_Stress0, princ_dev_CurStress, 'B') proj_R = np.sqrt(3/2)*Mc VMSurf = GetSurfaceInfo(proj_R, Me, princ_dev_Stress0, princ_dev_CurStress, 'VM') # Create contours of equal \kappa value kappa = 5 BKSurfOne = GetSurfaceInfo(Mc, Me, princ_dev_Stress0, princ_dev_CurStress, 'BK') kappa = 2 BKSurfTwo = GetSurfaceInfo(Mc, Me, princ_dev_Stress0, princ_dev_CurStress, 'BK') kappa = 1 BKSurfThree = GetSurfaceInfo(Mc, Me, princ_dev_Stress0, princ_dev_CurStress, 'BK') kappa = 0.2 BKSurfFour = GetSurfaceInfo(Mc, Me, princ_dev_Stress0, princ_dev_CurStress, 'BK') # Lines for the hydrostatic axis and reference axis hydroLinex = np.array([0,proj_R]); hydroLiney = np.array([0,proj_R]); hydroLinez = np.array([0,proj_R]).reshape(1,2) refLine1x = np.array([0, proj_R]); refLine1y = np.array([0, 0]); refLine1z = np.array([0, 0]).reshape(1,2); refLine2x = np.array([0, 0]); refLine2y = np.array([0, proj_R]); refLine2z = np.array([0, 0]).reshape(1,2); refLine3x = np.array([0, 0]); refLine3y = np.array([0, 0]); refLine3z = np.array([0, proj_R]).reshape(1,2); # Projected lines for last unloading point (alpha) and current stress point (active), and connector (zeta) dev_alphaLinex = np.array([0,float(princ_dev_Stress0[2])]); dev_alphaLiney = np.array([0,float(princ_dev_Stress0[1])]); dev_alphaLinez = np.array([0,float(princ_dev_Stress0[0])]).reshape(1,2) dev_stressLinex = np.array([0,float(princ_dev_CurStress[2])]); dev_stressLiney = np.array([0,float(princ_dev_CurStress[1])]); dev_stressLinez = np.array([0,float(princ_dev_CurStress[0])]).reshape(1,2) ## AKA \zeta' = \sigma' - alpha dev_zetaLinex = np.array([float(princ_dev_Stress0[2]),float(princ_dev_CurStress[2])]); dev_zetaLiney = np.array([float(princ_dev_Stress0[1]),float(princ_dev_CurStress[1])]); dev_zetaLinez = np.array([float(princ_dev_Stress0[0]),float(princ_dev_CurStress[0])]).reshape(1,2) # Draw from stress point to projection plane dev_alphaAxisx = 
np.array([princ_dev_Stress0[2],princ_dev_Stress0[2]+Mc/4]); dev_alphaAxisy = np.array([princ_dev_Stress0[1],princ_dev_Stress0[1]+Mc/4]); dev_alphaAxisz = np.array([princ_dev_Stress0[0],princ_dev_Stress0[0]+Mc/4]).reshape(1,2) dev_stressAxisx = np.array([princ_dev_CurStress[2],princ_dev_CurStress[2]+Mc/4]); dev_stressAxisy = np.array([princ_dev_CurStress[1],princ_dev_CurStress[1]+Mc/4]); dev_stressAxisz = np.array([princ_dev_CurStress[0],princ_dev_CurStress[0]+Mc/4]).reshape(1,2) # Store all wireframes in a namedtuple, similar to class structure # Makes it convenient to access when calling this function result = namedtuple('result', ['VMSurf','BSurf', 'BKSurfOne', 'BKSurfTwo', 'BKSurfThree', 'BKSurfFour', 'hydroLinex', 'hydroLiney', 'hydroLinez', 'refLine1x', 'refLine1y', 'refLine1z', 'refLine2x', 'refLine2y', 'refLine2z', 'refLine3x', 'refLine3y', 'refLine3z', 'dev_alphaLinex', 'dev_alphaLiney', 'dev_alphaLinez', 'dev_stressAxisx', 'dev_stressAxisy', 'dev_stressAxisz', 'dev_alphaAxisx', 'dev_alphaAxisy', 'dev_alphaAxisz', 'dev_stressLinex', 'dev_stressLiney', 'dev_stressLinez', 'dev_zetaLinex', 'dev_zetaLiney', 'dev_zetaLinez']) result = result(VMSurf,BSurf,BKSurfOne,BKSurfTwo, BKSurfThree, BKSurfFour, hydroLinex, hydroLiney, hydroLinez, refLine1x, refLine1y, refLine1z, refLine2x, refLine2y, refLine2z, refLine3x, refLine3y, refLine3z, dev_alphaLinex, dev_alphaLiney, dev_alphaLinez, dev_stressAxisx, dev_stressAxisy, dev_stressAxisz, dev_alphaAxisx, dev_alphaAxisy, dev_alphaAxisz, dev_stressLinex, dev_stressLiney, dev_stressLinez, dev_zetaLinex, dev_zetaLiney, dev_zetaLinez) return result def visHardening(): #import numpy as np %matplotlib notebook %matplotlib notebook import matplotlib.pyplot as plt from matplotlib.widgets import Slider, Button, RadioButtons R = 1 G = 7 hh = G mm = 1.1 A = 0.9 theta = 0.5 nPoints = 501 x = np.linspace(-R,R,nPoints) # -----Initial Setting----- R0 = 1 S0 = R0/2 kappa = [] for xx in x: if xx >= S0: result = float(np.abs((R0 - xx)/(xx - S0))) kappa.append(result) else: result = float(np.abs((R0 + xx)/(xx - S0))) kappa.append(result) #theta = [] expH, hypH, davH = [], [], [] for k in kappa: expH.append( hh * k ** mm ) hypH.append( 3*G * (k**2)/(1 + 2*k) ) #theta.append(1/k) davH.append( 3*G * ((1 + theta)/(1 + theta + 1)*(1 + 1/theta)**A) - 1 ) # ------------------------- fig, ax = plt.subplots() plt.subplots_adjust(left=0.25, bottom=0.25) #t = np.arange(0.0, 1.0, 0.001) #a0 = 5 l, = plt.plot(x, kappa, lw=2, label='$\kappa') e, = plt.plot(x, expH, lw=2, label='$Exponential H\'$') h, = plt.plot(x, hypH, lw=2, label='$Hyperbolic H\'$') d, = plt.plot(x, davH, lw=2, label='$Davidenkov H\'$') ax.margins(x=0) ax.set_yscale('log') ax.set_xticks((-R,-R/2,0,R/2,R),('-R','-R/2','0','R/2','R')) #ax.set_xticklabels(('-R','-R/2','0','R/2','R')) axcolor = 'lightgoldenrodyellow' axS = plt.axes([0.25, 0.1, 0.65, 0.03], facecolor=axcolor) axR = plt.axes([0.25, 0.15, 0.65, 0.03], facecolor=axcolor) sStress0 = Slider(axS, '$\sigma_0$', -R, R, valinit=S0) sR = Slider(axR, 'R', 0.01, 10.0, valinit=R0) def update(val): Stress0 = sStress0.val R = sR.val #ax.set_xticks((-R,-R/2,0,R/2,R),('-R','-R/2','0','R/2','R')) #ax.set_xticklabels(('-R','-R/2','0','R/2','R')) x = np.linspace(-R,R,nPoints) kappa = [] for xx in x: if xx >= Stress0: result = float(np.abs((R - xx)/(xx - Stress0))) kappa.append(result) else: result = float(np.abs((R + xx)/(xx - Stress0))) kappa.append(result) #theta = [] expH, hypH, davH = [], [], [] for k in kappa: expH.append( hh * k ** mm ) hypH.append( 3*G * 
(k**2)/(1 + 2*k) ) #theta.append(1/k) davH.append( 3*G * ((1 + theta)/(1 + theta + 1)*(1 + 1/theta)**A) - 1 ) l.set_ydata(kappa); l.set_xdata(x) e.set_ydata(expH); e.set_xdata(x) h.set_ydata(hypH); h.set_xdata(x) d.set_ydata(davH); d.set_xdata(x) ax.set_xlim((-R,R)) fig.canvas.draw_idle() sStress0.on_changed(update) sR.on_changed(update) resetax = plt.axes([0.8, 0.025, 0.1, 0.04]) button = Button(resetax, 'Reset', color=axcolor, hovercolor='0.975') def reset(event): sStress0.reset() sR.reset() button.on_clicked(reset) #rax = plt.axes([0.025, 0.5, 0.15, 0.15], facecolor=axcolor) #radio = RadioButtons(rax, ('sand', 'clay', active=0) #def colorfunc(label): # l.set_color(label) # fig.canvas.draw_idle() #radio.on_clicked(colorfunc) plt.show() ```
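The cell above only defines the plotting functions; nothing is executed. A minimal, hypothetical driver cell might look like the sketch below. It assumes NumPy is already imported at notebook level, that `%matplotlib notebook` is active so the sliders and buttons respond, and that the helper functions this file relies on but does not define (`dev`, `normS`, `hydroProjector`, `princVal`) are provided elsewhere in the Bounding Surface project, as the `%run` instruction at the top implies.

```
# Hypothetical usage sketch; parameter values are illustrative only
import numpy as np

Su = 0.061                      # undrained shear strength used throughout this notebook
R_B = Su * (8 / 3) ** 0.5       # bounding-surface radius, (8/3)^(1/2) * Su
R_F = 0.4 * R_B                 # yield-surface radius (zeta'), matching the commented default
Stress0 = np.array([0.05, 0.05, 0.05])   # last unloading point (alpha)

surfaces = visBorjas3D(R_B, R_F, Stress0)   # namedtuple of surface/line coordinates
print(surfaces._fields[:6])                 # reference-line coordinate arrays, etc.

borjasInteractive()   # interactive sigma-space view with sliders for R, r and sigma_1..3
visHardening()        # hardening-modulus curves as a function of kappa
```

`borjasInteractive()` takes no arguments because it builds its sliders around a hard-coded initial stress state, so it can be called directly once the functions above have been run.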
github_jupyter
Branching GP Regression on hematopoietic data -- *Alexis Boukouvalas, 2017* **Note:** this notebook is automatically generated by [Jupytext](https://jupytext.readthedocs.io/en/latest/index.html), see the README for instructions on working with it.

Branching GP regression with Gaussian noise on the hematopoiesis data described in the paper "BGP: Gaussian processes for identifying branching dynamics in single cell data". This notebook shows how to build a BGP model and plot the posterior model fit and posterior branching times.

```
import time

import numpy as np
import pandas as pd
from matplotlib import pyplot as plt

import BranchedGP

plt.style.use("ggplot")
%matplotlib inline
```

### Read the hematopoiesis data. This has been simplified to a small subset of 23 genes found to be branching.
We have also performed Monocle2 (version 2.1) - DDRTree on this data. The results loaded include the Monocle estimated pseudotime, branching assignment (state) and the DDRTree latent dimensions.

```
Y = pd.read_csv("singlecelldata/hematoData.csv", index_col=[0])
monocle = pd.read_csv("singlecelldata/hematoMonocle.csv", index_col=[0])

Y.head()

monocle.head()

# Plot Monocle DDRTree space
genelist = ["FLT3", "KLF1", "MPO"]

f, ax = plt.subplots(1, len(genelist), figsize=(10, 5), sharex=True, sharey=True)
for ig, g in enumerate(genelist):
    y = Y[g].values
    yt = np.log(1 + y / y.max())
    yt = yt / yt.max()
    h = ax[ig].scatter(
        monocle["DDRTreeDim1"],
        monocle["DDRTreeDim2"],
        c=yt,
        s=50,
        alpha=1.0,
        vmin=0,
        vmax=1,
    )
    ax[ig].set_title(g)

def PlotGene(label, X, Y, s=3, alpha=1.0, ax=None):
    fig = None
    if ax is None:
        fig, ax = plt.subplots(1, 1, figsize=(5, 5))
    for li in np.unique(label):
        idxN = (label == li).flatten()
        ax.scatter(X[idxN], Y[idxN], s=s, alpha=alpha, label=int(np.round(li)))
    return fig, ax
```

### Fit BGP model
Notice the cell assignment uncertainty is higher for cells close to the branching point.

```
def FitGene(g, ns=20):  # for quick results subsample data
    t = time.time()
    Bsearch = list(np.linspace(0.05, 0.95, 5)) + [
        1.1
    ]  # set of candidate branching points
    GPy = (Y[g].iloc[::ns].values - Y[g].iloc[::ns].values.mean())[
        :, None
    ]  # remove mean from gene expression data
    GPt = monocle["StretchedPseudotime"].values[::ns]
    globalBranching = monocle["State"].values[::ns].astype(int)
    d = BranchedGP.FitBranchingModel.FitModel(Bsearch, GPt, GPy, globalBranching)
    print(g, "BGP inference completed in %.1f seconds." % (time.time() - t))
    # plot BGP
    fig, ax = BranchedGP.VBHelperFunctions.PlotBGPFit(
        GPy, GPt, Bsearch, d, figsize=(10, 10)
    )
    # overplot data
    f, a = PlotGene(
        monocle["State"].values,
        monocle["StretchedPseudotime"].values,
        Y[g].values - Y[g].iloc[::ns].values.mean(),
        ax=ax[0],
        s=10,
        alpha=0.5,
    )
    # Calculate Bayes factor of branching vs non-branching
    bf = BranchedGP.VBHelperFunctions.CalculateBranchingEvidence(d)["logBayesFactor"]
    fig.suptitle("%s log Bayes factor of branching %.1f" % (g, bf))
    return d, fig, ax

d, fig, ax = FitGene("MPO")

d_c, fig_c, ax_c = FitGene("CTSG")
```
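A small follow-up sketch (not part of the original notebook) that simply reuses `FitGene` and `CalculateBranchingEvidence`, exactly as they are called above, to tabulate the log Bayes factor of branching for a few genes; any other columns of `Y` could be substituted for the gene names listed here.

```
# Hypothetical summary cell reusing the functions defined above
results = {}
for g in ["MPO", "CTSG", "FLT3"]:
    d_g, fig_g, ax_g = FitGene(g)
    results[g] = BranchedGP.VBHelperFunctions.CalculateBranchingEvidence(d_g)["logBayesFactor"]
    plt.close(fig_g)  # avoid accumulating figures while looping

pd.Series(results, name="logBayesFactor").sort_values(ascending=False)
```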
github_jupyter
### A Jupyter Notebook exploring the Scipy.Stats module for Python.

[scipy.stats official documentation](https://docs.scipy.org/doc/scipy/reference/stats.html)

The Scipy.Stats module for Python offers a wide array of probability distributions, summary and frequency statistics, correlation functions and statistical tests, masked statistics, kernel density estimation, quasi-Monte Carlo functionality, and more. Since statistics is such a large discipline and covers many areas, there are other Python modules for areas such as machine learning, classification, regression, model selection and so on.<br>
One particular area of interest for the purpose of this demonstration is statistical testing.

#### ANOVA Testing
One-way analysis of variance (ANOVA) testing is performed on two or more independent groups to determine whether there are any statistically significant differences between the means of the groups. In practice the test is used for three or more groups, with a t-test being used when there are only two; there are three independent groups in this example, so a one-way ANOVA will be performed. [Laerd Statistics](https://statistics.laerd.com/spss-tutorials/one-way-anova-using-spss-statistics.php)<br>

#### Assumptions
As part of the one-way ANOVA process, the data must be checked against 6 assumptions to ensure that the data can actually be analysed using a one-way ANOVA. Each of the 6 assumptions will be explored further in this notebook.

***
Import Python modules
```
# import modules
# numerical operations
import numpy as np
# general plotting
import matplotlib.pyplot as plt
# data frames
import pandas as pd
# statistical operations
import scipy.stats as ss
# statistical plots
import seaborn as sns
```

#### Example One-Way ANOVA: Golf Ball driving distance dataset
***
Premium golf ball manufacturers are constantly looking at ways to develop and improve their golf balls for both professional and amateur players. One attribute of a premium golf ball that is extremely important to professional and amateur golfers alike is the distance the ball travels, particularly with a driver. In this example, one golf ball manufacturer is testing whether there is any significant change in driving distance between the current golf ball design and the new golf ball design; a prototype model is also being tested for a future release.

* **Null Hypothesis** (desired outcome) - The change in golf ball design has no effect on driving distance (the means of the current and new ball (and prototype) are almost the same) <br><br>
* **Alternative Hypothesis** - The change in golf ball design has a significant effect on driving distance (the means of the current and new (and prototype) balls are significantly different).

```
# read in the dataset
df = pd.read_csv('https://raw.githubusercontent.com/killfoley/ML-and-Stats/main/data/golf_ball.csv')
df
```

<br>

#### Assumption 1 - The dependent variable should be measured at the interval or ratio level (in this case driving distance, recorded in metres or, more likely, yards)
***
```
# dependent variable
v_dep = df['distance']
v_dep

# describe the data
v_dep.describe()
```

<br>

#### Assumption 2 - The independent variable should consist of two or more categorical, independent groups.
***
```
# independent variable
v_indep = df['ball']
v_indep
```
Note: There are three independent categories: 'current', 'new', and 'prototype'.

<br>

**Assumption 3** - There should be independence of observations, which means that there is no relationship between the observations in each group or between the groups themselves.<br>
<br>
This cannot really be shown using the data; it is more of an experiment-design issue. Considering the nature of this study, it is safe to say that the three different types of golf ball are totally independent of each other, so assumption 3 is satisfied in this case.
***

<br>

**Assumption 4** - There should be no significant outliers.
***
Data visualisation is a very effective way to identify any outliers [Medium: detecting outliers](https://medium.com/analytics-vidhya/outliers-in-data-and-ways-to-detect-them-1c3a5f2c6b1e). Outliers are single data points that do not follow the general trend of the rest of the data. For this, a box plot from the Seaborn package will be used [Seaborn: Boxplot](https://seaborn.pydata.org/generated/seaborn.boxplot.html).
```
# Boxplot of variables
sns.boxplot(x=v_dep, y=v_indep)
```
There appear to be no outliers in this dataset.
***

<br>

**Assumption 5** - The dependent variable should be approximately normally distributed for each category of the independent variable. Testing for normality is performed using the Shapiro-Wilk test of normality. [Scipy.Stats](https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.shapiro.html)
***
```
# Get the distance values for the current golf ball using pandas query
x_current = df.query('ball=="current"')['distance']
x_current

# get the distance values for the new ball
x_new = df.query('ball=="new"')['distance']
x_new

# get the distance values for the prototype ball
x_proto = df.query('ball=="prototype"')['distance']
x_proto

# Perform the Shapiro-Wilk test on each set of data
shapiro_test_current = ss.shapiro(x_current)
shapiro_test_current

# Perform for new golf ball
shapiro_test_new = ss.shapiro(x_new)
shapiro_test_new

# Perform for prototype golf ball
shapiro_test_proto = ss.shapiro(x_proto)
shapiro_test_proto
```
All three Shapiro-Wilk p-values are well above 0.05, so there is no evidence against normality for any of the three groups.
***

**Data Visualisation Plot**
A distribution plot from Seaborn is one of the most effective ways to display datasets. Display each of the datasets with a kernel density estimate; this is a nice way of visualising the probability distributions of the variables together.
```
# KDEs of the three types of golf ball.
sns.displot(x=v_dep, hue=v_indep, kind="kde")
```
The plot above shows each of the variables, and each one displays a reasonably normal distribution.
***

<br>

**Assumption 6** - There needs to be homogeneity of variances. This can be tested using the Levene test in scipy.stats. [Scipy.Stats.Levene](https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.levene.html)
***
```
# test the 3 variables for homogeneity of variance
stat, p = ss.levene(x_current, x_new, x_proto)
print(f"Levene test values: Stat={stat}, pvalue={p}")
```
Since the p-value is greater than 0.05, it can be accepted that the three groups have variances close enough to equal to proceed with the one-way ANOVA.
***

### One-Way ANOVA
Since the data have satisfied the 6 assumptions required to perform a one-way ANOVA, the test can now be performed on the data.
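For intuition about what `scipy.stats.f_oneway` computes in the next cell: the one-way ANOVA F statistic is the ratio of the between-group mean square to the within-group mean square. The quick manual cross-check below (an added illustration, not part of the original analysis) uses only the three groups already extracted above and should reproduce scipy's F and p values.

```
# Manual one-way ANOVA F statistic, as a cross-check of ss.f_oneway below
groups = [x_current, x_new, x_proto]
k = len(groups)                            # number of groups
n_total = sum(len(g) for g in groups)      # total number of observations
grand_mean = np.concatenate(groups).mean()

ss_between = sum(len(g) * (g.mean() - grand_mean) ** 2 for g in groups)
ss_within = sum(((g - g.mean()) ** 2).sum() for g in groups)

ms_between = ss_between / (k - 1)          # between-group mean square
ms_within = ss_within / (n_total - k)      # within-group mean square
F = ms_between / ms_within
p = ss.f.sf(F, k - 1, n_total - k)         # survival function of the F distribution

print(f"F = {F:.3f}, p = {p:.3f}")
```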
```
# One-way ANOVA scipy.stats
ss.f_oneway( x_current, x_new, x_proto)
```

### Interpreting the results
The p-value is compared to a significance level of 0.05: a p-value below 0.05 would be needed to reject the null hypothesis and conclude that at least one group mean differs significantly. In this case the p-value of 0.158 is above 0.05, so the null hypothesis is not rejected: there is no significant difference in mean distance between the golf balls, and the manufacturer can proceed with the release of the new golf ball. <br><br>
Display each of the golf ball mean distances using Numpy.
```
print(f"Mean Current: {np.mean(x_current)}, Mean New: {np.mean(x_new)}, Mean Prototype: {np.mean(x_proto)}")
```
**Conclusion** The mean distances of the golf balls do not differ significantly.
***
#### END
github_jupyter
``` %matplotlib inline import itertools import os os.environ['CUDA_VISIBLE_DEVICES']="" import numpy as np import gpflow import gpflow.training.monitor as mon import numbers import matplotlib.pyplot as plt import tensorflow as tf ``` # Demo: `gpflow.training.monitor` In this notebook we'll demo how to use `gpflow.training.monitor` for logging the optimisation of a GPflow model. ## Creating the GPflow model We first generate some random data and create a GPflow model. Under the hood, GPflow gives a unique name to each model which is used to name the Variables it creates in the TensorFlow graph containing a random identifier. This is useful in interactive sessions, where people may create a few models, to prevent variables with the same name conflicting. However, when loading the model, we need to make sure that the names of all the variables are exactly the same as in the checkpoint. This is why we pass name="SVGP" to the model constructor, and why we use gpflow.defer_build(). ``` np.random.seed(0) X = np.random.rand(10000, 1) * 10 Y = np.sin(X) + np.random.randn(*X.shape) Xt = np.random.rand(10000, 1) * 10 Yt = np.sin(Xt) + np.random.randn(*Xt.shape) with gpflow.defer_build(): m = gpflow.models.SVGP(X, Y, gpflow.kernels.RBF(1), gpflow.likelihoods.Gaussian(), Z=np.linspace(0, 10, 5)[:, None], minibatch_size=100, name="SVGP") m.likelihood.variance = 0.01 m.compile() ``` Let's compute log likelihood before the optimisation ``` print('LML before the optimisation: %f' % m.compute_log_likelihood()) ``` We will be using a TensorFlow optimiser. All TensorFlow optimisers have a support for `global_step` variable. Its purpose is to track how many optimisation steps have occurred. It is useful to keep this in a TensorFlow variable as this allows it to be restored together with all the parameters of the model. The code below creates this variable using a monitor's helper function. It is important to create it before building the monitor in case the monitor includes a checkpoint task. This is because the checkpoint internally uses the TensorFlow Saver which creates a list of variables to save. Therefore all variables expected to be saved by the checkpoint task should exist by the time the task is created. ``` session = m.enquire_session() global_step = mon.create_global_step(session) ``` ## Construct the monitor Next we need to construct the monitor. `gpflow.training.monitor` provides classes that are building blocks for the monitor. Essengially, a monitor is a function that is provided as a callback to an optimiser. It consists of a number of tasks that may be executed at each step, subject to their running condition. In this example, we want to: - log certain scalar parameters in TensorBoard, - log the full optimisation objective (log marginal likelihood bound) periodically, even though we optimise with minibatches, - store a backup of the optimisation process periodically, - log performance for a test set periodically. 
We will define these tasks as follows: ``` print_task = mon.PrintTimingsTask().with_name('print')\ .with_condition(mon.PeriodicIterationCondition(10))\ .with_exit_condition(True) sleep_task = mon.SleepTask(0.01).with_name('sleep').with_name('sleep') saver_task = mon.CheckpointTask('./monitor-saves').with_name('saver')\ .with_condition(mon.PeriodicIterationCondition(10))\ .with_exit_condition(True) file_writer = mon.LogdirWriter('./model-tensorboard') model_tboard_task = mon.ModelToTensorBoardTask(file_writer, m).with_name('model_tboard')\ .with_condition(mon.PeriodicIterationCondition(10))\ .with_exit_condition(True) lml_tboard_task = mon.LmlToTensorBoardTask(file_writer, m).with_name('lml_tboard')\ .with_condition(mon.PeriodicIterationCondition(100))\ .with_exit_condition(True) ``` As the above code shows, each task can be assigned a name and running conditions. The name will be shown in the task timing summary. There are two different types of running conditions: `with_condition` controls execution of the task at each iteration in the optimisation loop. `with_exit_condition` is a simple boolean flag indicating that the task should also run at the end of optimisation. In this example we want to run our tasks periodically, at every iteration or every 10th or 100th iteration. Notice that the two TensorBoard tasks will write events into the same file. It is possible to share a file writer between multiple tasks. However it is not possible to share the same event location between multiple file writers. An attempt to open two writers with the same location will result in error. ## Custom tasks We may also want to perfom certain tasks that do not have pre-defined `Task` classes. For example, we may want to compute the performance on a test set. Here we create such a class by extending `BaseTensorBoardTask` to log the testing benchmarks in addition to all the scalar parameters. ``` class CustomTensorBoardTask(mon.BaseTensorBoardTask): def __init__(self, file_writer, model, Xt, Yt): super().__init__(file_writer, model) self.Xt = Xt self.Yt = Yt self._full_test_err = tf.placeholder(gpflow.settings.tf_float, shape=()) self._full_test_nlpp = tf.placeholder(gpflow.settings.tf_float, shape=()) self._summary = tf.summary.merge([tf.summary.scalar("test_rmse", self._full_test_err), tf.summary.scalar("test_nlpp", self._full_test_nlpp)]) def run(self, context: mon.MonitorContext, *args, **kwargs) -> None: minibatch_size = 100 preds = np.vstack([self.model.predict_y(Xt[mb * minibatch_size:(mb + 1) * minibatch_size, :])[0] for mb in range(-(-len(Xt) // minibatch_size))]) test_err = np.mean((Yt - preds) ** 2.0)**0.5 self._eval_summary(context, {self._full_test_err: test_err, self._full_test_nlpp: 0.0}) custom_tboard_task = CustomTensorBoardTask(file_writer, m, Xt, Yt).with_name('custom_tboard')\ .with_condition(mon.PeriodicIterationCondition(100))\ .with_exit_condition(True) ``` Now we can put all these tasks into a monitor. ``` monitor_tasks = [print_task, model_tboard_task, lml_tboard_task, custom_tboard_task, saver_task, sleep_task] monitor = mon.Monitor(monitor_tasks, session, global_step) ``` ## Running the optimisation We finally get to running the optimisation. We may want to continue a previously run optimisation by resotring the TensorFlow graph from the latest checkpoint. Otherwise skip this step. 
```
if os.path.isdir('./monitor-saves'):
    mon.restore_session(session, './monitor-saves')

optimiser = gpflow.train.AdamOptimizer(0.01)

with mon.Monitor(monitor_tasks, session, global_step, print_summary=True) as monitor:
    optimiser.minimize(m, step_callback=monitor, maxiter=450, global_step=global_step)

file_writer.close()
```

Now let's compute the log likelihood again. Hopefully we will see an increase in its value.

```
print('LML after the optimisation: %f' % m.compute_log_likelihood())
```
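As a quick sanity check (an editor's sketch, not part of the original demo), the `global_step` variable can be read back from the session after optimisation; if a checkpoint was restored above, it should continue counting from the restored value rather than from zero.

```
# Inspect the optimisation step counter stored in the TensorFlow graph.
# After restoring from './monitor-saves', this continues from the saved
# step count instead of restarting at 0.
print('global_step after the optimisation: %d' % session.run(global_step))
```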
``` from resources.workspace import * %matplotlib inline ``` ## Dynamical systems are systems (sets of equations) whose variables evolve in time (the equations contains time derivatives). As a branch of mathematics, its theory is mainly concerned with understanding the behaviour of solutions (trajectories) of the systems. ## Chaos is also known as the butterfly effect: "a buttefly that flaps its wings in Brazil can 'cause' a hurricane in Texas". As opposed to the opinions of Descartes/Newton/Laplace, chaos effectively means that even in a deterministic (non-stochastic) universe, we can only predict "so far" into the future. This will be illustrated below using two toy-model dynamical systems made by Edward Lorenz. --- ## The Lorenz (1963) attractor The [Lorenz-63 dynamical system](resources/DA_intro.pdf#page=22) can be derived as an extreme simplification of *Rayleigh-Bénard convection*: fluid circulation in a shallow layer of fluid uniformly heated (cooled) from below (above). This produces the following 3 *coupled* ordinary differential equations (ODE): $$ \begin{aligned} \dot{x} & = \sigma(y-x) \\ \dot{y} & = \rho x - y - xz \\ \dot{z} & = -\beta z + xy \end{aligned} $$ where the "dot" represents the time derivative, $\frac{d}{dt}$. The state vector is $\mathbf{x} = (x,y,z)$, and the parameters are typically set to ``` SIGMA = 10.0 BETA = 8/3 RHO = 28.0 ``` The ODEs can be coded as follows ``` def dxdt(xyz, t0, sigma, beta, rho): """Compute the time-derivative of the Lorenz-63 system.""" x, y, z = xyz return [ sigma * (y - x), x * (rho - z) - y, x * y - beta * z ] ``` #### Numerical integration to compute the trajectories Below is a function to numerically **integrate** the ODEs and **plot** the solutions. <!-- This function also takes arguments to control ($\sigma$, $\beta$, $\rho$) and of the numerical integration (`N`, `T`). --> ``` from scipy.integrate import odeint # integrator output_63 = [None] @interact( sigma=(0.,50), beta=(0.,5), rho=(0.,50), N=(0,50), eps=(0.01,1), T=(0.,30)) def animate_lorenz(sigma=SIGMA, beta=BETA, rho=RHO , N=2, eps=0.01, T=1.0): # Initial conditions: perturbations around some "proto" state seed(1) x0_proto = array([-6.1, 1.2, 32.5]) x0 = x0_proto + eps*randn((N, 3)) # Compute trajectories tt = linspace(0, T, int(100*T)+1) # Time sequence for trajectory dd = lambda x,t: dxdt(x,t, sigma,beta,rho) # Define dxdt(x,t) with fixed params. xx = array([odeint(dd, xn, tt) for xn in x0]) # Integrate # PLOTTING ax = plt.figure(figsize=(10,5)).add_subplot(111, projection='3d') ax.axis('off') colors = plt.cm.jet(linspace(0,1,N)) for i in range(N): ax.plot(*(xx[i,:,:].T),'-' ,c=colors[i]) #ax.scatter3D(*xx[i,0 ,:],s=20,c=colors[i],marker='<') ax.scatter3D(*xx[i,-1,:],s=40,c=colors[i]) output_63[0] = xx ``` **Exc 4.2**: * Move `T` (use your arrow keys). What does it control? * Set `T` to something small; move the sliders for `N` and `eps`. What do they control? * Visually investigate the system's (i.e. the trajectories') sensititivy to initial conditions by moving `T`, `N` and `eps`. Very roughtly, estimate its predictability (i.e. how far into the future one can forecasts for a fixed `eps` and a fixed skill level)? ### Averages **Exc 4.8*:** Slide `N` and `T` to their upper bounds. Execute the code cell below. It computes the average location of the $i$-th component of the state in two ways. Do you think the histograms actually approximate the same distribution? If so, then the system is called [ergodic](https://en.wikipedia.org/wiki/Ergodic_theory#Ergodic_theorems). 
In that case, does it matter if one computes statistics (over the system dynamics) by using several short experiment runs or one long run?

```
xx = output_63[0][:,:,0] # state component index 0 (must be 0,1,2)

plt.hist(xx[:,-1] ,normed=1,label="ensemble dist.",alpha=1.0) # -1: last time
plt.hist(xx[-1,:] ,normed=1,label="temporal dist.",alpha=0.5) # -1: last ensemble member

#plt.hist(xx.ravel(),normed=1,label="total distribution",alpha=0.5)

plt.legend();
```

---

## The "Lorenz-95" model

The Lorenz-95 system (also commonly called Lorenz-96) is a "1D" model, designed to simulate atmospheric convection. Each state variable $\mathbf{x}_i$ can be considered some atmospheric quantity at a grid point at a fixed latitude of the earth. The system is given by the coupled set of ODEs,

$$ \frac{d \mathbf{x}_i}{dt} = (\mathbf{x}_{i+1} - \mathbf{x}_{i-2}) \mathbf{x}_{i-1} - \mathbf{x}_i + F \, , \quad \quad i \in \{1,\ldots,m\} \, , $$

where the subscript indices apply periodically.

This model is not derived from physics but has similar characteristics, such as
<ul>
    <li> there is external forcing, determined by a parameter $F$;</li>
    <li> there is internal dissipation, emulated by the linear term;</li>
    <li> there is energy-conserving advection, emulated by quadratic terms.</li>
</ul>

[Further description](resources/DA_intro.pdf#page=23).

**Exc 4.10:** Show that the "total energy" $\sum_{i=1}^{m} \mathbf{x}_i^2$ is preserved by the quadratic terms in the ODE.

```
show_answer("Hint: Lorenz energy")
show_answer("Lorenz energy")
```

The model is animated below.

```
# For all i, any n: s(x,n) := x[i+n], circularly.
def s(x,n):
    return np.roll(x,-n)

output_95 = [None]

def animate_lorenz_95(m=40,Force=8.0,eps=0.01,T=0):
    # Initial conditions: perturbations
    x0    = zeros(m)
    x0[0] = eps

    def dxdt(x,t):
        return (s(x,1)-s(x,-2))*s(x,-1) - x + Force

    tt = linspace(0, T, int(40*T)+1)
    xx = odeint(lambda x,t: dxdt(x,t), x0, tt)

    output_95[0] = xx

    plt.figure(figsize=(7,4))

    # Plot last only
    #plt.plot(xx[-1],'b')

    # Plot multiple
    Lag = 8
    colors = plt.cm.cubehelix(0.1+0.6*linspace(0,1,Lag))
    for k in range(Lag,0,-1):
        plt.plot(xx[max(0,len(xx)-k)],c=colors[Lag-k])

    plt.ylim(-10,20)
    plt.show()

interact(animate_lorenz_95,eps=(0.01,3,0.1),T=(0.05,40,0.05),Force=(0,40,1),m=(5,60,1));
```

**Exc 4.12:** Under which settings of the force `F` is the system chaotic?

---

## Error/perturbation dynamics

**Exc 4.14*:** Suppose $x(t)$ and $z(t)$ are "twins": they evolve according to the same law $f$:

$$\frac{dx}{dt} = f(x) \\ \frac{dz}{dt} = f(z) \, .$$

* a) Define the "error": $\varepsilon(t) = x(t) - z(t)$. Suppose $z(0)$ is close to $x(0)$. Let $F = \frac{df}{dx}(x(t))$. Show that the error evolves according to the ordinary differential equation (ODE)
$$\frac{d \varepsilon}{dt} \approx F \varepsilon \, .$$
* b) Show that the error grows exponentially: $\varepsilon(t) = \varepsilon(0) e^{F t} $.
* c)
    * 1) Suppose $F<1$. What happens to the error? What does this mean for predictability?
    * 2) Now suppose $F>1$. Given that all observations are uncertain (i.e. $R_t>0$, if only ever so slightly), can we ever hope to estimate $x(t)$ with 0 uncertainty?
* d) Consider the ODE derived above. How might we change it in order to model (i.e. emulate) a saturation of the error at some level? Can you solve this equation?
* e) Now suppose $z(t)$ evolves according to $\frac{dz}{dt} = g(z)$, with $g \neq f$. What is now the differential equation governing the evolution of the error, $\varepsilon$?

```
show_answer("error evolution")
```

**Exc 4.16*:** Recall the Lorenz-63 system.
What is its doubling time (i.e. estimate how long it takes for two trajectories to grow twice as far apart as they were to start with)?
*Hint: set `N=50, eps=0.01, T=1`, and compute the spread of the particles now as compared to how they started.*

```
xx = output_63[0][:,-1] # Ensemble of particles at the end of integration

### compute your answer here ###

show_answer("doubling time")
```

The answer actually depends on where in "phase space" the particles started. To get a universal answer one must average these experiments over many different initial conditions.

---

## In summary:
Prediction (forecasting) with these systems is challenging because they are chaotic: small errors grow exponentially.

Conversely: chaos means that there is a limit to how far into the future we can make predictions (skillfully). It is therefore crucial to minimize the initial error as much as possible. This is a task for DA.

### Next: [Ensemble [Monte-Carlo] approach](T5 - Ensemble [Monte-Carlo] approach.ipynb)
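As a footnote to the summary (an editor's sketch, not part of the original exercises), the exponential error growth can be quantified directly from the ensemble stored by the interactive Lorenz-63 cell, assuming it was last run with, say, `N=50, eps=0.01, T=1`:

```
xx = output_63[0]                                    # shape: (N, len(tt), 3)
spread_start = np.std(xx[:, 0, :], axis=0).mean()    # ensemble spread at t=0
spread_end   = np.std(xx[:, -1, :], axis=0).mean()   # ensemble spread at t=T
print("The ensemble spread grew by a factor of %.1f over this interval."
      % (spread_end / spread_start))
```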
<img width="10%" alt="Naas" src="https://landen.imgix.net/jtci2pxwjczr/assets/5ice39g4.png?w=160"/> # IUCN - Extinct species <a href="https://app.naas.ai/user-redirect/naas/downloader?url=https://raw.githubusercontent.com/jupyter-naas/awesome-notebooks/master/IUCN/IUCN_Extinct_species.ipynb" target="_parent"><img src="https://naasai-public.s3.eu-west-3.amazonaws.com/open_in_naas.svg"/></a> **Tags:** #iucn #opendata #extinctspecies #analytics #plotly **Author:** [Martin Delasalle](https://github.com/delasalle-sio-martin) Source : https://www.iucnredlist.org/statistics If you want another view of the data : Link : https://ourworldindata.org/extinctions ### History The initial aim was to compare the number of threatened species per species over time (e.g. number of pandas per year). After a lot of research, it turns out that this kind of data is not available or it is only data from one year (2015 or 2018). Therefore, we decided to start another project: Number of threatened species per year, with details by category using data from this site : https://www.iucnredlist.org/resources/summary-statistics#Summary%20Tables So we took the pdf from this site and turned it into a csv. But the data was heavy and not easy to use. Moreover, we thought that this would not necessarily be viable and adaptable over time. So we decided to take another datasource on a similar subject : *Extinct Species*, from this website : https://www.iucnredlist.org/statistics ### Links that we found during the course - https://donnees.banquemondiale.org/indicator/EN.MAM.THRD.NO (only 2018) - https://www.eea.europa.eu/data-and-maps/data/european-red-lists-4/european-red-list/european-red-list-csv-files/view (old Dataset, last upload was in 2015) - https://www.worldwildlife.org/species/directory?page=2 (the years are not available) - https://www.worldwildlife.org/pages/conservation-science-data-and-tools (apart from the case) - https://databasin.org/datasets/68635d7c77f1475f9b6c1d1dbe0a4c4c/ (we can't use it) - https://gisandscience.com/2009/12/01/download-datasets-from-the-world-wildlife-funds-conservation-science-program/ (no datas about threatened species) - https://data.world/datasets/tiger (only about tigers but there are no datas usefull) ## Input ### Import library ``` import pandas as pd import plotly.express as px ``` ### Setup your variables 👉 Download data in [CSV](https://www.iucnredlist.org/statistics) and drop it on your root folder ``` # Input csv csv_input = "Table 3 Species by kingdom and class - show all.csv" ``` ## Model ### Get data from csv ``` # We load the csv file data = pd.read_csv(csv_input, ',') # We set the column Name as index data.set_index('Name', inplace = True) # Then we select the columns EX, EW and Name, and all the lines we want in the graph table = data.loc[["Total", "GASTROPODA", "BIVALVIA", "AVES", "MAMMALIA", "ACTINOPTERYGII", "CEPHALASPIDOMORPHI", "INSECTA", "AMPHIBIA", "REPTILIA", "ARACHNIDA", "CLITELLATA", "DIPLOPODA", "ENOPLA", "TURBELLARIA", "MALACOSTRACA", "MAXILLOPODA", "OSTRACODA"]# add species here ,"EX":"EW"] table # We add a new column 'CATEGORY' to our Dataframe table["CATEGORY"] = ["Total", "Molluscs", "Molluscs", "Birds", "Mammals", "Fishes", "Fishes", "Insects", "Amphibians", "Reptiles", "Others", "Others", "Others", "Others", "Others", "Crustaceans", "Crustaceans", "Crustaceans"] table = table.loc[:,["CATEGORY","EX"]] # we drop the column "EW" table # ---NOTE : If you want to add new species, you have to also add his category # We groupby CATEGORIES : 
table.reset_index(drop=True, inplace=True)
table = table.groupby(['CATEGORY']).sum().reset_index()
table.rename(columns = {'EX':'Extincted'}, inplace=True)
table
```

## Output

### Plot graph

```
# We use plotly to show the data with a horizontal bar chart
def create_barchart(table):
    Graph = table.sort_values('Extincted', ascending=False)
    fig = px.bar(Graph, x="Extincted", y="CATEGORY", color="CATEGORY", orientation="h")
    fig.update_layout(title_text="Number of species that have gone extinct since 1500", title_x=0.5)
    fig.add_annotation(x=800, y=0,
                       text="Source : IUCN Red List of Threatened Species<br>https://www.iucnredlist.org/statistics",
                       showarrow=False)
    fig.show()
    return fig

fig = create_barchart(table)
```
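If the chart needs to be shared outside the notebook, Plotly figures can be written to a standalone HTML file (a small sketch using the `fig` returned by `create_barchart` above; the filename is just an example):

```
# Save the interactive bar chart as a self-contained HTML file
fig.write_html("extinct_species_by_category.html")
```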
``` #importing modules import cv2 import numpy as np import math import time import _thread import wave import struct def playSound(name): import simpleaudio as sa wave_obj = sa.WaveObject.from_wave_file(name) play_obj = wave_obj.play() ####CRASHES ON FAST INPUT#### # import pyglet # player = pyglet.media.Player() # src = pyglet.media.load(name) # player.volume = 0.1 # player.queue(src) # player.play() #####VERY SLOW#### # import pygame.mixer # pm = pygame.mixer # pm.init() # sound = pm.Sound(name) # sound.set_volume(0.5) # sound.play() def drawEllipse(contours, text): if(contours == None or len(contours) == 0): return ((-100,-100), None) c = max(contours, key=cv2.contourArea) ((x, y), radius) = cv2.minEnclosingCircle(c) if(cv2.contourArea(c) < 500): return ((-100,-100), None) ellipse = cv2.fitEllipse(c) cv2.ellipse(img, ellipse, (0,0,0), 2) blank = np.zeros(img.shape[0:2]) ellipseImage = cv2.ellipse(blank, ellipse, (255, 255, 255), -2) # cv2.imshow("ell",ellipseImage) M = cv2.moments(c) if M["m00"] == 0: return center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"])) if radius > 10: # draw the ellipse and centroid on the frame, # then update the list of tracked points # cv2.circle(img, (int(x), int(y)), int(radius),(0, 0, 0), 2) cv2.circle(img, center, 3, (0, 0, 255), -1) cv2.putText(img,text, (center[0]+10,center[1]), cv2.FONT_HERSHEY_SIMPLEX, 0.4,(0, 0, 0),2) cv2.putText(img,"("+str(center[0])+","+str(center[1])+")", (center[0]+10,center[1]+15), cv2.FONT_HERSHEY_SIMPLEX, 0.4,(0, 0, 0),1) return (center, ellipseImage) def detectCollision(imgA, imgB, velocity, touching, name): mA = cv2.moments(imgA, False) mB = cv2.moments(imgB, False) blank = np.zeros(img.shape[0:2]) if type(imgA) == type(None) or type(imgB) == type(None): return intersection = cv2.bitwise_and(imgA, imgB) area = cv2.countNonZero(intersection) if area < 20: touching = False if area > 100 and not touching: # print(int(mA["m01"] / mA["m00"])< int(mB["m01"] / mB["m00"])) # print(area) if int(mA["m01"] / mA["m00"])< int(mB["m01"] / mB["m00"]): if velocity > 10: _thread.start_new_thread(playSound, (name,)) # playSound(name) touching = True return touching #capturing video through webcam cap=cv2.VideoCapture(0) frameCount = 0 timeStart = time.time() b1 = (0,0) b2 = (0,0) currentBlueVelocity = 0 r1 = (0,0) r2 = (0,0) currentRedVelocity = 0 blueAndSnare = False blueAndHiHat = False redAndSnare = False redAndHiHat = False booli = [False for i in range(2)] numDrums = 0 drums = [None for i in range(2)] def newDrum(pos, name): # pos = (x, y) drum = cv2.circle(img,pos, 50,(0,0, 0),5) cv2.putText(drum,name,pos,cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0,0,255), 2) blank = np.zeros(img.shape[0:2]) drum_image = cv2.circle(blank.copy(), pos, 50, (255, 255, 255), -5) global numDrums numDrums += 1 return (name, drum_image) while(1): now = time.time() fps = frameCount / (now - timeStart+1.0) frameCount += 1 _, img = cap.read() img = cv2.flip(img, 1) # cv2.putText(img,"FPS : ",(10,100),cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0,0,0), 2) cv2.putText(img,"FPS: %.2f" % (fps),(10,100),cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0,0,0), 2) # Add the drums drums[0] = newDrum((350, 400), "snare") drums[1] = newDrum((100, 400), "hi_hat") #converting frame(img i.e BGR) to HSV (hue-saturation-value) hsv=cv2.cvtColor(img,cv2.COLOR_BGR2HSV) #defining the range of red color red_lower=np.array([255,255,255],np.uint8) red_upper=np.array([255,255,255],np.uint8) #defining the Range of Blue color blue_lower=np.array([95,60,94],np.uint8) blue_upper=np.array([163,168,209],np.uint8) 
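# Editor's note (not part of the original script): the red range above is degenerate --
# red_lower == red_upper == [255,255,255] -- so cv2.inRange() returns an (almost) empty
# mask and the red marker is never tracked. A commonly used HSV range for red is roughly
# lower=[136,87,111], upper=[180,255,255]; treat these numbers as a starting point and
# tune them for your own camera and lighting.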
#finding the range of red,blue color in the image red=cv2.inRange(hsv, red_lower, red_upper) blue=cv2.inRange(hsv,blue_lower,blue_upper) #Morphological transformation, Dilation kernal = np.ones((5 ,5), "uint8") red=cv2.dilate(red, kernal) res=cv2.bitwise_and(img, img, mask = red) blue=cv2.dilate(blue,kernal) res1=cv2.bitwise_and(img, img, mask = blue) #Tracking the Red Color (contours,hierarchy)=cv2.findContours(red,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) (redCenter, redEllipse) = drawEllipse(contours, "Red") # cv2.drawContours(img, contours, -1 , (0,0,255), 2) #Tracking the Blue Color (contours,hierarchy)=cv2.findContours(blue,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) # cv2.drawContours(img, contours, -1 , (255,0,0), 2) (blueCenter, blueEllipse) = drawEllipse(contours, "Blue") b1 = b2 b2 = blueCenter bDelta = math.sqrt((b2[0] - b1[0])**2 + (b2[1] - b1[1])**2) bVelocity = bDelta * fps / 100 if (bVelocity - currentBlueVelocity) > 10: cv2.putText(img,str(int(bVelocity)),(10, 50),cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255,0,0), 2) else: cv2.putText(img,str(int(currentBlueVelocity)),(10, 50),cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255,0,0), 2) currentBlueVelocity = bVelocity r1 = r2 r2 = redCenter rDelta = math.sqrt((r2[0] - r1[0])**2 + (r2[1] - r1[1])**2) rVelocity = rDelta * fps / 100 if (rVelocity - currentRedVelocity) > 10: cv2.putText(img,str(int(rVelocity)),(70, 50),cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0,0,255), 2) else: cv2.putText(img,str(int(currentRedVelocity)),(70, 50),cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0,0,255), 2) currentRedVelocity = rVelocity for i in range(len(drums)): # print(booli) booli[i] = detectCollision(blueEllipse, drums[i][1], currentBlueVelocity, booli[i], "{0}.wav".format(drums[i][0])) # blueAndSnare = detectCollision(blueEllipse, drums[0][1], blueAndSnare, "snare.wav") # blueAndHiHat = detectCollision(blueEllipse, drums[1][1], blueAndHiHat, "hi_hat.wav") # blueAndSnare = detectCollision(blueEllipse, snare_image, blueAndSnare, "snare.wav") # blueAndHiHat = detectCollision(blueEllipse, hi_hat_image, blueAndHiHat, "Closed-Hi-Hat.wav") # # redAndSnare = detectCollision(redEllipse, snare_image, redAndSnare, "snare.wav") # redAndHiHat = detectCollision(redEllipse, hi_hat_image, redAndHiHat, "Closed-Hi-Hat.wav") #cv2.imshow("Redcolour",red) cv2.imshow("Color Tracking",img) #cv2.imshow("red",res) if cv2.waitKey(10) & 0xFF == ord('q'): cap.release() cv2.destroyAllWindows() break # getting a black drum to hit #importing modules import cv2 import numpy as np import math import time import _thread import wave import struct def playSound(name): import simpleaudio as sa wave_obj = sa.WaveObject.from_wave_file(name) play_obj = wave_obj.play() ####CRASHES ON FAST INPUT#### # import pyglet # player = pyglet.media.Player() # src = pyglet.media.load(name) # player.volume = 0.1 # player.queue(src) # player.play() #####VERY SLOW#### # import pygame.mixer # pm = pygame.mixer # pm.init() # sound = pm.Sound(name) # sound.set_volume(0.5) # sound.play() def drawEllipse(contours, text): if(contours == None or len(contours) == 0): return ((-100,-100), None) c = max(contours, key=cv2.contourArea) ((x, y), radius) = cv2.minEnclosingCircle(c) if(cv2.contourArea(c) < 500): return ((-100,-100), None) ellipse = cv2.fitEllipse(c) cv2.ellipse(img, ellipse, (0,0,0), 2) blank = np.zeros(img.shape[0:2]) ellipseImage = cv2.ellipse(blank, ellipse, (255, 255, 255), -2) # cv2.imshow("ell",ellipseImage) M = cv2.moments(c) if M["m00"] == 0: return center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"])) if radius > 10: # draw the 
ellipse and centroid on the frame, # then update the list of tracked points # cv2.circle(img, (int(x), int(y)), int(radius),(0, 0, 0), 2) cv2.circle(img, center, 3, (0, 0, 255), -1) cv2.putText(img,text, (center[0]+10,center[1]), cv2.FONT_HERSHEY_SIMPLEX, 0.4,(0, 0, 0),2) cv2.putText(img,"("+str(center[0])+","+str(center[1])+")", (center[0]+10,center[1]+15), cv2.FONT_HERSHEY_SIMPLEX, 0.4,(0, 0, 0),1) return (center, ellipseImage) def detectCollision(imgA, imgB, velocity, touching, name): mA = cv2.moments(imgA, False) mB = cv2.moments(imgB, False) blank = np.zeros(img.shape[0:2]) if type(imgA) == type(None) or type(imgB) == type(None): return intersection = cv2.bitwise_and(imgA, imgB) area = cv2.countNonZero(intersection) if area < 20: # default 20 touching = False if area > 100 and not touching: # print(int(mA["m01"] / mA["m00"])< int(mB["m01"] / mB["m00"])) # print(area) if int(mA["m01"] / mA["m00"])< int(mB["m01"] / mB["m00"]): if velocity > 10: _thread.start_new_thread(playSound, (name,)) # playSound(name) touching = True return touching #capturing video through webcam cap=cv2.VideoCapture(0) frameCount = 0 timeStart = time.time() b1 = (0,0) b2 = (0,0) currentBlueVelocity = 0 r1 = (0,0) r2 = (0,0) currentRedVelocity = 0 blueAndSnare = False blueAndHiHat = False redAndSnare = False redAndHiHat = False booli = [False for i in range(2)] numDrums = 0 drums = [None for i in range(2)] def newDrum(pos, name): # pos = (x, y) drum = cv2.circle(img,pos, 50,(0,0, 0),5) cv2.putText(drum,name,pos,cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0,0,255), 2) blank = np.zeros(img.shape[0:2]) drum_image = cv2.circle(blank.copy(), pos, 50, (255, 255, 255), -5) global numDrums numDrums += 1 return (name, drum_image) while(1): now = time.time() fps = frameCount / (now - timeStart+1.0) frameCount += 1 _, img = cap.read() img = cv2.flip(img, 1) # cv2.putText(img,"FPS : ",(10,100),cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0,0,0), 2) cv2.putText(img,"FPS: %.2f" % (fps),(10,100),cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0,0,0), 2) # Add the drums drums[0] = newDrum((350, 400), "snare") drums[1] = newDrum((100, 400), "hi_hat") #converting frame(img i.e BGR) to HSV (hue-saturation-value) hsv=cv2.cvtColor(img,cv2.COLOR_BGR2HSV) #defining the range of red color red_lower=np.array([0,0,0],np.uint8) red_upper=np.array([105,105,105],np.uint8) #defining the Range of Blue color blue_lower=np.array([0,0,0],np.uint8) blue_upper=np.array([105,105,105],np.uint8) #finding the range of red,blue color in the image red=cv2.inRange(hsv, red_lower, red_upper) blue=cv2.inRange(hsv,blue_lower,blue_upper) #Morphological transformation, Dilation kernal = np.ones((5 ,5), "uint8") red=cv2.dilate(red, kernal) res=cv2.bitwise_and(img, img, mask = red) blue=cv2.dilate(blue,kernal) res1=cv2.bitwise_and(img, img, mask = blue) #Tracking the Red Color (contours,hierarchy)=cv2.findContours(red,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) (redCenter, redEllipse) = drawEllipse(contours, "Red") # cv2.drawContours(img, contours, -1 , (0,0,255), 2) #Tracking the Blue Color (contours,hierarchy)=cv2.findContours(blue,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) # cv2.drawContours(img, contours, -1 , (255,0,0), 2) (blueCenter, blueEllipse) = drawEllipse(contours, "Blue") b1 = b2 b2 = blueCenter bDelta = math.sqrt((b2[0] - b1[0])**2 + (b2[1] - b1[1])**2) bVelocity = bDelta * fps / 100 if (bVelocity - currentBlueVelocity) > 10: cv2.putText(img,str(int(bVelocity)),(10, 50),cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255,0,0), 2) else: cv2.putText(img,str(int(currentBlueVelocity)),(10, 
50),cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255,0,0), 2) currentBlueVelocity = bVelocity r1 = r2 r2 = redCenter rDelta = math.sqrt((r2[0] - r1[0])**2 + (r2[1] - r1[1])**2) rVelocity = rDelta * fps / 100 if (rVelocity - currentRedVelocity) > 10: cv2.putText(img,str(int(rVelocity)),(70, 50),cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0,0,255), 2) else: cv2.putText(img,str(int(currentRedVelocity)),(70, 50),cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0,0,255), 2) currentRedVelocity = rVelocity for i in range(len(drums)): # print(booli) booli[i] = detectCollision(blueEllipse, drums[i][1], currentBlueVelocity, booli[i], "{0}.wav".format(drums[i][0])) # blueAndSnare = detectCollision(blueEllipse, drums[0][1], blueAndSnare, "snare.wav") # blueAndHiHat = detectCollision(blueEllipse, drums[1][1], blueAndHiHat, "hi_hat.wav") # blueAndSnare = detectCollision(blueEllipse, snare_image, blueAndSnare, "snare.wav") # blueAndHiHat = detectCollision(blueEllipse, hi_hat_image, blueAndHiHat, "Closed-Hi-Hat.wav") # # redAndSnare = detectCollision(redEllipse, snare_image, redAndSnare, "snare.wav") # redAndHiHat = detectCollision(redEllipse, hi_hat_image, redAndHiHat, "Closed-Hi-Hat.wav") #cv2.imshow("Redcolour",red) cv2.imshow("Color Tracking",img) #cv2.imshow("red",res) if cv2.waitKey(10) & 0xFF == ord('q'): cap.release() cv2.destroyAllWindows() break ```
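The collision handler expects sample files such as `snare.wav` and `hi_hat.wav` to sit next to the script. If no drum samples are available, the `wave` and `struct` modules that are already imported can generate simple placeholder tones; this is only a rough sketch with arbitrary frequencies so that the demo produces *some* sound:

```
import math
import struct
import wave

def make_tone(filename, freq_hz, duration_s=0.25, volume=0.5, rate=44100):
    """Write a short sine-wave tone to a 16-bit mono WAV file."""
    n_samples = int(rate * duration_s)
    with wave.open(filename, 'w') as wf:
        wf.setnchannels(1)   # mono
        wf.setsampwidth(2)   # 16-bit samples
        wf.setframerate(rate)
        for i in range(n_samples):
            sample = volume * math.sin(2 * math.pi * freq_hz * i / rate)
            wf.writeframes(struct.pack('<h', int(sample * 32767)))

# Placeholder sounds -- the frequencies are arbitrary choices.
make_tone('snare.wav', 220.0)
make_tone('hi_hat.wav', 880.0)
```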
# Can I predict how much people will spend online? ### Insights from Google Analytics data ``` import pandas as pd import matplotlib.pyplot as plt import numpy as np import plotly.express as px import matplotlib.pyplot as plt import seaborn as sns df = pd.read_csv('mergedGAdata.csv') print(df.shape) ``` # Wrangling ``` ##Turn Session Duration into numbers not strings print(type(df['Session Duration'][0])) df['Session Duration'] = df['Session Duration'].str.replace(' seconds','') df['Session Duration'] = pd.to_numeric(df['Session Duration']) print(type(df['Session Duration'][0])) #display(df.head(5)) ### Dropping an unneccesary column df = df.drop(columns=['Unnamed: 0']) ###Turn dates into datetime objects print(type(df['Date'][0])) df['Date'] = pd.to_datetime(df['Date'], format='%Y%m%d') ##Checking to make sure it worked print(type(df['Date'][0])) df['Year']= df['Date'].dt.year df['Month']= df['Date'].dt.month df['Day']= df['Date'].dt.day df['Day_of_year'] = ((df['Month']*30.333-30).round(0) + df['Day']) #X_train['year'] = X_train['date_cleaned'].dt.year ## Get rid of a pesky NaN in the final observation df.drop(df.tail(1).index,inplace=True) print(df.shape) #df.head(30) ## Drop transaction ID df = df.drop(columns=['Transaction ID']) ## Dropping the datetime object becuase it breaks my Random Forest df = df.drop(columns=['Date']) ``` # Exploratory Visualizations ``` fig = px.bar(df, x='Day_of_year', y='Revenue', #color="Session Duration" ) fig.show() matplotlib_figure = plt.figure() x = df['Day_of_year'] y = df['Revenue'] plt.plot(x, y); from plotly.tools import mpl_to_plotly plotly_figure = mpl_to_plotly(matplotlib_figure) plotly_figure.show() import plotly.express as px fig = px.scatter(df, x='Page Depth', y='Revenue', #color="Session Duration" ) fig.show() ``` # Building a model ``` ## Checking to make sure none of my data types will break the models df.dtypes ``` # Break into Test/Train ``` from sklearn.model_selection import train_test_split train, test = train_test_split(df, train_size=0.80, test_size=0.20, #stratify=df['Product'], random_state=42) print(train.shape) test.shape # Arrange data into X features matrix and y target vector target = 'Revenue' #model_deploy = ['Day_of_year', 'Page Depth', 'Session Duration'] X_train = train.drop(columns=target) y_train = train[target] X_test = test.drop(columns=target) y_test = test[target] sns.distplot(y_train); ## My target variable, 'Revenue' has quite a bimodal distribution ``` # Baselines ``` from sklearn.metrics import mean_absolute_error price = df['Revenue'] best_guess = price.mean() best_guess = [best_guess] * len(price) baseline_mae = mean_absolute_error(price, best_guess) print(f'The Baseline mean absolute error is ${baseline_mae.round(2)}') ``` ## A Linear Regression ``` import category_encoders as ce from sklearn.linear_model import LinearRegression from sklearn.pipeline import make_pipeline from sklearn.preprocessing import StandardScaler lr = make_pipeline( ce.TargetEncoder(), LinearRegression() ) lr.fit(X_train, y_train) print('Linear Regression R^2', lr.score(X_test, y_test)) y_pred = lr.predict(X_test) print(f'The Baseline mean absolute error is ${baseline_mae.round(2)}') mae = mean_absolute_error(y_test, y_pred) print(f'The linear regression mean absolute error is ${mae.round(2)}') ''' RESULTS Pass 1 Linear Regression R^2 -0.00442905637790969 Gradient Boosting R^2 -0.023036898195452293 Pass2 Linear Regression R^2 -0.004429056377912577 Gradient Boosting R^2 0.07102524633751028 Pass3 Linear Regression R^2 
0.002162738520710472 Gradient Boosting R^2 0.08770212107051312 Pass4 (with only 3 features) Linear Regression R^2 0.02979757194722665 Gradient Boosting R^2 0.076352681199784 Pass5 (with all features) Linear Regression R^2 0.0005382190419163102 Gradient Boosting R^2 0.07698263724426613 ''' ``` # XGBoost Model ``` from sklearn.metrics import r2_score from xgboost import XGBRegressor from sklearn.metrics import mean_absolute_error from sklearn.model_selection import RandomizedSearchCV from scipy.stats import randint pipeline = make_pipeline( ce.OrdinalEncoder(), XGBRegressor(objective='reg:squarederror') ) param_distributions = { 'xgbregressor__n_estimators': randint(50, 500) } search = RandomizedSearchCV( pipeline, param_distributions=param_distributions, n_iter=10, cv=10, scoring='neg_mean_absolute_error', verbose=10, return_train_score=True, n_jobs=-1 ) search.fit(X_train, y_train) print('Best hyperparameters', search.best_params_) print('Cross-validation MAE', -search.best_score_) pipeline = search.best_estimator_ pipeline.fit(X_train, y_train) y_pred = pipeline.predict(X_test) print('Gradient Boosting R^2', r2_score(y_test, y_pred)) mae = mean_absolute_error(y_test, y_pred) print(f'The Baseline mean absolute error is ${baseline_mae.round(2)}') print(f'Model MAE ${mae.round(2)}') # Get feature importances rf = pipeline.named_steps['xgbregressor'] importances = pd.Series(rf.feature_importances_, X_train.columns) # Plot feature importances %matplotlib inline import matplotlib.pyplot as plt n = 20 plt.figure(figsize=(10,n/2)) plt.title(f'Top {n} features') importances.sort_values()[-n:].plot.barh(color='grey'); ``` ## Permutation Importances ``` transformers = make_pipeline( ce.OrdinalEncoder(), #SimpleImputer(strategy='median') ) X_train_transformed = transformers.fit_transform(X_train) X_test_transformed = transformers.transform(X_test) model = XGBRegressor(n_estimators=100, random_state=42, n_jobs=-1) model.fit(X_train_transformed, y_train) # Ignore warnings import eli5 from eli5.sklearn import PermutationImportance permuter = PermutationImportance( model, #scoring='accuracy', n_iter=5, random_state=42 ) permuter.fit(X_test_transformed, y_test) feature_names = X_test.columns.tolist() #pd.Series(permuter.feature_importances_, feature_names).sort_values() eli5.show_weights( permuter, top=None, # No limit: show permutation importances for all features feature_names=feature_names) # must be a list ``` # Partial dependence plot ``` from pdpbox.pdp import pdp_isolate, pdp_plot feature = 'Page Depth' isolated = pdp_isolate( model=pipeline, dataset=X_test, model_features=X_test.columns, feature=feature) pdp_plot(isolated, feature_name=feature); feature = 'Session Duration' isolated = pdp_isolate( model=pipeline, dataset=X_test, model_features=X_test.columns, feature=feature) pdp_plot(isolated, feature_name=feature); feature = 'Day_of_year' isolated = pdp_isolate( model=pipeline, dataset=X_test, model_features=X_test.columns, feature=feature) pdp_plot(isolated, feature_name=feature); feature = 'Days Since Last Session' isolated = pdp_isolate( model=pipeline, dataset=X_test, model_features=X_test.columns, feature=feature) pdp_plot(isolated, feature_name=feature); ``` # Rebuilding the model with only a few features ### This is so I can deploy a much simpler model with fewer sliders on Heroku ``` # Arrange data into X features matrix and y target vector target = 'Revenue' model_deploy = ['Day_of_year', 'Page Depth', 'Session Duration'] X_train = train[model_deploy] y_train = train[target] X_test = 
test[model_deploy] y_test = test[target] from sklearn.metrics import r2_score from xgboost import XGBRegressor from sklearn.metrics import mean_absolute_error from sklearn.model_selection import RandomizedSearchCV from scipy.stats import randint pipeline = make_pipeline( ce.OrdinalEncoder(), XGBRegressor(objective='reg:squarederror') ) param_distributions = { 'xgbregressor__n_estimators': randint(50, 500) } search = RandomizedSearchCV( pipeline, param_distributions=param_distributions, n_iter=10, cv=10, scoring='neg_mean_absolute_error', verbose=10, return_train_score=True, n_jobs=-1 ) search.fit(X_train, y_train) print('Best hyperparameters', search.best_params_) print('Cross-validation MAE', -search.best_score_) pipeline = search.best_estimator_ pipeline.fit(X_train, y_train) y_pred = pipeline.predict(X_test) print('Gradient Boosting R^2', r2_score(y_test, y_pred)) mae = mean_absolute_error(y_test, y_pred) print(f'The Baseline mean absolute error is ${baseline_mae.round(2)}') print(f'Model MAE ${mae.round(2)}') ``` # Exporting a .joblib file for my Heroku model ``` from joblib import dump dump(pipeline, 'pipeline.joblib', compress=True) import joblib import sklearn import category_encoders as ce import xgboost print(f'joblib=={joblib.__version__}') print(f'scikit-learn=={sklearn.__version__}') print(f'category_encoders=={ce.__version__}') print(f'xgboost=={xgboost.__version__}') ``` # Further data exploration- things I could do: ### Add in more features by exporting more csv's from Google Analytics ### Make more visualizations ### Try replacing 0's in 'session duration' with NaNs ### Make some kind of feature that describes if a product was purchased together with another product. (This psuedocode could tell if somebody placed a follow up order the same day) if (df['Date'][i]==df['Date'][i+1]) & (df['Transaction ID'][i]==df['Transaction ID']+1[i+1]): return True
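The last idea's pseudocode has an indexing slip (`df['Transaction ID']+1[i+1]`). A vectorised pandas version could look like the sketch below; it assumes a dataframe that still holds the `Date` and `Transaction ID` columns (i.e. the raw export, before those columns were dropped) and that `Transaction ID` is numeric, as the pseudocode implies:

```
# Sketch: flag rows whose transaction follows another transaction
# from the same day with the next consecutive Transaction ID.
raw = pd.read_csv('mergedGAdata.csv')
raw = raw.sort_values('Transaction ID').reset_index(drop=True)

same_day       = raw['Date'] == raw['Date'].shift(1)
consecutive_id = raw['Transaction ID'] == raw['Transaction ID'].shift(1) + 1
raw['follow_up_order'] = same_day & consecutive_id

print(raw['follow_up_order'].sum(), 'possible same-day follow-up orders')
```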
``` import keras keras.__version__ ``` # 영화 리뷰 분류: 이진 분류 예제 이 노트북은 [케라스 창시자에게 배우는 딥러닝](https://tensorflow.blog/케라스-창시자에게-배우는-딥러닝/) 책의 3장 4절의 코드 예제입니다. 책에는 더 많은 내용과 그림이 있습니다. 이 노트북에는 소스 코드에 관련된 설명만 포함합니다. 이 노트북의 설명은 케라스 버전 2.2.2에 맞추어져 있습니다. 케라스 최신 버전이 릴리스되면 노트북을 다시 테스트하기 때문에 설명과 코드의 결과가 조금 다를 수 있습니다. (현재 내가 사용하는 것은 케라스 버전 2.3.1 이다.) ---- 2종 분류 또는 이진 분류는 아마도 가장 널리 적용된 머신 러닝 문제일 것입니다. 이 예제에서 리뷰 텍스트를 기반으로 영화 리뷰를 긍정과 부정로 분류하는 법을 배우겠습니다. ## IMDB 데이터셋 인터넷 영화 데이터베이스로부터 가져온 양극단의 리뷰 50,000개로 이루어진 IMDB 데이터셋을 사용하겠습니다. 이 데이터셋은 훈련 데이터 25,000개와 테스트 데이터 25,000개로 나뉘어 있고 각각 50%는 부정, 50%는 긍정 리뷰로 구성되어 있습니다. 왜 훈련 데이터와 테스트 데이터를 나눌까요? 같은 데이터에서 머신 러닝 모델을 훈련하고 테스트해서는 절대 안 되기 때문입니다! 모델이 훈련 데이터에서 잘 작동한다는 것이 처음 만난 데이터에서도 잘 동작한다는 것을 보장하지 않습니다. 중요한 것은 새로운 데이터에 대한 모델의 성능입니다(사실 훈련 데이터의 레이블은 이미 알고 있기 때문에 이를 예측하는 모델은 필요하지 않습니다). 예를 들어 모델이 훈련 샘플과 타깃 사이의 매핑을 모두 외워버릴 수 있습니다. 이런 모델은 처음 만나는 데이터에서 타깃을 예측하는 작업에는 쓸모가 없습니다. 다음 장에서 이에 대해 더 자세히 살펴보겠습니다. MNIST 데이터셋처럼 IMDB 데이터셋도 케라스에 포함되어 있습니다. 이 데이터는 전처리되어 있어 각 리뷰(단어 시퀀스)가 숫자 시퀀스로 변환되어 있습니다. 여기서 각 숫자는 사전에 있는 고유한 단어를 나타냅니다. 다음 코드는 데이터셋을 로드합니다(처음 실행하면 17MB 정도의 데이터가 컴퓨터에 다운로드됩니다): ``` from keras.datasets import imdb (train_data, train_labels), (test_data, test_labels), = imdb.load_data(num_words=10000) ``` 매개변수 `num_words=10000`은 훈련 데이터에서 가장 자주 나타나는 단어 10,000개만 사용하겠다는 의미입니다. 드물게 나타나는 단어는 무시하겠습니다. 이렇게 하면 적절한 크기의 벡터 데이터를 얻을 수 있습니다. 변수 `train_data`와 `test_data`는 리뷰의 목록입니다. 각 리뷰는 단어 인덱스의 리스트입니다(단어 시퀀스가 인코딩된 것입니다). `train_labels`와 `test_labels`는 부정을 나타내는 0과 긍정을 나타내는 1의 리스트입니다: imdb 데이터셋은 train set 25000개, text set 25000개의 샘플을 제공한다. 라벨은 1과 0의 좋다, 싫다로 지정되어있다. 케라스에서 제공하는 imdb의 load_data() 함수를 이용하면 데이터 셋을 쉽게 얻을 수 있다. 데이터셋은 이미 정수로 인코딩되어 있고, 정수값은 단어의 빈도수를 나타낸다. 위의 코드는 가장 빈도수가 높게 나타난 단어 10000개로 데이터셋을 만든 것이다. ``` train_data ``` train_data는 array 안에 list가 여러개 들어있는 형태. array(list0, list1, list2, ... , list24999) 총 25000개 있다 ``` train_labels train_data[0] train_labels[0] ``` 가장 자주 등장하는 단어 10,000개로 제한했기 때문에 단어 인덱스는 10,000을 넘지 않습니다: ``` max([max(sequence) for sequence in train_data]) ``` 재미 삼아 이 리뷰 데이터 하나를 원래 영어 단어로 어떻게 바꾸는지 보겠습니다: ``` # word_index는 단어와 정수 인덱스를 매핑한 딕셔너리입니다 word_index = imdb.get_word_index() # 정수 인덱스와 단어를 매핑하도록 뒤집습니다 reverse_word_index = dict([(value, key) for (key, value) in word_index.items()]) # 리뷰를 디코딩합니다. # 0, 1, 2는 '패딩', '문서 시작', '사전에 없음'을 위한 인덱스이므로 3을 뺍니다 decoded_review = ' '.join([reverse_word_index.get(i - 3, '?') for i in train_data[0]]) # 딕셔너리 reverse_word_index에서 key 값이 i-3인 value를 가져와서 ' '를 구분자로 하여 가져오는데, 없으면 '?'를 default로 돌려준다. 이때 key값에 사용되는 i는 train_data[0] 리스트에 있는 정수들 word_index reverse_word_index # word_index에 있는 items를 가져와서, 거기에 있는 key, value를 value, key 로 딕셔너리화 한 것이다. decoded_review ``` ## 데이터 준비 신경망에 숫자 리스트를 주입할 수는 없습니다. 리스트를 텐서로 바꾸는 두 가지 방법이 있습니다: * 같은 길이가 되도록 리스트에 패딩을 추가하고 `(samples, sequence_length)` 크기의 정수 텐서로 변환합니다. 그다음 이 정수 텐서를 다룰 수 있는 층을 신경망의 첫 번째 층으로 사용합니다(`Embedding` 층을 말하며 나중에 자세히 다루겠습니다). * 리스트를 원-핫 인코딩하여 0과 1의 벡터로 변환합니다. 예를 들면 시퀀스 `[3, 5]`를 인덱스 3과 5의 위치는 1이고 그 외는 모두 0인 10,000차원의 벡터로 각각 변환합니다. 그다음 부동 소수 벡터 데이터를 다룰 수 있는 `Dense` 층을 신경망의 첫 번째 층으로 사용합니다. * 원-핫 인코딩은 단어 집합의 크기를 벡터의 차원으로 하고, 표현하고 싶은 단어의 인덱스에 1의 값을 부여하고, 다른 인덱스에는 0을 부여하는 단어의 벡터 표현 방식이다. 이렇게 표현된 벡터를 원-핫 벡터(One-hot vector)라고 한다. 여기서는 두 번째 방식을 사용하고 이해를 돕기 위해 직접 데이터를 원-핫 벡터로 만들겠습니다: ``` import numpy as np def vectorize_sequences(sequences, dimension=10000): # 크기가 (len(sequences), dimension))이고 모든 원소가 0인 행렬을 만듭니다 results = np.zeros((len(sequences), dimension)) for i, sequence in enumerate(sequences): # 리스트 sequences 의 순서 i, 리스트값 sequence를 가져온다. results[i, sequence] = 1. 
# results[i]에서 특정 인덱스의 위치를 1로 만듭니다 return results # 훈련 데이터를 벡터로 변환합니다 x_train = vectorize_sequences(train_data) # 테스트 데이터를 벡터로 변환합니다 x_test = vectorize_sequences(test_data) ``` 이제 샘플은 다음과 같이 나타납니다: ``` x_train[0] x_train[0].shape ``` 레이블은 쉽게 벡터로 바꿀 수 있습니다: ``` # 레이블을 벡터로 바꿉니다 y_train = np.asarray(train_labels).astype('float32') y_test = np.asarray(test_labels).astype('float32') ``` 이제 신경망에 주입할 데이터가 준비되었습니다. ## 신경망 모델 만들기 입력 데이터가 벡터이고 레이블은 스칼라(1 또는 0)입니다. 아마 앞으로 볼 수 있는 문제 중에서 가장 간단할 것입니다. 이런 문제에 잘 작동하는 네트워크 종류는 `relu` 활성화 함수를 사용한 완전 연결 층(즉, `Dense(16, activation='relu')`)을 그냥 쌓은 것입니다. `Dense` 층에 전달한 매개변수(16)는 은닉 유닛의 개수입니다. 하나의 은닉 유닛은 층이 나타내는 표현 공간에서 하나의 차원이 됩니다. 2장에서 `relu` 활성화 함수를 사용한 `Dense` 층을 다음과 같은 텐서 연산을 연결하여 구현하였습니다: `output = relu(dot(W, input) + b)` 16개의 은닉 유닛이 있다는 것은 가중치 행렬 `W`의 크기가 `(input_dimension, 16)`이라는 뜻입니다. 입력 데이터와 `W`를 점곱하면 입력 데이터가 16 차원으로 표현된 공간으로 투영됩니다(그리고 편향 벡터 `b`를 더하고 `relu` 연산을 적용합니다). 표현 공간의 차원을 '신경망이 내재된 표현을 학습할 때 가질 수 있는 자유도'로 이해할 수 있습니다. <b>은닉 유닛을 늘리면 (표현 공간을 더 고차원으로 만들면) 신경망이 더욱 복잡한 표현을 학습할 수 있지만 계산 비용이 커지고 원치 않은 패턴을 학습할 수도 있습니다(훈련 데이터에서는 성능이 향상되지만 테스트 데이터에서는 그렇지 않은 패턴입니다).</b> `Dense` 층을 쌓을 때 두 가진 중요한 구조상의 결정이 필요합니다: * 얼마나 많은 층을 사용할 것인가 * 각 층에 얼마나 많은 은닉 유닛을 둘 것인가 4장에서 이런 결정을 하는 데 도움이 되는 일반적인 원리를 배우겠습니다. 당분간은 저를 믿고 선택한 다음 구조를 따라 주세요. * 16개의 은닉 유닛을 가진 두 개의 은닉층 * 현재 리뷰의 감정을 스칼라 값의 예측으로 출력하는 세 번째 층 중간에 있는 은닉층은 활성화 함수로 `relu`를 사용하고 마지막 층은 확률(0과 1 사이의 점수로, 어떤 샘플이 타깃 '1'일 가능성이 높다는 것은 그 리뷰가 긍정일 가능성이 높다는 것을 의미합니다)을 출력하기 위해 시그모이드 활성화 함수를 사용합니다. `relu`는 음수를 0으로 만드는 함수입니다. 시그모이드는 임의의 값을 [0, 1] 사이로 압축하므로 출력 값을 확률처럼 해석할 수 있습니다. 다음이 이 신경망의 모습입니다: ![3-layer network](https://s3.amazonaws.com/book.keras.io/img/ch3/3_layer_network.png) 다음은 이 신경망의 케라스 구현입니다. 이전에 보았던 MNIST 예제와 비슷합니다: ``` from keras import models from keras import layers model = models.Sequential() model.add(layers.Dense(16, activation='relu', input_shape=(10000,))) model.add(layers.Dense(16, activation='relu')) model.add(layers.Dense(1, activation='sigmoid')) ``` 마지막으로 손실 함수와 옵티마이저를 선택해야 합니다. 이진 분류 문제이고 신경망의 출력이 확률이기 때문에(네트워크의 끝에 시그모이드 활성화 함수를 사용한 하나의 유닛으로 된 층을 놓았습니다), `binary_crossentropy` 손실이 적합합니다. 이 함수가 유일한 선택은 아니고 예를 들어 `mean_squared_error`를 사용할 수도 있습니다. 확률을 출력하는 모델을 사용할 때는 크로스엔트로피가 최선의 선택입니다. 크로스엔트로피는 정보 이론 분야에서 온 개념으로 확률 분포 간의 차이를 측정합니다. 여기에서는 원본 분포와 예측 분포 사이를 측정합니다. 다음은 `rmsprop` 옵티마이저와 `binary_crossentropy` 손실 함수로 모델을 설정하는 단계입니다. 훈련하는 동안 정확도를 사용해 모니터링하겠습니다. ``` model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy']) # 옵티마이저를 이와 같이 이름으로 사용하면, 해당 옵티마이저의 기본 설정이 사용됩니다. ``` 케라스에 `rmsprop`, `binary_crossentropy`, `accuracy`가 포함되어 있기 때문에 옵티마이저, 손실 함수, 측정 지표를 문자열로 지정하는 것이 가능합니다. 이따금 옵티마이저의 매개변수를 바꾸거나 자신만의 손실 함수, 측정 함수를 전달해야 할 경우가 있습니다. 전자의 경우에는 옵티마이저 파이썬 클래스를 사용해 객체를 직접 만들어 `optimizer` 매개변수에 전달하면 됩니다: ``` from keras import optimizers model.compile(optimizer=optimizers.RMSprop(lr=0.001), loss='binary_crossentropy', metrics=['accuracy']) ``` 후자의 경우는 `loss`와 `metrics` 매개변수에 함수 객체를 전달하면 됩니다 ``` from keras import losses from keras import metrics model.compile(optimizer=optimizers.RMSprop(lr=0.001), loss=losses.binary_crossentropy, metrics=[metrics.binary_accuracy]) model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy']) ``` # 잘 모르겠다 다시 해보자. ``` # 옵티마이저의 이름을 사용하는 경우 : 기본 설정이 사용된다. model.compile(optimizer='sgd', loss='mean_squared_error', metrics=[metrics.binary_accuracy]) ``` 아 여기서 metrics=[metrics.binary_accuracy] 를 안했어서 아래부분에서 acc KeyError 난 거임 ``` # 옵티마이저의 매개변수를 바꾸거나 # 자신만의 손실 함수, 측정함수를 전달하기 위해서 # 객체 (여기서는 sgd)를 직접 만들어서, model.compile()의 매개변수로 전달해서 쓸 수도 있다. 
sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True) model.compile(optimizer=sgd, loss='mean_squared_error', metrics=['acc']) ``` 여기에서도 accuracy 안넣어서 KeyError 났음 ``` metrics=['acc']로 하면 history에서 key 가 acc, val_acc 로 바뀐다 ``` * loss == 손실함수 : 학습을 통해 직접적으로 줄이고자 하는 값 (loss(손실) = error(에러) = cost(코스트)) * metrics == 측정함수 : 학습을 통해 목표를 얼마나 잘(못) 달성했는지를 나타내는 값, metric(척도) * 머신러닝의 최종 목표는 척도로 달성률을 표시하지만, * 직접 척도를 낮추도록 훈련하는 것은 어렵기때문에 손실을 줄이는 방향으로 훈련한다. ## 훈련 검증 훈련하는 동안 처음 본 데이터에 대한 모델의 정확도를 측정하기 위해서는 원본 훈련 데이터에서 10,000의 샘플을 떼어서 검증 세트를 만들어야 합니다: ``` x_val = x_train[:10000] # 리스트 x_train에서 index 값이 0 이상 10000미만인 값들을 떼어내서 리스트 x_val을 만든다. partial_x_train = x_train[10000:] # 리스트 x_train에서 index 값이 10000 이상인 값들을 떼어내서 리스트 partial_x_train을 만든다. y_val = y_train[:10000] partial_y_train = y_train[10000:] ``` 이제 모델을 512개 샘플씩 미니 배치를 만들어 20번의 에포크 동안 훈련시킵니다(`x_train`과 `y_train` 텐서에 있는 모든 샘플에 대해 20번 반복합니다). 동시에 따로 떼어 놓은 10,000개의 샘플에서 손실과 정확도를 측정할 것입니다. 이렇게 하려면 `validation_data` 매개변수에 검증 데이터를 전달해야 합니다: ``` history = model.fit(partial_x_train, partial_y_train, epochs=20, batch_size=512, validation_data=(x_val, y_val)) ``` CPU를 사용해도 에포크마다 2초가 걸리지 않습니다. 전체 훈련은 20초 이상 걸립니다. 에포크가 끝날 때마다 10,000개의 검증 샘플 데이터에서 손실과 정확도를 계산하기 때문에 약간씩 지연됩니다. `model.fit()` 메서드는 `History` 객체를 반환합니다. 이 객체는 훈련하는 동안 발생한 모든 정보를 담고 있는 딕셔너리인 `history` 속성을 가지고 있습니다. 한 번 확인해 보죠: ``` history_dict = history.history history_dict.keys() ``` 이 딕셔너리는 훈련과 검증하는 동안 모니터링할 측정 지표당 하나씩 모두 네 개의 항목을 담고 있습니다. 맷플롯립을 사용해 훈련과 검증 데이터에 대한 손실과 정확도를 그려 보겠습니다: ``` print(history.history['acc']) import matplotlib.pyplot as plt acc = history.history['acc'] val_acc = history.history['val_acc'] loss = history.history['loss'] val_loss = history.history['val_loss'] epochs = range(1, len(acc) + 1) # ‘bo’는 파란색 점을 의미합니다 (blue dot) plt.plot(epochs, loss, 'bo', label='Training loss') # ‘b’는 파란색 실선을 의미합니다 (solid blue line) plt.plot(epochs, val_loss, 'b', label='Validation loss') plt.title('Training and validation loss') plt.xlabel('Epochs') plt.ylabel('Loss') # legend 는 아래에 나오는 범례 plt.legend() plt.show() ``` val_loss, val_accuracy, loss, accuracy --> val_loss, val_acc, loss, acc 로 업데이트 됨 ``` plt.clf() # 그래프를 초기화합니다 acc = history_dict['acc'] val_acc = history_dict['val_acc'] plt.plot(epochs, acc, 'bo', label='Training acc') plt.plot(epochs, val_acc, 'b', label='Validation acc') plt.title('Training and validation accuracy') plt.xlabel('Epochs') plt.ylabel('Accuracy') plt.legend() plt.show() ``` 점선은 훈련 손실과 정확도이고 실선은 검증 손실과 정확도입니다. 신경망의 무작위한 초기화 때문에 사람마다 결과거 조금 다를 수 있습니다. 여기에서 볼 수 있듯이 훈련 손실이 에포크마다 감소하고 훈련 정확도는 에포크마다 증가합니다. 경사 하강법 최적화를 사용했을 때 반복마다 최소화되는 것이 손실이므로 기대했던 대로입니다. 검증 손실과 정확도는 이와 같지 않습니다. 4번째 에포크에서 그래프가 역전되는 것 같습니다. 이것이 훈련 세트에서 잘 작동하는 모델이 처음 보는 데이터에 잘 작동하지 않을 수 있다고 앞서 언급한 경고의 한 사례입니다. 정확한 용어로 말하면 과대적합되었다고 합니다. 2번째 에포크 이후부터 훈련 데이터에 과도하게 최적화되어 훈련 데이터에 특화된 표현을 학습하므로 훈련 세트 이외의 데이터에는 일반화되지 못합니다. 이런 경우에 과대적합을 방지하기 위해서 3번째 에포크 이후에 훈련을 중지할 수 있습니다. 일반적으로 4장에서 보게 될 과대적합을 완화하는 다양한 종류의 기술을 사용할 수 있습니다. 처음부터 다시 새로운 신경망을 4번의 에포크 동안만 훈련하고 테스트 데이터에서 평가해 보겠습니다: ``` model = models.Sequential() model.add(layers.Dense(16, activation='relu', input_shape=(10000,))) model.add(layers.Dense(16, activation='relu')) model.add(layers.Dense(1, activation='sigmoid')) model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy']) model.fit(x_train, y_train, epochs=4, batch_size=512) results = model.evaluate(x_test, y_test) results ``` 아주 단순한 방식으로도 87%의 정확도를 달성했습니다. 최고 수준의 기법을 사용하면 95%에 가까운 성능을 얻을 수 있습니다. ## 훈련된 모델로 새로운 데이터에 대해 예측하기 모델을 훈련시킨 후에 이를 실전 환경에서 사용하고 싶을 것입니다. 
`predict` 메서드를 사용해서 어떤 리뷰가 긍정일 확률을 예측할 수 있습니다: ``` model.predict(x_test) ``` 여기에서처럼 이 모델은 어떤 샘플에 대해 확신을 가지고 있지만(0.99 또는 그 이상, 0.01 또는 그 이하) 어떤 샘플에 대해서는 확신이 부족합니다(0.6, 0.4). ## 추가 실험 * 여기에서는 두 개의 은닉층을 사용했습니다. 한 개 또는 세 개의 은닉층을 사용하고 검증과 테스트 정확도에 어떤 영향을 미치는지 확인해 보세요. * 층의 은닉 유닛을 추가하거나 줄여 보세요: 32개 유닛, 64개 유닛 등 * `binary_crossentropy` 대신에 `mse` 손실 함수를 사용해 보세요. * `relu` 대신에 `tanh` 활성화 함수(초창기 신경망에서 인기 있었던 함수입니다)를 사용해 보세요. 다음 실험을 진행하면 여기에서 선택한 구조가 향상의 여지는 있지만 어느 정도 납득할 만한 수준이라는 것을 알게 것입니다! ``` model = models.Sequential() model.add(layers.Dense(32, activation='tanh', input_shape=(10000,))) model.add(layers.Dense(32, activation='tanh')) model.add(layers.Dense(1, activation='sigmoid')) model.compile(optimizer='rmsprop', loss='mse', metrics=['acc']) model.fit(x_train, y_train, epochs=4, batch_size=512) results = model.evaluate(x_test, y_test) results ``` ## 정리 다음은 이 예제에서 배운 것들입니다: * 원본 데이터를 신경망에 텐서로 주입하기 위해서는 꽤 많은 전처리가 필요합니다. 단어 시퀀스는 이진 벡터로 인코딩될 수 있고 다른 인코딩 방식도 있습니다. * `relu` 활성화 함수와 함께 `Dense` 층을 쌓은 네트워크는 (감성 분류를 포함하여) 여러 종류의 문제에 적용할 수 있어서 앞으로 자주 사용하게 될 것입니다. * (출력 클래스가 두 개인) 이진 분류 문제에서 네트워크는 하나의 유닛과 `sigmoid` 활성화 함수를 가진 `Dense` 층으로 끝나야 합니다. 이 신경망의 출력은 확률을 나타내는 0과 1 사이의 스칼라 값입니다. * 이진 분류 문제에서 이런 스칼라 시그모이드 출력에 대해 사용할 손실 함수는 `binary_crossentropy`입니다. * `rmsprop` 옵티마이저는 문제에 상관없이 일반적으로 충분히 좋은 선택입니다. 걱정할 거리가 하나 줄은 셈입니다. * 훈련 데이터에 대해 성능이 향상됨에 따라 신경망은 과대적합되기 시작하고 이전에 본적 없는 데이터에서는 결과가 점점 나빠지게 됩니다. 항상 훈련 세트 이외의 데이터에서 성능을 모니터링해야 합니다.
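The validation curves above suggested stopping around the third or fourth epoch. Instead of hard-coding `epochs=4`, Keras can stop training automatically once the validation loss stops improving; this is a small sketch using the `EarlyStopping` callback (the `patience` value is an arbitrary choice):

```
from keras.callbacks import EarlyStopping

early_stop = EarlyStopping(monitor='val_loss', patience=1, restore_best_weights=True)

model = models.Sequential()
model.add(layers.Dense(16, activation='relu', input_shape=(10000,)))
model.add(layers.Dense(16, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['acc'])

history = model.fit(partial_x_train, partial_y_train,
                    epochs=20, batch_size=512,
                    validation_data=(x_val, y_val),
                    callbacks=[early_stop])
```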
# ThaiNER (Bi-LSTM CRF) using pytorch By Mr.Wannaphong Phatthiyaphaibun Bachelor of Science Program in Computer and Information Science, Nong Khai Campus, Khon Kaen University https://iam.wannaphong.com/ E-mail : [email protected] Thank you Faculty of Applied Science and Engineering, Nong Khai Campus, Khon Kaen University for server. ``` import torch.nn.functional as F from torch.autograd import Variable import torch import torch.autograd as autograd import torch.nn as nn import torch.optim as optim print(torch.__version__) device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") #torch.backends.cudnn.benchmark=torch.cuda.is_available() #FloatTensor = torch.cuda.FloatTensor if USE_CUDA else torch.FloatTensor LongTensor = torch.long #ByteTensor = torch.cuda.ByteTensor if USE_CUDA else torch.ByteTensor def argmax(vec): # return the argmax as a python int _, idx = torch.max(vec, 1) return idx.item() def prepare_sequence(seq, to_ix): idxs = [to_ix[w] if w in to_ix else to_ix["UNK"] for w in seq] return torch.tensor(idxs, dtype=LongTensor, device=device) # Compute log sum exp in a numerically stable way for the forward algorithm def log_sum_exp(vec): max_score = vec[0, argmax(vec)] max_score_broadcast = max_score.view(1, -1).expand(1, vec.size()[1]) return max_score + \ torch.log(torch.sum(torch.exp(vec - max_score_broadcast))) class BiLSTM_CRF(nn.Module): def __init__(self, vocab_size, tag_to_ix, embedding_dim, hidden_dim): super(BiLSTM_CRF, self).__init__() self.embedding_dim = embedding_dim self.hidden_dim = hidden_dim self.vocab_size = vocab_size self.tag_to_ix = tag_to_ix self.tagset_size = len(tag_to_ix) self.word_embeds = nn.Embedding(vocab_size, embedding_dim) self.lstm = nn.LSTM(embedding_dim, hidden_dim // 2, num_layers=1, bidirectional=True) # Maps the output of the LSTM into tag space. self.hidden2tag = nn.Linear(hidden_dim, self.tagset_size) # Matrix of transition parameters. Entry i,j is the score of # transitioning *to* i *from* j. self.transitions = nn.Parameter( torch.randn(self.tagset_size, self.tagset_size, device=device)) # These two statements enforce the constraint that we never transfer # to the start tag and we never transfer from the stop tag self.transitions.data[tag_to_ix[START_TAG], :] = -10000 self.transitions.data[:, tag_to_ix[STOP_TAG]] = -10000 self.hidden = self.init_hidden() def init_hidden(self): return (torch.randn(2, 1, self.hidden_dim // 2,device=device), torch.randn(2, 1, self.hidden_dim // 2,device=device)) def _forward_alg(self, feats): # Do the forward algorithm to compute the partition function init_alphas = torch.full((1, self.tagset_size), -10000., device=device) # START_TAG has all of the score. init_alphas[0][self.tag_to_ix[START_TAG]] = 0. # Wrap in a variable so that we will get automatic backprop forward_var = init_alphas # Iterate through the sentence for feat in feats: alphas_t = [] # The forward tensors at this timestep for next_tag in range(self.tagset_size): # broadcast the emission score: it is the same regardless of # the previous tag emit_score = feat[next_tag].view( 1, -1).expand(1, self.tagset_size) # the ith entry of trans_score is the score of transitioning to # next_tag from i trans_score = self.transitions[next_tag].view(1, -1) # The ith entry of next_tag_var is the value for the # edge (i -> next_tag) before we do log-sum-exp next_tag_var = forward_var + trans_score + emit_score # The forward variable for this tag is log-sum-exp of all the # scores. 
alphas_t.append(log_sum_exp(next_tag_var).view(1)) forward_var = torch.cat(alphas_t).view(1, -1) terminal_var = forward_var + self.transitions[self.tag_to_ix[STOP_TAG]] alpha = log_sum_exp(terminal_var) return alpha def _get_lstm_features(self, sentence): self.hidden = self.init_hidden() embeds = self.word_embeds(sentence).view(len(sentence), 1, -1) lstm_out, self.hidden = self.lstm(embeds, self.hidden) lstm_out = lstm_out.view(len(sentence), self.hidden_dim) lstm_feats = self.hidden2tag(lstm_out) return lstm_feats def _score_sentence(self, feats, tags): # Gives the score of a provided tag sequence score = torch.zeros(1,device=device) tags = torch.cat([torch.tensor([self.tag_to_ix[START_TAG]], dtype=LongTensor, device=device), tags]) for i, feat in enumerate(feats): score = score + \ self.transitions[tags[i + 1], tags[i]] + feat[tags[i + 1]] score = score + self.transitions[self.tag_to_ix[STOP_TAG], tags[-1]] return score def _viterbi_decode(self, feats): backpointers = [] # Initialize the viterbi variables in log space init_vvars = torch.full((1, self.tagset_size), -10000., device=device) init_vvars[0][self.tag_to_ix[START_TAG]] = 0 # forward_var at step i holds the viterbi variables for step i-1 forward_var = init_vvars for feat in feats: bptrs_t = [] # holds the backpointers for this step viterbivars_t = [] # holds the viterbi variables for this step for next_tag in range(self.tagset_size): # next_tag_var[i] holds the viterbi variable for tag i at the # previous step, plus the score of transitioning # from tag i to next_tag. # We don't include the emission scores here because the max # does not depend on them (we add them in below) next_tag_var = forward_var + self.transitions[next_tag] best_tag_id = argmax(next_tag_var) bptrs_t.append(best_tag_id) viterbivars_t.append(next_tag_var[0][best_tag_id].view(1)) # Now add in the emission scores, and assign forward_var to the set # of viterbi variables we just computed forward_var = (torch.cat(viterbivars_t) + feat).view(1, -1) backpointers.append(bptrs_t) # Transition to STOP_TAG terminal_var = forward_var + self.transitions[self.tag_to_ix[STOP_TAG]] best_tag_id = argmax(terminal_var) path_score = terminal_var[0][best_tag_id] # Follow the back pointers to decode the best path. best_path = [best_tag_id] for bptrs_t in reversed(backpointers): best_tag_id = bptrs_t[best_tag_id] best_path.append(best_tag_id) # Pop off the start tag (we dont want to return that to the caller) start = best_path.pop() assert start == self.tag_to_ix[START_TAG] # Sanity check best_path.reverse() return path_score, best_path def neg_log_likelihood(self, sentence, tags): feats = self._get_lstm_features(sentence) forward_score = self._forward_alg(feats) gold_score = self._score_sentence(feats, tags) return forward_score - gold_score def forward(self, sentence): # dont confuse this with _forward_alg above. # Get the emission scores from the BiLSTM lstm_feats = self._get_lstm_features(sentence) # Find the best path, given the features. 
score, tag_seq = self._viterbi_decode(lstm_feats) return score, tag_seq START_TAG = "<START>" STOP_TAG = "<STOP>" EMBEDDING_DIM = 64 HIDDEN_DIM = 128 import dill with open('word_to_ix.pkl', 'rb') as file: word_to_ix = dill.load(file) with open('pos_to_ix.pkl', 'rb') as file: pos_to_ix = dill.load(file) ix_to_word = dict((v,k) for k,v in word_to_ix.items()) #convert index to word ix_to_pos = dict((v,k) for k,v in pos_to_ix.items()) #convert index to word model = BiLSTM_CRF(len(word_to_ix), pos_to_ix, EMBEDDING_DIM, HIDDEN_DIM) model.load_state_dict(torch.load("thainer.model"), strict=False) model.to(device) def predict(input_sent): y_pred=[] temp=[] with torch.no_grad(): precheck_sent = prepare_sequence(input_sent, word_to_ix) output=model(precheck_sent)[1] y_pred=[ix_to_pos[i] for i in output] return y_pred predict(["ผม","ชื่อ","นาย","บุญ","มาก"," ","ทอง","ดี"]) ```
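To make the output easier to read, the predicted tags can be printed next to the input tokens (a small sketch reusing the `predict` helper defined above):

```
tokens = ["ผม", "ชื่อ", "นาย", "บุญ", "มาก", " ", "ทอง", "ดี"]
tags = predict(tokens)

# Print each token alongside its predicted NER tag
for token, tag in zip(tokens, tags):
    print("%s\t%s" % (token, tag))
```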
# Flight Price Prediction --- ``` import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns sns.set() pip list ``` ## Importing dataset 1. Check whether any null values are there or not. if it is present then following can be done, 1. Imputing data using Imputation method in sklearn 2. Filling NaN values with mean, median and mode using fillna() method ``` train_data = pd.read_excel(r"/home/adarshsrivastava/Github/Flight_Fare_Prediction-/dataset/Data_Train.xlsx") #pd.set_option('display.max_columns', None) train_data.head() train_data.info() train_data["Duration"].value_counts() train_data.dropna(inplace = True) train_data.isnull().sum() #To check if there is any NaN value in any of the column ``` ## EDA From description we can see that Date_of_Journey is a object data type,\ Therefore, we have to convert this datatype into timestamp so as to use this column properly for prediction For this we require pandas **to_datetime** to convert object data type to datetime dtype. <span style="color: red;">**.dt.day method will extract only day of that date**</span>\ <span style="color: red;">**.dt.month method will extract only month of that date**</span> ``` train_data["Journey_day"] = pd.to_datetime(train_data.Date_of_Journey, format="%d/%m/%Y").dt.day train_data["Journey_month"] = pd.to_datetime(train_data["Date_of_Journey"], format = "%d/%m/%Y").dt.month train_data.head() # Since we have converted Date_of_Journey column into integers, Now we can drop as it is of no use. train_data.drop(["Date_of_Journey"], axis = 1, inplace = True) # Departure time is when a plane leaves the gate. # Similar to Date_of_Journey we can extract values from Dep_Time # Extracting Hours train_data["Dep_hour"] = pd.to_datetime(train_data["Dep_Time"]).dt.hour # Extracting Minutes train_data["Dep_min"] = pd.to_datetime(train_data["Dep_Time"]).dt.minute # Now we can drop Dep_Time as it is of no use train_data.drop(["Dep_Time"], axis = 1, inplace = True) train_data.head() # Arrival time is when the plane pulls up to the gate. # Similar to Date_of_Journey we can extract values from Arrival_Time # Extracting Hours train_data["Arrival_hour"] = pd.to_datetime(train_data.Arrival_Time).dt.hour # Extracting Minutes train_data["Arrival_min"] = pd.to_datetime(train_data.Arrival_Time).dt.minute # Now we can drop Arrival_Time as it is of no use train_data.drop(["Arrival_Time"], axis = 1, inplace = True) train_data.head() # Time taken by plane to reach destination is called Duration # It is the differnce betwwen Departure Time and Arrival time # Assigning and converting Duration column into list duration = list(train_data["Duration"]) for i in range(len(duration)): if len(duration[i].split()) != 2: # Check if duration contains only hour or mins if "h" in duration[i]: duration[i] = duration[i].strip() + " 0m" # Adds 0 minute else: duration[i] = "0h " + duration[i] # Adds 0 hour duration_hours = [] duration_mins = [] for i in range(len(duration)): duration_hours.append(int(duration[i].split(sep = "h")[0])) # Extract hours from duration duration_mins.append(int(duration[i].split(sep = "m")[0].split()[-1])) # Extracts only minutes from duration # Adding duration_hours and duration_mins list to train_data dataframe train_data["Duration_hours"] = duration_hours train_data["Duration_mins"] = duration_mins train_data.drop(["Duration"], axis = 1, inplace = True) train_data.head() ``` --- ## Handling Categorical Data One can find many ways to handle categorical data. Some of them categorical data are, 1. 
<span style="color: blue;">**Nominal data**</span> --> data are not in any order --> <span style="color: green;">**OneHotEncoder**</span> is used in this case 2. <span style="color: blue;">**Ordinal data**</span> --> data are in order --> <span style="color: green;">**LabelEncoder**</span> is used in this case ``` train_data["Airline"].value_counts() # From graph we can see that Jet Airways Business have the highest Price. # Apart from the first Airline almost all are having similar median # Airline vs Price sns.catplot(y = "Price", x = "Airline", data = train_data.sort_values("Price", ascending = False), kind="boxen", height = 12, aspect = 2) plt.show() # As Airline is Nominal Categorical data we will perform OneHotEncoding Airline = train_data[["Airline"]] Airline = pd.get_dummies(Airline, drop_first= True) Airline.head() train_data["Source"].value_counts() # Source vs Price sns.catplot(y = "Price", x = "Source", data = train_data.sort_values("Price", ascending = False), kind="boxen", height = 6, aspect = 2) plt.show() # As Source is Nominal Categorical data we will perform OneHotEncoding Source = train_data[["Source"]] Source = pd.get_dummies(Source, drop_first= True) Source.head() train_data["Destination"].value_counts() # As Destination is Nominal Categorical data we will perform OneHotEncoding Destination = train_data[["Destination"]] Destination = pd.get_dummies(Destination, drop_first = True) Destination.head() train_data["Route"] # Additional_Info contains almost 80% no_info # Route and Total_Stops are related to each other train_data.drop(["Route", "Additional_Info"], axis = 1, inplace = True) train_data["Total_Stops"].value_counts() # As this is case of Ordinal Categorical type we perform LabelEncoder # Here Values are assigned with corresponding keys train_data.replace({"non-stop": 0, "1 stop": 1, "2 stops": 2, "3 stops": 3, "4 stops": 4}, inplace = True) train_data.head() # Concatenate dataframe --> train_data + Airline + Source + Destination data_train = pd.concat([train_data, Airline, Source, Destination], axis = 1) data_train.head() data_train.drop(["Airline", "Source", "Destination"], axis = 1, inplace = True) data_train.head() data_train.shape ``` --- ## Test set ``` test_data = pd.read_excel(r"/home/adarshsrivastava/Github/Flight_Fare_Prediction-/dataset/Test_set.xlsx") test_data.head() # Preprocessing print("Test data Info") print("-"*75) print(test_data.info()) print() print() print("Null values :") print("-"*75) test_data.dropna(inplace = True) print(test_data.isnull().sum()) # EDA # Date_of_Journey test_data["Journey_day"] = pd.to_datetime(test_data.Date_of_Journey, format="%d/%m/%Y").dt.day test_data["Journey_month"] = pd.to_datetime(test_data["Date_of_Journey"], format = "%d/%m/%Y").dt.month test_data.drop(["Date_of_Journey"], axis = 1, inplace = True) # Dep_Time test_data["Dep_hour"] = pd.to_datetime(test_data["Dep_Time"]).dt.hour test_data["Dep_min"] = pd.to_datetime(test_data["Dep_Time"]).dt.minute test_data.drop(["Dep_Time"], axis = 1, inplace = True) # Arrival_Time test_data["Arrival_hour"] = pd.to_datetime(test_data.Arrival_Time).dt.hour test_data["Arrival_min"] = pd.to_datetime(test_data.Arrival_Time).dt.minute test_data.drop(["Arrival_Time"], axis = 1, inplace = True) # Duration duration = list(test_data["Duration"]) for i in range(len(duration)): if len(duration[i].split()) != 2: # Check if duration contains only hour or mins if "h" in duration[i]: duration[i] = duration[i].strip() + " 0m" # Adds 0 minute else: duration[i] = "0h " + duration[i] # Adds 0 
hour duration_hours = [] duration_mins = [] for i in range(len(duration)): duration_hours.append(int(duration[i].split(sep = "h")[0])) # Extract hours from duration duration_mins.append(int(duration[i].split(sep = "m")[0].split()[-1])) # Extracts only minutes from duration # Adding Duration column to test set test_data["Duration_hours"] = duration_hours test_data["Duration_mins"] = duration_mins test_data.drop(["Duration"], axis = 1, inplace = True) # Categorical data print("Airline") print("-"*75) print(test_data["Airline"].value_counts()) Airline = pd.get_dummies(test_data["Airline"], drop_first= True) print() print("Source") print("-"*75) print(test_data["Source"].value_counts()) Source = pd.get_dummies(test_data["Source"], drop_first= True) print() print("Destination") print("-"*75) print(test_data["Destination"].value_counts()) Destination = pd.get_dummies(test_data["Destination"], drop_first = True) # Additional_Info contains almost 80% no_info # Route and Total_Stops are related to each other test_data.drop(["Route", "Additional_Info"], axis = 1, inplace = True) # Replacing Total_Stops test_data.replace({"non-stop": 0, "1 stop": 1, "2 stops": 2, "3 stops": 3, "4 stops": 4}, inplace = True) # Concatenate dataframe --> test_data + Airline + Source + Destination data_test = pd.concat([test_data, Airline, Source, Destination], axis = 1) data_test.drop(["Airline", "Source", "Destination"], axis = 1, inplace = True) print() print() print("Shape of test data : ", data_test.shape) data_test.head() ``` --- # Feature Selection Finding out the best feature which will contribute and have good relation with target variable. Following are some of the feature selection methods, 1. <span style="color: red;">**heatmap**</span> 2. <span style="color: red;">**feature_importance_**</span> 3. <span style="color: red;">**SelectKBest**</span> ``` data_train.shape data_train.columns X = data_train.loc[:, ['Total_Stops', 'Journey_day', 'Journey_month', 'Dep_hour', 'Dep_min', 'Arrival_hour', 'Arrival_min', 'Duration_hours', 'Duration_mins', 'Airline_Air India', 'Airline_GoAir', 'Airline_IndiGo', 'Airline_Jet Airways', 'Airline_Jet Airways Business', 'Airline_Multiple carriers', 'Airline_Multiple carriers Premium economy', 'Airline_SpiceJet', 'Airline_Trujet', 'Airline_Vistara', 'Airline_Vistara Premium economy', 'Source_Chennai', 'Source_Delhi', 'Source_Kolkata', 'Source_Mumbai', 'Destination_Cochin', 'Destination_Delhi', 'Destination_Hyderabad', 'Destination_Kolkata', 'Destination_New Delhi']] X.head() y = data_train.iloc[:,1] y.head() # Finds correlation between Independent and dependent attributes plt.figure(figsize = (18,18)) sns.heatmap(train_data.corr(), annot = True) plt.show() # Important feature using ExtraTreesRegressor from sklearn.ensemble import ExtraTreesRegressor selection = ExtraTreesRegressor() selection.fit(X, y) print(selection.feature_importances_) #plot graph of feature importances for better visualization plt.figure(figsize = (12,8)) feat_importances = pd.Series(selection.feature_importances_, index=X.columns) feat_importances.nlargest(20).plot(kind='barh') plt.show() ``` ## Fitting model using Random Forest 1. Split dataset into train and test set in order to prediction w.r.t X_test 2. If needed do scaling of data * Scaling is not done in Random forest 3. Import model 4. Fit the data 5. Predict w.r.t X_test 6. In regression check **RSME** Score 7. 
Plot graph ``` from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 42) from sklearn.ensemble import RandomForestRegressor rf = RandomForestRegressor() rf.fit(X_train, y_train) y_pred=rf.predict(X_test) rf.score(X_train,y_train) rf.score(X_test,y_test) sns.distplot(y_test-y_pred) plt.show() plt.scatter(y_test,y_pred) plt.xlabel("y_test") plt.ylabel("y_pred") plt.show() from sklearn import metrics print("MAE:",metrics.mean_absolute_error(y_test,y_pred)) print("MSE:",metrics.mean_squared_error(y_test,y_pred)) rmse=np.sqrt(metrics.mean_squared_error(y_test,y_pred)) print("RMSE:",rmse) rmse/(max(y)-min(y)) metrics.r2_score(y_test,y_pred) import pickle # open a file, where you ant to store the data file = open('flight_fare_pred.pkl', 'wb') # dump information to that file pickle.dump(rf, file) model = open('flight_fare_pred.pkl','rb') forest = pickle.load(model) y_prediction = forest.predict(X_test) metrics.r2_score(y_test, y_prediction) ```
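The feature-selection list above also mentions `SelectKBest`, which is never demonstrated. Below is a minimal sketch of how it could be applied here, assuming `X` and `y` are defined as above; the choice of `f_regression` and `k=10` is illustrative, not part of the original notebook.

```
from sklearn.feature_selection import SelectKBest, f_regression

# Score every feature against the Price target (y) and keep the 10 best
selector = SelectKBest(score_func=f_regression, k=10)
X_kbest = selector.fit_transform(X, y)

# Which columns survived the selection
print(X.columns[selector.get_support()])
```

Tree-based importances (as with `ExtraTreesRegressor` above) and univariate scores often disagree, so comparing both is a reasonable sanity check before dropping columns.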
<a href="https://colab.research.google.com/github/DJCordhose/ai/blob/master/notebooks/rl/berater-v5.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # Berater Environment v5 ## Changes from v4 1. encode observation to local one 1. non existing connection has highest penalty ## next steps 1. use complex customer graph 1. per episode set certain rewards to 0 to simulate different customers per consultant 1. make sure things generalize well ## Installation (required for colab) ``` !pip install git+https://github.com/openai/baselines >/dev/null !pip install gym >/dev/null ``` ## Environment ``` import numpy import gym from gym.utils import seeding from gym import spaces def state_name_to_int(state): state_name_map = { 'S': 0, 'A': 1, 'B': 2, 'C': 3, 'D': 4, 'E': 5, 'F': 6, 'G': 7, 'H': 8, 'K': 9, 'L': 10, 'M': 11, 'N': 12, 'O': 13 } return state_name_map[state] def int_to_state_name(state_as_int): state_map = { 0: 'S', 1: 'A', 2: 'B', 3: 'C', 4: 'D', 5: 'E', 6: 'F', 7: 'G', 8: 'H', 9: 'K', 10: 'L', 11: 'M', 12: 'N', 13: 'O' } return state_map[state_as_int] class BeraterEnv(gym.Env): """ The Berater Problem Actions: There are 4 discrete deterministic actions, each choosing one direction """ metadata = {'render.modes': ['ansi']} showStep = False showDone = True envEpisodeModulo = 100 def __init__(self): self.map = { 'S': [('A', 100), ('B', 400), ('C', 200 )], 'A': [('B', 250), ('C', 400), ('S', 100 )], 'B': [('A', 250), ('C', 250), ('S', 400 )], 'C': [('A', 400), ('B', 250), ('S', 200 )] } # self.map = { # 'S': [('A', 300), ('B', 100), ('C', 200 )], # 'A': [('S', 300), ('B', 100), ('E', 100 ), ('D', 100 )], # 'B': [('S', 100), ('A', 100), ('C', 50 ), ('K', 200 )], # 'C': [('S', 200), ('B', 50), ('M', 100 ), ('L', 200 )], # 'D': [('A', 100), ('F', 50)], # 'E': [('A', 100), ('F', 100), ('H', 100)], # 'F': [('D', 50), ('E', 100), ('G', 200)], # 'G': [('F', 200), ('O', 300)], # 'H': [('E', 100), ('K', 300)], # 'K': [('B', 200), ('H', 300)], # 'L': [('C', 200), ('M', 50)], # 'M': [('C', 100), ('L', 50), ('N', 100)], # 'N': [('M', 100), ('O', 100)], # 'O': [('N', 100), ('G', 300)] # } self.action_space = spaces.Discrete(4) # position, and up to 4 paths from that position, non existing path is -1000 and no position change self.observation_space = spaces.Box(low=numpy.array([0,-1000,-1000,-1000,-1000]), high=numpy.array([13,1000,1000,1000,1000]), dtype=numpy.float32) self.reward_range = (-1, 1) self.totalReward = 0 self.stepCount = 0 self.isDone = False self.envReward = 0 self.envEpisodeCount = 0 self.envStepCount = 0 self.reset() self.optimum = self.calculate_customers_reward() def seed(self, seed=None): self.np_random, seed = seeding.np_random(seed) return [seed] def iterate_path(self, state, action): paths = self.map[state] if action < len(paths): return paths[action] else: # sorry, no such action, stay where you are and pay a high penalty return (state, 1000) def step(self, action): destination, cost = self.iterate_path(self.state, action) lastState = self.state customerReward = self.customer_reward[destination] reward = (customerReward - cost) / self.optimum self.state = destination self.customer_visited(destination) done = destination == 'S' and self.all_customers_visited() stateAsInt = state_name_to_int(self.state) self.totalReward += reward self.stepCount += 1 self.envReward += reward self.envStepCount += 1 if self.showStep: print( "Episode: " + ("%4.0f " % self.envEpisodeCount) + " Step: " + ("%4.0f " % self.stepCount) + 
lastState + ' --' + str(action) + '-> ' + self.state + ' R=' + ("% 2.2f" % reward) + ' totalR=' + ("% 3.2f" % self.totalReward) + ' cost=' + ("%4.0f" % cost) + ' customerR=' + ("%4.0f" % customerReward) + ' optimum=' + ("%4.0f" % self.optimum) ) if done and not self.isDone: self.envEpisodeCount += 1 if BeraterEnv.showDone: episodes = BeraterEnv.envEpisodeModulo if (self.envEpisodeCount % BeraterEnv.envEpisodeModulo != 0): episodes = self.envEpisodeCount % BeraterEnv.envEpisodeModulo print( "Done: " + ("episodes=%6.0f " % self.envEpisodeCount) + ("avgSteps=%6.2f " % (self.envStepCount/episodes)) + ("avgTotalReward=% 3.2f" % (self.envReward/episodes) ) ) if (self.envEpisodeCount%BeraterEnv.envEpisodeModulo) == 0: self.envReward = 0 self.envStepCount = 0 self.isDone = done observation = self.getObservation(stateAsInt) info = {"from": self.state, "to": destination} return observation, reward, done, info def getObservation(self, position): result = numpy.array([ position, self.getPathObservation(position, 0), self.getPathObservation(position, 1), self.getPathObservation(position, 2), self.getPathObservation(position, 3) ], dtype=numpy.float32) return result def getPathObservation(self, position, path): source = int_to_state_name(position) paths = self.map[self.state] if path < len(paths): target, cost = paths[path] reward = self.customer_reward[target] result = reward - cost else: result = -1000 return result def customer_visited(self, customer): self.customer_reward[customer] = 0 def all_customers_visited(self): return self.calculate_customers_reward() == 0 def calculate_customers_reward(self): sum = 0 for value in self.customer_reward.values(): sum += value return sum def reset(self): self.totalReward = 0 self.stepCount = 0 self.isDone = False reward_per_customer = 1000 self.customer_reward = { 'S': 0, 'A': reward_per_customer, 'B': reward_per_customer, 'C': reward_per_customer, # 'D': reward_per_customer, # 'E': reward_per_customer, # 'F': reward_per_customer, # 'G': reward_per_customer, # 'H': reward_per_customer, # 'K': reward_per_customer, # 'L': reward_per_customer, # 'M': reward_per_customer, # 'N': reward_per_customer, # 'O': reward_per_customer } self.state = 'S' return self.getObservation(state_name_to_int(self.state)) ``` # Try out Environment ``` BeraterEnv.showStep = True BeraterEnv.showDone = True env = BeraterEnv() print(env) observation = env.reset() print(observation) for t in range(1000): action = env.action_space.sample() observation, reward, done, info = env.step(action) if done: print("Episode finished after {} timesteps".format(t+1)) break env.close() print(observation) ``` # Train model * 0.73 would be perfect total reward ``` import tensorflow as tf tf.logging.set_verbosity(tf.logging.ERROR) print(tf.__version__) !rm -r logs !mkdir logs !mkdir logs/berater # https://github.com/openai/baselines/blob/master/baselines/deepq/experiments/train_pong.py # log_dir = logger.get_dir() log_dir = '/content/logs/berater/' import gym from baselines import bench from baselines import logger from baselines.common.vec_env.dummy_vec_env import DummyVecEnv from baselines.common.vec_env.vec_monitor import VecMonitor from baselines.ppo2 import ppo2 BeraterEnv.showStep = False BeraterEnv.showDone = False env = BeraterEnv() wrapped_env = DummyVecEnv([lambda: BeraterEnv()]) monitored_env = VecMonitor(wrapped_env, log_dir) # https://github.com/openai/baselines/blob/master/baselines/ppo2/ppo2.py model = ppo2.learn(network='mlp', env=monitored_env, total_timesteps=50000) # monitored_env = 
bench.Monitor(env, log_dir) # https://en.wikipedia.org/wiki/Q-learning#Influence_of_variables # %time model = deepq.learn(\ # monitored_env,\ # seed=42,\ # network='mlp',\ # lr=1e-3,\ # gamma=0.99,\ # total_timesteps=30000,\ # buffer_size=50000,\ # exploration_fraction=0.5,\ # exploration_final_eps=0.02,\ # print_freq=1000) model.save('berater-ppo-v4.pkl') monitored_env.close() ``` ### Visualizing Results https://github.com/openai/baselines/blob/master/docs/viz/viz.ipynb ``` !ls -l $log_dir from baselines.common import plot_util as pu results = pu.load_results(log_dir) import matplotlib.pyplot as plt import numpy as np r = results[0] # plt.ylim(-1, 1) # plt.plot(np.cumsum(r.monitor.l), r.monitor.r) plt.plot(np.cumsum(r.monitor.l), pu.smooth(r.monitor.r, radius=100)) ``` # Enjoy model ``` import numpy as np observation = env.reset() state = np.zeros((1, 2*128)) dones = np.zeros((1)) BeraterEnv.showStep = True BeraterEnv.showDone = False for t in range(1000): actions, _, state, _ = model.step(observation, S=state, M=dones) observation, reward, done, info = env.step(actions[0]) if done: print("Episode finished after {} timesteps".format(t+1)) break env.close() ```
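To compare the trained policy against the ~0.73 optimum quoted above, a small evaluation loop can average the total reward over several episodes. This is only a sketch that follows the same calling pattern as the enjoy loop above; the episode count and step cap are arbitrary.

```
import numpy as np

BeraterEnv.showStep = False
BeraterEnv.showDone = False

eval_env = BeraterEnv()
episode_rewards = []
for _ in range(20):
    observation = eval_env.reset()
    state = np.zeros((1, 2*128))
    dones = np.zeros((1))
    total_reward = 0.0
    for t in range(100):
        actions, _, state, _ = model.step(observation, S=state, M=dones)
        observation, reward, done, info = eval_env.step(actions[0])
        total_reward += reward
        if done:
            break
    episode_rewards.append(total_reward)

print("mean total reward over 20 episodes:", np.mean(episode_rewards))
```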
<h1><center>Solving Linear Equations with Quantum Circuits</center></h1> <h2><center>Ax = b</center></h2> <h4><center> Attempt to replicate the following paper </center></h4> ![image.png](attachment:image.png) <h3><center>Algorithm for a simpler 2 x 2 example</center></h3> ![image.png](attachment:image.png) ![image.png](attachment:image.png) $\newcommand{\ket}[1]{\left|{#1}\right\rangle}$ $\newcommand{\bra}[1]{\left\langle{#1}\right|}$ The Final state looks like: $$ \ket{\psi} = \sum_{j=1}^N \beta_j \left( \sqrt{1-\frac{C^2}{\lambda_j^2}} \ket{0} + \frac{C}{\lambda_j} \ket{1} \right) \ket{00} \ket{u_j} $$ ``` #Solving a linear system of equation for 2 dimensional equantion of the form Ax = b ### code specific initialization (importing libraries) import matplotlib.pyplot as plt %matplotlib inline import numpy as np from math import * import scipy # importing Qiskit from qiskit import IBMQ, BasicAer #from qiskit.providers.ibmq import least_busy from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister, execute # import basic plot tools from qiskit.tools.visualization import plot_histogram from qiskit.quantum_info.synthesis import euler_angles_1q from cmath import exp ### problem specific parameters # matrix representation of linear equation A = 0.5*np.array([[3,1],[1,3]]) t0 = 2*pi #time paramter appearing in the unitary r = 4 q = QuantumRegister(4, 'q') c = ClassicalRegister(1, 'c') qpe = QuantumCircuit(q,c) qpe.h(q[3]) qpe.barrier() qpe.h(q[1]) qpe.h(q[2]) # 1st unitary corresponding to A UA = scipy.linalg.expm(complex(0,1)*A*t0/4) [theta, phi, lmda] = euler_angles_1q(UA) qpe.cu3(theta, phi, lmda,q[2],q[3]) # 2nd unitary corresponding to A UA = scipy.linalg.expm(complex(0,1)*A*2*t0/4) [theta, phi, lmda] = euler_angles_1q(UA) qpe.cu3(theta, phi, lmda,q[1],q[3]) qpe.barrier() # quantum fourier transform qpe.swap(q[1],q[2]) qpe.h(q[2]) qpe.cu1(-pi/2,q[1],q[2]) qpe.h(q[1]) qpe.swap(q[1],q[2]) qpe.barrier() #controlled rotations gate qpe.cry(2*pi/(2**r),q[1],q[0]) qpe.cry(pi/(2**r),q[2],q[0]) qpe.barrier() qpe.draw(output="mpl") ############################################################### ### uncomputation # reversing fourier transform qpe.swap(q[1],q[2]) qpe.h(q[1]) qpe.cu1(pi/2,q[1],q[2]) qpe.h(q[2]) qpe.swap(q[1],q[2]) # reversing 2nd unitary corresponding to A UA = scipy.linalg.expm(complex(0,-1)*A*2*t0/4) [theta, phi, lmda] = euler_angles_1q(UA) qpe.cu3(theta, phi, lmda,q[1],q[3]) # reversing 1st unitary corresponding to A UA = scipy.linalg.expm(complex(0,-1)*A*t0/4) [theta, phi, lmda] = euler_angles_1q(UA) qpe.cu3(theta, phi, lmda,q[2],q[3]) qpe.h(q[1]) qpe.h(q[2]) qpe.barrier() qpe.draw(output="mpl") qpe.measure(q[0], c[0]) qpe.draw(output="mpl") circuit = qpe simulator = BasicAer.get_backend('qasm_simulator') result = execute(circuit, backend = simulator, shots = 2048).result() counts = result.get_counts() from qiskit.tools.visualization import plot_histogram plot_histogram(counts) ```
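As a sanity check on the circuit above, the same 2 x 2 system can be solved classically with NumPy. This sketch assumes the Hadamard on q[3] prepares $|b\rangle \propto (1, 1)$; remember that HHL only encodes the solution up to normalization.

```
import numpy as np

A = 0.5*np.array([[3,1],[1,3]])
b = np.array([1, 1])/np.sqrt(2)

x = np.linalg.solve(A, b)
print("classical solution x =", x)
print("normalized x =", x/np.linalg.norm(x))
```

Here $b$ happens to be an eigenvector of $A$ (with eigenvalue 2), so the normalized solution is proportional to $b$ itself, which makes this an easy case to verify.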
``` from mocpy import MOC import numpy as np from astropy import units as u from astropy.coordinates import SkyCoord %matplotlib inline # Plot the polygon vertices on a matplotlib axis def plot_graph(vertices): import matplotlib.pyplot as plt from matplotlib import path, patches fig = plt.figure() ax = fig.add_subplot(111) p = path.Path(vertices) patch = patches.PathPatch(p, facecolor='orange', lw=2) ax.add_patch(patch) # Methods for defining random polygons def generate_rand_polygon(num_points): lon_min, lon_max = (-5, 5) lat_min, lat_max = (-5, 5) lon = (np.random.random(num_points) * (lon_max - lon_min) + lon_min) * u.deg lat = (np.random.random(num_points) * (lat_max - lat_min) + lat_min) * u.deg vertices = np.vstack((lon.to_value(), lat.to_value())).T return vertices def generate_concave_polygon(num_points, lon_offset, lat_offset): delta_ang = (2 * np.pi) / num_points radius_max = 10 angles = np.linspace(0, 2 * np.pi, num_points) radius = np.random.random(angles.shape[0]) * radius_max lon = (np.cos(angles) * radius + lon_offset) * u.deg lat = (np.sin(angles) * radius + lat_offset) * u.deg vertices = np.vstack((lon.to_value(), lat.to_value())).T return vertices def generate_convexe_polygon(num_points, lon_offset, lat_offset): delta_ang = (2 * np.pi) / num_points radius_max = 10 angles = np.linspace(0, 2 * np.pi, num_points) radius = np.random.random() * radius_max * np.ones(angles.shape[0]) lon = (np.cos(angles) * radius + lon_offset) * u.deg lat = (np.sin(angles) * radius + lat_offset) * u.deg vertices = np.vstack((lon.to_value(), lat.to_value())).T return vertices #vertices = generate_convexe_polygon(20, 10, 5) vertices = generate_concave_polygon(20, 10, 5) def plot(moc, skycoord): from matplotlib import path, patches import matplotlib.pyplot as plt fig = plt.figure(figsize=(10, 10)) from mocpy import World2ScreenMPL from astropy.coordinates import Angle with World2ScreenMPL(fig, fov=20 * u.deg, center=SkyCoord(10, 5, unit='deg', frame='icrs'), coordsys="icrs", rotation=Angle(0, u.degree), projection="TAN") as wcs: ax = fig.add_subplot(1, 1, 1, projection=wcs) moc.fill(ax=ax, wcs=wcs, edgecolor='r', facecolor='r', linewidth=1.0, fill=True, alpha=0.5) from astropy.wcs.utils import skycoord_to_pixel x, y = skycoord_to_pixel(skycoord, wcs) p = path.Path(np.vstack((x, y)).T) patch = patches.PathPatch(p, facecolor='green', alpha=0.25, lw=2) ax.add_patch(patch) plt.xlabel('ra') plt.ylabel('dec') plt.grid(color='black', ls='dotted') plt.title('from polygon') plt.show() plt.close() # Convert the vertices to lon and lat astropy quantities lon, lat = vertices[:, 0] * u.deg, vertices[:, 1] * u.deg skycoord = SkyCoord(lon, lat, unit="deg", frame="icrs") # Define a vertex that is said to belongs to the MOC. # It is important as there is no way on the sphere to know the area from # which we want to build the MOC (a set of vertices delimits two finite areas). %time moc = MOC.from_polygon(lon=lon, lat=lat, max_depth=12) plot(moc, skycoord) ```
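The convex-polygon generator defined above is left commented out; running the same pipeline on a convex polygon is a one-line change. A sketch reusing only functions already defined in this notebook:

```
# Same construction as above, but with a convex polygon
vertices = generate_convexe_polygon(20, 10, 5)
lon, lat = vertices[:, 0] * u.deg, vertices[:, 1] * u.deg
skycoord = SkyCoord(lon, lat, unit="deg", frame="icrs")

%time moc = MOC.from_polygon(lon=lon, lat=lat, max_depth=12)
plot(moc, skycoord)
```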
``` import json, sys, random, os, warnings, argparse, time, concurrent.futures from air_bus.airbus_utils import rle_decode, rle_encode, save_img, same_id from air_bus.decorate import profile import numpy as np import pandas as pd import tensorflow as tf from PIL import Image masks_data = "/media/thistle/Passport/Kaggle_Data/airbus/fromkaggle/train_ship_segmentations_v2.csv" masks = pd.read_csv(masks_data) # removing masks with no ships bc of large class imbalance with_ships = masks.dropna(axis=0) # now find the unique ImageId names unique_files = list(with_ships.ImageId.value_counts().index) files1000 = unique_files.copy()[:1000] target = "/media/thistle/Passport/Kaggle_Data/airbus/updated_data/SHIT" num = len(files1000) # 1,000 # define test_img_path = os.path.join(target, "Images","test", "images") train_img_path = os.path.join(target, "Images","train", "images") test_mask_path = os.path.join(target, "Masks", "test", "masks") train_mask_path = os.path.join(target, "Masks", "train", "masks") # os.makedirs(test_img_path) # os.makedirs(train_img_path) os.makedirs(test_mask_path) os.makedirs(train_mask_path) idxs = list(range(num_files_created)) np.random.seed(101) np.random.shuffle(idxs) split = .8 num_train = int(num*split) train_ids = idxs[:num_train] test_ids = idxs[num_train:] #create the 800 paths to new images train_paths = [files1000[each] for each in train_ids] test_paths = [files1000[each] for each in test_ids] #train_paths[1:11] source_images = "/media/thistle/Passport/Kaggle_Data/airbus/fromkaggle/train_v2" train_img_path, test_img_path /media/thistle/Passport/Kaggle_Data/airbus/updated_data/SHIT/Images/train/images def copy_train_image(filename, source=source_images, target=train_img_path): r = tf.io.read_file(os.path.join(source,filename)) tf.io.write_file(os.path.join(target,filename), r) def copy_test_image(filename, source =source_images, target=test_img_path): r = tf.io.read_file(os.path.join(source,filename)) tf.io.write_file(os.path.join(target,filename), r) len(os.listdir("/media/thistle/Passport/Kaggle_Data/airbus/updated_data/SHIT/Images/train/images")) len(os.listdir("/media/thistle/Passport/Kaggle_Data/airbus/updated_data/SHIT/Images/test/images")) [copy_train_image(each) for each in train_paths] %%time [copy_test_image(each) for each in test_paths] # now for dividing masks up source_masks = "/media/thistle/Passport/Kaggle_Data/airbus/updated_data/combined_masks" test_masks_dir = "/media/thistle/Passport/Kaggle_Data/airbus/updated_data/SHIT/Masks/test/masks" train_masks_dir = "/media/thistle/Passport/Kaggle_Data/airbus/updated_data/SHIT/Masks/train/masks" def copy_train_masks(filename, source=source_masks, target=train_masks_dir): r = tf.io.read_file(os.path.join(source,filename)) tf.io.write_file(os.path.join(target,filename), r) %%time with concurrent.futures.ThreadPoolExecutor() as executor_train: train_result = executor_train.map(copy_train_masks, train_paths) length = len(os.listdir(train_masks_dir)) print(f"train files copied: {length}") def copy_test_masks(filename, source=source_masks, target=test_masks_dir): r = tf.io.read_file(os.path.join(source,filename)) tf.io.write_file(os.path.join(target,filename), r) %%time with concurrent.futures.ThreadPoolExecutor() as executor_test_mask: mask_test_result = executor_test_mask.map(copy_test_masks, test_paths) length = len(os.listdir(test_masks_dir)) print(f"train files copied: {length}") def copy_train_masks(filename, source=source_masks, target=train_masks_dir): r = tf.io.read_file(os.path.join(source,filename)) 
tf.io.write_file(os.path.join(target,filename), r) with concurrent.futures.ThreadPoolExecutor() as executor_train: train_result = executor_train.map(copy_train_masks, train_paths) length = len(os.listdir(train_masks_dir)) print(f"{length} train-mask files copied") randidxs = np.random.randint(1,200,size=5) randidxs testMasks = "/media/thistle/Passport/Kaggle_Data/airbus/updated_data/SHIT/Masks/test/masks" trainMasks = "/media/thistle/Passport/Kaggle_Data/airbus/updated_data/SHIT/Masks/train/masks" testImg = "/media/thistle/Passport/Kaggle_Data/airbus/updated_data/SHIT/Images/test/images" trainImg = "/media/thistle/Passport/Kaggle_Data/airbus/updated_data/SHIT/Images/test/images" testm = sorted(os.listdir(testMasks))[:5] trainm = sorted(os.listdir(testImg))[:5] for i in range(5): if not testm[i] == trainm[i]: print("shit") break else: print("swell") #####################DELETE##########################33 testMasks = "/media/thistle/Passport/Kaggle_Data/airbus/updated_data/MASKS/test/masks" trainMasks = "/media/thistle/Passport/Kaggle_Data/airbus/updated_data/MASKS/train/masks" testImg = "/media/thistle/Passport/Kaggle_Data/airbus/updated_data/IMAGES/test/images" trainImg = "/media/thistle/Passport/Kaggle_Data/airbus/updated_data/iMAGES/test/images" testm = sorted(os.listdir(testMasks))[:50] trainm = sorted(os.listdir(testImg))[:50] for i in range(5): if not testm[i] == trainm[i]: print("shit") break else: print("swell") a = list (range(11)) np.random.seed(3) np.random.shuffle(a) a a = list (range(11)) np.random.seed(3) np.random.shuffle(a) a aa = list(range(11)) np.random.shuffle(aa) aa ```
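The ad-hoc filename comparisons above only look at the first few entries. A slightly more systematic sketch, assuming the `train_img_path` / `train_mask_path` and `test_img_path` / `test_mask_path` variables defined earlier in this notebook, checks that every copied image has a matching mask:

```
import os

for img_dir, mask_dir, split in [(train_img_path, train_mask_path, "train"),
                                 (test_img_path, test_mask_path, "test")]:
    imgs = set(os.listdir(img_dir))
    masks = set(os.listdir(mask_dir))
    missing = imgs - masks
    print(f"{split}: {len(imgs)} images, {len(masks)} masks, {len(missing)} images without a mask")
```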
# MCMC sampling using the emcee package

## Introduction

The goal of Markov Chain Monte Carlo (MCMC) algorithms is to approximate the posterior distribution of your model parameters by random sampling in a probabilistic space. If that sentence is not very helpful on its own, don't worry: we will jump straight into an example, but you can read more detailed mathematical treatments of the method [here](https://www.pas.rochester.edu/~sybenzvi/courses/phy403/2015s/p403_17_mcmc.pdf) and [here](https://github.com/jakevdp/BayesianAstronomy/blob/master/03-Bayesian-Modeling-With-MCMC.ipynb).

### How does it work?

The idea is to use a number of walkers that sample the posterior distribution (i.e. sample the likelihood profile). The goal is to produce a "chain", i.e. a list of $\theta$ values, where each $\theta$ is a vector of parameters for your model.<br>
If you start far away from the true value, the chain will take some time to converge until it reaches a stationary state. Once it has reached this stage, each successive element of the chain is a sample of the target posterior distribution.<br>
This means that, once we have obtained the chain of samples, we have everything we need. We can approximate the distribution of each parameter by the histogram of the samples projected into the parameter space, which provides the errors on, and correlations between, the parameters.

Now let's put a picture on the ideas described above. With this notebook, we have simulated and carried out an MCMC analysis for a source with the following parameters:<br>
$Index=2.0$, $Norm=5\times10^{-12}$ cm$^{-2}$ s$^{-1}$ TeV$^{-1}$, $Lambda =(1/Ecut) = 0.02$ TeV$^{-1}$ (50 TeV) for 20 hours.

The results of such an MCMC analysis look like this:

<img src="images/gammapy_mcmc.png" width="800">

The two top panels show the pseudo-random walk of one walker from an offset starting value as it evolves towards a better solution. The bottom right panel shows the traces of all 16 walkers over 500 runs (the chain described previously). For the first 100 runs, the parameters evolve towards a solution (this can be viewed as a fitting step). The walkers then explore the local minimum for the remaining 400 runs, which are used to estimate the parameter correlations and errors. The choice of the Nburn value (the point at which the walkers have reached a stationary stage) can be made by eye, but you can also look at the autocorrelation time.

### Why should I use it?

When it comes to evaluating errors and investigating parameter correlations, one typically estimates the likelihood on a grid (2D likelihood profiles). Each point of the grid requires a new model fit. If we use 10 steps for each parameter, we need 100 fits per 2D profile. For a model with $N$ parameters, that gridded analysis has to be carried out $N*(N-1)$ times, so for 5 free parameters you need 20 gridded searches, resulting in 2000 individual fits. Clearly this strategy doesn't scale well to high-dimensional models. Just for fun: if each fit takes 10 s, that is about 5 hours of computing time just to estimate the correlation plots.

There are many MCMC packages in the Python ecosystem, but here we will focus on [emcee](https://emcee.readthedocs.io), a lightweight Python package. A description is provided in [Foreman-Mackey, Hogg, Lang & Goodman (2012)](https://arxiv.org/abs/1202.3665).
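Before moving to the gammapy wrapper used in the rest of this notebook, here is a minimal sketch of the bare emcee API on a toy 2-parameter Gaussian posterior (illustrative only; the walker and step counts are arbitrary). It produces exactly the kind of `(nwalkers, nrun, nparams)` chain discussed above:

```
import numpy as np
import emcee

def log_prob(theta):
    # toy log-posterior: a standard 2D Gaussian
    return -0.5 * np.sum(theta**2)

ndim, nwalkers, nrun = 2, 16, 500
p0 = np.random.randn(nwalkers, ndim)          # offset starting positions

sampler = emcee.EnsembleSampler(nwalkers, ndim, log_prob)
sampler.run_mcmc(p0, nrun)

chain = sampler.chain                          # shape (nwalkers, nrun, ndim)
samples = chain[:, 100:, :].reshape(-1, ndim)  # drop a burn-in of 100 steps
print(samples.mean(axis=0), samples.std(axis=0))
```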
``` %matplotlib inline import matplotlib.pyplot as plt import warnings warnings.filterwarnings("ignore") import numpy as np import astropy.units as u from astropy.coordinates import SkyCoord from gammapy.irf import load_cta_irfs from gammapy.maps import WcsGeom, MapAxis from gammapy.modeling.models import ( ExpCutoffPowerLawSpectralModel, GaussianSpatialModel, SkyModel, Models, FoVBackgroundModel, ) from gammapy.datasets import MapDataset from gammapy.makers import MapDatasetMaker from gammapy.data import Observation from gammapy.modeling.sampling import ( run_mcmc, par_to_model, plot_corner, plot_trace, ) from gammapy.modeling import Fit import logging logging.basicConfig(level=logging.INFO) ``` ## Simulate an observation Here we will start by simulating an observation using the `simulate_dataset` method. ``` irfs = load_cta_irfs( "$GAMMAPY_DATA/cta-1dc/caldb/data/cta/1dc/bcf/South_z20_50h/irf_file.fits" ) observation = Observation.create( pointing=SkyCoord(0 * u.deg, 0 * u.deg, frame="galactic"), livetime=20 * u.h, irfs=irfs, ) # Define map geometry axis = MapAxis.from_edges( np.logspace(-1, 2, 15), unit="TeV", name="energy", interp="log" ) geom = WcsGeom.create( skydir=(0, 0), binsz=0.05, width=(2, 2), frame="galactic", axes=[axis] ) empty_dataset = MapDataset.create(geom=geom, name="dataset-mcmc") maker = MapDatasetMaker(selection=["background", "edisp", "psf", "exposure"]) dataset = maker.run(empty_dataset, observation) # Define sky model to simulate the data spatial_model = GaussianSpatialModel( lon_0="0 deg", lat_0="0 deg", sigma="0.2 deg", frame="galactic" ) spectral_model = ExpCutoffPowerLawSpectralModel( index=2, amplitude="3e-12 cm-2 s-1 TeV-1", reference="1 TeV", lambda_="0.05 TeV-1", ) sky_model_simu = SkyModel( spatial_model=spatial_model, spectral_model=spectral_model, name="source" ) bkg_model = FoVBackgroundModel(dataset_name="dataset-mcmc") models = Models([sky_model_simu, bkg_model]) print(models) dataset.models = models dataset.fake() dataset.counts.sum_over_axes().plot(add_cbar=True); # If you want to fit the data for comparison with MCMC later # fit = Fit(dataset) # result = fit.run(optimize_opts={"print_level": 1}) ``` ## Estimate parameter correlations with MCMC Now let's analyse the simulated data. Here we just fit it again with the same model we had before as a starting point. The data that would be needed are the following: - counts cube, psf cube, exposure cube and background model Luckily all those maps are already in the Dataset object. We will need to define a Likelihood function and define priors on parameters.<br> Here we will assume a uniform prior reading the min, max parameters from the sky model. ### Define priors This steps is a bit manual for the moment until we find a better API to define priors.<br> Note the you **need** to define priors for each parameter otherwise your walkers can explore uncharted territories (e.g. negative norms). 
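Concretely, a uniform prior means the log-probability is flat inside the allowed range and $-\infty$ outside it, so any proposal beyond the bounds is rejected. A small illustrative sketch of the idea (not the actual gammapy implementation):

```
import numpy as np

def uniform_log_prior(value, vmin, vmax):
    return 0.0 if vmin <= value <= vmax else -np.inf

print(uniform_log_prior(2.0, 1.0, 5.0))   # 0.0   -> inside the bounds, allowed
print(uniform_log_prior(-0.5, 0.0, 5.0))  # -inf  -> e.g. a negative norm, rejected
```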
``` print(dataset) # Define the free parameters and min, max values parameters = dataset.models.parameters parameters["sigma"].frozen = True parameters["lon_0"].frozen = True parameters["lat_0"].frozen = True parameters["amplitude"].frozen = False parameters["index"].frozen = False parameters["lambda_"].frozen = False parameters["norm"].frozen = True parameters["tilt"].frozen = True parameters["norm"].min = 0.5 parameters["norm"].max = 2 parameters["index"].min = 1 parameters["index"].max = 5 parameters["lambda_"].min = 1e-3 parameters["lambda_"].max = 1 parameters["amplitude"].min = 0.01 * parameters["amplitude"].value parameters["amplitude"].max = 100 * parameters["amplitude"].value parameters["sigma"].min = 0.05 parameters["sigma"].max = 1 # Setting amplitude init values a bit offset to see evolution # Here starting close to the real value parameters["index"].value = 2.0 parameters["amplitude"].value = 3.2e-12 parameters["lambda_"].value = 0.05 print(dataset.models) print("stat =", dataset.stat_sum()) %%time # Now let's define a function to init parameters and run the MCMC with emcee # Depending on your number of walkers, Nrun and dimensionality, this can take a while (> minutes) sampler = run_mcmc(dataset, nwalkers=6, nrun=150) # to speedup the notebook # sampler=run_mcmc(dataset,nwalkers=12,nrun=1000) # more accurate contours ``` ## Plot the results The MCMC will return a sampler object containing the trace of all walkers.<br> The most important part is the chain attribute which is an array of shape:<br> _(nwalkers, nrun, nfreeparam)_ The chain is then used to plot the trace of the walkers and estimate the burnin period (the time for the walkers to reach a stationary stage). ``` plot_trace(sampler, dataset) plot_corner(sampler, dataset, nburn=50) ``` ## Plot the model dispersion Using the samples from the chain after the burn period, we can plot the different models compared to the truth model. To do this we need to the spectral models for each parameter state in the sample. ``` emin, emax = [0.1, 100] * u.TeV nburn = 50 fig, ax = plt.subplots(1, 1, figsize=(12, 6)) for nwalk in range(0, 6): for n in range(nburn, nburn + 100): pars = sampler.chain[nwalk, n, :] # set model parameters par_to_model(dataset, pars) spectral_model = dataset.models["source"].spectral_model spectral_model.plot( energy_range=(emin, emax), ax=ax, energy_power=2, alpha=0.02, color="grey", ) sky_model_simu.spectral_model.plot( energy_range=(emin, emax), energy_power=2, ax=ax, color="red" ); ``` ## Fun Zone Now that you have the sampler chain, you have in your hands the entire history of each walkers in the N-Dimensional parameter space. <br> You can for example trace the steps of each walker in any parameter space. ``` # Here we plot the trace of one walker in a given parameter space parx, pary = 0, 1 plt.plot(sampler.chain[0, :, parx], sampler.chain[0, :, pary], "ko", ms=1) plt.plot( sampler.chain[0, :, parx], sampler.chain[0, :, pary], ls=":", color="grey", alpha=0.5, ) plt.xlabel("Index") plt.ylabel("Amplitude"); ``` ## PeVatrons in CTA ? Now it's your turn to play with this MCMC notebook. For example to test the CTA performance to measure a cutoff at very high energies (100 TeV ?). After defining your Skymodel it can be as simple as this : ``` # dataset = simulate_dataset(model, geom, pointing, irfs) # sampler = run_mcmc(dataset) # plot_trace(sampler, dataset) # plot_corner(sampler, dataset, nburn=200) ```
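For instance, a PeVatron-like source with a 100 TeV cutoff can be defined by reusing the model classes imported above with `lambda_ = 1/(100 TeV)`. This is only a sketch: the amplitude below is an arbitrary placeholder, and the rest of the pipeline stays as outlined in the commented lines above.

```
# Sketch of a SkyModel with a 100 TeV cutoff (lambda_ = 0.01 TeV-1)
pev_spectral_model = ExpCutoffPowerLawSpectralModel(
    index=2,
    amplitude="1e-12 cm-2 s-1 TeV-1",
    reference="1 TeV",
    lambda_="0.01 TeV-1",
)
pev_spatial_model = GaussianSpatialModel(
    lon_0="0 deg", lat_0="0 deg", sigma="0.2 deg", frame="galactic"
)
pev_model = SkyModel(
    spatial_model=pev_spatial_model,
    spectral_model=pev_spectral_model,
    name="pevatron-candidate",
)
print(pev_model)
```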
# Pix2PixHD *Please note that this is an optional notebook, meant to introduce more advanced concepts if you're up for a challenge, so don't worry if you don't completely follow!* It is recommended that you should already be familiar with: - Residual blocks, from [Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385) (He et al. 2015) - Perceptual loss, from [Perceptual Losses for Real-Time Style Transfer and Super-Resolution](https://arxiv.org/abs/1603.08155) (Johnson et al. 2016) - VGG architecture, from [Very Deep Convolutional Networks for Large-Scale Image Recognition](https://arxiv.org/abs/1409.1556) (Simonyan et al. 2015) - Instance normalization (which you should know from StyleGAN), from [Instance Normalization: The Missing Ingredient for Fast Stylization](https://arxiv.org/abs/1607.08022) (Ulyanov et al. 2017) - Reflection padding, which Pytorch has implemented in [torch.nn.ReflectionPad2d](https://pytorch.org/docs/stable/generated/torch.nn.ReflectionPad2d.html) **Goals** In this notebook, you will learn about Pix2PixHD, which synthesizes high-resolution images from semantic label maps. Proposed in [High-Resolution Image Synthesis and Semantic Manipulation with Conditional GANs](https://arxiv.org/abs/1711.11585) (Wang et al. 2018), Pix2PixHD improves upon Pix2Pix via multiscale architecture, improved adversarial loss, and instance maps. ## Residual Blocks The residual block, which is relevant in many state-of-the-art computer vision models, is used in all parts of Pix2PixHD. If you're not familiar with residual blocks, please take a look [here](https://paperswithcode.com/method/residual-block). Now, you'll start by first implementing a basic residual block. ``` import torch import torch.nn as nn import torch.nn.functional as F class ResidualBlock(nn.Module): ''' ResidualBlock Class Values channels: the number of channels throughout the residual block, a scalar ''' def __init__(self, channels): super().__init__() self.layers = nn.Sequential( nn.ReflectionPad2d(1), nn.Conv2d(channels, channels, kernel_size=3, padding=0), nn.InstanceNorm2d(channels, affine=False), nn.ReLU(inplace=True), nn.ReflectionPad2d(1), nn.Conv2d(channels, channels, kernel_size=3, padding=0), nn.InstanceNorm2d(channels, affine=False), ) def forward(self, x): return x + self.layers(x) ``` ## Multiscale Generator: Generating at multiple scales (resolutions) The Pix2PixHD generator is comprised of two separate subcomponent generators: $G_1$ is called the global generator and operates at low resolution (1024 x 512) to transfer styles. $G_2$ is the local enhancer and operates at high resolution (2048 x 1024) to deal with higher resolution. The architecture for each network is adapted from [Perceptual Losses for Real-Time Style Transfer and Super-Resolution](https://arxiv.org/abs/1603.08155) (Johnson et al. 2016) and is comprised of \begin{align*} G = \left[G^{(F)}, G^{(R)}, G^{(B)}\right], \end{align*} where $G^{(F)}$ is a frontend of convolutional blocks (downsampling), $G^{(R)}$ is a set of residual blocks, and $G^{(B)}$ is a backend of transposed convolutional blocks (upsampling). This is just a type of encoder-decoder generator that you learned about with Pix2Pix! $G_1$ is trained first on low-resolution images. Then, $G_2$ is added to the pre-trained $G_1$ and both are trained jointly on high-resolution images. 
Specifically, $G_2^{(F)}$ encodes a high-resolution image, $G_1$ encodes a downsampled, low-resolution image, and the outputs from both are summed and passed sequentially to $G_2^{(R)}$ and $G_2^{(B)}$. This pre-training and fine-tuning scheme works well because the model is able to learn accurate coarser representations before using them to touch up its refined representations, since learning high-fidelity representations is generally a pretty hard task. > ![Pix2PixHD Generator](https://github.com/https-deeplearning-ai/GANs-Public/blob/master/Pix2PixHD-Generator.png?raw=true) *Pix2PixHD Generator, taken from Figure 3 of [High-Resolution Image Synthesis and Semantic Manipulation with Conditional GANs](https://arxiv.org/abs/1711.11585) (Wang et al. 2018). Following our notation, $G = \left[G_2^{(F)}, G_1^{(F)}, G_1^{(R)}, G_1^{(B)}, G_2^{(R)}, G_2^{(B)}\right]$ from left to right.* ### Global Subgenerator ($G_1$) Let's first start by building the global generator ($G_1$). Even though the global generator is nested inside the local enhancer, you'll still need a separate module for training $G_1$ on its own first. ``` class GlobalGenerator(nn.Module): ''' GlobalGenerator Class: Implements the global subgenerator (G1) for transferring styles at lower resolutions. Values: in_channels: the number of input channels, a scalar out_channels: the number of output channels, a scalar base_channels: the number of channels in first convolutional layer, a scalar fb_blocks: the number of frontend / backend blocks, a scalar res_blocks: the number of residual blocks, a scalar ''' def __init__(self, in_channels=3, out_channels=3, base_channels=64, fb_blocks=3, res_blocks=9): super().__init__() # Initial convolutional layer g1 = [ nn.ReflectionPad2d(3), nn.Conv2d(in_channels, base_channels, kernel_size=7, padding=0), nn.InstanceNorm2d(base_channels, affine=False), nn.ReLU(inplace=True), ] channels = base_channels # Frontend blocks for _ in range(fb_blocks): g1 += [ nn.Conv2d(channels, 2 * channels, kernel_size=3, stride=2, padding=1), nn.InstanceNorm2d(2 * channels, affine=False), nn.ReLU(inplace=True), ] channels *= 2 # Residual blocks for _ in range(res_blocks): g1 += [ResidualBlock(channels)] # Backend blocks for _ in range(fb_blocks): g1 += [ nn.ConvTranspose2d(channels, channels // 2, kernel_size=3, stride=2, padding=1, output_padding=1), nn.InstanceNorm2d(channels // 2, affine=False), nn.ReLU(inplace=True), ] channels //= 2 # Output convolutional layer as its own nn.Sequential since it will be omitted in second training phase self.out_layers = nn.Sequential( nn.ReflectionPad2d(3), nn.Conv2d(base_channels, out_channels, kernel_size=7, padding=0), nn.Tanh(), ) self.g1 = nn.Sequential(*g1) def forward(self, x): x = self.g1(x) x = self.out_layers(x) return x ``` ### Local Enhancer Subgenerator ($G_2$) And now onto the local enhancer ($G_2$)! Recall that the local enhancer uses (a pretrained) $G_1$ as part of its architecture. Following our earlier notation, recall that the residual connections from the last layers of $G_2^{(F)}$ and $G_1^{(B)}$ are added together and passed through $G_2^{(R)}$ and $G_2^{(B)}$ to synthesize a high-resolution image. Because of this, you should reuse the $G_1$ implementation so that the weights are consistent for the second training phase. ``` class LocalEnhancer(nn.Module): ''' LocalEnhancer Class: Implements the local enhancer subgenerator (G2) for handling larger scale images. 
Values: in_channels: the number of input channels, a scalar out_channels: the number of output channels, a scalar base_channels: the number of channels in first convolutional layer, a scalar global_fb_blocks: the number of global generator frontend / backend blocks, a scalar global_res_blocks: the number of global generator residual blocks, a scalar local_res_blocks: the number of local enhancer residual blocks, a scalar ''' def __init__(self, in_channels, out_channels, base_channels=32, global_fb_blocks=3, global_res_blocks=9, local_res_blocks=3): super().__init__() global_base_channels = 2 * base_channels # Downsampling layer for high-res -> low-res input to g1 self.downsample = nn.AvgPool2d(3, stride=2, padding=1, count_include_pad=False) # Initialize global generator without its output layers self.g1 = GlobalGenerator( in_channels, out_channels, base_channels=global_base_channels, fb_blocks=global_fb_blocks, res_blocks=global_res_blocks, ).g1 self.g2 = nn.ModuleList() # Initialize local frontend block self.g2.append( nn.Sequential( # Initial convolutional layer nn.ReflectionPad2d(3), nn.Conv2d(in_channels, base_channels, kernel_size=7, padding=0), nn.InstanceNorm2d(base_channels, affine=False), nn.ReLU(inplace=True), # Frontend block nn.Conv2d(base_channels, 2 * base_channels, kernel_size=3, stride=2, padding=1), nn.InstanceNorm2d(2 * base_channels, affine=False), nn.ReLU(inplace=True), ) ) # Initialize local residual and backend blocks self.g2.append( nn.Sequential( # Residual blocks *[ResidualBlock(2 * base_channels) for _ in range(local_res_blocks)], # Backend blocks nn.ConvTranspose2d(2 * base_channels, base_channels, kernel_size=3, stride=2, padding=1, output_padding=1), nn.InstanceNorm2d(base_channels, affine=False), nn.ReLU(inplace=True), # Output convolutional layer nn.ReflectionPad2d(3), nn.Conv2d(base_channels, out_channels, kernel_size=7, padding=0), nn.Tanh(), ) ) def forward(self, x): # Get output from g1_B x_g1 = self.downsample(x) x_g1 = self.g1(x_g1) # Get output from g2_F x_g2 = self.g2[0](x) # Get final output from g2_B return self.g2[1](x_g1 + x_g2) ``` And voilà! You now have modules for both the global subgenerator and local enhancer subgenerator! ## Multiscale Discriminator: Discriminating at different scales too! Pix2PixHD uses 3 separate subcomponents (subdiscriminators $D_1$, $D_2$, and $D_3$) to generate predictions. They all have the same architectures but $D_2$ and $D_3$ operate on inputs downsampled by 2x and 4x, respectively. The GAN objective is now modified as \begin{align*} \min_G \max_{D_1,D_2,D_3}\sum_{k=1,2,3}\mathcal{L}_{\text{GAN}}(G, D_k) \end{align*} Each subdiscriminator is a PatchGAN, which you should be familiar with from Pix2Pix! Let's first implement a single PatchGAN - this implementation will be slightly different than the one you saw in Pix2Pix since the intermediate feature maps will be needed for computing loss. ``` class Discriminator(nn.Module): ''' Discriminator Class Implements the discriminator class for a subdiscriminator, which can be used for all the different scales, just with different argument values. Values: in_channels: the number of channels in input, a scalar base_channels: the number of channels in first convolutional layer, a scalar n_layers: the number of convolutional layers, a scalar ''' def __init__(self, in_channels=3, base_channels=64, n_layers=3): super().__init__() # Use nn.ModuleList so we can output intermediate values for loss. 
self.layers = nn.ModuleList() # Initial convolutional layer self.layers.append( nn.Sequential( nn.Conv2d(in_channels, base_channels, kernel_size=4, stride=2, padding=2), nn.LeakyReLU(0.2, inplace=True), ) ) # Downsampling convolutional layers channels = base_channels for _ in range(1, n_layers): prev_channels = channels channels = min(2 * channels, 512) self.layers.append( nn.Sequential( nn.Conv2d(prev_channels, channels, kernel_size=4, stride=2, padding=2), nn.InstanceNorm2d(channels, affine=False), nn.LeakyReLU(0.2, inplace=True), ) ) # Output convolutional layer prev_channels = channels channels = min(2 * channels, 512) self.layers.append( nn.Sequential( nn.Conv2d(prev_channels, channels, kernel_size=4, stride=1, padding=2), nn.InstanceNorm2d(channels, affine=False), nn.LeakyReLU(0.2, inplace=True), nn.Conv2d(channels, 1, kernel_size=4, stride=1, padding=2), ) ) def forward(self, x): outputs = [] # for feature matching loss for layer in self.layers: x = layer(x) outputs.append(x) return outputs model = Discriminator() p = model(torch.rand(1, 3, 256, 256)) for pi in p: print(pi.shape) ``` Now you're ready to implement the multiscale discriminator in full! This puts together the different subdiscriminator scales. ``` class MultiscaleDiscriminator(nn.Module): ''' MultiscaleDiscriminator Class Values: in_channels: number of input channels to each discriminator, a scalar base_channels: number of channels in first convolutional layer, a scalar n_layers: number of downsampling layers in each discriminator, a scalar n_discriminators: number of discriminators at different scales, a scalar ''' def __init__(self, in_channels, base_channels=64, n_layers=3, n_discriminators=3): super().__init__() # Initialize all discriminators self.discriminators = nn.ModuleList() for _ in range(n_discriminators): self.discriminators.append( Discriminator(in_channels, base_channels=base_channels, n_layers=n_layers) ) # Downsampling layer to pass inputs between discriminators at different scales self.downsample = nn.AvgPool2d(3, stride=2, padding=1, count_include_pad=False) def forward(self, x): outputs = [] for i, discriminator in enumerate(self.discriminators): # Downsample input for subsequent discriminators if i != 0: x = self.downsample(x) outputs.append(discriminator(x)) # Return list of multiscale discriminator outputs return outputs @property def n_discriminators(self): return len(self.discriminators) ``` ## Instance Boundary Map: Learning boundaries between instances Here's a new method that adds additional information as conditional input! The authors observed that previous approaches have typically taken in a label map (aka. segmentation map) that labels all the pixels to be of a certain class (i.e. car) but doesn't differentiate between two instances of the same class (i.e. two cars in the image). This is the difference between *semantic label maps*, which have class labels but not instance labels, and *instance label maps*, which represent unique instances with unique numbers. The authors found that the most important information in the instance lelab map is actually the boundaries between instances (i.e. the outline of each car). You can create boundary maps by mapping each pixel maps to a 1 if it's a different instance from its 4 neighbors, and 0 otherwise. To include this information, the authors concatenate the boundary map with the semantic label map as input. 
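That neighbor-difference rule is simple to write down; here is a minimal sketch on a toy instance map (the Cityscapes dataset class later in this notebook applies the same trick):

```
import torch

def instance_boundary_map(inst):
    # inst: instance map of shape (1, H, W); returns 1.0 on instance boundaries
    bound = torch.zeros_like(inst, dtype=torch.bool)
    bound[:, :, 1:]  |= inst[:, :, 1:] != inst[:, :, :-1]   # horizontal neighbors
    bound[:, :, :-1] |= inst[:, :, 1:] != inst[:, :, :-1]
    bound[:, 1:, :]  |= inst[:, 1:, :] != inst[:, :-1, :]   # vertical neighbors
    bound[:, :-1, :] |= inst[:, 1:, :] != inst[:, :-1, :]
    return bound.float()

# Toy instance map with two instances (ids 1 and 2) side by side
toy_inst = torch.tensor([[[1, 1, 2, 2],
                          [1, 1, 2, 2]]])
print(instance_boundary_map(toy_inst))
```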
From the figure below, you can see that including both as input results in much sharper generated images (right) than only inputting the semantic label map (left). > ![Semantic label map input vs instance boundary map input](https://github.com/https-deeplearning-ai/GANs-Public/blob/master/Pix2PixHD-Instance-Map.png?raw=true) ![Semantic label map vs instance boundary map](https://github.com/https-deeplearning-ai/GANs-Public/blob/master/Pix2PixHD-Instance-Map-2.png?raw=true) *Semantic label map input (top left) and its blurry output between instances (bottom left) vs. instance boundary map (top right) and the much clearer output between instances from inputting both the semantic label map and the instance boundary map (bottom right). Taken from Figures 4 and 5 of [High-Resolution Image Synthesis and Semantic Manipulation with Conditional GANs](https://arxiv.org/abs/1711.11585) (Wang et al. 2018).* ## Instance-level Feature Encoder: Adding controllable diversity As you already know, the task of generation has more than one possible realistic output. For example, an object of class `road` could be concrete, cobblestone, dirt, etc. To learn this diversity, the authors introduce an encoder $E$, which takes the original image as input and outputs a feature map (like the feature extractor from Course 2, Week 1). They apply *instance-wise averaging*, averaging the feature vectors across all occurrences of each instance (so that every pixel corresponding to the same instance has the same feature vector). They then concatenate this instance-level feature embedding with the semantic label and instance boundary maps as input to the generator. What's cool is that the encoder $E$ is trained jointly with $G_1$. One huge backprop! When training $G_2$, $E$ is fed a downsampled image and the corresponding output is upsampled to pass into $G_2$. To allow for control over different features (e.g. concrete, cobblestone, and dirt) for inference, the authors first use K-means clustering to cluster all the feature vectors for each object class in the training set. You can think of this as a dictionary, mapping each class label to a set of feature vectors (so $K$ centroids, each representing different clusters of features). Now during inference, you can perform a random lookup from this dictionary for each class (e.g. road) in the semantic label map to generate one type of feature (e.g. dirt). To provide greater control, you can select among different feature types for each class to generate diverse feature types and, as a result, multi-modal outputs from the same input. Higher values of $K$ increase diversity and potentially decrease fidelity. You've seen this tradeoff between diversity and fidelity before with the truncation trick, and this is just another way to trade-off between them. 
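As a purely illustrative sketch of that lookup dictionary, the snippet below clusters random stand-in feature vectors (a hypothetical `features_per_class` mapping, not real encoder outputs) with scikit-learn's K-means and then draws one "style" centroid per class. The encoder that would produce the real features is implemented next.

```
import numpy as np
from sklearn.cluster import KMeans

# Hypothetical stand-in: 200 random 3-dim feature vectors for a single class id
features_per_class = {26: np.random.randn(200, 3)}

K = 10
feature_dict = {
    class_id: KMeans(n_clusters=K, n_init=10).fit(feats).cluster_centers_
    for class_id, feats in features_per_class.items()
}

# At inference time: pick one centroid (one feature "style") per class
chosen = {c: centers[np.random.randint(len(centers))] for c, centers in feature_dict.items()}
print(chosen[26])
```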
``` class Encoder(nn.Module): ''' Encoder Class Values: in_channels: number of input channels to each discriminator, a scalar out_channels: number of channels in output feature map, a scalar base_channels: number of channels in first convolutional layer, a scalar n_layers: number of downsampling layers, a scalar ''' def __init__(self, in_channels, out_channels, base_channels=16, n_layers=4): super().__init__() self.out_channels = out_channels channels = base_channels layers = [ nn.ReflectionPad2d(3), nn.Conv2d(in_channels, base_channels, kernel_size=7, padding=0), nn.InstanceNorm2d(base_channels), nn.ReLU(inplace=True), ] # Downsampling layers for i in range(n_layers): layers += [ nn.Conv2d(channels, 2 * channels, kernel_size=3, stride=2, padding=1), nn.InstanceNorm2d(2 * channels), nn.ReLU(inplace=True), ] channels *= 2 # Upsampling layers for i in range(n_layers): layers += [ nn.ConvTranspose2d(channels, channels // 2, kernel_size=3, stride=2, padding=1, output_padding=1), nn.InstanceNorm2d(channels // 2), nn.ReLU(inplace=True), ] channels //= 2 layers += [ nn.ReflectionPad2d(3), nn.Conv2d(base_channels, out_channels, kernel_size=7, padding=0), nn.Tanh(), ] self.layers = nn.Sequential(*layers) def instancewise_average_pooling(self, x, inst): ''' Applies instance-wise average pooling. Given a feature map of size (b, c, h, w), the mean is computed for each b, c across all h, w of the same instance ''' x_mean = torch.zeros_like(x) classes = torch.unique(inst, return_inverse=False, return_counts=False) # gather all unique classes present for i in classes: for b in range(x.size(0)): indices = torch.nonzero(inst[b:b+1] == i, as_tuple=False) # get indices of all positions equal to class i for j in range(self.out_channels): x_ins = x[indices[:, 0] + b, indices[:, 1] + j, indices[:, 2], indices[:, 3]] mean_feat = torch.mean(x_ins).expand_as(x_ins) x_mean[indices[:, 0] + b, indices[:, 1] + j, indices[:, 2], indices[:, 3]] = mean_feat return x_mean def forward(self, x, inst): x = self.layers(x) x = self.instancewise_average_pooling(x, inst) return x ``` ## Additional Loss Functions In addition to the architectural and feature-map enhancements, the authors also incorporate a feature matching loss based on the discriminator. Essentially, they output intermediate feature maps at different resolutions from the discriminator and try to minimize the difference between the real and fake image features. The authors found this to stabilize training. In this case, this forces the generator to produce natural statistics at multiple scales. This feature-matching loss is similar to StyleGAN's perceptual loss. For some semantic label map $s$ and corresponding image $x$, \begin{align*} \mathcal{L}_{\text{FM}} = \mathbb{E}_{s,x}\left[\sum_{i=1}^T\dfrac{1}{N_i}\left|\left|D^{(i)}_k(s, x) - D^{(i)}_k(s, G(s))\right|\right|_1\right] \end{align*} where $T$ is the total number of layers, $N_i$ is the number of elements at layer $i$, and $D^{(i)}_k$ denotes the $i$th layer in discriminator $k$. The authors also report minor improvements in performance when adding perceptual loss, formulated as \begin{align*} \mathcal{L}_{\text{VGG}} = \mathbb{E}_{s,x}\left[\sum_{i=1}^N\dfrac{1}{M_i}\left|\left|F^i(x) - F^i(G(s))\right|\right|_1\right] \end{align*} where $F^i$ denotes the $i$th layer with $M_i$ elements of the VGG19 network. `torchvision` provides a pretrained VGG19 network, so you'll just need a simple wrapper for it to get the intermediate outputs. 
The overall loss looks like this: \begin{align*} \mathcal{L} = \mathcal{L}_{\text{GAN}} + \lambda_1\mathcal{L}_{\text{FM}} + \lambda_2\mathcal{L}_{\text{VGG}} \end{align*} where $\lambda_1 = \lambda_2 = 10$. ``` import torchvision.models as models class VGG19(nn.Module): ''' VGG19 Class Wrapper for pretrained torchvision.models.vgg19 to output intermediate feature maps ''' def __init__(self): super().__init__() vgg_features = models.vgg19(pretrained=True).features self.f1 = nn.Sequential(*[vgg_features[x] for x in range(2)]) self.f2 = nn.Sequential(*[vgg_features[x] for x in range(2, 7)]) self.f3 = nn.Sequential(*[vgg_features[x] for x in range(7, 12)]) self.f4 = nn.Sequential(*[vgg_features[x] for x in range(12, 21)]) self.f5 = nn.Sequential(*[vgg_features[x] for x in range(21, 30)]) for param in self.parameters(): param.requires_grad = False def forward(self, x): h1 = self.f1(x) h2 = self.f2(h1) h3 = self.f3(h2) h4 = self.f4(h3) h5 = self.f5(h4) return [h1, h2, h3, h4, h5] class Loss(nn.Module): ''' Loss Class Implements composite loss for GauGAN Values: lambda1: weight for feature matching loss, a float lambda2: weight for vgg perceptual loss, a float device: 'cuda' or 'cpu' for hardware to use norm_weight_to_one: whether to normalize weights to (0, 1], a bool ''' def __init__(self, lambda1=10., lambda2=10., device='cuda', norm_weight_to_one=True): super().__init__() self.vgg = VGG19().to(device) self.vgg_weights = [1.0/32, 1.0/16, 1.0/8, 1.0/4, 1.0] lambda0 = 1.0 # Keep ratio of composite loss, but scale down max to 1.0 scale = max(lambda0, lambda1, lambda2) if norm_weight_to_one else 1.0 self.lambda0 = lambda0 / scale self.lambda1 = lambda1 / scale self.lambda2 = lambda2 / scale def adv_loss(self, discriminator_preds, is_real): ''' Computes adversarial loss from nested list of fakes outputs from discriminator. ''' target = torch.ones_like if is_real else torch.zeros_like adv_loss = 0.0 for preds in discriminator_preds: pred = preds[-1] adv_loss += F.mse_loss(pred, target(pred)) return adv_loss def fm_loss(self, real_preds, fake_preds): ''' Computes feature matching loss from nested lists of fake and real outputs from discriminator. ''' fm_loss = 0.0 for real_features, fake_features in zip(real_preds, fake_preds): for real_feature, fake_feature in zip(real_features, fake_features): fm_loss += F.l1_loss(real_feature.detach(), fake_feature) return fm_loss def vgg_loss(self, x_real, x_fake): ''' Computes perceptual loss with VGG network from real and fake images. ''' vgg_real = self.vgg(x_real) vgg_fake = self.vgg(x_fake) vgg_loss = 0.0 for real, fake, weight in zip(vgg_real, vgg_fake, self.vgg_weights): vgg_loss += weight * F.l1_loss(real.detach(), fake) return vgg_loss def forward(self, x_real, label_map, instance_map, boundary_map, encoder, generator, discriminator): ''' Function that computes the forward pass and total loss for generator and discriminator. 
''' feature_map = encoder(x_real, instance_map) x_fake = generator(torch.cat((label_map, boundary_map, feature_map), dim=1)) # Get necessary outputs for loss/backprop for both generator and discriminator fake_preds_for_g = discriminator(torch.cat((label_map, boundary_map, x_fake), dim=1)) fake_preds_for_d = discriminator(torch.cat((label_map, boundary_map, x_fake.detach()), dim=1)) real_preds_for_d = discriminator(torch.cat((label_map, boundary_map, x_real.detach()), dim=1)) g_loss = ( self.lambda0 * self.adv_loss(fake_preds_for_g, True) + \ self.lambda1 * self.fm_loss(real_preds_for_d, fake_preds_for_g) / discriminator.n_discriminators + \ self.lambda2 * self.vgg_loss(x_fake, x_real) ) d_loss = 0.5 * ( self.adv_loss(real_preds_for_d, True) + \ self.adv_loss(fake_preds_for_d, False) ) return g_loss, d_loss, x_fake.detach() ``` ## Training Pix2PixHD You now have the Pix2PixHD model coded up! All you have to do now is prepare your dataset. Pix2PixHD is trained on the Cityscapes dataset, which unfortunately requires registration. You'll have to download the dataset and put it in your `data` folder to initialize the dataset code below. Specifically, you should download the `gtFine_trainvaltest` and `leftImg8bit_trainvaltest` and specify the corresponding data splits into the dataloader. ``` import os import numpy as np import torchvision.transforms as transforms from PIL import Image def scale_width(img, target_width, method): ''' Function that scales an image to target_width while retaining aspect ratio. ''' w, h = img.size if w == target_width: return img target_height = target_width * h // w return img.resize((target_width, target_height), method) class CityscapesDataset(torch.utils.data.Dataset): ''' CityscapesDataset Class Values: paths: (a list of) paths to load examples from, a list or string target_width: the size of image widths for resizing, a scalar n_classes: the number of object classes, a scalar ''' def __init__(self, paths, target_width=1024, n_classes=35): super().__init__() self.n_classes = n_classes # Collect list of examples self.examples = {} if type(paths) == str: self.load_examples_from_dir(paths) elif type(paths) == list: for path in paths: self.load_examples_from_dir(path) else: raise ValueError('`paths` should be a single path or list of paths') self.examples = list(self.examples.values()) assert all(len(example) == 3 for example in self.examples) # Initialize transforms for the real color image self.img_transforms = transforms.Compose([ transforms.Lambda(lambda img: scale_width(img, target_width, Image.BICUBIC)), transforms.Lambda(lambda img: np.array(img)), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), ]) # Initialize transforms for semantic label and instance maps self.map_transforms = transforms.Compose([ transforms.Lambda(lambda img: scale_width(img, target_width, Image.NEAREST)), transforms.Lambda(lambda img: np.array(img)), transforms.ToTensor(), ]) def load_examples_from_dir(self, abs_path): ''' Given a folder of examples, this function returns a list of paired examples. 
''' assert os.path.isdir(abs_path) img_suffix = '_leftImg8bit.png' label_suffix = '_gtFine_labelIds.png' inst_suffix = '_gtFine_instanceIds.png' for root, _, files in os.walk(abs_path): for f in files: if f.endswith(img_suffix): prefix = f[:-len(img_suffix)] attr = 'orig_img' elif f.endswith(label_suffix): prefix = f[:-len(label_suffix)] attr = 'label_map' elif f.endswith(inst_suffix): prefix = f[:-len(inst_suffix)] attr = 'inst_map' else: continue if prefix not in self.examples.keys(): self.examples[prefix] = {} self.examples[prefix][attr] = root + '/' + f def __getitem__(self, idx): example = self.examples[idx] # Load image and maps img = Image.open(example['orig_img']).convert('RGB') # color image: (3, 512, 1024) inst = Image.open(example['inst_map']) # instance map: (512, 1024) label = Image.open(example['label_map']) # semantic label map: (512, 1024) # Apply corresponding transforms img = self.img_transforms(img) inst = self.map_transforms(inst) label = self.map_transforms(label).long() * 255 # Convert labels to one-hot vectors label = torch.zeros(self.n_classes, img.shape[1], img.shape[2]).scatter_(0, label, 1.0).to(img.dtype) # Convert instance map to instance boundary map bound = torch.ByteTensor(inst.shape).zero_() bound[:, :, 1:] = bound[:, :, 1:] | (inst[:, :, 1:] != inst[:, :, :-1]) bound[:, :, :-1] = bound[:, :, :-1] | (inst[:, :, 1:] != inst[:, :, :-1]) bound[:, 1:, :] = bound[:, 1:, :] | (inst[:, 1:, :] != inst[:, :-1, :]) bound[:, :-1, :] = bound[:, :-1, :] | (inst[:, 1:, :] != inst[:, :-1, :]) bound = bound.to(img.dtype) return (img, label, inst, bound) def __len__(self): return len(self.examples) @staticmethod def collate_fn(batch): imgs, labels, insts, bounds = [], [], [], [] for (x, l, i, b) in batch: imgs.append(x) labels.append(l) insts.append(i) bounds.append(b) return ( torch.stack(imgs, dim=0), torch.stack(labels, dim=0), torch.stack(insts, dim=0), torch.stack(bounds, dim=0), ) from tqdm import tqdm from torch.utils.data import DataLoader n_classes = 35 # total number of object classes rgb_channels = n_features = 3 device = 'cuda' train_dir = ['cityscapes_data/train'] epochs = 200 # total number of train epochs decay_after = 100 # number of epochs with constant lr lr = 0.0002 betas = (0.5, 0.999) def lr_lambda(epoch): ''' Function for scheduling learning ''' return 1. if epoch < decay_after else 1 - float(epoch - decay_after) / (epochs - decay_after) def weights_init(m): ''' Function for initializing all model weights ''' if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d): nn.init.normal_(m.weight, 0., 0.02) loss_fn = Loss(device=device) dataloader2 = DataLoader( CityscapesDataset(train_dir, target_width=2048, n_classes=n_classes), collate_fn=CityscapesDataset.collate_fn, batch_size=1, shuffle=True, drop_last=False, pin_memory=True, ) ``` Now initialize everything you'll need for training. Don't be worried if there looks like a lot of random code, it's all stuff you've seen before! ``` from tqdm import tqdm from torch.utils.data import DataLoader n_classes = 35 # total number of object classes rgb_channels = n_features = 3 device = 'cuda' train_dir = ['data'] epochs = 200 # total number of train epochs decay_after = 100 # number of epochs with constant lr lr = 0.0002 betas = (0.5, 0.999) def lr_lambda(epoch): ''' Function for scheduling learning ''' return 1. 
if epoch < decay_after else 1 - float(epoch - decay_after) / (epochs - decay_after) def weights_init(m): ''' Function for initializing all model weights ''' if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d): nn.init.normal_(m.weight, 0., 0.02) loss_fn = Loss(device=device) ## Phase 1: Low Resolution (1024 x 512) dataloader1 = DataLoader( CityscapesDataset(train_dir, target_width=1024, n_classes=n_classes), collate_fn=CityscapesDataset.collate_fn, batch_size=1, shuffle=True, drop_last=False, pin_memory=True, ) encoder = Encoder(rgb_channels, n_features).to(device).apply(weights_init) generator1 = GlobalGenerator(n_classes + n_features + 1, rgb_channels).to(device).apply(weights_init) discriminator1 = MultiscaleDiscriminator(n_classes + 1 + rgb_channels, n_discriminators=2).to(device).apply(weights_init) g1_optimizer = torch.optim.Adam(list(generator1.parameters()) + list(encoder.parameters()), lr=lr, betas=betas) d1_optimizer = torch.optim.Adam(list(discriminator1.parameters()), lr=lr, betas=betas) g1_scheduler = torch.optim.lr_scheduler.LambdaLR(g1_optimizer, lr_lambda) d1_scheduler = torch.optim.lr_scheduler.LambdaLR(d1_optimizer, lr_lambda) ## Phase 2: High Resolution (2048 x 1024) dataloader2 = DataLoader( CityscapesDataset(train_dir, target_width=2048, n_classes=n_classes), collate_fn=CityscapesDataset.collate_fn, batch_size=1, shuffle=True, drop_last=False, pin_memory=True, ) generator2 = LocalEnhancer(n_classes + n_features + 1, rgb_channels).to(device).apply(weights_init) discriminator2 = MultiscaleDiscriminator(n_classes + 1 + rgb_channels).to(device).apply(weights_init) g2_optimizer = torch.optim.Adam(list(generator2.parameters()) + list(encoder.parameters()), lr=lr, betas=betas) d2_optimizer = torch.optim.Adam(list(discriminator2.parameters()), lr=lr, betas=betas) g2_scheduler = torch.optim.lr_scheduler.LambdaLR(g2_optimizer, lr_lambda) d2_scheduler = torch.optim.lr_scheduler.LambdaLR(d2_optimizer, lr_lambda) ``` And now the training loop, which is pretty much the same between the two phases: ``` from torchvision.utils import make_grid import matplotlib.pyplot as plt # Parse torch version for autocast # ###################################################### version = torch.__version__ version = tuple(int(n) for n in version.split('.')[:-1]) has_autocast = version >= (1, 6) # ###################################################### def show_tensor_images(image_tensor): ''' Function for visualizing images: Given a tensor of images, number of images, and size per image, plots and prints the images in an uniform grid. 
''' image_tensor = (image_tensor + 1) / 2 image_unflat = image_tensor.detach().cpu() image_grid = make_grid(image_unflat[:1], nrow=1) plt.imshow(image_grid.permute(1, 2, 0).squeeze()) plt.show() def train(dataloader, models, optimizers, schedulers, device): encoder, generator, discriminator = models g_optimizer, d_optimizer = optimizers g_scheduler, d_scheduler = schedulers cur_step = 0 display_step = 100 mean_g_loss = 0.0 mean_d_loss = 0.0 for epoch in range(epochs): # Training epoch for (x_real, labels, insts, bounds) in tqdm(dataloader, position=0): x_real = x_real.to(device) labels = labels.to(device) insts = insts.to(device) bounds = bounds.to(device) # Enable autocast to FP16 tensors (new feature since torch==1.6.0) # If you're running older versions of torch, comment this out # and use NVIDIA apex for mixed/half precision training if has_autocast: with torch.cuda.amp.autocast(enabled=(device=='cuda')): g_loss, d_loss, x_fake = loss_fn( x_real, labels, insts, bounds, encoder, generator, discriminator ) else: g_loss, d_loss, x_fake = loss_fn( x_real, labels, insts, bounds, encoder, generator, discriminator ) g_optimizer.zero_grad() g_loss.backward() g_optimizer.step() d_optimizer.zero_grad() d_loss.backward() d_optimizer.step() mean_g_loss += g_loss.item() / display_step mean_d_loss += d_loss.item() / display_step if cur_step % display_step == 0 and cur_step > 0: print('Step {}: Generator loss: {:.5f}, Discriminator loss: {:.5f}' .format(cur_step, mean_g_loss, mean_d_loss)) show_tensor_images(x_fake.to(x_real.dtype)) show_tensor_images(x_real) mean_g_loss = 0.0 mean_d_loss = 0.0 cur_step += 1 g_scheduler.step() d_scheduler.step() ``` And now you can train your models! Remember to set the local enhancer subgenerator to the global subgenerator that you train in the first phase. In their official repository, the authors don't continue to train the encoder. Instead, they precompute all feature maps upsample them, and concatenate this to the input to the local enhancer subgenerator. (They also leave a re-train option for it). For simplicity, the script below will just downsample and upsample high-resolution inputs. ``` # Phase 1: Low Resolution ####################################################################### train( dataloader1, [encoder, generator1, discriminator1], [g1_optimizer, d1_optimizer], [g1_scheduler, d1_scheduler], device, ) # Phase 2: High Resolution ####################################################################### # Update global generator in local enhancer with trained generator2.g1 = generator1.g1 # Freeze encoder and wrap to support high-resolution inputs/outputs def freeze(encoder): encoder.eval() for p in encoder.parameters(): p.requires_grad = False @torch.jit.script def forward(x, inst): x = F.interpolate(x, scale_factor=0.5, recompute_scale_factor=True) inst = F.interpolate(inst.float(), scale_factor=0.5, recompute_scale_factor=True) feat = encoder(x, inst.int()) return F.interpolate(feat, scale_factor=2.0, recompute_scale_factor=True) return forward train( dataloader2, [freeze(encoder), generator2, discriminator2], [g2_optimizer, d2_optimizer], [g2_scheduler, d2_scheduler], device, ) ``` ## Inference with Pix2PixHD Recall that in inference time, the encoder feature maps from training are saved and clustered with K-means by object class. Again, you'll have to download the Cityscapes dataset into your `data` folder and then run these functions. 
``` from sklearn.cluster import KMeans # Encode features by class label features = {} for (x, _, inst, _) in tqdm(dataloader2): x = x.to(device) inst = inst.to(device) area = inst.size(2) * inst.size(3) # Get pooled feature map with torch.no_grad(): feature_map = encoder(x, inst) for i in torch.unique(inst): label = i if i < 1000 else i // 1000 label = int(label.flatten(0).item()) # All indices should have same feature per class from pooling idx = torch.nonzero(inst == i, as_tuple=False) n_inst = idx.size(0) idx = idx[0, :] # Retrieve corresponding encoded feature feature = feature_map[idx[0], :, idx[2], idx[3]].unsqueeze(0) # Compute rate of feature appearance (in official code, they compute per block) block_size = 32 rate_per_block = 32 * n_inst / area rate = torch.ones((1, 1), device=device).to(feature.dtype) * rate_per_block feature = torch.cat((feature, rate), dim=1) if label in features.keys(): features[label] = torch.cat((features[label], feature), dim=0) else: features[label] = feature # Cluster features by class label k = 10 centroids = {} for label in range(n_classes): if label not in features.keys(): continue feature = features[label] # Thresholding by 0.5 isn't mentioned in the paper, but is present in the # official code repository, probably so that only frequent features are clustered feature = feature[feature[:, -1] > 0.5, :-1].cpu().numpy() if feature.shape[0]: n_clusters = min(feature.shape[0], k) kmeans = KMeans(n_clusters=n_clusters).fit(feature) centroids[label] = kmeans.cluster_centers_ ``` After getting the encoded feature centroids per class, you can now run inference! Remember that the generator is trained to take in a concatenation of the semantic label map, instance boundary map, and encoded feature map. Congrats on making it to the end of this complex notebook! Have fun with this powerful model and be responsible of course ;) ``` def infer(label_map, instance_map, boundary_map): # Sample feature vector centroids b, _, h, w = label_map.shape feature_map = torch.zeros((b, n_features, h, w), device=device).to(label_map.dtype) for i in torch.unique(instance_map): label = i if i < 1000 else i // 1000 label = int(label.flatten(0).item()) if label in centroids.keys(): centroid_idx = random.randint(0, centroids[label].shape[0] - 1) idx = torch.nonzero(instance_map == int(i), as_tuple=False) feature = torch.from_numpy(centroids[label][centroid_idx, :]).to(device) feature_map[idx[:, 0], :, idx[:, 2], idx[:, 3]] = feature with torch.no_grad(): x_fake = generator2(torch.cat((label_map, boundary_map, feature_map), dim=1)) return x_fake for x, labels, insts, bounds in dataloader2: x_fake = infer(labels.to(device), insts.to(device), bounds.to(device)) show_tensor_images(x_fake.to(x.dtype)) show_tensor_images(x) break ```
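If you want to reuse these clustered centroids later without re-running the encoder over the whole dataset, one convenience (not part of the original notebook, just a sketch using plain NumPy serialization) is to save and reload the `centroids` dictionary built above:

```
import numpy as np

# Save the per-class centroid arrays (a dict of label -> ndarray from the KMeans step above)
np.save('centroids.npy', centroids)

# ...later, reload them before calling infer()
centroids = np.load('centroids.npy', allow_pickle=True).item()
```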
# Calibrating Traditional Methods In this notebook, we show how to calibrate a $\mathcal{J}$-invariant denoiser, and compare its performance with the original denoiser. ``` %load_ext autoreload %autoreload 2 %matplotlib inline import sys sys.path.append("..") import numpy as np import matplotlib.pyplot as plt from skimage.morphology import disk from skimage.filters import gaussian, median from skimage import data, img_as_float, img_as_ubyte from skimage.color import gray2rgb from skimage.util import random_noise from skimage.measure import compare_ssim as ssim from skimage.measure import compare_psnr as psnr from skimage.measure import compare_mse as mse from util import plot_grid, plot_images, expand plt.rc('figure', figsize = (5,5)) show = lambda x: plt.imshow(x, cmap=plt.cm.gray) image = data.camera() show(image) np.random.seed(3) noisy_image = img_as_ubyte(random_noise(image, mode = 'gaussian', var=0.01)) show(noisy_image) ``` We begin by comparing an ordinary median filter to a "donut" median filter, with the center removed. ``` def mask_center(x): x[len(x)//2,len(x)//2] = 0 return x plot_images([1-disk(4), 1-mask_center(disk(4))]) cm = plt.get_cmap("tab10") orange_regular_disk = (1 - disk(4))[:,:,np.newaxis] + (disk(4))[:,:,np.newaxis]*np.array(cm(1)[:-1])[np.newaxis, np.newaxis] blue_donut_disk = (1 - mask_center(disk(4)))[:,:,np.newaxis] + (mask_center(disk(4)))[:,:,np.newaxis]*np.array(cm(0)[:-1])[np.newaxis, np.newaxis] plt.imsave(dir + 'regular_disk.png', expand(orange_regular_disk, 5)) plt.imsave(dir + 'donut_disk.png', expand(blue_donut_disk, 5)) radii = range(1, 7) mask_med = np.array([median(noisy_image, mask_center(disk(i))) for i in radii]) med = np.array([median(noisy_image, disk(i)) for i in radii]) def stats(im_list, noisy_img, img): #img is the ground truth img = img_as_float(img) noisy_img = img_as_float(noisy_img) im_list = [img_as_float(x) for x in im_list] loss = [mse(x, noisy_img) for x in im_list] mse_gt = [mse(x, img) for x in im_list] psnr_gt = [psnr(x, img) for x in im_list] return loss, mse_gt, psnr_gt loss_med, mse_med, psnr_med = stats(med, noisy_image, image) loss_mask_med, mse_mask_med, psnr_mask_med = stats(mask_med, noisy_image, image) opt = radii[np.argmin(loss_mask_med)] print(opt) plt.figure(figsize=(7,5)) plt.plot(radii, loss_mask_med, label = 'masked median -- noisy input', color = 'C0') plt.plot(radii, loss_med, label = 'median -- noisy input', color = 'C1') plt.axvline(radii[np.argmin(loss_mask_med)], color='k', linestyle='--') plt.title('Calibrating a Median Filter') plt.plot(radii, mse_mask_med, label = 'masked median -- ground truth', color = 'C0', linestyle='--') plt.plot(radii, mse_med, label = 'median -- ground truth', color = 'C1', linestyle='--') plt.ylabel('MSE') plt.xlabel('Radius of Median Filter') plt.yticks([0.002, 0.012]) plt.ylim(0, 0.0143) plt.legend(loc='center right') plt.savefig(dir + 'median_filter.pdf') plt.figure(figsize=(7,5)) plt.plot(radii, loss_mask_med, label = 'masked median -- noisy input', color = 'C0') plt.plot(radii, loss_med, label = 'median -- noisy input', color = 'C1') plt.axvline(radii[np.argmin(loss_mask_med)], color='k', linestyle='--') plt.plot(radii, mse_mask_med, label = 'masked median -- ground truth', color = 'C0', linestyle='--') plt.plot(radii, mse_med, label = 'median -- ground truth', color = 'C1', linestyle='--') plt.yticks([0.002, 0.012]) plt.ylim(0, 0.0143) # Hide the right and top spines plt.gca().spines['right'].set_visible(False) plt.gca().spines['top'].set_visible(False) plt.savefig(dir + 
'median_filter_bare.pdf') inset_x_min = 100 inset_x_max = 160 inset_y_min = 230 inset_y_max = 290 get_inset = lambda x: x[inset_x_min:inset_x_max, inset_y_min:inset_y_max] plt.imsave(dir + 'camera_noisy.png', get_inset(noisy_image), cmap = 'Greys_r') plt.imsave(dir + 'camera_clean.png', get_inset(image), cmap = 'Greys_r') for i in range(len(mask_med)): plt.imsave(dir + 'camera_median_' + str(radii[i]) + '.png', get_inset(mask_med[i]), cmap = 'Greys_r') ``` # Conversion to J-invariance Let $f$ be a classical denoiser, and consider some partition $\mathcal{J}$ of the pixels. Let $s(x)$ be the function replacing each pixel with the average of its neighbors. Then the function $g$ defined by $g(x)_J := f_\theta(\mathbf{1}_{J}\cdot s(x) + \mathbf{1}_{J^c}\cdot x)_J,$ will be $J$-invariant for each $J \in \mathcal{J}$. Below, we implement this in a functional way: given a denoiser, the `invariant_denoise` is the appropriate invariant denoiser. ``` from scipy.signal import convolve2d def interpolate_image(x, conv_filter=None): # use the mean of 4-connected neighbor to filter the image. if conv_filter is None: conv_filter = np.array([[0, 0.25, 0], [0.25, 0, 0.25], [0, 0.25, 0]]) return convolve2d(x, conv_filter, mode = 'same') def generate_mask(shape, idx, width=3): m = np.zeros(shape) # get x and y index from a single index. phasex = idx % width phasey = (idx // width) % width m[phasex::width, phasey::width] = 1 return m def invariant_denoise(img, width, denoiser): # denoiser is the f shown above, should be a function. # number of all pixels in a block n_masks = width*width # the interpolation image interp = interpolate_image(img) # Initialize output image. output = np.zeros(img.shape) for i in range(n_masks): # for each i there is a mask for masking every ith pixel in block m = generate_mask(img.shape, i, width=width) #1𝐽⋅𝑠(𝑥)+1𝐽𝑐⋅𝑥 input_image = m*interp + (1 - m)*img input_image = input_image.astype(img.dtype) #𝑓𝜃(1𝐽⋅𝑠(𝑥)+1𝐽𝑐⋅𝑥)𝐽 output += m*denoiser(input_image) return output ``` ## Wavelet ``` from skimage.restoration import denoise_wavelet sigma_range = np.arange(0.08, 0.3, 0.03) reconstructions = [denoise_wavelet(noisy_image, sigma = sigma, mode='hard', multichannel = False) for sigma in sigma_range] reconstructions_mask = [reconstruct(noisy_image, 4, lambda x: denoise_wavelet(x, sigma = sigma, mode='hard', multichannel = False)) for sigma in sigma_range] loss_wavelet, mse_wavelet, psnr_wavelet = stats(reconstructions, noisy_image, image) loss_mask_wavelet, mse_mask_wavelet, psnr_mask_wavelet = stats(reconstructions_mask, noisy_image, image) plt.plot(sigma_range, psnr_wavelet) opt = sigma_range[np.argmin(loss_mask_wavelet)] plt.figure(figsize=(7,5)) plt.plot(sigma_range, loss_mask_wavelet, label = 'masked wavelet -- noisy data', color = 'C0') plt.plot(sigma_range, loss_wavelet, label = 'wavelet -- noisy data', color = 'C1') plt.axvline(sigma_range[np.argmin(loss_mask_wavelet)], color='k', linestyle='--') plt.title('Calibrating Wavelet Denoiser') plt.plot(sigma_range, mse_mask_wavelet, label = 'masked wavelet -- ground truth', color = 'C0', linestyle='--') plt.plot(sigma_range, mse_wavelet, label = 'wavelet -- ground truth', color = 'C1', linestyle='--') plt.ylabel('MSE') plt.xlabel('Sigma Threshold') plt.yticks([0.002, 0.012]) plt.ylim(0, 0.0143) plt.legend(loc='center right') plt.savefig(dir + 'wavelet_filter.pdf') plt.figure(figsize=(7,5)) plt.plot(sigma_range, loss_mask_wavelet, label = 'masked wavelet -- noisy data', color = 'C0') plt.plot(sigma_range, loss_wavelet, label = 'wavelet -- 
noisy data', color = 'C1') plt.axvline(sigma_range[np.argmin(loss_mask_wavelet)], color='k', linestyle='--') plt.plot(sigma_range, mse_mask_wavelet, label = 'masked wavelet -- ground truth', color = 'C0', linestyle='--') plt.plot(sigma_range, mse_wavelet, label = 'wavelet -- ground truth', color = 'C1', linestyle='--') plt.yticks([0.002, 0.012]) plt.ylim(0, 0.0143) # Hide the right and top spines plt.gca().spines['right'].set_visible(False) plt.gca().spines['top'].set_visible(False) plt.savefig(dir + 'wavelet_filter_bare.pdf') ``` ### Basic run of NL-Means ## NL-Means ``` from skimage.restoration import denoise_nl_means, estimate_sigma # non-local mean denoise. sigma_est = np.mean(estimate_sigma(noisy_image, multichannel=False)) print(sigma_est/255) patch_kw = dict(patch_size=5, # 5x5 patches patch_distance=6, # 13x13 search area multichannel=True) h_suggested = 0.8 * sigma_est denoise_fast = denoise_nl_means(noisy_image, h=h_suggested, fast_mode=True, **patch_kw) psnr(denoise_fast.astype(np.uint8), image) h_range = sigma_est*np.arange(0.5, 2.0, 0.2) reconstructions_nl = [denoise_nl_means(noisy_image, h=h, fast_mode=True, **patch_kw)/255 for h in h_range] reconstructions_nl_mask = [reconstruct(noisy_image, 4, lambda x: denoise_nl_means(x, h=h, fast_mode=True, **patch_kw))/255 for h in h_range] loss_nl, mse_nl, psnr_nl = stats(reconstructions_nl, noisy_image, image) loss_mask_nl, mse_mask_nl, psnr_mask_nl = stats(reconstructions_nl_mask, noisy_image, image) plt.plot(h_range, psnr_nl) opt = sigma_range[np.argmin(loss_mask_nl)] plt.figure(figsize=(7,5)) plt.plot(h_range, loss_mask_nl, label = 'masked NL-means -- noisy data', color = 'C0') plt.plot(h_range, loss_nl, label = 'NL-means -- noisy data', color = 'C1') plt.axvline(h_range[np.argmin(loss_mask_nl)], color='k', linestyle='--') plt.title('Calibrating NL-means') plt.plot(h_range, mse_mask_nl, label = 'masked NL-means -- ground truth', color = 'C0', linestyle='--') plt.plot(h_range, mse_nl, label = 'NL-means -- ground truth', color = 'C1', linestyle='--') plt.ylabel('MSE') plt.xlabel('Cut-off Distance') plt.legend(loc='center right') plt.yticks([0.002, 0.012]) plt.ylim(0, 0.0143) plt.savefig(dir + 'nl-means_filter.pdf') plt.figure(figsize=(7,5)) plt.plot(h_range, loss_mask_nl, label = 'masked NL-means -- noisy data', color = 'C0') plt.plot(h_range, loss_nl, label = 'NL-means -- noisy data', color = 'C1') plt.axvline(h_range[np.argmin(loss_mask_nl)], color='k', linestyle='--') plt.plot(h_range, mse_mask_nl, label = 'masked NL-means -- ground truth', color = 'C0', linestyle='--') plt.plot(h_range, mse_nl, label = 'NL-means -- ground truth', color = 'C1', linestyle='--') plt.yticks([0.002, 0.012]) plt.ylim(0, 0.0143) # Hide the right and top spines plt.gca().spines['right'].set_visible(False) plt.gca().spines['top'].set_visible(False) plt.savefig(dir + 'nl-means_filter_bare.pdf') ``` # Which method is best? ``` min(loss_mask_nl), min(loss_mask_wavelet), min(loss_mask_med) min(mse_mask_nl), min(mse_mask_wavelet), min(mse_mask_med) max(psnr_mask_nl), max(psnr_mask_wavelet), max(psnr_mask_med) show(get_inset(reconstructions_nl_mask[np.argmin(loss_mask_nl)])) show(get_inset(reconstructions_mask[np.argmin(loss_mask_wavelet)])) ``` ## Shrinkage Given two uncorrelated and unbiased estimators $u$ and $v$ of some quantity $y$, we may form a linear combination: $\lambda u + (1 - \lambda)v.$ The variance of this estimator is $\lambda^2 U + (1 - \lambda)^2 V,$ where $U$ and $V$ are the variances of $u$ and $v$ respectively. 
This expression is minimized at $\lambda = V/(U + V).$ The variance of the result is $UV/(U+V) = V\frac{1}{1+V/U}$. When $V$ is the term with lowest variance, here we can lower it by an amount depending on $V/U$. When the variance of $v$ is much lower than that of $u$, we just get $V$ out, but when they are the same our variance is exactly halved. Note that this is monotonic in $V$, so when comparing methods, mixing in the original signal will not change their order. In terms of PSNR, the new value is $10*\log_{10}(\frac{1+V/U}{V}) = \operatorname{PSNR}(V) + 10*\log_{10}(1 + V/U) \approx \operatorname{PSNR}(V) + 10/\log_{10}(e) (\frac{V}{U} - \frac{1}{2}(\frac{V}{U})^2) = \operatorname{PSNR}(V) + 4.34 \frac{V}{U}$ If we fix $y$, then $x_j$ and $\mathbb{E} y_j|x_{-j}$ are both independent estimators of $y_j$, so the above reasoning applies. Note that the loss is the variance of $x_j|x_{-j}$, whose two components are the variance of $x_j|y_j$ and the variance of $y_j|x_{-j}$. If we know the distribution of the noise, ie, we know the variance of $x_j|y_j$, then we can compute the variance of $y_j|x_{-j}$ by subtracting it from the variance of $x_j|x_{-j}$, aka, from the value of the loss. That will provide us with the optimal $\lambda$ to use. ``` log10 x = log e x / log e 10 image_float = img_as_float(image) noisy_image_float = img_as_float(noisy_image) noise_var = mse(noisy_image_float, image_float) idx = np.argmin(loss_mask_wavelet) y_pred = reconstructions_mask[idx] total_var = loss_mask_wavelet[idx] idx = np.argmin(loss_mask_nl) y_pred = reconstructions_nl_mask[idx] total_var = loss_mask_nl[idx] idx = np.argmin(loss_mask_med) y_pred = img_as_float(med[idx]) total_var = loss_mask_med[idx] for mode, recons, loss in [('median', mask_med, loss_mask_med), ('wavelet', reconstructions_mask, loss_mask_wavelet), ('nl_means', reconstructions_nl_mask, loss_mask_nl)]: print("mode: ", mode) idx = np.argmin(loss) y_pred = img_as_float(recons[idx]) total_var = loss[idx] lam = noise_var/total_var improved_prediction = lam*y_pred + (1 - lam)*noisy_image_float print("Lambda weighting: ", np.round(lam, 2)) print("Approx change in PSNR: ", np.round(4.34*(total_var - noise_var)/noise_var, 2)) print("Loss: ", total_var.round(4)) print("Original PSNR: ", psnr(y_pred, image_float).round(1)) print("New PSNR: ", psnr(improved_prediction, image_float).round(1)) print('---------------') 4.34*(total_var - noise_var)/noise_var psnr(noisy_image_float, image_float), psnr(y_pred, image_float), psnr(improved_prediction, image_float) plot_images([get_inset(noisy_image), get_inset(y_pred), get_inset(improved_prediction), get_inset(image)]) ssim(noisy_image_float, image_float), ssim(y_pred, image_float), ssim(improved_prediction, image_float) ```
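As a quick check of the shrinkage weight claimed above (this one-line derivation is added for completeness and is not part of the original notebook), differentiating the combined variance and setting it to zero recovers the stated minimizer:

\begin{align*} \frac{d}{d\lambda}\Big[\lambda^2 U + (1-\lambda)^2 V\Big] = 2\lambda U - 2(1-\lambda)V = 0 \quad\Longrightarrow\quad \lambda = \frac{V}{U+V}. \end{align*}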
# Stock Price Predictor

This is a Jupyter notebook that you can use to predict the adjusted close stock price over a specified day range after the last day of the training data set. The prediction is made by training a machine learning model on the stock's historical trading data. This is the result of the study in the following notebook - https://github.com/pathompong-y/stock_predictor.

To use this notebook, please follow the setup instructions below.

## Setup Instructions

1. Download `stock_predictor.ipynb` and `stock_predictor.py` from https://github.com/pathompong-y/stock_predictor.
2. Go to https://colab.research.google.com, go to File, and upload a new notebook. Upload stock_predictor.ipynb.
3. Upload `stock_predictor.py` to the Files panel by dragging and dropping it from your local computer to the root/outermost folder.
4. Follow the "How to use" steps to train the model and make predictions.

## How to use

Provide input into the code cell below per these instructions.

1. At `stock_list`, provide the list of stock symbols separated by spaces. Make sure that each symbol is searchable on Yahoo Finance - https://finance.yahoo.com/.
2. At `training_data_start_date` and `training_data_end_date`, specify the start date and end date of the historical data used to train the model. The date format is DD/MM/YYYY.
3. Push the "Play" button at the cell's upper left corner (or alt+enter / cmd+enter). Please wait until you see the "Completed" message. For one stock, it could take up to 15 minutes.

```
stock_list = 'ASK.BK GOOGL'
training_data_start_date = '08/05/2000'
training_data_end_date = '13/05/2020'

# ------ DO NOT CHANGE CODE BELOW THIS LINE --------
!pip install yfinance
import yfinance as yf
import os,sys
sys.path.append(os.path.abspath("/content/stock_predictor.py"))
from stock_predictor import *

train_model(stock_list,training_data_start_date,training_data_end_date)
```

## How to use (cont.)

4. You can query the predicted stock price by adding the list of stock symbols in `query_list`. The symbols must be a subset of the `stock_list` that you provided in step 1.
5. `prediction_range` is the day range of the price prediction after `end_date` in step 2. For example, if `end_date` is 15/05/2020 and `prediction_range` is 5, you will get the prediction for the 5 days after 15/05/2020.
6. Push the "Play" button at the cell's upper left corner (or alt+enter / cmd+enter). You will get the predicted price (Adjusted Close) along with the mean squared error of the prediction.

```
query_list = 'ASK.BK GOOGL'
prediction_range = 5

# ------ DO NOT CHANGE CODE BELOW THIS LINE --------
query_price(query_list,prediction_range)
```
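Before starting a long training run, it can help to confirm that every symbol in `stock_list` actually resolves on Yahoo Finance. Here is a minimal check using the `yfinance` package that the notebook already installs (the symbols and dates below are just the examples from above):

```
import yfinance as yf

for symbol in 'ASK.BK GOOGL'.split():
    hist = yf.download(symbol, start='2020-01-01', end='2020-05-13', progress=False)
    print(symbol, 'rows downloaded:', len(hist))  # 0 rows suggests the symbol is not available
```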
<a href="https://qworld.net" target="_blank" align="left"><img src="../qworld/images/header.jpg" align="left"></a> $ \newcommand{\bra}[1]{\langle #1|} $ $ \newcommand{\ket}[1]{|#1\rangle} $ $ \newcommand{\braket}[2]{\langle #1|#2\rangle} $ $ \newcommand{\dot}[2]{ #1 \cdot #2} $ $ \newcommand{\biginner}[2]{\left\langle #1,#2\right\rangle} $ $ \newcommand{\mymatrix}[2]{\left( \begin{array}{#1} #2\end{array} \right)} $ $ \newcommand{\myvector}[1]{\mymatrix{c}{#1}} $ $ \newcommand{\myrvector}[1]{\mymatrix{r}{#1}} $ $ \newcommand{\mypar}[1]{\left( #1 \right)} $ $ \newcommand{\mybigpar}[1]{ \Big( #1 \Big)} $ $ \newcommand{\sqrttwo}{\frac{1}{\sqrt{2}}} $ $ \newcommand{\dsqrttwo}{\dfrac{1}{\sqrt{2}}} $ $ \newcommand{\onehalf}{\frac{1}{2}} $ $ \newcommand{\donehalf}{\dfrac{1}{2}} $ $ \newcommand{\hadamard}{ \mymatrix{rr}{ \sqrttwo & \sqrttwo \\ \sqrttwo & -\sqrttwo }} $ $ \newcommand{\vzero}{\myvector{1\\0}} $ $ \newcommand{\vone}{\myvector{0\\1}} $ $ \newcommand{\stateplus}{\myvector{ \sqrttwo \\ \sqrttwo } } $ $ \newcommand{\stateminus}{ \myrvector{ \sqrttwo \\ -\sqrttwo } } $ $ \newcommand{\myarray}[2]{ \begin{array}{#1}#2\end{array}} $ $ \newcommand{\X}{ \mymatrix{cc}{0 & 1 \\ 1 & 0} } $ $ \newcommand{\I}{ \mymatrix{rr}{1 & 0 \\ 0 & 1} } $ $ \newcommand{\Z}{ \mymatrix{rr}{1 & 0 \\ 0 & -1} } $ $ \newcommand{\Htwo}{ \mymatrix{rrrr}{ \frac{1}{2} & \frac{1}{2} & \frac{1}{2} & \frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & \frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} & \frac{1}{2} } } $ $ \newcommand{\CNOT}{ \mymatrix{cccc}{1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & 1 \\ 0 & 0 & 1 & 0} } $ $ \newcommand{\norm}[1]{ \left\lVert #1 \right\rVert } $ $ \newcommand{\pstate}[1]{ \lceil \mspace{-1mu} #1 \mspace{-1.5mu} \rfloor } $ $ \newcommand{\greenbit}[1] {\mathbf{{\color{green}#1}}} $ $ \newcommand{\bluebit}[1] {\mathbf{{\color{blue}#1}}} $ $ \newcommand{\redbit}[1] {\mathbf{{\color{red}#1}}} $ $ \newcommand{\brownbit}[1] {\mathbf{{\color{brown}#1}}} $ $ \newcommand{\blackbit}[1] {\mathbf{{\color{black}#1}}} $ <font style="font-size:28px;" align="left"><b> Basics of Python: Loops </b></font> <br> _prepared by Abuzer Yakaryilmaz_ <br><br> We review using loops in Python here. Run each cell and check the results. <h3> For-loop </h3> ``` # let's print all numbers between 0 and 9 for i in range(10): print(i) # range(n) represents the list of all numbers from 0 to n-1 # i is the variable to take the values in the range(n) iteratively: 0,1,...,9 in our example # let's write the same code in two lines for i in range(10): # do not forget to use colon print(i) # the second line is indented # this means that the command in the second line will be executed inside the for-loop # any other code executed inside the for-loop must be intented in the same way #my_code_inside_for-loop_2 will come here #my_code_inside_for-loop_3 will come here #my_code_inside_for-loop_4 will come here # now I am out of the scope of for-loop #my_code_outside_for-loop_1 will come here #my_code_outside_for-loop_2 will come here # let's calculate the summation 1+2+...+10 by using a for-loop # we use variable total for the total summation total = 0 for i in range(11): # do not forget to use colon total = total + i # the value of total is increased by i in each iteration # alternatively, the same assignment can shortly be written as total += i similarly to the languages C, C++, Java, etc. 
# now I am out of the scope of for-loop # let's print the final value of total print(total) # let's calculate the summation 10+12+14+...+44 # we create a list having all numbers in the summation # for this purpose, this time we will use three parameters in range total = 0 for j in range(10,45,2): # the range is defined between 10 and 44, and the value of j will be increased by 2 after each iteration total += j # let's use the shortened version of total = total + j this time print(total) # let's calculate the summation 1+2+4+8+16+...+256 # remark that 256 = 2*2*...*2 (8 times) total = 0 current_number = 1 # this value will be multiplied by 2 after each iteration for k in range(9): total = total + current_number # current_number is 1 at the beginning, and its value will be doubled after each iteration current_number = 2 * current_number # let's double the value of the current_number for the next iteration # short version of the same assignment: current_number *= 2 as in the languages C, C++, Java, etc. # now I am out of the scope of for-loop # let's print the latest value of total print(total) # instead of range, we may also directly use a list if it is short for i in [1,10,100,1000,10000]: print(i) # instead of [...], we may also use (...) # but this time it is a tuple, not a list (keep in your mind that the values in a tuple cannot be changed) for i in (1,10,100,1000,10000): print(i) # let's create a range between 10 and 91 that contains the multiples of 7 for j in range(14,92,7): # 14 is the first multiple of 7 greater than or equal to 10; so we should start with 14 # 91 should be in the range, and so we end the range with 92 print(j) # let's create a range between 11 and 22 for i in range(11,23): print(i) # we can also use variables in range n = 5 for j in range(n,2*n): print(j) # we will print all numbers in {n,n+1,n+2,...,2n-1} # we can use a list of strings for name in ("Asja","Balvis","Fyodor"): print("Hello",name,":-)") # any range indeed returns a list L1 = list(range(10)) print(L1) L2 = list(range(55,200,11)) print(L2) ``` <h3> Task 1 </h3> Calculate the value of summation $ 3+6+9+\cdots+51 $, and then print the result. Your result should be 459. ``` # # your solution is here # ``` <a href="Python12_Basics_Loops_Solutions.ipynb#task1">click for our solution</a> <h3> Task 2 </h3> $ 3^k $ means $ 3 \cdot 3 \cdot \cdots \cdot 3 $ ($ k $ times) for $ k \geq 2 $. Moreover, $ 3^0 $ is 1 and $ 3^1 = 3 $. Calculate the value of summation $ 3^0 + 3^1 + 3^2 + \cdots + 3^8 $, and then print the result. Your result should be 9841. 
``` # # your solution is here # ``` <a href="Python12_Basics_Loops_Solutions.ipynb#task2">click for our solution</a> <h3> While-loop </h3> ``` # let's calculate the summation 1+2+4+8+...+256 by using a while-loop total = 0 i = 1 #while condition(s): # your_code1 # your_code2 # your_code3 while i < 257: # this loop iterates as long as i is less than 257 total = total + i i = i * 2 # i is doubled in each iteration, and so soon it will be greater than 256 print(total) # we do the same summation by using for-loop above L = [0,1,2,3,4,5,11] # this is a list containing 7 integer values i = 0 while i in L: # this loop will be iterated as long as i is in L print(i) i = i + 1 # the value of i iteratively increased, and so soon it will hit a value not in the list L # the loop is terminated after i is set to 6, because 6 is not in L # let's use negation in the condition of while-loop L = [10] # this list has a single element i = 0 while i not in L: # this loop will be iterated as long as i is not equal to 10 print(i) i = i+1 # the value of i will hit 10 after ten iterations # let's rewrite the same loop by using a direct inequality i = 0 while i != 10: # "!=" is used for operator "not equal to" print(i) i=i+1 # let's rewrite the same loop by using negation of equality i = 0 while not (i == 10): # "==" is used for operator "equal to" print(i) i=i+1 # while-loop seems having more fun :-) # but we should be more careful when writing the condition(s)! ``` Consider the summation $ S(n) = 1+ 2+ 3 + \cdots + n $ for some natural number $ n $. Let's find the minimum value of $ n $ such that $ S(n) \geq 1000 $. While-loop works very well for this task. <ul> <li>We can iteratively increase $ n $ and update the value of $ S(n) $.</li> <li>The loop iterates as long as $S(n)$ is less than 1000.</li> <li>Once it hits 1000 or a greater number, the loop will be terminated.</li> </ul> ``` # summation and n are zeros at the beginning S = 0 n = 0 while S < 1000: # this loop will stop after S exceeds 999 (S = 1000 or S > 1000) n = n +1 S = S + n # let's print n and S print("n =",n," S =",S) ``` <h3> Task 3 </h3> Consider the summation $ T(n) = 1 + \dfrac{1}{2} + \dfrac{1}{4}+ \dfrac{1}{8} + \cdots + \dfrac{1}{2^n} $ for some natural number $ n $. Remark that $ T(0) = \dfrac{1}{2^0} = \dfrac{1}{1} = 1 $. This summation can be arbitrarily close to $2$. Find the minimum value of $ n $ such that $ T(n) $ is close to $2$ by $ 0.01 $, i.e., $ 2 - T(n) < 0.01 $. In other words, we find the minimum value of $n$ such that $ T(n) > 1.99 $. The operator for "less than or equal to" in python is "$ < = $". ``` # three examples for the operator "less than or equal to" #print (4 <= 5) #print (5 <= 5) #print (6 <= 5) # you may comment out the above three lines and see the results by running this cell # # your solution is here # ``` <a href="Python12_Basics_Loops_Solutions.ipynb#task3">click for our solution</a> <h3> Task 4 </h3> Randomly pick number(s) between 0 and 9 until hitting 3, and then print the number of attempt(s). We can use <i>randrange</i> function from <i>random</i> module for randomly picking a number in the given range. ``` # this is the code for including function randrange into our program from random import randrange # randrange(n) picks a number from the list [0,1,2,...,n-1] randomly #r = randrange(100) #print(r) # # your solution is here # ``` <a href="Python12_Basics_Loops_Solutions.ipynb#task4">click for our solution</a> <h3> Task 5 </h3> This task is challenging . 
It is designed to practice double nested loops: one loop inside the other.

In the fourth task above, the expected number of attempts to hit number 3 is 10. Do a series of experiments by using your solution for Task 4.

Experiment 1: Execute your solution 20 times, and then calculate the average number of attempts.

Experiment 2: Execute your solution 200 times, and then calculate the average number of attempts.

Experiment 3: Execute your solution 2000 times, and then calculate the average number of attempts.

Experiment 4: Execute your solution 20000 times, and then calculate the average number of attempts.

Experiment 5: Execute your solution 200000 times, and then calculate the average number of attempts.

<i>Your experimental average should get closer to 10 when the number of executions is increased.</i>

Remark that all five experiments may also be done automatically by using triple loops. (A smaller example of the same double-loop pattern is sketched after the solution cell below.)

<a href="Python12_Basics_Loops_Solutions.ipynb#task5">click for our solution</a>

```
# here is a schematic example for double nested loops
#for i in range(10):
#    your_code1
#    your_code2
#    while j != 7:
#        your_code_3
#        your_code_4

#
# your solution is here
#
```
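To make the double-loop pattern concrete without giving away the Task 5 solution (see the link above), here is a small sketch of a *different* experiment: repeatedly flip a fair coin with randrange(2) until 0 appears, and average the number of flips over many repetitions. The expected average is 2.

```
# a similar but different experiment: how many randrange(2) draws until we see 0?
from random import randrange

repetitions = 1000
total_attempts = 0
for experiment in range(repetitions):    # outer loop: repeat the experiment many times
    attempts = 0
    found = False
    while not found:                     # inner loop: keep drawing until we hit 0
        attempts = attempts + 1
        if randrange(2) == 0:
            found = True
    total_attempts = total_attempts + attempts

print("average attempts =", total_attempts / repetitions)   # should be close to 2
```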
``` import os import struct import pandas as pd import numpy as np import talib as tdx def readTdxLdayFile(fname="data/sh000001.day"): dataSet=[] with open(fname,'rb') as fl: buffer=fl.read() #读取数据到缓存 size=len(buffer) rowSize=32 #通信达day数据,每32个字节一组数据 code=os.path.basename(fname).replace('.day','') for i in range(0,size,rowSize): #步长为32遍历buffer row=list( struct.unpack('IIIIIfII',buffer[i:i+rowSize]) ) row[1]=row[1]/100 row[2]=row[2]/100 row[3]=row[3]/100 row[4]=row[4]/100 row.pop() #移除最后无意义字段 row.insert(0,code) dataSet.append(row) data=pd.DataFrame(data=dataSet,columns=['code','tradeDate','open','high','low','close','amount','vol']) data=data.set_index(['tradeDate']) return code, data def select1(code, data): # 连续三日缩量 cn = data.close.iloc[-1] # df=pd.concat([tdx.MA(data.close, x) for x in (5,10,20,30,60,90,120,250,500,750,1000,1500,2000,2500,) ], axis = 1).dropna()[-1:] df=pd.concat([tdx.MA(data.close, x) for x in (5,10,20,30,60,90,120,250,500,750,1000,1500,2000,2500,) ], axis = 1)[-1:] df.columns = [u'm5',u'm10',u'm20',u'm30',u'm60',u'm90',u'm120', u'm250', u'm500', u'm750', u'm1000', u'm1500', u'm2000', u'm2500'] df_c2 = df.m5 > df.m10 df_c1 = cn > df.m5 df_c = cn > df.m5 df_h = df.apply(lambda x:cn > x.max() , axis = 1 ) # df_l = df.apply(lambda x:x.min() >= cl, axis = 1 ) df['dfh'] = df_h df['dfc2'] = df_c2 df['dfc1'] = df_c1 df['code'] =code # out=df.iloc[-1].apply(lambda x: True if x>cl and x < ch else False) df=df.reset_index('tradeDate') df=df.set_index(['code','tradeDate']) return df from threading import Thread, current_thread, Lock import multiprocessing #import Pool, cpu_count, Queue def asyncCalc(fname, queue): code, df = readTdxLdayFile(fname) queue.put(select1(code, df)) def readPath(path): files = os.listdir(path) # codes=[] q = multiprocessing.Queue() jobs = [] # dataSet=[]multiprocessing pool_size = multiprocessing.cpu_count() pool = multiprocessing.Pool(pool_size) output=pd.DataFrame() for i in range(0,len(files)): fname = os.path.join(path,files[i]) if os.path.isdir(fname): continue pool.apply_async(asyncCalc, args=(fname)) p = multiprocessing.Process(target=asyncCalc, args=(fname, q)) jobs.append(p) p.start() for p in jobs: p.join() for j in jobs: t = q.get() if t is not None: output=output.append(t) return output output=readPath('/tmp/easyquant/tdx/data') #读取目录下面的所有文件 output code, data = readTdxLdayFile('/tmp/easyquant/tdx/data/sh000001.day') select1(code,data) code data=df cn = data.close.iloc[-1] cn=cn+1000 df=pd.concat([tdx.MA(data.close, x) for x in (5,10,20,30,60,90,120,250,500,750,1000,21500,20000,25000,) ], axis = 1)[-1:] df df.columns = [u'm5',u'm10',u'm20',u'm30',u'm60',u'm90',u'm120', u'm250', u'm500', u'm750', u'm1000', u'm1500', u'm2000', u'm2500'] df_c = df.m5 > df.m10 df_c1 = cn > df.m5 df_h = df.apply(lambda x:cn > x.max() , axis = 1 ) df_h df_h da=data_df.reset_index('tradeDate') df_c1 import datetime pd.to_datetime(da.tradeDate) # data_df.to_csv('test.csv') data_df.index[,-1:-1] def select1(code,data): # 连续三日缩量 ch= data.close.iloc[-1] * 1.1 cl= data.close.iloc[-1] * 0.9 # ch= data.close * 1.1 # cl = data.close * 0.9 df=pd.concat([tdx.MA(data.close, x) for x in (5,10,20,30,60,90,120,250) ], axis = 1).dropna()[-1:] df.columns = [u'm5',u'm10',u'm20',u'm30',u'm60',u'm90',u'm120', u'm250'] df_h = df.apply(lambda x:x.max() <= ch, axis = 1 ) df_l = df.apply(lambda x:x.min() >= cl, axis = 1 ) df['dfh'] = df_h df['dfl'] = df_l df['code'] =code # out=df.iloc[-1].apply(lambda x: True if x>cl and x < ch else False) df=df.reset_index('tradeDate') 
df=df.set_index(['code','tradeDate']) return df bbb=select1('sh000001',data_df.loc['sh000001',]) bbb bbb=bbb.set_index(['code','tradeDate']) data=bbb.set_index(['code','tradeDate']) output=None for code in codes: aaa=data_df.loc[code,] out=select1(code, aaa) if output is None: output = out else: # print(code) output=output.append(out) output output.query('dfh==True and dfl==True').to_csv('out1.csv') bb=select1('000001',aaa) type(bb) import talib as tdx aaa=pd.read_csv('test.csv') aaa.set_index('vol').sort_index() df=readTdxLdayFile() df['mon'] = df.tradeDate.apply(lambda x : str(x)[0:6]) df=df.set_index(['tradeDate']) dfmax=df.groupby(['mon']).apply(lambda x: x[x.high ==x.high.max()]) dfmax.drop_duplicates(subset=['high','mon'],keep='first',inplace=True) dfmin=df.groupby(['mon']).apply(lambda x: x[x.low ==x.low.min()]) dfmin.drop_duplicates(subset=['low','mon'],keep='first',inplace=True) dfmax.to_csv('max.csv') dfmin.to_csv('min.csv') dfmax for x in dfmax.index: print(df.loc[x[1]]) ```
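A side note on `readPath` above: it creates a `multiprocessing.Pool` whose `apply_async` results are never collected, and then also spawns one `Process` per file. A simpler pattern, sketched here under the assumption that `readTdxLdayFile` and `select1` are defined as above (and a fork-based start method, as on Linux), is to let a pool map over the files and concatenate the returned frames:

```
def _calc_one(fname):
    # worker: read one TDX .day file and run the screening function on it
    code, df = readTdxLdayFile(fname)
    return select1(code, df)

def read_path_pool(path):
    files = [os.path.join(path, f) for f in os.listdir(path)
             if not os.path.isdir(os.path.join(path, f))]
    with multiprocessing.Pool(multiprocessing.cpu_count()) as pool:
        results = pool.map(_calc_one, files)
    return pd.concat([r for r in results if r is not None])

# output = read_path_pool('/tmp/easyquant/tdx/data')
```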
``` import argparse import torch.distributed as dist import torch.optim as optim import torch.optim.lr_scheduler as lr_scheduler import test # import test.py to get mAP after each epoch from models import * from utils.datasets import * from utils.utils import * from mymodel import * # Hyperparameters (results68: 59.9 [email protected] yolov3-spp-416) https://github.com/ultralytics/yolov3/issues/310 hyp = {'giou': 3.54, # giou loss gain 'cls': 37.4, # cls loss gain 'cls_pw': 1.0, # cls BCELoss positive_weight 'obj': 64.3, # obj loss gain (*=img_size/320 if img_size != 320) 'obj_pw': 1.0, # obj BCELoss positive_weight 'iou_t': 0.225, # iou training threshold 'lr0': 0.01, # initial learning rate (SGD=5E-3, Adam=5E-4) 'lrf': -4., # final LambdaLR learning rate = lr0 * (10 ** lrf) 'momentum': 0.937, # SGD momentum 'weight_decay': 0.000484, # optimizer weight decay 'fl_gamma': 0.5, # focal loss gamma 'hsv_h': 0.0138, # image HSV-Hue augmentation (fraction) 'hsv_s': 0.678, # image HSV-Saturation augmentation (fraction) 'hsv_v': 0.36, # image HSV-Value augmentation (fraction) 'degrees': 1.98, # image rotation (+/- deg) 'translate': 0.05, # image translation (+/- fraction) 'scale': 0.05, # image scale (+/- gain) 'shear': 0.641} # image shear (+/- deg) parser = argparse.ArgumentParser() parser.add_argument('--batch-size', type=int, default=16) # effective bs = batch_size * accumulate = 16 * 4 = 64 parser.add_argument('--accumulate', type=int, default=4, help='batches to accumulate before optimizing') parser.add_argument('--cfg', type=str, default='cfg/yolov3-tiny-1cls_1.cfg', help='*.cfg path') parser.add_argument('--data', type=str, default='data/coco2017.data', help='*.data path') parser.add_argument('--img-size', nargs='+', type=int, default=[320], help='train and test image-sizes') parser.add_argument('--rect', action='store_true', help='rectangular training') parser.add_argument('--cache-images', action='store_true', help='cache images for faster training') parser.add_argument('--weights', type=str, default='/home/denggc/DAC2021/dgc/April/ultra_bypass/weights/test_best.pt', help='initial weights path') parser.add_argument('--arc', type=str, default='default', help='yolo architecture') # default, uCE, uBCE parser.add_argument('--name', default='', help='renames results.txt to results_name.txt if supplied') parser.add_argument('--device', default='1', help='device id (i.e. 0 or 0,1 or cpu)') parser.add_argument('--single-cls', action='store_true', help='train as single-class dataset') parser.add_argument('--var', type=float, help='debug variable') opt = parser.parse_known_args()[0] print(opt) print(opt.weights) device = torch_utils.select_device(opt.device, batch_size=opt.batch_size) print(device) img_size, img_size_test = opt.img_size if len(opt.img_size) == 2 else opt.img_size * 2 # train, test sizes batch_size = opt.batch_size accumulate = opt.accumulate # effective bs = batch_size * accumulate = 16 * 4 = 64 weights = opt.weights # initial training weights test_path = '../DAC-SDC2021/dataset/sample' nc = 1 model = UltraNet_Bypass().to(device) model.hyp = hyp model.nc = 1 model.arc = 'default' if weights.endswith('.pt'): # pytorch format # possible weights are '*.pt', 'yolov3-spp.pt', 'yolov3-tiny.pt' etc. 
print("load weights...") model.load_state_dict(torch.load(weights, map_location=device)['model']) batch_size = min(batch_size, 1) nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8]) # number of workers dataset = LoadImagesAndLabels(test_path, img_size_test, batch_size * 2, hyp=hyp, rect=False, cache_images=opt.cache_images, single_cls=opt.single_cls) testloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size * 2, num_workers=nw, pin_memory=True, collate_fn=dataset.collate_fn) results = test.test(opt.cfg, opt.data, batch_size=batch_size * 2, img_size=img_size_test, model=model, conf_thres=0.001, # 0.001 if opt.evolve or (final_epoch and is_coco) else 0.01, iou_thres=0.6, save_json=False, single_cls=opt.single_cls, dataloader=testloader) print(results) ```
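As a small sanity check after loading the checkpoint (plain PyTorch only; no repo-specific helpers are assumed), you can report how many parameters the bypass model carries before running the evaluation:

```
n_total = sum(p.numel() for p in model.parameters())
n_trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
print('UltraNet_Bypass parameters: {:,} total, {:,} trainable'.format(n_total, n_trainable))
```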
# [Histogram](https://plotly.com/python/histograms/)

## 1. Import the libraries + the CSV with the survey data.

```
# import libraries
import pandas as pd
import plotly.express as px
from dash import Dash, dcc, html, Input, Output

# create a dataframe with all the survey information
df_csv = pd.read_csv ('../data/survey_results_public2021.csv', index_col = [0]) # the index will be the column with the response ID
df_csv # show the df
```

## 2. Preprocess the data. Work on the columns/data set to start building the charts. In this case, Age1stCode.

```
df_csv['Age1stCode'].value_counts()
```

To deal with the age ranges, some of which contain text, a new column will be computed with the mean of each range.

```
# make a copy of the df
df = df_csv.copy()

# normalize all the data
df = df[df['Age1stCode'].notna()] # drop the nulls
df.loc[df["Age1stCode"] == "Younger than 5 years", "Age1stCode"] = "04 - 04 years" # there is already a 05-years entry in the df
df.loc[df["Age1stCode"] == "Older than 64 years", "Age1stCode"] = "65 - 65 years"
df.loc[df["Age1stCode"] == "5 - 10 years", "Age1stCode"] = "05 - 10 years"

# first select the digits from the string (the df column is a string) and convert the result to an integer
df['min'] = df.Age1stCode.astype(str).str[:2].astype(int) # the minimum age of the range is the first number
df['max'] = df.Age1stCode.astype(str).str[5:7].astype(int) # the maximum is the second number

# once the minimum and maximum ages are available, compute the mean of the two columns
df['media'] = df[['min', 'max']].mean(axis=1)
```

## 3. Chart. In this case, a histogram.

```
app = Dash(__name__)
server = app.server # heroku

app.layout = html.Div([
    html.H1("Tipo de desarrollador", style={'text-align': 'center'}), # h1 header

    # first small test with a dropdown menu
    dcc.Dropdown(id="select_opt",
                 options=[ # the user will see the labels
                     {"label": "#", "value": "numero"},
                     {"label": "%", "value": "porcentaje"}],
                 multi=False,
                 value="numero",
                 style={'width': "40%"}
                 ),

    dcc.Graph(id='my_survey', figure={}) # graph container
])

@app.callback(
    Output(component_id='my_survey', component_property='figure'),
    Input(component_id='select_opt', component_property='value'))
def update_graph(option_slctd):
    #filtered_df = df[df.year == selected_year]
    fig = px.histogram(df, x="media",
                       title='Histograma de edad',
                       labels={'media':'media', 'count':'total'}, # can specify one label per df column
                       opacity=0.8,
                       color_discrete_sequence=['indianred'] # color of histogram bars
                       )
    # the percentage option is not implemented yet
    return fig
```

## 4. run server

```
app.run_server(debug=True, use_reloader=False)
```
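Regarding the note in step 3 that the percentage option is not yet implemented: one possible way to honor the dropdown value (a sketch that would replace the body of `update_graph` above, rather than registering a second callback) is to switch Plotly Express's `histnorm` argument:

```
def update_graph(option_slctd):
    # 'porcentaje' is the dropdown value defined in the layout above; None keeps raw counts
    histnorm = 'percent' if option_slctd == 'porcentaje' else None
    fig = px.histogram(df, x="media",
                       histnorm=histnorm,
                       title='Histograma de edad',
                       opacity=0.8,
                       color_discrete_sequence=['indianred'])
    return fig
```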
# How to Build a Personalized Trading Dashboard?
> A personalized dashboard to visualize trading actions and stock development

- toc: false
- badges: true
- comments: true
- categories: [data analysis, trading, jupternotebook]

```
# hide
from datetime import datetime, timedelta

import altair as alt
import numpy as np
import pandas as pd
import panel as pn
import yfinance as yf
from altair import datum
from utility import *

alt.renderers.enable("default")
pn.extension("vega")

copy_to_post = 1
```

**Update 11-May**: The [web app](https://share.streamlit.io/jinchao-chen/portfolio-dashboard/streamlit/web_app.py) is now deployed on Streamlit. Please check it out and let me know how it works.

TL;DR: if you are mainly interested in the code, here is the link to [GitHub](https://github.com/jinchao-chen/portfolio-dashboard)

# Motivation

Yesterday I came across the article [The Boredom Economy](https://www.nytimes.com/2021/02/20/business/gamestop-investing-economy.html). Sydney Ember explained the GameStop phenomenon as investors' reaction to the boredom experienced during the pandemic. Being an *amateur day trader* new to the market, I fully concur with the explanation Sydney put forth. I noticed myself spending hours daily *analyzing* the market and *trading* frequently for profits, as an escape from boredom. As a result, I generated an amount of data in 2020 that could potentially be used for a study of my 'trading style'. With this in mind, I decided to start a project analysing my trading activities. To start with, I created a dashboard to visualize the activities, to understand how and when I tend to buy or sell a stock.

# Preparation

My primary trading platform is [Trading212](https://www.trading212.com). The platform recently included a [new feature](https://community.trading212.com/t/new-feature-export-your-investing-history/35612) that allows exporting the transaction history in CSV format. The exported data is clean and neatly structured, so it is ready for analysis.

For the tools, I noticed a repo on GitHub, [panel-altair-dashboard](https://github.com/bendoesdata/panel-altair-dashboard), that creates a simple, yet powerful, visualization dashboard in roughly 25 lines of code. It is achieved with Panel and Altair.

To visualize my trading activities, I include the following features:

- mark transaction actions (sell or buy) in the stock time history
- plot the stock historical data using candlesticks

# Time to visualize the time series!

Below is a screenshot of the dashboard. In it, the transaction data (buy or sell) is visualized along with the market data. There is an option to visualize the market data as either a line plot or a candlestick chart, depending on whether you are interested in long-term trends or the variations within the day. For a demo, please refer to [binder](https://mybinder.org/v2/gh/jinchao-chen/portfolio-dashboard/HEAD).

It is still at a very early stage of development. In the future, I would like to add the following:

- provide a summary of my portfolio
- normalize the stock price for the selected duration

And more features will be included, if I find anything interesting!
![chart](./figs/trading_dashboard.png) ``` # hide fln = "dummy_transactions.csv" transactions = read_transactions(fln) # hide title = '# Trading Dashboard' subtitle = 'A personalized visualization tool for **Trading 212** trading activities and market data' companies = transactions["Ticker"].dropna().unique().tolist() ticker = pn.widgets.Select(name="Company", options=companies) style = pn.widgets.Select(name="Plot Style", options=["Candelstick", "Line"]) # this creates the date range slider date_range_slider = pn.widgets.DateRangeSlider( name="Date Range", start=datetime(2020, 1, 1), end=datetime.today(), value=(datetime(2020, 1, 1), datetime.today()), ) # tell Panel what your plot "depends" on. @pn.depends(ticker.param.value, date_range_slider.param.value, style.param.value) def get_plots(ticker, date_range, style): # start function # filter based on ticker subset = transactions[transactions["Ticker"] == ticker] start_date = date_range_slider.value[ 0 ] # store the first date range slider value in a var end_date = date_range_slider.value[1] # store the end date in a var ts = read_ticker_ts(ticker=ticker, start=start_date, end=end_date) if style == "Candelstick": chart = plot_transactions(subset, ts) else: chart = plot_transactions_2(subset, ts) return chart dashboard = pn.Row( pn.Column(title, subtitle, ticker, style, date_range_slider), get_plots, # draw chart function! ) # hide dashboard.servable() # hide from shutil import copyfile if copy_to_post: src = "transation_view_altair.ipynb" dst = "../blog_posts/_notebooks/2021-02-21-portfolio_analysis.ipynb" copyfile(src, dst) print("copied") ```
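One of the planned features above is normalizing the stock price over the selected window. A minimal sketch (assuming the frame returned by `read_ticker_ts` has a `Close` column, as yfinance data does):

```
def normalize_prices(ts, base=100.0):
    """Rescale closing prices so the first day in the selected window equals `base`."""
    ts = ts.copy()
    ts['Close_normalized'] = ts['Close'] / ts['Close'].iloc[0] * base
    return ts
```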
# Answer Key to the Data Wrangling with DataFrames Coding Quiz Helpful resources: http://spark.apache.org/docs/latest/api/python/pyspark.sql.html ``` from pyspark.sql import SparkSession from pyspark.sql.functions import isnan, count, when, col, desc, udf, col, sort_array, asc, avg from pyspark.sql.functions import sum as Fsum from pyspark.sql.window import Window from pyspark.sql.types import IntegerType # 1) import any other libraries you might need # 2) instantiate a Spark session # 3) read in the data set located at the path "data/sparkify_log_small.json" # 4) write code to answer the quiz questions spark = SparkSession \ .builder \ .appName("Data Frames practice") \ .getOrCreate() df = spark.read.json("data/sparkify_log_small.json") ``` # Question 1 Which page did user id "" (empty string) NOT visit? ``` df.printSchema() # filter for users with blank user id blank_pages = df.filter(df.userId == '') \ .select(col('page') \ .alias('blank_pages')) \ .dropDuplicates() # get a list of possible pages that could be visited all_pages = df.select('page').dropDuplicates() # find values in all_pages that are not in blank_pages # these are the pages that the blank user did not go to for row in set(all_pages.collect()) - set(blank_pages.collect()): print(row.page) ``` # Question 2 - Reflect What type of user does the empty string user id most likely refer to? Perhaps it represents users who have not signed up yet or who are signed out and are about to log in. # Question 3 How many female users do we have in the data set? ``` df.filter(df.gender == 'F') \ .select('userId', 'gender') \ .dropDuplicates() \ .count() ``` # Question 4 How many songs were played from the most played artist? ``` df.filter(df.page == 'NextSong') \ .select('Artist') \ .groupBy('Artist') \ .agg({'Artist':'count'}) \ .withColumnRenamed('count(Artist)', 'Artistcount') \ .sort(desc('Artistcount')) \ .show(1) ``` # Question 5 (challenge) How many songs do users listen to on average between visiting our home page? Please round your answer to the closest integer. ``` # TODO: filter out 0 sum and max sum to get more exact answer function = udf(lambda ishome : int(ishome == 'Home'), IntegerType()) user_window = Window \ .partitionBy('userID') \ .orderBy(desc('ts')) \ .rangeBetween(Window.unboundedPreceding, 0) cusum = df.filter((df.page == 'NextSong') | (df.page == 'Home')) \ .select('userID', 'page', 'ts') \ .withColumn('homevisit', function(col('page'))) \ .withColumn('period', Fsum('homevisit').over(user_window)) cusum.filter((cusum.page == 'NextSong')) \ .groupBy('userID', 'period') \ .agg({'period':'count'}) \ .agg({'count(period)':'avg'}).show() ```
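One possible reading of the TODO above ("filter out 0 sum and max sum") is to drop each user's first and last, possibly partial, listening periods before averaging; whether that is the intended correction is an assumption. A sketch using the `cusum` DataFrame built above:

```
from pyspark.sql.functions import min as Fmin, max as Fmax

# per-user bounds of the period counter, so the incomplete edge periods can be excluded
period_bounds = cusum.groupBy('userID') \
    .agg(Fmin('period').alias('pmin'), Fmax('period').alias('pmax'))

cusum.join(period_bounds, on='userID') \
    .filter((col('page') == 'NextSong') & (col('period') > col('pmin')) & (col('period') < col('pmax'))) \
    .groupBy('userID', 'period') \
    .agg({'period': 'count'}) \
    .agg({'count(period)': 'avg'}) \
    .show()
```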
``` #hide %load_ext autoreload %autoreload 2 # default_exp analysis ``` # Analysis > The analysis functions help a modeler quickly run a full time series analysis. An analysis consists of: 1. Initializing a DGLM, using `define_dglm`. 2. Updating the model coefficients at each time step, using `dglm.update`. 3. Forecasting at each time step between `forecast_start` and `forecast_end`, using `dglm.forecast_marginal` or `dglm.forecast_path`. 4. Returning the desired output, specified in the argument `ret`. The default is to return the model and forecast samples. The analysis starts by defining a new DGLM with `define_dglm`. The default number of observations to use is set at `prior_length=20`. Any arguments that are used to define a model in `define_dglm` can be passed into analysis as keyword arguments. Alternatively, you may define the model beforehand, and pass the pre-initialized DGLM into analysis as the argument `model_prior`. Once the model has been initialized, the analysis loop begins. If $\text{forecast_start} \leq t \leq \text{forecast_end}$, then the model will forecast ahead. The forecast horizon k must be specified. The default is to simulate `nsamps=500` times from the forecast distribution using `forecast_marginal`, from $1$ to `k` steps into the future. To simulate from the joint forecast distribution over the next `k` steps, set the flag `forecast_path=True`. Note that all forecasts are *out-of-sample*, i.e. they are made before the model has seen the observation. This is to ensure that the forecast accuracy is a fairer representation of future model performance. After the forecast has been made, the model sees the observation $y_t$, and updates the state vector accordingly. The analysis ends after seeing the last observation in `Y`. The output is a list specified by the argument `ret`, which may contain: - `mod`: The final model - `forecast`: The forecast samples, stored in a 3-dimensional array with axes *nsamps* $\times$ *forecast length* $\times$ *k* - `model_coef`: A time series of the state vector mean and variance matrix Please note that `analysis` is used on a historical dataset that already exists. This means that a typical sequence of events is to run an analysis on the data you currently have, and return the model and forecast samples. The forecast samples are used to evaluate past forecast performance. Then you can use `dglm.forecast_marginal` and `dglm.forecast_path` to forecast into the future. ``` #hide #exporti import numpy as np import pandas as pd from pybats.define_models import define_dglm, define_dcmm, define_dbcm, define_dlmm from pybats.shared import define_holiday_regressors from collections.abc import Iterable ``` ## Analysis for a DGLM ``` #export def analysis(Y, X=None, k=1, forecast_start=0, forecast_end=0, nsamps=500, family = 'normal', n = None, model_prior = None, prior_length=20, ntrend=1, dates = None, holidays = [], seasPeriods = [], seasHarmComponents = [], latent_factor = None, new_latent_factors = None, ret=['model', 'forecast'], mean_only = False, forecast_path = False, **kwargs): """ This is a helpful function to run a standard analysis. The function will: 1. Automatically initialize a DGLM 2. Run sequential updating 3.
Forecast at each specified time step """ # Add the holiday indicator variables to the regression matrix nhol = len(holidays) X = define_holiday_regressors(X, dates, holidays) # Check if it's a latent factor DGLM if latent_factor is not None: is_lf = True nlf = latent_factor.p else: is_lf = False nlf = 0 if model_prior is None: mod = define_dglm(Y, X, family=family, n=n, prior_length=prior_length, ntrend=ntrend, nhol=nhol, nlf=nlf, seasPeriods=seasPeriods, seasHarmComponents=seasHarmComponents, **kwargs) else: mod = model_prior # Convert dates into row numbers if dates is not None: dates = pd.Series(dates) if type(forecast_start) == type(dates.iloc[0]): forecast_start = np.where(dates == forecast_start)[0][0] if type(forecast_end) == type(dates.iloc[0]): forecast_end = np.where(dates == forecast_end)[0][0] # Define the run length T = len(Y) + 1 if ret.__contains__('model_coef'): m = np.zeros([T-1, mod.a.shape[0]]) C = np.zeros([T-1, mod.a.shape[0], mod.a.shape[0]]) if family == 'normal': n = np.zeros(T) s = np.zeros(T) if new_latent_factors is not None: if not ret.__contains__('new_latent_factors'): ret.append('new_latent_factors') if not isinstance(new_latent_factors, Iterable): new_latent_factors = [new_latent_factors] tmp = [] for lf in new_latent_factors: tmp.append(lf.copy()) new_latent_factors = tmp # Create dummy variable if there are no regression covariates if X is None: X = np.array([None]*(T+k)).reshape(-1,1) else: if len(X.shape) == 1: X = X.reshape(-1,1) # Initialize updating + forecasting horizons = np.arange(1, k + 1) if mean_only: forecast = np.zeros([1, forecast_end - forecast_start + 1, k]) else: forecast = np.zeros([nsamps, forecast_end - forecast_start + 1, k]) for t in range(prior_length, T): if forecast_start <= t <= forecast_end: if t == forecast_start: print('beginning forecasting') if ret.__contains__('forecast'): if is_lf: if forecast_path: pm, ps, pp = latent_factor.get_lf_forecast(dates.iloc[t]) forecast[:, t - forecast_start, :] = mod.forecast_path_lf_copula(k=k, X=X[t + horizons - 1, :], nsamps=nsamps, phi_mu=pm, phi_sigma=ps, phi_psi=pp) else: pm, ps = latent_factor.get_lf_forecast(dates.iloc[t]) pp = None # Not including path dependency in latent factor forecast[:, t - forecast_start, :] = np.array(list(map( lambda k, x, pm, ps: mod.forecast_marginal_lf_analytic(k=k, X=x, phi_mu=pm, phi_sigma=ps, nsamps=nsamps, mean_only=mean_only), horizons, X[t + horizons - 1, :], pm, ps))).squeeze().T.reshape(-1, k)#.reshape(-1, 1) else: if forecast_path: forecast[:, t - forecast_start, :] = mod.forecast_path(k=k, X = X[t + horizons - 1, :], nsamps=nsamps) else: if family == "binomial": forecast[:, t - forecast_start, :] = np.array(list(map( lambda k, n, x: mod.forecast_marginal(k=k, n=n, X=x, nsamps=nsamps, mean_only=mean_only), horizons, n[t + horizons - 1], X[t + horizons - 1, :]))).squeeze().T.reshape(-1, k) # .reshape(-1, 1) else: # Get the forecast samples for all the items over the 1:k step ahead marginal forecast distributions forecast[:, t - forecast_start, :] = np.array(list(map( lambda k, x: mod.forecast_marginal(k=k, X=x, nsamps=nsamps, mean_only=mean_only), horizons, X[t + horizons - 1, :]))).squeeze().T.reshape(-1, k)#.reshape(-1, 1) if ret.__contains__('new_latent_factors'): for lf in new_latent_factors: lf.generate_lf_forecast(date=dates[t], mod=mod, X=X[t + horizons - 1], k=k, nsamps=nsamps, horizons=horizons) # Now observe the true y value, and update: if t < len(Y): if is_lf: pm, ps = latent_factor.get_lf(dates.iloc[t]) mod.update_lf_analytic(y=Y[t], 
X=X[t], phi_mu=pm, phi_sigma=ps) else: if family == "binomial": mod.update(y=Y[t], X=X[t], n=n[t]) else: mod.update(y=Y[t], X=X[t]) if ret.__contains__('model_coef'): m[t,:] = mod.m.reshape(-1) C[t,:,:] = mod.C if family == 'normal': n[t] = mod.n / mod.delVar s[t] = mod.s if ret.__contains__('new_latent_factors'): for lf in new_latent_factors: lf.generate_lf(date=dates[t], mod=mod, Y=Y[t], X=X[t], k=k, nsamps=nsamps) out = [] for obj in ret: if obj == 'forecast': out.append(forecast) if obj == 'model': out.append(mod) if obj == 'model_coef': mod_coef = {'m':m, 'C':C} if family == 'normal': mod_coef.update({'n':n, 's':s}) out.append(mod_coef) if obj == 'new_latent_factors': #for lf in new_latent_factors: # lf.append_lf() # lf.append_lf_forecast() if len(new_latent_factors) == 1: out.append(new_latent_factors[0]) else: out.append(new_latent_factors) if len(out) == 1: return out[0] else: return out ``` This function is core to the PyBATS package, because it allows a modeler to easily run a full time series analysis in one step. Below is a quick example of analysis of quarterly inflation in the US using a normal DLM. We'll start by loading in the data: ``` from pybats.shared import load_us_inflation from pybats.analysis import analysis import pandas as pd from pybats.plot import plot_data_forecast from pybats.point_forecast import median import matplotlib.pyplot as plt from pybats.loss_functions import MAPE data = load_us_inflation() pd.concat([data.head(3), data.tail(3)]) ``` And then running an analysis. We're going to use the previous (lag-1) value of inflation as a predictor. ``` forecast_start = '1990-Q1' forecast_end = '2014-Q3' X = data.Inflation.values[:-1] mod, samples = analysis(Y = data.Inflation.values[1:], X=X, family="normal", k = 1, prior_length = 12, forecast_start = forecast_start, forecast_end = forecast_end, dates=data.Date, ntrend = 2, deltrend=.99, seasPeriods=[4], seasHarmComponents=[[1,2]], delseas=.99, nsamps = 5000) ``` A couple of things to note here: - `forecast_start` and `forecast_end` were specified as elements in the `dates` vector. You can also specify forecast_start and forecast_end by row numbers in `Y`, and avoid providing the `dates` argument. - `ntrend=2` creates a model with an intercept and a local slope term, and `deltrend=.98` discounts the impact of older observations on the trend component by $2\%$ at each time step. - The seasonal component was set as `seasPeriods=[4]`, because we think the seasonal effect has a cycle of length $4$ in this quarterly inflation data. Let's examine the output. Here is the mean and standard deviation of the state vector (aka the coefficients) after the model has seen the last observation in `Y`: ``` mod.get_coef() ``` It's clear that the lag-1 regression term is dominant, with a mean of $0.92$. The only other large coefficient is the intercept, with a mean of $0.10$. The seasonal coefficients turned out to be very small. Most likely this is because the publicly available dataset for US inflation is pre-adjusted for seasonality. The forecast samples are stored in a 3-dimensional array, with axes *nsamps* $\times$ *forecast length* $\times$ *k*: - **nsamps** is the number of samples drawn from the forecast distribution - **forecast length** is the number of time steps between `forecast_start` and `forecast_end` - **k** is the forecast horizon, or the number of steps that were forecast ahead We can plot the forecasts using `plot_data_forecast`. 
We'll plot the 1-quarter ahead forecasts, using the median as our point estimate. ``` forecast = median(samples) # Plot the 1-quarter ahead forecast h = 1 start = data[data.Date == forecast_start].index[0] + h end = data[data.Date == forecast_end].index[0] + h + 1 fig, ax = plt.subplots(figsize=(12, 6)) plot_data_forecast(fig, ax, y = data[start:end].Inflation.values, f = forecast[:,h-1], samples = samples[:,:,h-1], dates = pd.to_datetime(data[start:end].Date.values), xlabel='Time', ylabel='Quarterly US Inflation', title='1-Quarter Ahead Forecasts'); ``` We can see that the forecasts are quite good, and nearly all of the observations fall within the $95\%$ credible interval. There's also a clear pattern - the forecasts look as if they're shifted forward from the data by 1 step. This is because the lag-1 predictor is very strong, with a coefficient mean of $0.91$. The model is primarily using the previous month's value as its forecast, with some small modifications. Having the previous value as our best forecast is common in many time series. We can put a number on the quality of the forecast by using a loss function, the Mean Absolute Percent Error (MAPE). We see that on average, our forecasts of quarterly inflation have an error of under $15\%$. ``` MAPE(data[start:end].Inflation.values, forecast[:,0]).round(1) assert(MAPE(data[start:end].Inflation.values, forecast[:,0]).round(0) <= 15) ``` Finally, we can use the returned model to forecast $1-$step ahead to Q1 2015, which is past the end of the dataset. We need the `X` value to forecast into the future. Luckily, in this model the predictor `X` is simply the previous value of Inflation from Q4 2014. ``` x_future = data.Inflation.iloc[-1] one_step_forecast_samples = mod.forecast_marginal(k=1, X=x_future, nsamps=1000000) ``` From here, we can find the mean and standard deviation of the forecast for next quarter's inflation: ``` print('Mean: ' + str(np.mean(one_step_forecast_samples).round(2))) print('Std Dev: ' + str(np.std(one_step_forecast_samples).round(2))) ``` We can also plot the full forecast distribution for Q1 2015: ``` fig, ax = plt.subplots(figsize=(10,6)) ax.hist(one_step_forecast_samples.reshape(-1), bins=200, alpha=0.3, color='b', density=True, label='Forecast Distribution'); ax.vlines(x=np.mean(one_step_forecast_samples), ymin=0, ymax=ax.get_ylim()[1], label='Forecast Mean'); ax.set_title('1-Step Ahead Forecast Distribution for Q1 2015 Inflation'); ax.set_ylabel('Forecast Density') ax.set_xlabel('Q1 2015 Inflation') ax.legend(); ``` ## Analysis for a DCMM ``` #export def analysis_dcmm(Y, X=None, k=1, forecast_start=0, forecast_end=0, nsamps=500, rho=.6, model_prior=None, prior_length=20, ntrend=1, dates=None, holidays=[], seasPeriods=[], seasHarmComponents=[], latent_factor=None, new_latent_factors=None, mean_only=False, ret=['model', 'forecast'], **kwargs): """ This is a helpful function to run a standard analysis using a DCMM. 
""" if latent_factor is not None: is_lf = True # Note: This assumes that the bernoulli & poisson components have the same number of latent factor components if isinstance(latent_factor, (list, tuple)): nlf = latent_factor[0].p else: nlf = latent_factor.p else: is_lf = False nlf = 0 # Convert dates into row numbers if dates is not None: dates = pd.Series(dates) # dates = pd.to_datetime(dates, format='%y/%m/%d') if type(forecast_start) == type(dates.iloc[0]): forecast_start = np.where(dates == forecast_start)[0][0] if type(forecast_end) == type(dates.iloc[0]): forecast_end = np.where(dates == forecast_end)[0][0] # Add the holiday indicator variables to the regression matrix nhol = len(holidays) if nhol > 0: X = define_holiday_regressors(X, dates, holidays) # Initialize the DCMM if model_prior is None: mod = define_dcmm(Y, X, prior_length = prior_length, seasPeriods = seasPeriods, seasHarmComponents = seasHarmComponents, ntrend=ntrend, nlf = nlf, rho = rho, nhol = nhol, **kwargs) else: mod = model_prior if ret.__contains__('new_latent_factors'): if not isinstance(new_latent_factors, Iterable): new_latent_factors = [new_latent_factors] tmp = [] for sig in new_latent_factors: tmp.append(sig.copy()) new_latent_factors = tmp T = len(Y) + 1 # np.min([len(Y), forecast_end]) + 1 nu = 9 if X is None: X = np.array([None]*(T+k)).reshape(-1,1) else: if len(X.shape) == 1: X = X.reshape(-1,1) # Initialize updating + forecasting horizons = np.arange(1,k+1) if mean_only: forecast = np.zeros([1, forecast_end - forecast_start + 1, k]) else: forecast = np.zeros([nsamps, forecast_end - forecast_start + 1, k]) # Run updating + forecasting for t in range(prior_length, T): # if t % 100 == 0: # print(t) if ret.__contains__('forecast'): if t >= forecast_start and t <= forecast_end: if t == forecast_start: print('beginning forecasting') # Get the forecast samples for all the items over the 1:k step ahead path if is_lf: if isinstance(latent_factor, (list, tuple)): pm_bern, ps_bern = latent_factor[0].get_lf_forecast(dates.iloc[t]) pm_pois, ps_pois = latent_factor[1].get_lf_forecast(dates.iloc[t]) pm = (pm_bern, pm_pois) ps = (ps_bern, ps_pois) else: pm, ps = latent_factor.get_lf_forecast(dates.iloc[t]) pp = None # Not including the path dependency of the latent factor if mean_only: forecast[:, t - forecast_start, :] = np.array(list(map( lambda k, x, pm, ps: mod.forecast_marginal_lf_analytic( k=k, X=(x, x), phi_mu=(pm, pm), phi_sigma=(ps, ps), nsamps=nsamps, mean_only=mean_only), horizons, X[t + horizons - 1, :], pm, ps))).reshape(1, -1) else: forecast[:, t - forecast_start, :] = mod.forecast_path_lf_copula( k=k, X=(X[t + horizons - 1, :], X[t + horizons - 1, :]), phi_mu=(pm, pm), phi_sigma=(ps, ps), phi_psi=(pp, pp), nsamps=nsamps, t_dist=True, nu=nu) else: if mean_only: forecast[:, t - forecast_start, :] = np.array(list(map( lambda k, x: mod.forecast_marginal( k=k, X=(x, x), nsamps=nsamps, mean_only=mean_only), horizons, X[t + horizons - 1, :]))).reshape(1,-1) else: forecast[:, t - forecast_start, :] = mod.forecast_path_copula( k=k, X=(X[t + horizons - 1, :], X[t + horizons - 1, :]), nsamps=nsamps, t_dist=True, nu=nu) if ret.__contains__('new_latent_factors'): if t >= forecast_start and t <= forecast_end: for lf in new_latent_factors: lf.generate_lf_forecast(date=dates.iloc[t], mod=mod, X=X[t + horizons - 1, :], k=k, nsamps=nsamps, horizons=horizons) # Update the DCMM if t < len(Y): if is_lf: if isinstance(latent_factor, (list, tuple)): pm_bern, ps_bern = latent_factor[0].get_lf(dates.iloc[t]) pm_pois, ps_pois = 
latent_factor[1].get_lf(dates.iloc[t]) pm = (pm_bern, pm_pois) ps = (ps_bern, ps_pois) else: pm, ps = latent_factor.get_lf(dates.iloc[t]) mod.update_lf_analytic(y=Y[t], X=(X[t], X[t]), phi_mu=(pm, pm), phi_sigma=(ps, ps)) else: mod.update(y = Y[t], X=(X[t], X[t])) if ret.__contains__('new_latent_factors'): for lf in new_latent_factors: lf.generate_lf(date=dates.iloc[t], mod=mod, X=X[t + horizons - 1, :], k=k, nsamps=nsamps, horizons=horizons) out = [] for obj in ret: if obj == 'forecast': out.append(forecast) if obj == 'model': out.append(mod) if obj == 'new_latent_factors': #for lf in new_latent_factors: # lf.append_lf() # lf.append_lf_forecast() if len(new_latent_factors) == 1: out.append(new_latent_factors[0]) else: out.append(new_latent_factors) if len(out) == 1: return out[0] else: return out ``` `analysis_dcmm` works identically to the standard `analysis`, but is specialized for a DCMM. The observations must be integer counts, which are modeled as a combination of a Poisson and Bernoulli DGLM. Typically a DCMM is equally good as a Poisson DGLM for modeling series with consistently large integers, while being significantly better at modeling series with many zeros. Note that by default, all simulated forecasts made with `analysis_dcmm` are *path* forecasts, meaning that they account for the dependence across forecast horizons. ## Analysis for a DBCM ``` #export def analysis_dbcm(Y_transaction, X_transaction, Y_cascade, X_cascade, excess, k, forecast_start, forecast_end, nsamps = 500, rho = .6, model_prior=None, prior_length=20, ntrend=1, dates=None, holidays = [], latent_factor = None, new_latent_factors = None, seasPeriods = [], seasHarmComponents = [], mean_only=False, ret=['model', 'forecast'], **kwargs): """ This is a helpful function to run a standard analysis using a DBCM. 
""" if latent_factor is not None: is_lf = True # Note: This assumes that the bernoulli & poisson components have the same number of latent factor components if isinstance(latent_factor, (list, tuple)): nlf = latent_factor[0].p else: nlf = latent_factor.p else: is_lf = False nlf = 0 # Convert dates into row numbers if dates is not None: dates = pd.Series(dates) # dates = pd.to_datetime(dates, format='%y/%m/%d') if type(forecast_start) == type(dates.iloc[0]): forecast_start = np.where(dates == forecast_start)[0][0] if type(forecast_end) == type(dates.iloc[0]): forecast_end = np.where(dates == forecast_end)[0][0] # Add the holiday indicator variables to the regression matrix nhol = len(holidays) if nhol > 0: X_transaction = define_holiday_regressors(X_transaction, dates, holidays) if model_prior is None: mod = define_dbcm(Y_transaction, X_transaction, Y_cascade, X_cascade, excess_values = excess, prior_length = prior_length, seasPeriods = seasPeriods, seasHarmComponents=seasHarmComponents, nlf = nlf, rho = rho, nhol=nhol, **kwargs) else: mod = model_prior if ret.__contains__('new_latent_factors'): if not isinstance(new_latent_factors, Iterable): new_latent_factors = [new_latent_factors] tmp = [] for sig in new_latent_factors: tmp.append(sig.copy()) new_latent_factors = tmp # Initialize updating + forecasting horizons = np.arange(1,k+1) if mean_only: forecast = np.zeros([1, forecast_end - forecast_start + 1, k]) else: forecast = np.zeros([nsamps, forecast_end - forecast_start + 1, k]) T = len(Y_transaction) + 1 #np.min([len(Y_transaction)- k, forecast_end]) + 1 nu = 9 # Run updating + forecasting for t in range(prior_length, T): # if t % 100 == 0: # print(t) # print(mod.dcmm.pois_mod.param1) # print(mod.dcmm.pois_mod.param2) if ret.__contains__('forecast'): if t >= forecast_start and t <= forecast_end: if t == forecast_start: print('beginning forecasting') # Get the forecast samples for all the items over the 1:k step ahead path if is_lf: if isinstance(latent_factor, (list, tuple)): pm_bern, ps_bern = latent_factor[0].get_lf_forecast(dates.iloc[t]) pm_pois, ps_pois = latent_factor[1].get_lf_forecast(dates.iloc[t]) pm = (pm_bern, pm_pois) ps = (ps_bern, ps_pois) pp = None # Not including path dependency in latent factor else: if latent_factor.forecast_path: pm, ps, pp = latent_factor.get_lf_forecast(dates.iloc[t]) else: pm, ps = latent_factor.get_lf_forecast(dates.iloc[t]) pp = None if mean_only: forecast[:, t - forecast_start, :] = np.array(list(map( lambda k, x_trans, x_cascade, pm, ps: mod.forecast_marginal_lf_analytic( k=k, X_transaction=x_trans, X_cascade=x_cascade, phi_mu=pm, phi_sigma=ps, nsamps=nsamps, mean_only=mean_only), horizons, X_transaction[t + horizons - 1, :], X_cascade[t + horizons - 1, :], pm, ps))).reshape(1, -1) else: forecast[:, t - forecast_start, :] = mod.forecast_path_lf_copula( k=k, X_transaction=X_transaction[t + horizons - 1, :], X_cascade=X_cascade[t + horizons - 1, :], phi_mu=pm, phi_sigma=ps, phi_psi=pp, nsamps=nsamps, t_dist=True, nu=nu) else: if mean_only: forecast[:, t - forecast_start, :] = np.array(list(map( lambda k, x_trans, x_cascade: mod.forecast_marginal( k=k, X_transaction=x_trans, X_cascade=x_cascade, nsamps=nsamps, mean_only=mean_only), horizons, X_transaction[t + horizons - 1, :], X_cascade[t + horizons - 1, :]))).reshape(1,-1) else: forecast[:, t - forecast_start, :] = mod.forecast_path_copula( k=k, X_transaction=X_transaction[t + horizons - 1, :], X_cascade=X_cascade[t + horizons - 1, :], nsamps=nsamps, t_dist=True, nu=nu) if 
ret.__contains__('new_latent_factors'): if t >= forecast_start and t <= forecast_end: for lf in new_latent_factors: lf.generate_lf_forecast(date=dates.iloc[t], mod=mod, X_transaction=X_transaction[t + horizons - 1, :], X_cascade = X_cascade[t + horizons - 1, :], k=k, nsamps=nsamps, horizons=horizons) # Update the DBCM if t < len(Y_transaction): if is_lf: if isinstance(latent_factor, (list, tuple)): pm_bern, ps_bern = latent_factor[0].get_lf(dates.iloc[t]) pm_pois, ps_pois = latent_factor[1].get_lf(dates.iloc[t]) pm = (pm_bern, pm_pois) ps = (ps_bern, ps_pois) else: pm, ps = latent_factor.get_lf(dates.iloc[t]) mod.update_lf_analytic(y_transaction=Y_transaction[t], X_transaction=X_transaction[t, :], y_cascade=Y_cascade[t,:], X_cascade=X_cascade[t, :], phi_mu=pm, phi_sigma=ps, excess=excess[t]) else: mod.update(y_transaction=Y_transaction[t], X_transaction=X_transaction[t, :], y_cascade=Y_cascade[t,:], X_cascade=X_cascade[t, :], excess=excess[t]) if ret.__contains__('new_latent_factors'): for lf in new_latent_factors: lf.generate_lf(date=dates.iloc[t], mod=mod, X_transaction=X_transaction[t + horizons - 1, :], X_cascade = X_cascade[t + horizons - 1, :], k=k, nsamps=nsamps, horizons=horizons) out = [] for obj in ret: if obj == 'forecast': out.append(forecast) if obj == 'model': out.append(mod) if obj == 'new_latent_factors': #for lf in new_latent_factors: # lf.append_lf() # lf.append_lf_forecast() if len(new_latent_factors) == 1: out.append(new_latent_factors[0]) else: out.append(new_latent_factors) if len(out) == 1: return out[0] else: return out ``` `analysis_dbcm` works identically to the standard `analysis`, but is specialized for a DBCM. Separate data must be specified for the DCMM on transactions, `y_transaction` and `X_transaction`, the binomial cascade,`y_cascade`, `X_cascade`, and any excess counts, `excess`. Note that by default, all simulated forecasts made with `analysis_dbcm` are *path* forecasts, meaning that they account for the dependence across forecast horizons. ## Analysis for a DLMM ``` #export def analysis_dlmm(Y, X, k=1, forecast_start=0, forecast_end=0, nsamps=500, rho=.6, model_prior=None, prior_length=20, ntrend=1, dates=None, holidays=[], seasPeriods=[], seasHarmComponents=[], latent_factor=None, new_latent_factors=None, mean_only=False, ret=['model', 'forecast'], **kwargs): """ This is a helpful function to run a standard analysis using a DLMM. 
""" if latent_factor is not None: is_lf = True # Note: This assumes that the bernoulli & poisson components have the same number of latent factor components if isinstance(latent_factor, (list, tuple)): nlf = latent_factor[0].p else: nlf = latent_factor.p else: is_lf = False nlf = 0 # Convert dates into row numbers if dates is not None: dates = pd.Series(dates) # dates = pd.to_datetime(dates, format='%y/%m/%d') if type(forecast_start) == type(dates.iloc[0]): forecast_start = np.where(dates == forecast_start)[0][0] if type(forecast_end) == type(dates.iloc[0]): forecast_end = np.where(dates == forecast_end)[0][0] # Add the holiday indicator variables to the regression matrix nhol = len(holidays) if nhol > 0: X = define_holiday_regressors(X, dates, holidays) # Initialize the DCMM if model_prior is None: mod = define_dlmm(Y, X, prior_length = prior_length, seasPeriods = seasPeriods, seasHarmComponents = seasHarmComponents, ntrend=ntrend, nlf = nlf, rho = rho, nhol = nhol, **kwargs) else: mod = model_prior if ret.__contains__('new_latent_factors'): if not isinstance(new_latent_factors, Iterable): new_latent_factors = [new_latent_factors] tmp = [] for sig in new_latent_factors: tmp.append(sig.copy()) new_latent_factors = tmp if ret.__contains__('model_coef'): ## Return normal dlm params m = np.zeros([T, mod.dlm_mod.a.shape[0]]) C = np.zeros([T, mod.dlm_mod.a.shape[0], mod.dlm_mod.a.shape[0]]) a = np.zeros([T, mod.dlm_mod.a.shape[0]]) R = np.zeros([T, mod.dlm_mod.a.shape[0], mod.dlm_mod.a.shape[0]]) n = np.zeros(T) s = np.zeros(T) # Initialize updating + forecasting horizons = np.arange(1,k+1) if mean_only: forecast = np.zeros([1, forecast_end - forecast_start + 1, k]) else: forecast = np.zeros([nsamps, forecast_end - forecast_start + 1, k]) T = len(Y) + 1 nu = 9 # Run updating + forecasting for t in range(prior_length, T): # if t % 100 == 0: # print(t) if ret.__contains__('forecast'): if t >= forecast_start and t <= forecast_end: if t == forecast_start: print('beginning forecasting') # Get the forecast samples for all the items over the 1:k step ahead path if is_lf: if isinstance(latent_factor, (list, tuple)): pm_bern, ps_bern = latent_factor[0].get_lf_forecast(dates.iloc[t]) pm_dlm, ps_dlm = latent_factor[1].get_lf_forecast(dates.iloc[t]) pm = (pm_bern, pm_dlm) ps = (ps_bern, ps_dlm) else: pm, ps = latent_factor.get_lf_forecast(dates.iloc[t]) pp = None # Not including the path dependency of the latent factor if mean_only: forecast[:, t - forecast_start, :] = np.array(list(map( lambda k, x, pm, ps: mod.forecast_marginal_lf_analytic( k=k, X=(x, x), phi_mu=(pm, pm), phi_sigma=(ps, ps), nsamps=nsamps, mean_only=mean_only), horizons, X[t + horizons - 1, :], pm, ps))).reshape(1, -1) else: forecast[:, t - forecast_start, :] = np.array(list(map( lambda k, x, pm, ps: mod.forecast_marginal_lf_analytic( k=k, X=(x, x), phi_mu=(pm, pm), phi_sigma=(ps, ps), nsamps=nsamps, mean_only=mean_only), horizons, X[t + horizons - 1, :], pm, ps))).squeeze().T.reshape(-1, k) else: if mean_only: forecast[:, t - forecast_start, :] = np.array(list(map( lambda k, x: mod.forecast_marginal( k=k, X=(x, x), nsamps=nsamps, mean_only=mean_only), horizons, X[t + horizons - 1, :]))).reshape(1,-1) else: forecast[:, t - forecast_start, :] = mod.forecast_path_copula( k=k, X=(X[t + horizons - 1, :], X[t + horizons - 1, :]), nsamps=nsamps, t_dist=True, nu=nu) if ret.__contains__('new_latent_factors'): if t >= forecast_start and t <= forecast_end: for lf in new_latent_factors: lf.generate_lf_forecast(date=dates.iloc[t], mod=mod, X=X[t + 
horizons - 1, :], k=k, nsamps=nsamps, horizons=horizons) # Update the DLMM if t < len(Y): if is_lf: if isinstance(latent_factor, (list, tuple)): pm_bern, ps_bern = latent_factor[0].get_lf(dates.iloc[t]) pm_dlm, ps_dlm = latent_factor[1].get_lf(dates.iloc[t]) pm = (pm_bern, pm_dlm) ps = (ps_bern, ps_dlm) else: pm, ps = latent_factor.get_lf(dates.iloc[t]) mod.update_lf_analytic(y=Y[t], X=(X[t], X[t]), phi_mu=(pm, pm), phi_sigma=(ps, ps)) else: mod.update(y = Y[t], X=(X[t], X[t])) if ret.__contains__('new_latent_factors'): for lf in new_latent_factors: lf.generate_lf(date=dates.iloc[t], mod=mod, X=X[t + horizons - 1, :], k=k, nsamps=nsamps, horizons=horizons) # Store the dlm coefficients if ret.__contains__('model_coef'): m[t,:] = mod.dlm.m.reshape(-1) C[t,:,:] = mod.dlm.C a[t,:] = mod.dlm.a.reshape(-1) R[t,:,:] = mod.dlm.R n[t] = mod.dlm.n / mod.dlm.delVar s[t] = mod.dlm.s out = [] for obj in ret: if obj == 'forecast': out.append(forecast) if obj == 'model': out.append(mod) if obj == 'model_coef': mod_coef = {'m':m, 'C':C, 'a':a, 'R':R, 'n':n, 's':s} out.append(mod_coef) if obj == 'new_latent_factors': #for lf in new_latent_factors: # lf.append_lf() # lf.append_lf_forecast() if len(new_latent_factors) == 1: out.append(new_latent_factors[0]) else: out.append(new_latent_factors) if len(out) == 1: return out[0] else: return out ``` `analysis_dlmm` works identically to the standard `analysis`, but is specialized for a DLMM. `analysis_dlmm` returns the model coefficients for the Normal DLM portion of the model only. The observations are continuous and are modeled as a combination of a Bernoulli DGLM and a Normal DLM. Note that by default, all simulated forecasts made with `analysis_dlmm` are *path* forecasts, meaning that they account for the dependence across forecast horizons. The exception is for latent factor DLMMs, which default to marginal forecasting. ``` #hide from nbdev.export import notebook2script notebook2script() ```
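The DGLM `analysis` above has a full worked example, but the mixture-model wrappers do not. As a rough illustration only (the data, variable names, and settings below are hypothetical and not from the original notebook), a call to `analysis_dcmm` on a zero-heavy count series might look like the following sketch:

```
import numpy as np
import pandas as pd
from pybats.analysis import analysis_dcmm

# Simulated daily counts with many zeros, the setting where a DCMM is most useful
rng = np.random.default_rng(0)
dates = pd.date_range('2020-01-01', periods=200, freq='D')
Y = rng.poisson(lam=0.8, size=200)     # hypothetical integer counts
X = rng.normal(size=(200, 1))          # one illustrative regressor

mod, samples = analysis_dcmm(Y, X, k=1,
                             forecast_start=150,   # row numbers in Y
                             forecast_end=198,
                             dates=dates,
                             nsamps=500)

# As with the standard analysis, samples has shape (nsamps, forecast length, k)
print(samples.shape)
```

The same calling pattern applies to `analysis_dlmm`, while `analysis_dbcm` additionally requires the transaction, cascade, and excess inputs described in its section.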
# Support Vector Machines Support Vector Machines (SVM) are an extension of the linear methods that attempt to separate classes with hyperplanes. These extensions come in three steps: 1. When classes are linearly separable, maximize the margin between the two classes 2. When classes are not linearly separable, maximize the margin but allow some samples within the margin. That is the soft margin 3. The "Kernel trick" to extend the separation to non-linear frontiers The performance boost from the kernel trick made SVM the leading classification method of the 2000s, until deep neural networks took over. ### Learning goals - Understand and implement the SVM concepts stated above - Review Lagrange multipliers and optimization theory - Work with a general purpose solver with constraints - Apply SVM to a non-linear problem (XOR) with a non-linear kernel (G-RBF) ### References - [1] [The Elements of Statistical Learning](https://web.stanford.edu/~hastie/ElemStatLearn/) - Trevor Hastie, Robert Tibshirani, Jerome Friedman, Springer - [2] Convex Optimization - Stephen Boyd, Lieven Vandenberghe, Cambridge University Press - [3] [Pattern Recognition and Machine Learning - Ch 7 demo](https://github.com/yiboyang/PRMLPY/blob/master/ch7/svm.py) - Christopher M Bishop, Github ``` import numpy as np import matplotlib.pyplot as plt import matplotlib.colors as pltcolors from sklearn import linear_model, svm, discriminant_analysis, metrics from scipy import optimize import seaborn as sns ``` ## Helpers ``` def plotLine(ax, xRange, w, x0, label, color='grey', linestyle='-', alpha=1.): """ Plot a (separating) line given the normal vector (weights) and point of intercept """ if type(x0) == int or type(x0) == float or type(x0) == np.float64: x0 = [0, -x0 / w[1]] yy = -(w[0] / w[1]) * (xRange - x0[0]) + x0[1] ax.plot(xRange, yy, color=color, label=label, linestyle=linestyle) def plotSvm(X, y, support=None, w=None, intercept=0., label='Data', separatorLabel='Separator', ax=None, bound=[[-1., 1.], [-1., 1.]]): """ Plot the SVM separation, and margin """ if ax is None: fig, ax = plt.subplots(1) im = ax.scatter(X[:,0], X[:,1], c=y, cmap=cmap, alpha=0.5, label=label) if support is not None: ax.scatter(support[:,0], support[:,1], label='Support', s=80, facecolors='none', edgecolors='y', color='y') print("Number of support vectors = %d" % (len(support))) if w is not None: xx = np.array(bound[0]) plotLine(ax, xx, w, intercept, separatorLabel) # Plot margin if support is not None: signedDist = np.matmul(support, w) # Margin width is the spread of signed distances divided by the norm of w margin = (np.max(signedDist) - np.min(signedDist)) / np.sqrt(np.dot(w, w)) supportMaxNeg = support[np.argmin(signedDist)] plotLine(ax, xx, w, supportMaxNeg, 'Margin -', linestyle='-.', alpha=0.8) supportMaxPos = support[np.argmax(signedDist)] plotLine(ax, xx, w, supportMaxPos, 'Margin +', linestyle='--', alpha=0.8) ax.set_title('Margin = %.3f' % (margin)) ax.legend(loc='upper left') ax.grid() ax.set_xlim(bound[0]) ax.set_ylim(bound[1]) cb = plt.colorbar(im, ax=ax) loc = np.arange(-1,1,1) cb.set_ticks(loc) cb.set_ticklabels(['-1','1']) ``` ## The data model Let's use a simple model with two Gaussians that are far apart so that the classes are separable ``` colors = ['blue','red'] cmap = pltcolors.ListedColormap(colors) nFeatures = 2 N = 100 def generateBatchBipolar(n, mu=0.5, sigma=0.2): """ Two gaussian clouds on each side of the origin """ X = np.random.normal(mu, sigma, (n, 2)) yB = np.random.uniform(0, 1, n) > 0.5 # y is in {-1, 1} y = 2. * yB - 1 X *= y[:, np.newaxis] X -= X.mean(axis=0) return X, y ``` # 1.
Maximum margin separator The following explanation is about the binary classification but generalizes to more classes. Let $X$ be the matrix of $n$ samples of the $p$ features. We want to separate the two classes of $y$ with an hyperplan (a straight line in 2D, that is $p=2$). The separation equation is: $$ w^T x + b = 0, w \in \mathbb{R}^{p}, x \in \mathbb{R}^{p}, b \in \mathbb{R} $$ Given $x_0$ a point on the hyperplan, the signed distance of any point $x$ to the hyperplan is : $$ \frac{w}{\Vert w \Vert} (x - x_0) = \frac{1}{\Vert w \Vert} (w^T x + b) $$ If $y$, such that $y \in \{-1, 1\}$, is the corresponding label of $x$, the (unsigned) distance is : $$ \frac{y}{\Vert w \Vert} (w^T x + b) $$ This is the update quantity used by the Rosenblatt Perceptron. The __Maximum margin separator__ is aiming at maximizing $M$ such that : $$ \underset{w, b}{\max} M $$ __Subject to :__ - $y_i(x_i^T w + b) \ge M, i = 1..n$ - $\Vert w \Vert = 1$ $x_i$ and $y_i$ are samples of $x$ and $y$, a row of the matrix $X$ and the vector $y$. However, we may change the condition on the norm of $w$ such that : $\Vert w \Vert = \frac 1M$ Leading to the equivalent statement of the maximum margin classifier : $$ \min_{w, b} \frac 12 \Vert w \Vert^2 $$ __Subject to : $y_i(x_i^T w + b) \ge 1, i = 1..n$__ For more details, see [1, chap 4.5] The corresponding Lagrange primal problem is : $$\mathcal{L}_p(w, b, \alpha) = \frac 12 \Vert w \Vert^2 - \sum_{i=0}^n \alpha_i (y_i(x_i^T w + b) - 1)$$ __Subject to:__ - $\alpha_i \ge 0, i\in 1..n$ This shall be __minimized__ on $w$ and $b$, using the corresponding partial derivates equal to 0, we get : $$\begin{align} \sum_{i=0}^n \alpha_i y_i x_i &= w \\ \sum_{i=0}^n \alpha_i y_i &= 0 \end{align}$$ From $\mathcal{L}_p$, we get the (Wolfe) dual : $$\begin{align} \mathcal{L}_d (\alpha) &= \sum_{i=0}^n \alpha_i - \frac 12 \sum_{i=0}^n \sum_{k=0}^n \alpha_i \alpha_k y_i y_k x_i^T x_k \\ &= \sum_{i=0}^n \alpha_i - \frac 12 \sum_{i=0}^n \sum_{k=0}^n \langle \alpha_i y_i x_i, \alpha_k y_k x_k \rangle \\ \end{align}$$ __Subject to :__ - $\alpha_i \ge 0, i\in 1..n$ - $\sum_{i=0}^n \alpha_i y_i = 0$ Which is a concave problem that is __maximized__ using a solver. Strong duality requires (KKT) [2, chap. 5.5]: - $\alpha_i (y_i(x_i^T w + b) - 1) = 0, \forall i \in 1..n$ Implying that : - If $\alpha_i > 0$, then $y_i(x_i^T w + b) = 1$, meaning that $x_i$ is on one of the two hyperplans located at the margin distance from the separating hyperplan. $x_i$ is said to be a support vector - If $y_i(x_i^T w + b) > 1$, the distance of $x_i$ to the hyperplan is larger than the margin. ### Train data To demonstrate the maximum margin classifier, a dataset with separable classes is required. Let's use a mixture of two gaussian distributed classes with mean and variance such that the two classes are separated. ``` xTrain0, yTrain0 = generateBatchBipolar(N, sigma=0.2) plotSvm(xTrain0, yTrain0) ``` ## Implementation of the Maximum margin separator $$\mathcal{L}_d = \sum_{i=0}^n \alpha_i - \frac 12 \sum_{i=0}^n \sum_{k=0}^n \alpha_i \alpha_k y_i y_k x_i^T x_k $$ __Subject to :__ - $\sum_{i=0}^n \alpha_i y_i = \langle \alpha, y \rangle = 0$ - $\alpha_i \ge 0, i\in 1..n$ The classifier is built on the scipy.optimize.minimum solver. The implementation is correct but inefficient as it is not taking into account for the sparsity of the $\alpha$ vector. 
``` class MaxMarginClassifier: def __init__(self): self.alpha = None self.w = None self.supportVectors = None def fit(self, X, y): N = len(y) # Gram matrix of (X.y) Xy = X * y[:, np.newaxis] GramXy = np.matmul(Xy, Xy.T) # Lagrange dual problem def Ld0(G, alpha): return alpha.sum() - 0.5 * alpha.dot(alpha.dot(G)) # Partial derivate of Ld on alpha def Ld0dAlpha(G, alpha): return np.ones_like(alpha) - alpha.dot(G) # Constraints on alpha of the shape : # - d - C*alpha = 0 # - b - A*alpha >= 0 A = -np.eye(N) b = np.zeros(N) constraints = ({'type': 'eq', 'fun': lambda a: np.dot(a, y), 'jac': lambda a: y}, {'type': 'ineq', 'fun': lambda a: b - np.dot(A, a), 'jac': lambda a: -A}) # Maximize by minimizing the opposite optRes = optimize.minimize(fun=lambda a: -Ld0(GramXy, a), x0=np.ones(N), method='SLSQP', jac=lambda a: -Ld0dAlpha(GramXy, a), constraints=constraints) self.alpha = optRes.x self.w = np.sum((self.alpha[:, np.newaxis] * Xy), axis=0) epsilon = 1e-6 self.supportVectors = X[self.alpha > epsilon] # Any support vector is at a distance of 1 to the separation plan # => use support vector #0 to compute the intercept, assume label is in {-1, 1} supportLabels = y[self.alpha > epsilon] self.intercept = supportLabels[0] - np.matmul(self.supportVectors[0].T, self.w) def predict(self, X): """ Predict y value in {-1, 1} """ assert(self.w is not None) assert(self.w.shape[0] == X.shape[1]) return 2 * (np.matmul(X, self.w) > 0) - 1 ``` Reference: - https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html#scipy.optimize.minimize ``` model00 = MaxMarginClassifier() model00.fit(xTrain0, yTrain0) model00.w, model00.intercept fig, ax = plt.subplots(1, figsize=(12, 7)) plotSvm(xTrain0, yTrain0, model00.supportVectors, model00.w, model00.intercept, label='Training', ax=ax) ``` ## Maximum margin classifier using Scikit Learn (SVC) SVC is used in place of LinearSVC as the support vectors are provided. These vectors are displayed in the graph here below. Set a high $C$ parameter to disable soft margin ``` model01 = svm.SVC(kernel='linear', gamma='auto', C = 1e6) model01.fit(xTrain0, yTrain0) model01.coef_[0], model01.intercept_[0] fig, ax = plt.subplots(1, figsize=(11, 7)) plotSvm(xTrain0, yTrain0, model01.support_vectors_, model01.coef_[0], model01.intercept_[0], label='Training', ax=ax) ``` The two implementations of the linear SVM agree on the ceofficients and margin. Good ! ### Comparison of the maximum margin classifier to the Logistic regression and Linear Discriminant Analysis (LDA) Logistic regression is based on the linear regression that is the computation of the square error of any point $x$ to the separation plan and a projection on the probability space using the sigmoid in order to compute the binary cross entropy, see ([HTML](ClassificationContinuous2Features.html) / [Jupyter](ClassificationContinuous2Features.ipynb)). LDA is assuming a Gaussian mixture prior (our case) and performs bayesian inference. 
``` model02 = linear_model.LogisticRegression(solver='lbfgs') model02.fit(xTrain0, yTrain0) model02.coef_[0], model02.intercept_[0] model03 = discriminant_analysis.LinearDiscriminantAnalysis(solver='svd') model03.fit(xTrain0, yTrain0) model03.coef_[0], model03.intercept_[0] ``` We observe that the coefficients of the three models are very different in amplitude but globally draw a separator line with slope $-\frac \pi4$ in the 2D plan ``` fig, ax = plt.subplots(1, figsize=(11, 7)) plotSvm(xTrain0, yTrain0, w=model01.coef_[0], intercept=model01.intercept_[0], separatorLabel='Max Margin SVM', label='Training', ax=ax) xx = np.array([-1., 1.]) plotLine(ax, xx, w=model02.coef_[0], x0=model02.intercept_[0], label='Logistic', color='g') plotLine(ax, xx, w=model03.coef_[0], x0=model03.intercept_[0], label='LDA', color='c') ax.legend(); ``` # 2. Soft Margin Linear SVM for non separable classes The example above has little interest as the separation is trivial. Using the same SVM implementation on a non separable case would not be possible, the solver would fail. Here comes the soft margin: some $x_i$ are allowed to lie in between the two margin bars. The __Soft margin linear SVM__ is adding a regularization parameter in maximizing $M$: $$ \underset{w, b}{\max} M ( 1 - \xi_i) $$ __Subject to $\forall i = 1..n$:__ - $y_i(x_i^T w + b) \ge M$ - $\Vert w \Vert = 1$ - $\xi_i \ge 0$ Equivalently : $$ \min_{w, b} \frac 12 \Vert w \Vert^2 + C \sum_{i=1}^n \xi_i$$ __Subject to $\forall i = 1..n$:__ - $\xi_i \ge 0$ - $y_i(x_i^T w + b) \ge 1 - \xi_i$ The corresponding Lagrange primal problem is : $$\mathcal{L}_p(w, b, \alpha, \mu) = \frac 12 \Vert w \Vert^2 - \sum_{i=0}^n \alpha_i (y_i(x_i^T w + b) - (1 - \xi_i) - \sum_{i=0}^n \mu_i \xi_i $$ __Subject to $\forall i\in 1..n$:__ - $\alpha_i \ge 0$ - $\mu_i \ge 0$ - $\xi_i \ge 0$ This shall be minimized on $w$, $b$ and $\xi_i$, using the corresponding partial derivates equal to 0, we get : $$\begin{align} \sum_{i=0}^n \alpha_i y_i x_i &= w \\ \sum_{i=0}^n \alpha_i y_i &= 0 \\ \alpha_i &= C - \mu_i \end{align}$$ From $\mathcal{L}_p$, we get the (Wolfe) dual : $$\begin{align} \mathcal{L}_d (\alpha) &= \sum_{i=0}^n \alpha_i - \frac 12 \sum_{i=0}^n \sum_{k=0}^n \alpha_i \alpha_k y_i y_k x_i^T x_k \\ &= \sum_{i=0}^n \alpha_i - \frac 12 \sum_{i=0}^n \sum_{k=0}^n \langle \alpha_i y_i x_i, \alpha_k y_k x_k \rangle \\ \end{align}$$ __Subject to $\forall i\in 1..n$:__ - $0 \le \alpha_i \le C$ - $\sum_{i=0}^n \alpha_i y_i = 0$ This problem is very similar to the one of the Maximum margin separator, but with one more constraint on $\alpha$. It is a concave problem that is maximized using a solver. Extra conditions to get strong duality are required (KKT), $\forall i \in 1..n$: - $\alpha_i (y_i(x_i^T w + b) - (1 - \xi_i)) = 0$ - $\mu_i \xi_i = 0$ - $y_i(x_i^T w + b) - (1 - \xi_i) \ge 0$ More detailed explainations are in [1 chap. 
12.1, 12.2] ## Data model Let's reuse the same model made of two gaussians, but with larger variance in order to mix the positive and negative points ``` xTrain1, yTrain1 = generateBatchBipolar(N, mu=0.3, sigma=0.3) plotSvm(xTrain1, yTrain1, label='Training') ``` ## Custom implementation Changes to the Maximum margin classifier are identified by "# <---" ``` class LinearSvmClassifier: def __init__(self, C): self.C = C # <--- self.alpha = None self.w = None self.supportVectors = None def fit(self, X, y): N = len(y) # Gram matrix of (X.y) Xy = X * y[:, np.newaxis] GramXy = np.matmul(Xy, Xy.T) # Lagrange dual problem def Ld0(G, alpha): return alpha.sum() - 0.5 * alpha.dot(alpha.dot(G)) # Partial derivate of Ld on alpha def Ld0dAlpha(G, alpha): return np.ones_like(alpha) - alpha.dot(G) # Constraints on alpha of the shape : # - d - C*alpha = 0 # - b - A*alpha >= 0 A = np.vstack((-np.eye(N), np.eye(N))) # <--- b = np.hstack((np.zeros(N), self.C * np.ones(N))) # <--- constraints = ({'type': 'eq', 'fun': lambda a: np.dot(a, y), 'jac': lambda a: y}, {'type': 'ineq', 'fun': lambda a: b - np.dot(A, a), 'jac': lambda a: -A}) # Maximize by minimizing the opposite optRes = optimize.minimize(fun=lambda a: -Ld0(GramXy, a), x0=np.ones(N), method='SLSQP', jac=lambda a: -Ld0dAlpha(GramXy, a), constraints=constraints) self.alpha = optRes.x self.w = np.sum((self.alpha[:, np.newaxis] * Xy), axis=0) epsilon = 1e-6 self.supportVectors = X[self.alpha > epsilon] # Support vectors is at a distance <= 1 to the separation plan # => use min support vector to compute the intercept, assume label is in {-1, 1} signedDist = np.matmul(self.supportVectors, self.w) minDistArg = np.argmin(signedDist) supportLabels = y[self.alpha > epsilon] self.intercept = supportLabels[minDistArg] - signedDist[minDistArg] def predict(self, X): """ Predict y value in {-1, 1} """ assert(self.w is not None) assert(self.w.shape[0] == X.shape[1]) return 2 * (np.matmul(X, self.w) > 0) - 1 model10 = LinearSvmClassifier(C=1) model10.fit(xTrain1, yTrain1) model10.w, model10.intercept fig, ax = plt.subplots(1, figsize=(11, 7)) plotSvm(xTrain1, yTrain1, model10.supportVectors, model10.w, model10.intercept, label='Training', ax=ax) ``` ### Linear SVM using Scikit Learn ``` model11 = svm.SVC(kernel='linear', gamma='auto', C = 1) model11.fit(xTrain1, yTrain1) model11.coef_[0], model11.intercept_[0] fig, ax = plt.subplots(1, figsize=(11, 7)) plotSvm(xTrain1, yTrain1, model11.support_vectors_, model11.coef_[0], model11.intercept_[0], label='Training', ax=ax) ``` With the soft margin, the support vectors are all the vectors on the boundary or within the margin slab. The custom and SKLearn implementations are matching ! ### Comparison of the soft margin classifier to the Logistic regression and Linear Discriminant Analysis (LDA) ``` model12 = linear_model.LogisticRegression(solver='lbfgs') model12.fit(xTrain1, yTrain1) model12.coef_[0], model12.intercept_[0] model13 = discriminant_analysis.LinearDiscriminantAnalysis(solver='svd') model13.fit(xTrain1, yTrain1) model13.coef_[0], model13.intercept_[0] ``` As shown below, the three models separator hyperplans are very similar, negative slope. 
``` fig, ax = plt.subplots(1, figsize=(11, 7)) plotSvm(xTrain1, yTrain1, w=model11.coef_[0], intercept=model11.intercept_[0], label='Training', separatorLabel='Soft Margin SVM', ax=ax) xx = np.array([-1., 1.]) plotLine(ax, xx, w=model12.coef_[0], x0=model12.intercept_[0], label='Logistic reg', color='orange') plotLine(ax, xx, w=model13.coef_[0], x0=model13.intercept_[0], label='LDA', color='c') ax.legend(); ``` ### Validation with test data ``` xTest1, yTest1 = generateBatchBipolar(2*N, mu=0.3, sigma=0.3) ``` #### Helpers for binary classification performance ``` def plotHeatMap(X, classes, title=None, fmt='.2g', ax=None, xlabel=None, ylabel=None): """ Fix heatmap plot from Seaborn with pyplot 3.1.0, 3.1.1 https://stackoverflow.com/questions/56942670/matplotlib-seaborn-first-and-last-row-cut-in-half-of-heatmap-plot """ ax = sns.heatmap(X, xticklabels=classes, yticklabels=classes, annot=True, \ fmt=fmt, cmap=plt.cm.Blues, ax=ax) #notation: "annot" not "annote" bottom, top = ax.get_ylim() ax.set_ylim(bottom + 0.5, top - 0.5) if title: ax.set_title(title) if xlabel: ax.set_xlabel(xlabel) if ylabel: ax.set_ylabel(ylabel) def plotConfusionMatrix(yTrue, yEst, classes, title=None, fmt='.2g', ax=None): plotHeatMap(metrics.confusion_matrix(yTrue, yEst), classes, title, fmt, ax, xlabel='Estimations', \ ylabel='True values'); ``` ### Confusion matrices ``` fig, axes = plt.subplots(1, 3, figsize=(16, 3)) for model, ax, title in zip([model10, model12, model13], axes, ['Custom linear SVM', 'Logistic reg', 'LDA']): yEst = model.predict(xTest1) plotConfusionMatrix(yTest1, yEst, colors, title, ax=ax) ``` There is no clear winner, all models are performing equally well. ``` fig, ax = plt.subplots(1, figsize=(11, 7)) plotSvm(xTest1, yTest1, w=model10.w, intercept=model10.intercept, separatorLabel='Cust. linear SVM', ax=ax) xx = np.array([-1., 1.]) plotLine(ax, xx, w=model12.coef_[0], x0=model12.intercept_[0], label='Logistic reg', color='orange') plotLine(ax, xx, w=model13.coef_[0], x0=model13.intercept_[0], label='LDA', color='c') ax.legend(); ``` # 3. The "kernel trick" for non linearly separable classes Let's use a very famous dataset showing the main limitation of the Logistic regression and LDA : the XOR. ``` def generateBatchXor(n, mu=0.5, sigma=0.5): """ Four gaussian clouds in a Xor fashion """ X = np.random.normal(mu, sigma, (n, 2)) yB0 = np.random.uniform(0, 1, n) > 0.5 yB1 = np.random.uniform(0, 1, n) > 0.5 # y is in {-1, 1} y0 = 2. * yB0 - 1 y1 = 2. * yB1 - 1 X[:,0] *= y0 X[:,1] *= y1 X -= X.mean(axis=0) return X, y0*y1 xTrain3, yTrain3 = generateBatchXor(2*N, sigma=0.25) plotSvm(xTrain3, yTrain3) xTest3, yTest3 = generateBatchXor(2*N, sigma=0.25) ``` ## Logistic regression and LDA on XOR problem ``` model32 = linear_model.LogisticRegression(solver='lbfgs') model32.fit(xTrain3, yTrain3) model32.coef_[0], model32.intercept_[0] model33 = discriminant_analysis.LinearDiscriminantAnalysis(solver='svd') model33.fit(xTrain3, yTrain3) model33.coef_[0], model33.intercept_[0] ``` The linear separators are sometimes mitigating the issue by isolating a single class within a corner. Or they are simply fully failing (separator is of limit). 
``` fig, ax = plt.subplots(1, figsize=(11, 7)) plotSvm(xTrain3, yTrain3, w=model32.coef_[0], intercept=model32.intercept_[0], label='Training', separatorLabel='Logistic reg', ax=ax) xx = np.array([-1., 1.]) plotLine(ax, xx, w=model33.coef_[0], x0=model33.intercept_[0], label='LDA', color='c') ax.legend(); ``` ## Introducing the Kernel trick When using linear separators like the regression, the traditional way to deal with non linear functions is to expand the feature space using powers and products of the initial features. This is also necessary in case of multiclass problems as shown in [1 chap. 4.2]. There are limits to this trick. For example, the XOR problem is not handled proprely. The SVM has used a new method known as the "Kernel trick". Let's apply a transformation to $x$ using function $h(x)$. The Lagrange (Wolfe) dual problem becomes : $$\begin{align} \mathcal{L}_d (\alpha) &= \sum_{i=0}^n \alpha_i - \frac 12 \sum_{i=0}^n \sum_{k=0}^n \alpha_i \alpha_k y_i y_k h(x_i)^T h(x_k) \\ &= \sum_{i=0}^n \alpha_i - \frac 12 \sum_{i=0}^n \sum_{k=0}^n \alpha_i \alpha_k \langle y_i h(x_i), y_k h(x_k) \rangle \\ \end{align}$$ __Subject to $\forall i\in 1..n$:__ - $0 \le \alpha_i \le C$ - $\sum_{i=0}^n \alpha_i y_i = 0$ Since $ w = \sum_{i=0}^n \alpha_i y_i h(x_i)$, the prediction function is now : $$ f(x) = sign(w^T h(x) + b) = sign \left(\sum_{i=0}^n \alpha_i y_i \langle h(x_i), h(x) \rangle \right) $$ This prediction needs to be computed for $\alpha_i > 0$, that are support vectors. Both the fit and prediction are based on the inner product $K(x, x') = \langle h(x), h(x') \rangle$, also known as the kernel function. This function shall be symmetric, semi-definite. Popular kernel is the Gaussian Radial Basis Function (RBF) : $K(x, x') = exp(- \gamma \Vert x - x' \Vert^2 )$ ### Custom implementation of the SVM with G-RBF kernel Modifications made on the Linear SVM implementation are enclosed in blocks starting with _"# --->"_ and ending with _"# <---"_ ``` class KernelSvmClassifier: def __init__(self, C, kernel): self.C = C self.kernel = kernel # <--- self.alpha = None self.supportVectors = None def fit(self, X, y): N = len(y) # ---> # Gram matrix of h(x) y hXX = np.apply_along_axis(lambda x1 : np.apply_along_axis(lambda x2: self.kernel(x1, x2), 1, X), 1, X) yp = y.reshape(-1, 1) GramHXy = hXX * np.matmul(yp, yp.T) # <--- # Lagrange dual problem def Ld0(G, alpha): return alpha.sum() - 0.5 * alpha.dot(alpha.dot(G)) # Partial derivate of Ld on alpha def Ld0dAlpha(G, alpha): return np.ones_like(alpha) - alpha.dot(G) # Constraints on alpha of the shape : # - d - C*alpha = 0 # - b - A*alpha >= 0 A = np.vstack((-np.eye(N), np.eye(N))) # <--- b = np.hstack((np.zeros(N), self.C * np.ones(N))) # <--- constraints = ({'type': 'eq', 'fun': lambda a: np.dot(a, y), 'jac': lambda a: y}, {'type': 'ineq', 'fun': lambda a: b - np.dot(A, a), 'jac': lambda a: -A}) # Maximize by minimizing the opposite optRes = optimize.minimize(fun=lambda a: -Ld0(GramHXy, a), x0=np.ones(N), method='SLSQP', jac=lambda a: -Ld0dAlpha(GramHXy, a), constraints=constraints) self.alpha = optRes.x # ---> epsilon = 1e-8 supportIndices = self.alpha > epsilon self.supportVectors = X[supportIndices] self.supportAlphaY = y[supportIndices] * self.alpha[supportIndices] # <--- def predict(self, X): """ Predict y values in {-1, 1} """ # ---> def predict1(x): x1 = np.apply_along_axis(lambda s: self.kernel(s, x), 1, self.supportVectors) x2 = x1 * self.supportAlphaY return np.sum(x2) d = np.apply_along_axis(predict1, 1, X) return 2 * (d > 0) - 1 # 
<--- def GRBF(x1, x2): diff = x1 - x2 return np.exp(-np.dot(diff, diff) * len(x1) / 2) model30 = KernelSvmClassifier(C=5, kernel=GRBF) model30.fit(xTrain3, yTrain3) fig, ax = plt.subplots(1, figsize=(11, 7)) plotSvm(xTrain3, yTrain3, support=model30.supportVectors, label='Training', ax=ax) # Estimate and plot decision boundary xx = np.linspace(-1, 1, 50) X0, X1 = np.meshgrid(xx, xx) xy = np.vstack([X0.ravel(), X1.ravel()]).T Y30 = model30.predict(xy).reshape(X0.shape) ax.contour(X0, X1, Y30, colors='k', levels=[-1, 0], alpha=0.3, linestyles=['-.', '-']); ``` ## Scikit Learn SVM with Radial basis kernel ``` model31 = svm.SVC(kernel='rbf', C=10, gamma=1/2, shrinking=False) model31.fit(xTrain3, yTrain3); fig, ax = plt.subplots(1, figsize=(11, 7)) plotSvm(xTrain3, yTrain3, support=model31.support_vectors_, label='Training', ax=ax) # Estimate and plot decision boundary Y31 = model31.predict(xy).reshape(X0.shape) ax.contour(X0, X1, Y31, colors='k', levels=[-1, 0], alpha=0.3, linestyles=['-.', '-']); ``` ### SVM with RBF performance on XOR ``` fig, axes = plt.subplots(1, 2, figsize=(11, 3)) for model, ax, title in zip([model30, model31], axes, ["Custom SVM with RBF", "SKLearn SVM with RBF"]): yEst3 = model.predict(xTest3) plotConfusionMatrix(yTest3, yEst3, colors, title, ax=ax) ``` Both models' predictions are almost matching on the XOR example. ## Conclusion We have shown the power of SVM classifiers for non linearly separable problems. From the end of the 1990's, SVM was the leading machine learning algorithm family for many problems. This situation has changed a little since 2010 as deep learning has shown better performance for some classes of problems. However, SVM remains stronger in many contexts. For example, the amount of training data for SVM is lower than the one required for deep learning. ### Where to go from here - Multiclass classifier using Neural Nets in Keras ([HTML](ClassificationMulti2Features-Keras.html) / [Jupyter](ClassificationMulti2Features-Keras.ipynb)) - Multiclass classifier using Decision Trees ([HTML](ClassificationMulti2Features-Tree.html) / [Jupyter](ClassificationMulti2Features-Tree.ipynb)) - Bivariate continuous function approximation with Linear Regression ([HTML](ClassificationContinuous2Features.html) / [Jupyter](ClassificationContinuous2Features.ipynb)) - Bivariate continuous function approximation with k Nearest Neighbors ([HTML](ClassificationContinuous2Features-KNN.html) / [Jupyter](ClassificationContinuous2Features-KNN.ipynb))
``` import caffe import numpy as np import matplotlib.pyplot as plt import os from keras.datasets import mnist from caffe.proto import caffe_pb2 import google.protobuf.text_format plt.rcParams['image.cmap'] = 'gray' %matplotlib inline ``` Loading the model ``` model_def = 'example_caffe_mnist_model.prototxt' model_weights = 'mnist.caffemodel' net = caffe.Net(model_def, model_weights, caffe.TEST) ``` A Caffe net offers a layer dict that maps layer names to layer objects. These objects do not provide very much information though, but access to their weights and the type of the layer. ``` net.layer_dict conv_layer = net.layer_dict['conv2d_1'] conv_layer.type, conv_layer.blobs[0].data.shape ``` ### Getting input and output shape. The net provides a `blobs dict`. These blobs contain `data`, i.e. all the intermediary computation results and `diff`, i.e. the gradients. ``` for name, blob in net.blobs.items(): print('{}: \t {}'.format(name, blob.data.shape)) ``` ### Getting the weigths. The net provides access to a `param dict` that contains the weights. The first entry in param corresponds to the weights, the second corresponds to the bias. ``` net.params for name, param in net.params.items(): print('{}:\t {} \t{}'.format(name, param[0].data.shape, param[1].data.shape)) ``` The weights are also accessible through the layer blobs. ``` for layer in net.layers: try: print (layer.type + '\t' + str(layer.blobs[0].data.shape), str(layer.blobs[1].data.shape)) except: continue weights = net.params['conv2d_1'][0].data weights.shape ``` For visualizing the weights the axis still have to be moved around. ``` for i in range(32): plt.imshow(np.moveaxis(weights[i], 0, -1)[..., 0]) plt.show() ``` Layers that have no weights simply keep empty lists as their blob vector. ``` list(net.layer_dict['dropout_1'].blobs) ``` ### Getting the activations and the net input. For getting activations, first data has to be passed through the network. Then the activations can be read out from the blobs. If the activations are defined as in place operations, the net input will not be stored in any blob and can therefore not be recovered. This problem can be circumvented if the network definition is changed so that in place operations are avoided. This can also be done programatically as follows. ``` def remove_inplace(model_def): protonet = caffe_pb2.NetParameter() with open(model_def, 'r') as fp: google.protobuf.text_format.Parse(str(fp.read()), protonet) replaced_tops = {} for layer in protonet.layer: # Check whehter bottoms were renamed. for i in range(len(layer.bottom)): if layer.bottom[i] in replaced_tops.keys(): layer.bottom[i] = replaced_tops[layer.bottom[i]] if layer.bottom == layer.top: for i in range(len(layer.top)): # Retain the mapping from the old to the new name. new_top = layer.top[i] + '_' + layer.name replaced_tops[layer.top[i]] = new_top # Redefine layer.top layer.top[i] = new_top return protonet model_def = 'example_caffe_mnist_model_deploy.prototxt' protonet_no_inplace = remove_inplace(model_def) protonet_no_inplace model_def = 'example_caffe_network_no_inplace_deploy.prototxt' model_weights = 'mnist.caffemodel' net_no_inplace = caffe.Net(model_def, model_weights, caffe.TEST) net_no_inplace.layer_dict net_no_inplace.blobs # Loading and preprocessing data. data = mnist.load_data()[1][0] # Normalize data. data = data / data.max() plt.imshow(data[0, :, :]) seven = data[0, :, :] print(seven.shape) seven = seven[np.newaxis, ...] print(seven.shape) ``` Feeding the input and forwarding it. 
```
net_no_inplace.blobs['data'].data[...] = seven
output = net_no_inplace.forward()
output['prob'][0].argmax()

activations = net_no_inplace.blobs['relu_1'].data
for i in range(32):
    plt.imshow(activations[0, i, :, :])
    plt.title('Feature map %d' % i)
    plt.show()

net_input = net_no_inplace.blobs['conv2d_1'].data
for i in range(32):
    plt.imshow(net_input[0, i, :, :])
    plt.title('Feature map %d' % i)
    plt.show()
```

### Getting layer properties

The layer objects expose little more than their type, so the original .prototxt has to be parsed to access attributes such as the kernel size.

```
model_def = 'example_caffe_mnist_model.prototxt'
f = open(model_def, 'r')
protonet = caffe_pb2.NetParameter()
google.protobuf.text_format.Parse(str(f.read()), protonet)
f.close()

protonet
type(protonet)
```

The parsed layer messages can be found in the `message.layer` list.

```
for i in range(0, len(protonet.layer)):
    if protonet.layer[i].type == 'Convolution':
        print('layer %s has kernel_size %d' % (protonet.layer[i].name, protonet.layer[i].convolution_param.kernel_size[0]))
        lconv_proto = protonet.layer[i]

len(protonet.layer), len(net.layers)
```
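Building on the parsing above, the following is a small convenience sketch (not from the original notebook) that combines the parsed prototxt with `net.params` to summarize each layer in one place. It only relies on fields already used in this notebook (`layer.name`, `layer.type`, `convolution_param.kernel_size`, and the `net.params` blob shapes); the helper name itself is hypothetical.

```
# Hedged helper: map every layer name to its type, kernel size (for
# convolution layers) and learnable parameter shapes, reusing only the
# fields accessed earlier in this notebook.
def summarize_layers(protonet, net):
    summary = {}
    for layer in protonet.layer:
        entry = {'type': layer.type}
        if layer.type == 'Convolution' and len(layer.convolution_param.kernel_size) > 0:
            entry['kernel_size'] = layer.convolution_param.kernel_size[0]
        if layer.name in net.params:
            entry['param_shapes'] = [p.data.shape for p in net.params[layer.name]]
        summary[layer.name] = entry
    return summary

for name, info in summarize_layers(protonet, net).items():
    print(name, info)
```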
# Transpose convolution: Upsampling

In section 10.5.3, we discussed how transpose convolutions can be used to upsample a lower resolution input into a higher resolution output. This notebook contains fully functional PyTorch code for the same.

```
import matplotlib.pyplot as plt
import torch
import math
```

First, let's look at how transpose convolution works on a simple input tensor. Then we will look at a real image. For this purpose, we will consider the example described in Figure 10.17. The input is a 2x2 array as follows:

$$
x = \begin{bmatrix}
5 & 6 \\
7 & 8 \\
\end{bmatrix}
$$

and the transpose convolution kernel is also a 2x2 array as follows

$$
w = \begin{bmatrix}
1 & 2 \\
3 & 4 \\
\end{bmatrix}
$$

Transpose convolution with stride 1 results in a 3x3 output as shown below.

# Transpose conv 2D with stride 1

```
x = torch.tensor([
    [5., 6.],
    [7., 8.]
])
w = torch.tensor([
    [1., 2.],
    [3., 4.]
])

x = x.unsqueeze(0).unsqueeze(0)
w = w.unsqueeze(0).unsqueeze(0)

transpose_conv2d = torch.nn.ConvTranspose2d(1, 1, kernel_size=2, stride=1, bias=False)
# set weights of the TransposeConv2d object
with torch.no_grad():
    transpose_conv2d.weight = torch.nn.Parameter(w)

with torch.no_grad():
    y = transpose_conv2d(x)
y
```

# Transpose conv 2D with stride 2

In the above example, we did not get a truly upsampled version of the input because we used a kernel stride of 1. The increase in resolution from 2 to 3 comes from the implicit padding in the transpose operation. Now, let's see how to truly upsample the image - we will run transpose convolution with stride 2. The step-by-step demonstration of this is shown in Figure 10.18. As you can see below, we obtain a 4x4 output. This is because we used a kernel stride of 2. Using a larger stride would further increase the output resolution.

```
x = torch.tensor([
    [5., 6.],
    [7., 8.]
])
w = torch.tensor([
    [1., 2.],
    [3., 4.]
])

x = x.unsqueeze(0).unsqueeze(0)
w = w.unsqueeze(0).unsqueeze(0)

transpose_conv2d = torch.nn.ConvTranspose2d(1, 1, kernel_size=2, stride=2, bias=False)
# set weights of the TransposeConv2d object
with torch.no_grad():
    transpose_conv2d.weight = torch.nn.Parameter(w)

with torch.no_grad():
    y = transpose_conv2d(x)
y
```

Now, let's take a sample image and see how the input compares to the output post transpose convolution with stride 2.

```
import cv2

x = torch.tensor(cv2.imread("./Figures/dog2.jpg", 0), dtype=torch.float32)
w = torch.tensor([
    [1., 1.],
    [1., 1.]
])

x = x.unsqueeze(0).unsqueeze(0)
w = w.unsqueeze(0).unsqueeze(0)

transpose_conv2d = torch.nn.ConvTranspose2d(1, 1, kernel_size=2, stride=2, bias=False)
# set weights of the TransposeConv2d object
with torch.no_grad():
    transpose_conv2d.weight = torch.nn.Parameter(w)

with torch.no_grad():
    y = transpose_conv2d(x)
y

print("Input shape:", x.shape)
print("Output shape:", y.shape)
```

As expected, the output is twice the size of the input. The images below should make this clear.

```
def display_image_in_actual_size(im_data, title):
    dpi = 80
    height, width = im_data.shape

    # What size does the figure need to be in inches to fit the image?
    figsize = width / float(dpi), height / float(dpi)

    # Create a figure of the right size with one axes that takes up the full figure
    fig = plt.figure(figsize=figsize)
    ax = fig.add_axes([0, 0, 1, 1])

    # Hide spines, ticks, etc.
    ax.axis('off')

    # Display the image.
    ax.imshow(im_data, cmap='gray')
    ax.set_title(title)

    plt.show()

display_image_in_actual_size(x.squeeze().squeeze(), "Input image")
display_image_in_actual_size(y.squeeze().squeeze(), "Output image")
```
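The output sizes seen in all three examples follow the `ConvTranspose2d` size formula from the PyTorch documentation. The sketch below (not part of the book's code) checks them under the defaults used here (padding=0, output_padding=0, dilation=1), where the formula reduces to `(H_in - 1) * stride + kernel_size`.

```
# Hedged check of the expected ConvTranspose2d output size, assuming the
# defaults used above: padding=0, output_padding=0, dilation=1.
def transpose_conv_out_size(in_size, kernel_size, stride):
    # Full formula: (H_in - 1)*stride - 2*padding + dilation*(kernel_size - 1)
    #               + output_padding + 1, simplified for those defaults.
    return (in_size - 1) * stride + kernel_size

for in_size, stride in [(2, 1), (2, 2), (x.shape[-1], 2)]:
    print("input %d, kernel 2, stride %d -> output %d"
          % (in_size, stride, transpose_conv_out_size(in_size, 2, stride)))
```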
``` import tensorflow as tf print(tf.__version__) import numpy as np import matplotlib.pyplot as plt import tensorflow as tf from tensorflow import keras def plot_series(time, series, format="-", start=0, end=None): plt.plot(time[start:end], series[start:end], format) plt.xlabel("Time") plt.ylabel("Value") plt.grid(True) def trend(time, slope=0): return slope * time def seasonal_pattern(season_time): """Just an arbitrary pattern, you can change it if you wish""" return np.where(season_time < 0.4, np.cos(season_time * 2 * np.pi), 1 / np.exp(3 * season_time)) def seasonality(time, period, amplitude=1, phase=0): """Repeats the same pattern at each period""" season_time = ((time + phase) % period) / period return amplitude * seasonal_pattern(season_time) def noise(time, noise_level=1, seed=None): rnd = np.random.RandomState(seed) return rnd.randn(len(time)) * noise_level time = np.arange(4 * 365 + 1, dtype="float32") baseline = 10 series = trend(time, 0.1) baseline = 10 amplitude = 40 slope = 0.05 noise_level = 5 # Create the series series = baseline + trend(time, slope) + seasonality(time, period=365, amplitude=amplitude) # Update with noise series += noise(time, noise_level, seed=42) plt.figure(figsize=(10, 6)) plot_series(time, series) plt.show() ``` Now that we have the time series, let's split it so we can start forecasting ``` split_time = 1000 time_train = time[:split_time] x_train = series[:split_time] time_valid = time[split_time:] x_valid = series[split_time:] plt.figure(figsize=(10, 6)) plot_series(time_train, x_train) plt.show() plt.figure(figsize=(10, 6)) plot_series(time_valid, x_valid) plt.show() ``` Naive Forecast ``` naive_forecast = series[split_time - 1:-1] plt.figure(figsize=(10, 6)) plot_series(time_valid, x_valid) plot_series(time_valid, naive_forecast) x = [1,2, 3, 4, 5, 6, 7, 8, 9, 10] print(x) split=7 naive=x[split - 1:-1] print(naive) x_val = x[split:] print(x_val) ``` Let's zoom in on the start of the validation period: ``` plt.figure(figsize=(10, 6)) plot_series(time_valid, x_valid, start=0, end=150) plot_series(time_valid, naive_forecast, start=1, end=151) ``` You can see that the naive forecast lags 1 step behind the time series. Now let's compute the mean squared error and the mean absolute error between the forecasts and the predictions in the validation period: ``` print(keras.metrics.mean_squared_error(x_valid, naive_forecast).numpy()) print(keras.metrics.mean_absolute_error(x_valid, naive_forecast).numpy()) ``` That's our baseline, now let's try a moving average: ``` def moving_average_forecast(series, window_size): """Forecasts the mean of the last few values. If window_size=1, then this is equivalent to naive forecast""" forecast = [] for time in range(len(series) - window_size): forecast.append(series[time:time + window_size].mean()) return np.array(forecast) moving_avg = moving_average_forecast(series, 30)[split_time - 30:] plt.figure(figsize=(10, 6)) plot_series(time_valid, x_valid) plot_series(time_valid, moving_avg) print(keras.metrics.mean_squared_error(x_valid, moving_avg).numpy()) print(keras.metrics.mean_absolute_error(x_valid, moving_avg).numpy()) ``` That's worse than naive forecast! The moving average does not anticipate trend or seasonality, so let's try to remove them by using differencing. Since the seasonality period is 365 days, we will subtract the value at time t – 365 from the value at time t. 
```
diff_series = (series[365:] - series[:-365])
diff_time = time[365:]

plt.figure(figsize=(10, 6))
plot_series(diff_time, diff_series)
plt.show()
```

Great, the trend and seasonality seem to be gone, so now we can use the moving average:

```
diff_moving_avg = moving_average_forecast(diff_series, 50)[split_time - 365 - 50:]

plt.figure(figsize=(10, 6))
plot_series(time_valid, diff_series[split_time - 365:])
plot_series(time_valid, diff_moving_avg)
plt.show()
```

Now let's bring back the trend and seasonality by adding the past values from t – 365:

```
diff_moving_avg_plus_past = series[split_time - 365:-365] + diff_moving_avg

plt.figure(figsize=(10, 6))
plot_series(time_valid, x_valid)
plot_series(time_valid, diff_moving_avg_plus_past)
plt.show()

print(keras.metrics.mean_squared_error(x_valid, diff_moving_avg_plus_past).numpy())
print(keras.metrics.mean_absolute_error(x_valid, diff_moving_avg_plus_past).numpy())
```

That's better than the naive forecast, good. However, the forecasts look a bit too random, because we're just adding past values, which were noisy. Let's use a moving average on the past values to remove some of the noise:

```
diff_moving_avg_plus_smooth_past = moving_average_forecast(series[split_time - 370:-360], 10) + diff_moving_avg

plt.figure(figsize=(10, 6))
plot_series(time_valid, x_valid)
plot_series(time_valid, diff_moving_avg_plus_smooth_past)
plt.show()

print(keras.metrics.mean_squared_error(x_valid, diff_moving_avg_plus_smooth_past).numpy())
print(keras.metrics.mean_absolute_error(x_valid, diff_moving_avg_plus_smooth_past).numpy())
```
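To make the comparison easier to read, here is a short recap sketch (not part of the original notebook) that gathers the validation MAE of every forecast built above into a single listing.

```
# Hedged recap: mean absolute error on the validation period for each of the
# forecasts constructed above.
forecasts = {
    'naive': naive_forecast,
    'moving average (30)': moving_avg,
    'diff + past': diff_moving_avg_plus_past,
    'diff + smoothed past': diff_moving_avg_plus_smooth_past,
}
for name, forecast in forecasts.items():
    mae = keras.metrics.mean_absolute_error(x_valid, forecast).numpy()
    print('{:25s} MAE = {:.3f}'.format(name, mae))
```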
``` !pip install -U -q dvc dvc[gdrive] # !dvc get https://github.com/sparsh-ai/reco-data ml1m/v0/ratings.dat ``` ## Dataset ``` RAW_DATASET_ROOT_FOLDER = '/content/data/bronze' PREP_DATASET_ROOT_FOLDER = '/content/data/silver' FNL_DATASET_ROOT_FOLDER = '/content/data/gold' import pickle import shutil import tempfile import os from datetime import date from pathlib import Path import gzip from abc import * import numpy as np import pandas as pd from tqdm import tqdm tqdm.pandas() class AbstractDataset(metaclass=ABCMeta): def __init__(self, args): self.args = args self.min_rating = args.min_rating self.min_uc = args.min_uc self.min_sc = args.min_sc self.split = args.split assert self.min_uc >= 2, 'Need at least 2 ratings per user for validation and test' @classmethod @abstractmethod def code(cls): pass @classmethod def raw_code(cls): return cls.code() @classmethod def all_raw_file_names(cls): return [] @classmethod @abstractmethod def url(cls): pass @abstractmethod def preprocess(self): pass @abstractmethod def load_ratings_df(self): pass @abstractmethod def maybe_download_raw_dataset(self): pass def load_dataset(self): self.preprocess() dataset_path = self._get_preprocessed_dataset_path() dataset = pickle.load(dataset_path.open('rb')) return dataset def filter_triplets(self, df): print('Filtering triplets') if self.min_sc > 0: item_sizes = df.groupby('sid').size() good_items = item_sizes.index[item_sizes >= self.min_sc] df = df[df['sid'].isin(good_items)] if self.min_uc > 0: user_sizes = df.groupby('uid').size() good_users = user_sizes.index[user_sizes >= self.min_uc] df = df[df['uid'].isin(good_users)] return df def densify_index(self, df): print('Densifying index') umap = {u: i for i, u in enumerate(set(df['uid']), start=1)} smap = {s: i for i, s in enumerate(set(df['sid']), start=1)} df['uid'] = df['uid'].map(umap) df['sid'] = df['sid'].map(smap) return df, umap, smap def split_df(self, df, user_count): if self.args.split == 'leave_one_out': print('Splitting') user_group = df.groupby('uid') user2items = user_group.progress_apply( lambda d: list(d.sort_values(by=['timestamp', 'sid'])['sid'])) train, val, test = {}, {}, {} for i in range(user_count): user = i + 1 items = user2items[user] train[user], val[user], test[user] = items[:-2], items[-2:-1], items[-1:] return train, val, test else: raise NotImplementedError def _get_rawdata_root_path(self): return Path(RAW_DATASET_ROOT_FOLDER) def _get_rawdata_folder_path(self): root = self._get_rawdata_root_path() return root.joinpath(self.raw_code()) def _get_preprocessed_root_path(self): root = Path(PREP_DATASET_ROOT_FOLDER) return root.joinpath(self.raw_code()) def _get_preprocessed_folder_path(self): preprocessed_root = self._get_preprocessed_root_path() folder_name = '{}_min_rating{}-min_uc{}-min_sc{}-split{}' \ .format(self.code(), self.min_rating, self.min_uc, self.min_sc, self.split) return preprocessed_root.joinpath(folder_name) def _get_preprocessed_dataset_path(self): folder = self._get_preprocessed_folder_path() return folder.joinpath('dataset.pkl') class ML1MDataset(AbstractDataset): @classmethod def code(cls): return 'ml-1m' @classmethod def url(cls): return {'path':'ml1m/v0', 'repo':'https://github.com/sparsh-ai/reco-data'} @classmethod def all_raw_file_names(cls): return ['ratings.dat'] def maybe_download_raw_dataset(self): folder_path = self._get_rawdata_folder_path() if not folder_path.is_dir(): folder_path.mkdir(parents=True) if all(folder_path.joinpath(filename).is_file() for filename in self.all_raw_file_names()): print('Raw 
data already exists. Skip downloading') return print("Raw file doesn't exist. Downloading...") for filename in self.all_raw_file_names(): with open(os.path.join(folder_path,filename), "wb") as f: with dvc.api.open( path=self.url()['path']+'/'+filename, repo=self.url()['repo'], mode='rb') as scan: f.write(scan.read()) print() def preprocess(self): dataset_path = self._get_preprocessed_dataset_path() if dataset_path.is_file(): print('Already preprocessed. Skip preprocessing') return if not dataset_path.parent.is_dir(): dataset_path.parent.mkdir(parents=True) self.maybe_download_raw_dataset() df = self.load_ratings_df() df = self.filter_triplets(df) df, umap, smap = self.densify_index(df) train, val, test = self.split_df(df, len(umap)) dataset = {'train': train, 'val': val, 'test': test, 'umap': umap, 'smap': smap} with dataset_path.open('wb') as f: pickle.dump(dataset, f) def load_ratings_df(self): folder_path = self._get_rawdata_folder_path() file_path = folder_path.joinpath('ratings.dat') df = pd.read_csv(file_path, sep='::', header=None) df.columns = ['uid', 'sid', 'rating', 'timestamp'] return df DATASETS = { ML1MDataset.code(): ML1MDataset, # ML20MDataset.code(): ML20MDataset, # SteamDataset.code(): SteamDataset, # GamesDataset.code(): GamesDataset, # BeautyDataset.code(): BeautyDataset, # BeautyDenseDataset.code(): BeautyDenseDataset, # YooChooseDataset.code(): YooChooseDataset } def dataset_factory(args): dataset = DATASETS[args.dataset_code] return dataset(args) ``` ## Negative Sampling ``` from abc import * from pathlib import Path import pickle class AbstractNegativeSampler(metaclass=ABCMeta): def __init__(self, train, val, test, user_count, item_count, sample_size, seed, flag, save_folder): self.train = train self.val = val self.test = test self.user_count = user_count self.item_count = item_count self.sample_size = sample_size self.seed = seed self.flag = flag self.save_folder = save_folder @classmethod @abstractmethod def code(cls): pass @abstractmethod def generate_negative_samples(self): pass def get_negative_samples(self): savefile_path = self._get_save_path() if savefile_path.is_file(): print('Negatives samples exist. Loading.') seen_samples, negative_samples = pickle.load(savefile_path.open('rb')) return seen_samples, negative_samples print("Negative samples don't exist. 
Generating.") seen_samples, negative_samples = self.generate_negative_samples() with savefile_path.open('wb') as f: pickle.dump([seen_samples, negative_samples], f) return seen_samples, negative_samples def _get_save_path(self): folder = Path(self.save_folder) filename = '{}-sample_size{}-seed{}-{}.pkl'.format( self.code(), self.sample_size, self.seed, self.flag) return folder.joinpath(filename) from tqdm import trange import numpy as np class RandomNegativeSampler(AbstractNegativeSampler): @classmethod def code(cls): return 'random' def generate_negative_samples(self): assert self.seed is not None, 'Specify seed for random sampling' np.random.seed(self.seed) num_samples = 2 * self.user_count * self.sample_size all_samples = np.random.choice(self.item_count, num_samples) + 1 seen_samples = {} negative_samples = {} print('Sampling negative items randomly...') j = 0 for i in trange(self.user_count): user = i + 1 seen = set(self.train[user]) seen.update(self.val[user]) seen.update(self.test[user]) seen_samples[user] = seen samples = [] while len(samples) < self.sample_size: item = all_samples[j % num_samples] j += 1 if item in seen or item in samples: continue samples.append(item) negative_samples[user] = samples return seen_samples, negative_samples from tqdm import trange from collections import Counter import numpy as np class PopularNegativeSampler(AbstractNegativeSampler): @classmethod def code(cls): return 'popular' def generate_negative_samples(self): assert self.seed is not None, 'Specify seed for random sampling' np.random.seed(self.seed) popularity = self.items_by_popularity() items = list(popularity.keys()) total = 0 for i in range(len(items)): total += popularity[items[i]] for i in range(len(items)): popularity[items[i]] /= total probs = list(popularity.values()) num_samples = 2 * self.user_count * self.sample_size all_samples = np.random.choice(items, num_samples, p=probs) seen_samples = {} negative_samples = {} print('Sampling negative items by popularity...') j = 0 for i in trange(self.user_count): user = i + 1 seen = set(self.train[user]) seen.update(self.val[user]) seen.update(self.test[user]) seen_samples[user] = seen samples = [] while len(samples) < self.sample_size: item = all_samples[j % num_samples] j += 1 if item in seen or item in samples: continue samples.append(item) negative_samples[user] = samples return seen_samples, negative_samples def items_by_popularity(self): popularity = Counter() self.users = sorted(self.train.keys()) for user in self.users: popularity.update(self.train[user]) popularity.update(self.val[user]) popularity.update(self.test[user]) popularity = dict(popularity) popularity = {k: v for k, v in sorted(popularity.items(), key=lambda item: item[1], reverse=True)} return popularity NEGATIVE_SAMPLERS = { PopularNegativeSampler.code(): PopularNegativeSampler, RandomNegativeSampler.code(): RandomNegativeSampler, } def negative_sampler_factory(code, train, val, test, user_count, item_count, sample_size, seed, flag, save_folder): negative_sampler = NEGATIVE_SAMPLERS[code] return negative_sampler(train, val, test, user_count, item_count, sample_size, seed, flag, save_folder) ``` ## Dataloader ``` from abc import * import random class AbstractDataloader(metaclass=ABCMeta): def __init__(self, args, dataset): self.args = args self.rng = random.Random() self.save_folder = dataset._get_preprocessed_folder_path() dataset = dataset.load_dataset() self.train = dataset['train'] self.val = dataset['val'] self.test = dataset['test'] self.umap = dataset['umap'] 
self.smap = dataset['smap'] self.user_count = len(self.umap) self.item_count = len(self.smap) @classmethod @abstractmethod def code(cls): pass @abstractmethod def get_pytorch_dataloaders(self): pass import torch import random import torch.utils.data as data_utils class RNNDataloader(): def __init__(self, args, dataset): self.args = args self.rng = random.Random() self.save_folder = dataset._get_preprocessed_folder_path() dataset = dataset.load_dataset() self.train = dataset['train'] self.val = dataset['val'] self.test = dataset['test'] self.umap = dataset['umap'] self.smap = dataset['smap'] self.user_count = len(self.umap) self.item_count = len(self.smap) args.num_items = len(self.smap) self.max_len = args.bert_max_len val_negative_sampler = negative_sampler_factory(args.test_negative_sampler_code, self.train, self.val, self.test, self.user_count, self.item_count, args.test_negative_sample_size, args.test_negative_sampling_seed, 'val', self.save_folder) test_negative_sampler = negative_sampler_factory(args.test_negative_sampler_code, self.train, self.val, self.test, self.user_count, self.item_count, args.test_negative_sample_size, args.test_negative_sampling_seed, 'test', self.save_folder) self.seen_samples, self.val_negative_samples = val_negative_sampler.get_negative_samples() self.seen_samples, self.test_negative_samples = test_negative_sampler.get_negative_samples() @classmethod def code(cls): return 'rnn' def get_pytorch_dataloaders(self): train_loader = self._get_train_loader() val_loader = self._get_val_loader() test_loader = self._get_test_loader() return train_loader, val_loader, test_loader def _get_train_loader(self): dataset = self._get_train_dataset() dataloader = data_utils.DataLoader(dataset, batch_size=self.args.train_batch_size, shuffle=True, pin_memory=True) return dataloader def _get_train_dataset(self): dataset = RNNTrainDataset( self.train, self.max_len) return dataset def _get_val_loader(self): return self._get_eval_loader(mode='val') def _get_test_loader(self): return self._get_eval_loader(mode='test') def _get_eval_loader(self, mode): batch_size = self.args.val_batch_size if mode == 'val' else self.args.test_batch_size dataset = self._get_eval_dataset(mode) dataloader = data_utils.DataLoader(dataset, batch_size=batch_size, shuffle=False, pin_memory=True) return dataloader def _get_eval_dataset(self, mode): if mode == 'val': dataset = RNNValidDataset(self.train, self.val, self.max_len, self.val_negative_samples) elif mode == 'test': dataset = RNNTestDataset(self.train, self.val, self.test, self.max_len, self.test_negative_samples) return dataset class RNNTrainDataset(data_utils.Dataset): def __init__(self, u2seq, max_len): # self.u2seq = u2seq # self.users = sorted(self.u2seq.keys()) self.max_len = max_len self.all_seqs = [] self.all_labels = [] for u in sorted(u2seq.keys()): seq = u2seq[u] for i in range(1, len(seq)): self.all_seqs += [seq[:-i]] self.all_labels += [seq[-i]] assert len(self.all_seqs) == len(self.all_labels) def __len__(self): return len(self.all_seqs) def __getitem__(self, index): tokens = self.all_seqs[index][-self.max_len:] length = len(tokens) tokens = tokens + [0] * (self.max_len - length) return torch.LongTensor(tokens), torch.LongTensor([length]), torch.LongTensor([self.all_labels[index]]) class RNNValidDataset(data_utils.Dataset): def __init__(self, u2seq, u2answer, max_len, negative_samples, valid_users=None): self.u2seq = u2seq # train if not valid_users: self.users = sorted(self.u2seq.keys()) else: self.users = valid_users self.users = 
sorted(self.u2seq.keys()) self.u2answer = u2answer self.max_len = max_len self.negative_samples = negative_samples def __len__(self): return len(self.users) def __getitem__(self, index): user = self.users[index] tokens = self.u2seq[user][-self.max_len:] length = len(tokens) tokens = tokens + [0] * (self.max_len - length) answer = self.u2answer[user] negs = self.negative_samples[user] candidates = answer + negs labels = [1] * len(answer) + [0] * len(negs) return torch.LongTensor(tokens), torch.LongTensor([length]), torch.LongTensor(candidates), torch.LongTensor(labels) class RNNTestDataset(data_utils.Dataset): def __init__(self, u2seq, u2val, u2answer, max_len, negative_samples, test_users=None): self.u2seq = u2seq # train self.u2val = u2val # val if not test_users: self.users = sorted(self.u2seq.keys()) else: self.users = test_users self.users = sorted(self.u2seq.keys()) self.u2answer = u2answer # test self.max_len = max_len self.negative_samples = negative_samples def __len__(self): return len(self.users) def __getitem__(self, index): user = self.users[index] tokens = (self.u2seq[user] + self.u2val[user])[-self.max_len:] # append validation item after train seq length = len(tokens) tokens = tokens + [0] * (self.max_len - length) answer = self.u2answer[user] negs = self.negative_samples[user] candidates = answer + negs labels = [1] * len(answer) + [0] * len(negs) return torch.LongTensor(tokens), torch.LongTensor([length]), torch.LongTensor(candidates), torch.LongTensor(labels) import torch import random import torch.utils.data as data_utils class BERTDataloader(): def __init__(self, args, dataset): self.args = args self.rng = random.Random() self.save_folder = dataset._get_preprocessed_folder_path() dataset = dataset.load_dataset() self.train = dataset['train'] self.val = dataset['val'] self.test = dataset['test'] self.umap = dataset['umap'] self.smap = dataset['smap'] self.user_count = len(self.umap) self.item_count = len(self.smap) args.num_items = self.item_count self.max_len = args.bert_max_len self.mask_prob = args.bert_mask_prob self.max_predictions = args.bert_max_predictions self.sliding_size = args.sliding_window_size self.CLOZE_MASK_TOKEN = self.item_count + 1 val_negative_sampler = negative_sampler_factory(args.test_negative_sampler_code, self.train, self.val, self.test, self.user_count, self.item_count, args.test_negative_sample_size, args.test_negative_sampling_seed, 'val', self.save_folder) test_negative_sampler = negative_sampler_factory(args.test_negative_sampler_code, self.train, self.val, self.test, self.user_count, self.item_count, args.test_negative_sample_size, args.test_negative_sampling_seed, 'test', self.save_folder) self.seen_samples, self.val_negative_samples = val_negative_sampler.get_negative_samples() self.seen_samples, self.test_negative_samples = test_negative_sampler.get_negative_samples() @classmethod def code(cls): return 'bert' def get_pytorch_dataloaders(self): train_loader = self._get_train_loader() val_loader = self._get_val_loader() test_loader = self._get_test_loader() return train_loader, val_loader, test_loader def _get_train_loader(self): dataset = self._get_train_dataset() dataloader = data_utils.DataLoader(dataset, batch_size=self.args.train_batch_size, shuffle=True, pin_memory=True) return dataloader def _get_train_dataset(self): dataset = BERTTrainDataset( self.train, self.max_len, self.mask_prob, self.max_predictions, self.sliding_size, self.CLOZE_MASK_TOKEN, self.item_count, self.rng) return dataset def _get_val_loader(self): return 
self._get_eval_loader(mode='val') def _get_test_loader(self): return self._get_eval_loader(mode='test') def _get_eval_loader(self, mode): batch_size = self.args.val_batch_size if mode == 'val' else self.args.test_batch_size dataset = self._get_eval_dataset(mode) dataloader = data_utils.DataLoader(dataset, batch_size=batch_size, shuffle=False, pin_memory=True) return dataloader def _get_eval_dataset(self, mode): if mode == 'val': dataset = BERTValidDataset(self.train, self.val, self.max_len, self.CLOZE_MASK_TOKEN, self.val_negative_samples) elif mode == 'test': dataset = BERTTestDataset(self.train, self.val, self.test, self.max_len, self.CLOZE_MASK_TOKEN, self.test_negative_samples) return dataset class BERTTrainDataset(data_utils.Dataset): def __init__(self, u2seq, max_len, mask_prob, max_predictions, sliding_size, mask_token, num_items, rng): # self.u2seq = u2seq # self.users = sorted(self.u2seq.keys()) self.max_len = max_len self.mask_prob = mask_prob self.max_predictions = max_predictions self.sliding_step = int(sliding_size * max_len) self.mask_token = mask_token self.num_items = num_items self.rng = rng assert self.sliding_step > 0 self.all_seqs = [] for u in sorted(u2seq.keys()): seq = u2seq[u] if len(seq) < self.max_len + self.sliding_step: self.all_seqs.append(seq) else: start_idx = range(len(seq) - max_len, -1, -self.sliding_step) self.all_seqs = self.all_seqs + [seq[i:i + max_len] for i in start_idx] def __len__(self): return len(self.all_seqs) # return len(self.users) def __getitem__(self, index): # user = self.users[index] # seq = self._getseq(user) seq = self.all_seqs[index] tokens = [] labels = [] covered_items = set() for i in range(len(seq)): s = seq[i] if (len(covered_items) >= self.max_predictions) or (s in covered_items): tokens.append(s) labels.append(0) continue temp_mask_prob = self.mask_prob if i == (len(seq) - 1): temp_mask_prob += 0.1 * (1 - self.mask_prob) prob = self.rng.random() if prob < temp_mask_prob: covered_items.add(s) prob /= temp_mask_prob if prob < 0.8: tokens.append(self.mask_token) elif prob < 0.9: tokens.append(self.rng.randint(1, self.num_items)) else: tokens.append(s) labels.append(s) else: tokens.append(s) labels.append(0) tokens = tokens[-self.max_len:] labels = labels[-self.max_len:] mask_len = self.max_len - len(tokens) tokens = [0] * mask_len + tokens labels = [0] * mask_len + labels return torch.LongTensor(tokens), torch.LongTensor(labels) def _getseq(self, user): return self.u2seq[user] class BERTValidDataset(data_utils.Dataset): def __init__(self, u2seq, u2answer, max_len, mask_token, negative_samples, valid_users=None): self.u2seq = u2seq # train if not valid_users: self.users = sorted(self.u2seq.keys()) else: self.users = valid_users self.u2answer = u2answer self.max_len = max_len self.mask_token = mask_token self.negative_samples = negative_samples def __len__(self): return len(self.users) def __getitem__(self, index): user = self.users[index] seq = self.u2seq[user] answer = self.u2answer[user] negs = self.negative_samples[user] candidates = answer + negs labels = [1] * len(answer) + [0] * len(negs) seq = seq + [self.mask_token] seq = seq[-self.max_len:] padding_len = self.max_len - len(seq) seq = [0] * padding_len + seq return torch.LongTensor(seq), torch.LongTensor(candidates), torch.LongTensor(labels) class BERTTestDataset(data_utils.Dataset): def __init__(self, u2seq, u2val, u2answer, max_len, mask_token, negative_samples, test_users=None): self.u2seq = u2seq # train self.u2val = u2val # val if not test_users: self.users = 
sorted(self.u2seq.keys()) else: self.users = test_users self.users = sorted(self.u2seq.keys()) self.u2answer = u2answer # test self.max_len = max_len self.mask_token = mask_token self.negative_samples = negative_samples def __len__(self): return len(self.users) def __getitem__(self, index): user = self.users[index] seq = self.u2seq[user] + self.u2val[user] # append validation item after train seq answer = self.u2answer[user] negs = self.negative_samples[user] candidates = answer + negs labels = [1] * len(answer) + [0] * len(negs) seq = seq + [self.mask_token] seq = seq[-self.max_len:] padding_len = self.max_len - len(seq) seq = [0] * padding_len + seq return torch.LongTensor(seq), torch.LongTensor(candidates), torch.LongTensor(labels) def dataloader_factory(args): dataset = dataset_factory(args) if args.model_code == 'bert': dataloader = BERTDataloader(args, dataset) elif args.model_code == 'sas': dataloader = SASDataloader(args, dataset) else: dataloader = RNNDataloader(args, dataset) train, val, test = dataloader.get_pytorch_dataloaders() return train, val, test ``` ## Args ``` import numpy as np import random import torch import argparse def set_template(args): args.min_uc = 5 args.min_sc = 5 args.split = 'leave_one_out' dataset_code = {'1': 'ml-1m', '20': 'ml-20m', 'b': 'beauty', 'bd': 'beauty_dense' , 'g': 'games', 's': 'steam', 'y': 'yoochoose'} args.dataset_code = dataset_code[input('Input 1 / 20 for movielens, b for beauty, bd for dense beauty, g for games, s for steam and y for yoochoose: ')] if args.dataset_code == 'ml-1m': args.sliding_window_size = 0.5 args.bert_hidden_units = 64 args.bert_dropout = 0.1 args.bert_attn_dropout = 0.1 args.bert_max_len = 200 args.bert_mask_prob = 0.2 args.bert_max_predictions = 40 elif args.dataset_code == 'ml-20m': args.sliding_window_size = 0.5 args.bert_hidden_units = 64 args.bert_dropout = 0.1 args.bert_attn_dropout = 0.1 args.bert_max_len = 200 args.bert_mask_prob = 0.2 args.bert_max_predictions = 20 elif args.dataset_code in ['beauty', 'beauty_dense']: args.sliding_window_size = 0.5 args.bert_hidden_units = 64 args.bert_dropout = 0.5 args.bert_attn_dropout = 0.2 args.bert_max_len = 50 args.bert_mask_prob = 0.6 args.bert_max_predictions = 30 elif args.dataset_code == 'games': args.sliding_window_size = 0.5 args.bert_hidden_units = 64 args.bert_dropout = 0.5 args.bert_attn_dropout = 0.5 args.bert_max_len = 50 args.bert_mask_prob = 0.5 args.bert_max_predictions = 25 elif args.dataset_code == 'steam': args.sliding_window_size = 0.5 args.bert_hidden_units = 64 args.bert_dropout = 0.2 args.bert_attn_dropout = 0.2 args.bert_max_len = 50 args.bert_mask_prob = 0.4 args.bert_max_predictions = 20 elif args.dataset_code == 'yoochoose': args.sliding_window_size = 0.5 args.bert_hidden_units = 256 args.bert_dropout = 0.2 args.bert_attn_dropout = 0.2 args.bert_max_len = 50 args.bert_mask_prob = 0.4 args.bert_max_predictions = 20 batch = 128 args.train_batch_size = batch args.val_batch_size = batch args.test_batch_size = batch args.train_negative_sampler_code = 'random' args.train_negative_sample_size = 0 args.train_negative_sampling_seed = 0 args.test_negative_sampler_code = 'random' args.test_negative_sample_size = 100 args.test_negative_sampling_seed = 98765 model_codes = {'b': 'bert', 's':'sas', 'n':'narm'} args.model_code = model_codes[input('Input model code, b for BERT, s for SASRec and n for NARM: ')] if torch.cuda.is_available(): args.device = 'cuda:' + input('Input GPU ID: ') else: args.device = 'cpu' args.optimizer = 'AdamW' args.lr = 0.001 
args.weight_decay = 0.01 args.enable_lr_schedule = True args.decay_step = 10000 args.gamma = 1. args.enable_lr_warmup = False args.warmup_steps = 100 args.num_epochs = 1000 args.metric_ks = [1, 5, 10] args.best_metric = 'NDCG@10' args.model_init_seed = 98765 args.bert_num_blocks = 2 args.bert_num_heads = 2 args.bert_head_size = None parser = argparse.ArgumentParser() ################ # Dataset ################ parser.add_argument('--dataset_code', type=str, default='ml-1m', choices=DATASETS.keys()) parser.add_argument('--min_rating', type=int, default=0) parser.add_argument('--min_uc', type=int, default=5) parser.add_argument('--min_sc', type=int, default=5) parser.add_argument('--split', type=str, default='leave_one_out') parser.add_argument('--dataset_split_seed', type=int, default=0) ################ # Dataloader ################ parser.add_argument('--dataloader_random_seed', type=float, default=0) parser.add_argument('--train_batch_size', type=int, default=64) parser.add_argument('--val_batch_size', type=int, default=64) parser.add_argument('--test_batch_size', type=int, default=64) parser.add_argument('--sliding_window_size', type=float, default=0.5) ################ # NegativeSampler ################ parser.add_argument('--train_negative_sampler_code', type=str, default='random', choices=['popular', 'random']) parser.add_argument('--train_negative_sample_size', type=int, default=0) parser.add_argument('--train_negative_sampling_seed', type=int, default=0) parser.add_argument('--test_negative_sampler_code', type=str, default='random', choices=['popular', 'random']) parser.add_argument('--test_negative_sample_size', type=int, default=100) parser.add_argument('--test_negative_sampling_seed', type=int, default=0) ################ # Trainer ################ # device # parser.add_argument('--device', type=str, default='cpu', choices=['cpu', 'cuda']) parser.add_argument('--num_gpu', type=int, default=1) # optimizer & lr# parser.add_argument('--optimizer', type=str, default='AdamW', choices=['AdamW', 'Adam', 'SGD']) parser.add_argument('--weight_decay', type=float, default=0) parser.add_argument('--adam_epsilon', type=float, default=1e-9) parser.add_argument('--momentum', type=float, default=None) parser.add_argument('--lr', type=float, default=0.001) parser.add_argument('--enable_lr_schedule', type=bool, default=True) parser.add_argument('--decay_step', type=int, default=100) parser.add_argument('--gamma', type=float, default=1) parser.add_argument('--enable_lr_warmup', type=bool, default=True) parser.add_argument('--warmup_steps', type=int, default=100) # epochs # parser.add_argument('--num_epochs', type=int, default=100) # logger # parser.add_argument('--log_period_as_iter', type=int, default=12800) # evaluation # parser.add_argument('--metric_ks', nargs='+', type=int, default=[1, 5, 10, 20]) parser.add_argument('--best_metric', type=str, default='NDCG@10') ################ # Model ################ parser.add_argument('--model_code', type=str, default='bert', choices=['bert', 'sas', 'narm']) # BERT specs, used for SASRec and NARM as well # parser.add_argument('--bert_max_len', type=int, default=None) parser.add_argument('--bert_hidden_units', type=int, default=64) parser.add_argument('--bert_num_blocks', type=int, default=2) parser.add_argument('--bert_num_heads', type=int, default=2) parser.add_argument('--bert_head_size', type=int, default=32) parser.add_argument('--bert_dropout', type=float, default=0.1) parser.add_argument('--bert_attn_dropout', type=float, default=0.1) 
parser.add_argument('--bert_mask_prob', type=float, default=0.2) ################ # Distillation & Retraining ################ parser.add_argument('--num_generated_seqs', type=int, default=3000) parser.add_argument('--num_original_seqs', type=int, default=0) parser.add_argument('--num_poisoned_seqs', type=int, default=100) parser.add_argument('--num_alter_items', type=int, default=10) ################ args = parser.parse_args(args={}) print('\n'.join(f'{k}={v}' for k, v in vars(args).items())) set_template(args) print('\n'.join(f'{k}={v}' for k, v in vars(args).items())) def fix_random_seed_as(random_seed): random.seed(random_seed) np.random.seed(random_seed) torch.manual_seed(random_seed) torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False fix_random_seed_as(args.model_init_seed) import dvc.api train_loader, val_loader, test_loader = dataloader_factory(args) ``` ## Model ``` import math import torch import torch.nn as nn import torch.nn.functional as F class TokenEmbedding(nn.Embedding): def __init__(self, vocab_size, embed_size=512): super().__init__(vocab_size, embed_size, padding_idx=0) class PositionalEmbedding(nn.Module): def __init__(self, max_len, d_model): super().__init__() self.d_model = d_model self.pe = nn.Embedding(max_len+1, d_model) def forward(self, x): pose = (x > 0) * (x > 0).sum(dim=-1).unsqueeze(1).repeat(1, x.size(-1)) pose += torch.arange(start=-(x.size(1)-1), end=1, step=1, device=x.device) pose = pose * (x > 0) return self.pe(pose) class GELU(nn.Module): def forward(self, x): return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) class PositionwiseFeedForward(nn.Module): def __init__(self, d_model, d_ff): super().__init__() self.w_1 = nn.Linear(d_model, d_ff) self.w_2 = nn.Linear(d_ff, d_model) self.activation = GELU() def forward(self, x): return self.w_2(self.activation(self.w_1(x))) # layer norm class LayerNorm(nn.Module): def __init__(self, features, eps=1e-6): super().__init__() self.weight = nn.Parameter(torch.ones(features)) self.bias = nn.Parameter(torch.zeros(features)) self.eps = eps def forward(self, x): mean = x.mean(-1, keepdim=True) std = x.std(-1, keepdim=True) return self.weight * (x - mean) / (std + self.eps) + self.bias # layer norm and dropout (dropout and then layer norm) class SublayerConnection(nn.Module): def __init__(self, size, dropout): super().__init__() self.layer_norm = LayerNorm(size) self.dropout = nn.Dropout(dropout) def forward(self, x, sublayer): # return x + self.dropout(sublayer(self.norm(x))) # original implementation return self.layer_norm(x + self.dropout(sublayer(x))) # BERT4Rec implementation class Attention(nn.Module): def forward(self, query, key, value, mask=None, dropout=None, sas=False): scores = torch.matmul(query, key.transpose(-2, -1)) \ / math.sqrt(query.size(-1)) if mask is not None: scores = scores.masked_fill(mask == 0, -1e9) if sas: direction_mask = torch.ones_like(scores) direction_mask = torch.tril(direction_mask) scores = scores.masked_fill(direction_mask == 0, -1e9) p_attn = F.softmax(scores, dim=-1) if dropout is not None: p_attn = dropout(p_attn) return torch.matmul(p_attn, value), p_attn class MultiHeadedAttention(nn.Module): def __init__(self, h, d_model, head_size=None, dropout=0.1): super().__init__() assert d_model % h == 0 self.h = h self.d_k = d_model // h if head_size is not None: self.head_size = head_size else: self.head_size = d_model // h self.linear_layers = nn.ModuleList( [nn.Linear(d_model, self.h * self.head_size) for _ in 
range(3)]) self.attention = Attention() self.dropout = nn.Dropout(p=dropout) self.output_linear = nn.Linear(self.h * self.head_size, d_model) def forward(self, query, key, value, mask=None): batch_size = query.size(0) # 1) do all the linear projections in batch from d_model => h x d_k query, key, value = [l(x).view(batch_size, -1, self.h, self.head_size).transpose(1, 2) for l, x in zip(self.linear_layers, (query, key, value))] # 2) apply attention on all the projected vectors in batch. x, attn = self.attention( query, key, value, mask=mask, dropout=self.dropout) # 3) "concat" using a view and apply a final linear. x = x.transpose(1, 2).contiguous().view( batch_size, -1, self.h * self.head_size) return self.output_linear(x) class TransformerBlock(nn.Module): def __init__(self, hidden, attn_heads, head_size, feed_forward_hidden, dropout, attn_dropout=0.1): super().__init__() self.attention = MultiHeadedAttention( h=attn_heads, d_model=hidden, head_size=head_size, dropout=attn_dropout) self.feed_forward = PositionwiseFeedForward( d_model=hidden, d_ff=feed_forward_hidden) self.input_sublayer = SublayerConnection(size=hidden, dropout=dropout) self.output_sublayer = SublayerConnection(size=hidden, dropout=dropout) def forward(self, x, mask): x = self.input_sublayer( x, lambda _x: self.attention.forward(_x, _x, _x, mask=mask)) x = self.output_sublayer(x, self.feed_forward) return x class SASMultiHeadedAttention(nn.Module): def __init__(self, h, d_model, head_size=None, dropout=0.1): super().__init__() assert d_model % h == 0 self.h = h self.d_k = d_model // h if head_size is not None: self.head_size = head_size else: self.head_size = d_model // h self.linear_layers = nn.ModuleList( [nn.Linear(d_model, self.h * self.head_size) for _ in range(3)]) self.attention = Attention() self.dropout = nn.Dropout(p=dropout) self.layer_norm = LayerNorm(d_model) def forward(self, query, key, value, mask=None): batch_size = query.size(0) # 1) do all the linear projections in batch from d_model => h x d_k query_, key_, value_ = [l(x).view(batch_size, -1, self.h, self.head_size).transpose(1, 2) for l, x in zip(self.linear_layers, (query, key, value))] # 2) apply attention on all the projected vectors in batch. x, attn = self.attention( query_, key_, value_, mask=mask, dropout=self.dropout, sas=True) # 3) "concat" using a view and apply a final linear. 
x = x.transpose(1, 2).contiguous().view( batch_size, -1, self.h * self.head_size) return self.layer_norm(x + query) class SASPositionwiseFeedForward(nn.Module): def __init__(self, d_model, d_ff, dropout=0.1): super().__init__() self.conv1 = nn.Conv1d(in_channels=d_model, out_channels=d_ff, kernel_size=1) self.activation = nn.ReLU() self.dropout = nn.Dropout(dropout) self.conv2 = nn.Conv1d(in_channels=d_ff, out_channels=d_model, kernel_size=1) self.layer_norm = LayerNorm(d_model) def forward(self, x): x_ = self.dropout(self.activation(self.conv1(x.permute(0, 2, 1)))) return self.layer_norm(self.dropout(self.conv2(x_)).permute(0, 2, 1) + x) class SASTransformerBlock(nn.Module): def __init__(self, hidden, attn_heads, head_size, feed_forward_hidden, dropout, attn_dropout=0.1): super().__init__() self.layer_norm = LayerNorm(hidden) self.attention = SASMultiHeadedAttention( h=attn_heads, d_model=hidden, head_size=head_size, dropout=attn_dropout) self.feed_forward = SASPositionwiseFeedForward( d_model=hidden, d_ff=feed_forward_hidden, dropout=dropout) def forward(self, x, mask): x = self.attention(self.layer_norm(x), x, x, mask) x = self.feed_forward(x) return x from torch import nn as nn import math class BERT(nn.Module): def __init__(self, args): super().__init__() self.args = args self.embedding = BERTEmbedding(self.args) self.model = BERTModel(self.args) self.truncated_normal_init() def truncated_normal_init(self, mean=0, std=0.02, lower=-0.04, upper=0.04): with torch.no_grad(): l = (1. + math.erf(((lower - mean) / std) / math.sqrt(2.))) / 2. u = (1. + math.erf(((upper - mean) / std) / math.sqrt(2.))) / 2. for n, p in self.model.named_parameters(): if not 'layer_norm' in n: p.uniform_(2 * l - 1, 2 * u - 1) p.erfinv_() p.mul_(std * math.sqrt(2.)) p.add_(mean) def forward(self, x): x, mask = self.embedding(x) scores = self.model(x, self.embedding.token.weight, mask) return scores class BERTEmbedding(nn.Module): def __init__(self, args): super().__init__() vocab_size = args.num_items + 2 hidden = args.bert_hidden_units max_len = args.bert_max_len dropout = args.bert_dropout self.token = TokenEmbedding( vocab_size=vocab_size, embed_size=hidden) self.position = PositionalEmbedding( max_len=max_len, d_model=hidden) self.layer_norm = LayerNorm(features=hidden) self.dropout = nn.Dropout(p=dropout) def get_mask(self, x): if len(x.shape) > 2: x = torch.ones(x.shape[:2]).to(x.device) return (x > 0).unsqueeze(1).repeat(1, x.size(1), 1).unsqueeze(1) def forward(self, x): mask = self.get_mask(x) if len(x.shape) > 2: pos = self.position(torch.ones(x.shape[:2]).to(x.device)) x = torch.matmul(x, self.token.weight) + pos else: x = self.token(x) + self.position(x) return self.dropout(self.layer_norm(x)), mask class BERTModel(nn.Module): def __init__(self, args): super().__init__() hidden = args.bert_hidden_units heads = args.bert_num_heads head_size = args.bert_head_size dropout = args.bert_dropout attn_dropout = args.bert_attn_dropout layers = args.bert_num_blocks self.transformer_blocks = nn.ModuleList([TransformerBlock( hidden, heads, head_size, hidden * 4, dropout, attn_dropout) for _ in range(layers)]) self.linear = nn.Linear(hidden, hidden) self.bias = torch.nn.Parameter(torch.zeros(args.num_items + 2)) self.bias.requires_grad = True self.activation = GELU() def forward(self, x, embedding_weight, mask): for transformer in self.transformer_blocks: x = transformer.forward(x, mask) x = self.activation(self.linear(x)) scores = torch.matmul(x, embedding_weight.permute(1, 0)) + self.bias return scores ``` ## 
Run ``` if args.model_code == 'bert': model = BERT(args) # elif args.model_code == 'sas': # model = SASRec(args) # elif args.model_code == 'narm': # model = NARM(args) export_root = 'experiments/' + args.model_code + '/' + args.dataset_code resume=False if resume: try: model.load_state_dict(torch.load(os.path.join(export_root, 'models', 'best_acc_model.pth'), map_location='cpu').get(STATE_DICT_KEY)) except FileNotFoundError: print('Failed to load old model, continue training new model...') ``` ## Trainer ``` STATE_DICT_KEY = 'model_state_dict' OPTIMIZER_STATE_DICT_KEY = 'optimizer_state_dict' import os import torch from abc import ABCMeta, abstractmethod def save_state_dict(state_dict, path, filename): torch.save(state_dict, os.path.join(path, filename)) class LoggerService(object): def __init__(self, train_loggers=None, val_loggers=None): self.train_loggers = train_loggers if train_loggers else [] self.val_loggers = val_loggers if val_loggers else [] def complete(self, log_data): for logger in self.train_loggers: logger.complete(**log_data) for logger in self.val_loggers: logger.complete(**log_data) def log_train(self, log_data): for logger in self.train_loggers: logger.log(**log_data) def log_val(self, log_data): for logger in self.val_loggers: logger.log(**log_data) class AbstractBaseLogger(metaclass=ABCMeta): @abstractmethod def log(self, *args, **kwargs): raise NotImplementedError def complete(self, *args, **kwargs): pass class RecentModelLogger(AbstractBaseLogger): def __init__(self, checkpoint_path, filename='checkpoint-recent.pth'): self.checkpoint_path = checkpoint_path if not os.path.exists(self.checkpoint_path): os.mkdir(self.checkpoint_path) self.recent_epoch = None self.filename = filename def log(self, *args, **kwargs): epoch = kwargs['epoch'] if self.recent_epoch != epoch: self.recent_epoch = epoch state_dict = kwargs['state_dict'] state_dict['epoch'] = kwargs['epoch'] save_state_dict(state_dict, self.checkpoint_path, self.filename) def complete(self, *args, **kwargs): save_state_dict(kwargs['state_dict'], self.checkpoint_path, self.filename + '.final') class BestModelLogger(AbstractBaseLogger): def __init__(self, checkpoint_path, metric_key='mean_iou', filename='best_acc_model.pth'): self.checkpoint_path = checkpoint_path if not os.path.exists(self.checkpoint_path): os.mkdir(self.checkpoint_path) self.best_metric = 0. 
self.metric_key = metric_key self.filename = filename def log(self, *args, **kwargs): current_metric = kwargs[self.metric_key] if self.best_metric < current_metric: print("Update Best {} Model at {}".format( self.metric_key, kwargs['epoch'])) self.best_metric = current_metric save_state_dict(kwargs['state_dict'], self.checkpoint_path, self.filename) class MetricGraphPrinter(AbstractBaseLogger): def __init__(self, writer, key='train_loss', graph_name='Train Loss', group_name='metric'): self.key = key self.graph_label = graph_name self.group_name = group_name self.writer = writer def log(self, *args, **kwargs): if self.key in kwargs: self.writer.add_scalar( self.group_name + '/' + self.graph_label, kwargs[self.key], kwargs['accum_iter']) else: self.writer.add_scalar( self.group_name + '/' + self.graph_label, 0, kwargs['accum_iter']) def complete(self, *args, **kwargs): self.writer.close() import json import os import pprint as pp import random from datetime import date from pathlib import Path import numpy as np import torch import torch.nn.functional as F import torch.backends.cudnn as cudnn from torch import optim as optim def ndcg(scores, labels, k): scores = scores.cpu() labels = labels.cpu() rank = (-scores).argsort(dim=1) cut = rank[:, :k] hits = labels.gather(1, cut) position = torch.arange(2, 2+k) weights = 1 / torch.log2(position.float()) dcg = (hits.float() * weights).sum(1) idcg = torch.Tensor([weights[:min(int(n), k)].sum() for n in labels.sum(1)]) ndcg = dcg / idcg return ndcg.mean() def recalls_and_ndcgs_for_ks(scores, labels, ks): metrics = {} scores = scores labels = labels answer_count = labels.sum(1) labels_float = labels.float() rank = (-scores).argsort(dim=1) cut = rank for k in sorted(ks, reverse=True): cut = cut[:, :k] hits = labels_float.gather(1, cut) metrics['Recall@%d' % k] = \ (hits.sum(1) / torch.min(torch.Tensor([k]).to( labels.device), labels.sum(1).float())).mean().cpu().item() position = torch.arange(2, 2+k) weights = 1 / torch.log2(position.float()) dcg = (hits * weights.to(hits.device)).sum(1) idcg = torch.Tensor([weights[:min(int(n), k)].sum() for n in answer_count]).to(dcg.device) ndcg = (dcg / idcg).mean() metrics['NDCG@%d' % k] = ndcg.cpu().item() return metrics def em_and_agreement(scores_rank, labels_rank): em = (scores_rank == labels_rank).float().mean() temp = np.hstack((scores_rank.numpy(), labels_rank.numpy())) temp = np.sort(temp, axis=1) agreement = np.mean(np.sum(temp[:, 1:] == temp[:, :-1], axis=1)) return em, agreement def kl_agreements_and_intersctions_for_ks(scores, soft_labels, ks, k_kl=100): metrics = {} scores = scores.cpu() soft_labels = soft_labels.cpu() scores_rank = (-scores).argsort(dim=1) labels_rank = (-soft_labels).argsort(dim=1) top_kl_scores = F.log_softmax(scores.gather(1, labels_rank[:, :k_kl]), dim=-1) top_kl_labels = F.softmax(soft_labels.gather(1, labels_rank[:, :k_kl]), dim=-1) kl = F.kl_div(top_kl_scores, top_kl_labels, reduction='batchmean') metrics['KL-Div'] = kl.item() for k in sorted(ks, reverse=True): em, agreement = em_and_agreement(scores_rank[:, :k], labels_rank[:, :k]) metrics['EM@%d' % k] = em.item() metrics['Agr@%d' % k] = (agreement / k).item() return metrics class AverageMeterSet(object): def __init__(self, meters=None): self.meters = meters if meters else {} def __getitem__(self, key): if key not in self.meters: meter = AverageMeter() meter.update(0) return meter return self.meters[key] def update(self, name, value, n=1): if name not in self.meters: self.meters[name] = AverageMeter() 
self.meters[name].update(value, n) def reset(self): for meter in self.meters.values(): meter.reset() def values(self, format_string='{}'): return {format_string.format(name): meter.val for name, meter in self.meters.items()} def averages(self, format_string='{}'): return {format_string.format(name): meter.avg for name, meter in self.meters.items()} def sums(self, format_string='{}'): return {format_string.format(name): meter.sum for name, meter in self.meters.items()} def counts(self, format_string='{}'): return {format_string.format(name): meter.count for name, meter in self.meters.items()} class AverageMeter(object): """Computes and stores the average and current value""" def __init__(self): self.val = 0 self.avg = 0 self.sum = 0 self.count = 0 def reset(self): self.val = 0 self.avg = 0 self.sum = 0 self.count = 0 def update(self, val, n=1): self.val = val self.sum += val self.count += n self.avg = self.sum / self.count def __format__(self, format): return "{self.val:{format}} ({self.avg:{format}})".format(self=self, format=format) !pip install faiss-cpu --no-cache !apt-get install libomp-dev # from config import STATE_DICT_KEY, OPTIMIZER_STATE_DICT_KEY # from .utils import * # from .loggers import * import torch import torch.nn as nn import torch.optim as optim import torch.nn.functional as F from torch.optim.lr_scheduler import LambdaLR from torch.utils.tensorboard import SummaryWriter from tqdm import tqdm import json import faiss import numpy as np from abc import * from pathlib import Path class BERTTrainer(metaclass=ABCMeta): def __init__(self, args, model, train_loader, val_loader, test_loader, export_root): self.args = args self.device = args.device self.model = model.to(self.device) self.is_parallel = args.num_gpu > 1 if self.is_parallel: self.model = nn.DataParallel(self.model) self.num_epochs = args.num_epochs self.metric_ks = args.metric_ks self.best_metric = args.best_metric self.train_loader = train_loader self.val_loader = val_loader self.test_loader = test_loader self.optimizer = self._create_optimizer() if args.enable_lr_schedule: if args.enable_lr_warmup: self.lr_scheduler = self.get_linear_schedule_with_warmup( self.optimizer, args.warmup_steps, len(train_loader) * self.num_epochs) else: self.lr_scheduler = optim.lr_scheduler.StepLR( self.optimizer, step_size=args.decay_step, gamma=args.gamma) self.export_root = export_root self.writer, self.train_loggers, self.val_loggers = self._create_loggers() self.logger_service = LoggerService( self.train_loggers, self.val_loggers) self.log_period_as_iter = args.log_period_as_iter self.ce = nn.CrossEntropyLoss(ignore_index=0) def train(self): accum_iter = 0 self.validate(0, accum_iter) for epoch in range(self.num_epochs): accum_iter = self.train_one_epoch(epoch, accum_iter) self.validate(epoch, accum_iter) self.logger_service.complete({ 'state_dict': (self._create_state_dict()), }) self.writer.close() def train_one_epoch(self, epoch, accum_iter): self.model.train() average_meter_set = AverageMeterSet() tqdm_dataloader = tqdm(self.train_loader) for batch_idx, batch in enumerate(tqdm_dataloader): batch_size = batch[0].size(0) batch = [x.to(self.device) for x in batch] self.optimizer.zero_grad() loss = self.calculate_loss(batch) loss.backward() self.clip_gradients(5) self.optimizer.step() if self.args.enable_lr_schedule: self.lr_scheduler.step() average_meter_set.update('loss', loss.item()) tqdm_dataloader.set_description( 'Epoch {}, loss {:.3f} '.format(epoch+1, average_meter_set['loss'].avg)) accum_iter += batch_size if 
self._needs_to_log(accum_iter): tqdm_dataloader.set_description('Logging to Tensorboard') log_data = { 'state_dict': (self._create_state_dict()), 'epoch': epoch + 1, 'accum_iter': accum_iter, } log_data.update(average_meter_set.averages()) self.logger_service.log_train(log_data) return accum_iter def validate(self, epoch, accum_iter): self.model.eval() average_meter_set = AverageMeterSet() with torch.no_grad(): tqdm_dataloader = tqdm(self.val_loader) for batch_idx, batch in enumerate(tqdm_dataloader): batch = [x.to(self.device) for x in batch] metrics = self.calculate_metrics(batch) self._update_meter_set(average_meter_set, metrics) self._update_dataloader_metrics( tqdm_dataloader, average_meter_set) log_data = { 'state_dict': (self._create_state_dict()), 'epoch': epoch+1, 'accum_iter': accum_iter, } log_data.update(average_meter_set.averages()) self.logger_service.log_val(log_data) def test(self): best_model_dict = torch.load(os.path.join( self.export_root, 'models', 'best_acc_model.pth')).get(STATE_DICT_KEY) self.model.load_state_dict(best_model_dict) self.model.eval() average_meter_set = AverageMeterSet() all_scores = [] average_scores = [] with torch.no_grad(): tqdm_dataloader = tqdm(self.test_loader) for batch_idx, batch in enumerate(tqdm_dataloader): batch = [x.to(self.device) for x in batch] metrics = self.calculate_metrics(batch) # seqs, candidates, labels = batch # scores = self.model(seqs) # scores = scores[:, -1, :] # scores_sorted, indices = torch.sort(scores, dim=-1, descending=True) # all_scores += scores_sorted[:, :100].cpu().numpy().tolist() # average_scores += scores_sorted.cpu().numpy().tolist() # scores = scores.gather(1, candidates) # metrics = recalls_and_ndcgs_for_ks(scores, labels, self.metric_ks) self._update_meter_set(average_meter_set, metrics) self._update_dataloader_metrics( tqdm_dataloader, average_meter_set) average_metrics = average_meter_set.averages() with open(os.path.join(self.export_root, 'logs', 'test_metrics.json'), 'w') as f: json.dump(average_metrics, f, indent=4) return average_metrics def calculate_loss(self, batch): seqs, labels = batch logits = self.model(seqs) logits = logits.view(-1, logits.size(-1)) labels = labels.view(-1) loss = self.ce(logits, labels) return loss def calculate_metrics(self, batch): seqs, candidates, labels = batch scores = self.model(seqs) scores = scores[:, -1, :] scores = scores.gather(1, candidates) metrics = recalls_and_ndcgs_for_ks(scores, labels, self.metric_ks) return metrics def clip_gradients(self, limit=5): for p in self.model.parameters(): nn.utils.clip_grad_norm_(p, 5) def _update_meter_set(self, meter_set, metrics): for k, v in metrics.items(): meter_set.update(k, v) def _update_dataloader_metrics(self, tqdm_dataloader, meter_set): description_metrics = ['NDCG@%d' % k for k in self.metric_ks[:3] ] + ['Recall@%d' % k for k in self.metric_ks[:3]] description = 'Eval: ' + \ ', '.join(s + ' {:.3f}' for s in description_metrics) description = description.replace('NDCG', 'N').replace('Recall', 'R') description = description.format( *(meter_set[k].avg for k in description_metrics)) tqdm_dataloader.set_description(description) def _create_optimizer(self): args = self.args param_optimizer = list(self.model.named_parameters()) no_decay = ['bias', 'layer_norm'] optimizer_grouped_parameters = [ { 'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay, }, {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}, ] if 
args.optimizer.lower() == 'adamw': return optim.AdamW(optimizer_grouped_parameters, lr=args.lr, eps=args.adam_epsilon) elif args.optimizer.lower() == 'adam': return optim.Adam(optimizer_grouped_parameters, lr=args.lr, weight_decay=args.weight_decay) elif args.optimizer.lower() == 'sgd': return optim.SGD(optimizer_grouped_parameters, lr=args.lr, weight_decay=args.weight_decay, momentum=args.momentum) else: raise ValueError def get_linear_schedule_with_warmup(self, optimizer, num_warmup_steps, num_training_steps, last_epoch=-1): # based on hugging face get_linear_schedule_with_warmup def lr_lambda(current_step: int): if current_step < num_warmup_steps: return float(current_step) / float(max(1, num_warmup_steps)) return max( 0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps)) ) return LambdaLR(optimizer, lr_lambda, last_epoch) def _create_loggers(self): root = Path(self.export_root) writer = SummaryWriter(root.joinpath('logs')) model_checkpoint = root.joinpath('models') train_loggers = [ MetricGraphPrinter(writer, key='epoch', graph_name='Epoch', group_name='Train'), MetricGraphPrinter(writer, key='loss', graph_name='Loss', group_name='Train'), ] val_loggers = [] for k in self.metric_ks: val_loggers.append( MetricGraphPrinter(writer, key='NDCG@%d' % k, graph_name='NDCG@%d' % k, group_name='Validation')) val_loggers.append( MetricGraphPrinter(writer, key='Recall@%d' % k, graph_name='Recall@%d' % k, group_name='Validation')) val_loggers.append(RecentModelLogger(model_checkpoint)) val_loggers.append(BestModelLogger( model_checkpoint, metric_key=self.best_metric)) return writer, train_loggers, val_loggers def _create_state_dict(self): return { STATE_DICT_KEY: self.model.module.state_dict() if self.is_parallel else self.model.state_dict(), OPTIMIZER_STATE_DICT_KEY: self.optimizer.state_dict(), } def _needs_to_log(self, accum_iter): return accum_iter % self.log_period_as_iter < self.args.train_batch_size and accum_iter != 0 ``` ## Run ``` if args.model_code == 'bert': trainer = BERTTrainer(args, model, train_loader, val_loader, test_loader, export_root) elif args.model_code == 'sas': trainer = SASTrainer(args, model, train_loader, val_loader, test_loader, export_root) elif args.model_code == 'narm': args.num_epochs = 100 trainer = RNNTrainer(args, model, train_loader, val_loader, test_loader, export_root) trainer.train() trainer.test() ```
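The `get_linear_schedule_with_warmup` helper in the trainer above mirrors Hugging Face's linear schedule: the learning-rate multiplier ramps up linearly for the first `warmup_steps` optimizer steps and then decays linearly to zero at the last training step. The short sketch below is not part of the trainer; the step counts, base learning rate, and dummy parameter are made-up example values, and it only prints the multiplier so the shape of the schedule is easy to check.

```
# Standalone sketch of the linear warmup-then-decay schedule used above.
# All numbers are illustrative; only the lr_lambda logic matches the trainer.
import torch
import torch.optim as optim
from torch.optim.lr_scheduler import LambdaLR

warmup_steps, total_steps = 10, 100

def lr_lambda(step):
    if step < warmup_steps:  # linear ramp-up
        return step / max(1, warmup_steps)
    # linear decay down to zero at total_steps
    return max(0.0, (total_steps - step) / max(1, total_steps - warmup_steps))

dummy = [torch.nn.Parameter(torch.zeros(1))]  # placeholder parameters
optimizer = optim.SGD(dummy, lr=1e-3)
scheduler = LambdaLR(optimizer, lr_lambda)

for step in range(total_steps):
    optimizer.step()
    scheduler.step()
    if step % 20 == 0:
        print(step, scheduler.get_last_lr())
```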
# Introduction Refer to these solutions: https://www.kaggle.com/jsaguiar/lightgbm-7th-place-solution https://www.tinymind.cn/articles/3655 https://github.com/Featuretools/Automated-Manual-Comparison/tree/master/Loan%20Repayment https://www.kaggle.com/willkoehrsen/start-here-a-gentle-introduction/ # Read Data ``` # import packages import numpy as np import pandas as pd import matplotlib.pyplot as plt import featuretools as ft import lightgbm as lgb %matplotlib inline import seaborn as sns RSEED = 50 # read in data and check basic info def read_check_data(file_path): data = pd.read_csv(file_path) print('Training data shape: ', data.shape) print(data.head()) return data app_train = read_check_data("./data/application_train.csv") app_test = read_check_data("./data/application_test.csv") app = app_train.append(app_test,sort=False) app.tail() POS_CASH_balance = read_check_data("./data/POS_CASH_balance.csv") bureau_balance = read_check_data("./data/bureau_balance.csv") previous_application = read_check_data("./data/previous_application.csv") credit_card_balance = read_check_data("./data/credit_card_balance.csv") bureau = read_check_data("./data/bureau.csv") ``` # Check Missing Values ``` def check_missing_col(df): missing_num = df.isnull().sum().sort_values(ascending = False) missing_percent = (df.isnull().mean()*100).sort_values(ascending = False) missing_info = pd.concat([missing_num, missing_percent], axis=1, keys=['missing_num', 'missing_percent']) print(missing_info.head()) return missing_info def check_missing_row(df): missing_num = df.isnull().sum(axis=1).sort_values(ascending = False) missing_percent = (df.isnull().mean(axis=1)*100).sort_values(ascending = False) missing_info = pd.concat([missing_num, missing_percent], axis=1, keys=['missing_num', 'missing_percent']) print(missing_info.head()) return missing_info def missing_hist(missing_info): plt.hist(missing_info.missing_percent) print(missing_info.describe()) # check missing value of app_train miss_app_train = check_missing_col(app_train) missing_hist(miss_app_train) missing_hist(check_missing_row(app_train)) # check missing value of POS_CASH_balance miss_POS_CASH = check_missing_col(POS_CASH_balance) missing_hist(miss_POS_CASH) missing_hist(check_missing_row(POS_CASH_balance)) miss_bureau_balance = check_missing_col(bureau_balance) missing_hist(miss_bureau_balance) missing_hist(check_missing_row(bureau_balance)) #check_missing(previous_application) missing_hist(check_missing_col(previous_application)) missing_hist(check_missing_row(previous_application)) #check_missing(credit_card_balance) missing_hist(check_missing_col(credit_card_balance)) missing_hist(check_missing_row(credit_card_balance)) ``` # Select and Re-Encode Features ``` def select_feature_type(df,data_type): return df.select_dtypes(include=[data_type]) def count_feature_type(df): return df.dtypes.value_counts() def count_col_unique(df,data_type): '''count the total unique value number of each column data_type could be object,category''' return df.select_dtypes(data_type).apply(pd.Series.nunique, axis = 0) def count_col_each_unique(df,col_name): '''count total number of observations of each unique value of a column''' return df.groupby(col_name)[col_name].count() def check_distribution(df,col_name): print(df[col_name].describe()) print('Total missing value number: ',df[col_name].isnull().sum()) plt.figure(figsize=(12,5)) sns.distplot(df[col_name].dropna()) def
label_encoder(df, categorical_columns=None): """Encode categorical values as integers (0,1,2,3...) with pandas.factorize. """ # if categorical_colunms are not given than treat object as categorical features if not categorical_columns: categorical_columns = [col for col in df.columns if df[col].dtype == 'object'] for col in categorical_columns: df[col], uniques = pd.factorize(df[col]) return df, categorical_columns def one_hot_encoder(df, categorical_columns=None, nan_as_category=True): """Create a new column for each categorical value in categorical columns. """ original_columns = list(df.columns) if not categorical_columns: categorical_columns = [col for col in df.columns if df[col].dtype == 'object'] df = pd.get_dummies(df, columns=categorical_columns, dummy_na=nan_as_category) categorical_columns = [c for c in df.columns if c not in original_columns] return df, categorical_columns ``` ## check value type and correct wrong values ``` count_feature_type(app) count_col_unique(app_train,'object') count_col_each_unique(app,'CODE_GENDER') check_distribution(app,'AMT_INCOME_TOTAL') # this discussion https://www.kaggle.com/c/home-credit-default-risk/discussion/57247#332033 # says 365243 means NA check_distribution(app,'DAYS_EMPLOYED') # total number of 1 missing value and add spike on value 0 # 0 could mean nan value check_distribution(app,'DAYS_LAST_PHONE_CHANGE') # remove 4 people code_gender value 'XNA' app = app[app['CODE_GENDER'] != 'XNA'] # 4 people with XNA code gender app.tail() app['DAYS_EMPLOYED'].replace(365243, np.nan, inplace=True) app.tail() app['DAYS_LAST_PHONE_CHANGE'].replace(0, np.nan, inplace=True) app.tail() # change all categorical feature to numerical app_clean, categorical_columns = label_encoder(app, categorical_columns=None) app_clean.tail() # check bureau data count_feature_type(bureau) count_col_unique(bureau,'object') check_distribution(bureau,'DAYS_CREDIT') check_distribution(bureau,'AMT_CREDIT_SUM') ``` # Baseline Model ``` app_base_train = app_clean[app_clean['TARGET'].notnull()] app_base_test = app_clean[app_clean['TARGET'].isnull()] app_base_test.head() def cross_validate(train): """Compute cross validation ROC AUC of a gradient boosting model for a given training dataset""" # Extract the labels train_labels = np.array(train['TARGET'].astype(np.int32)).reshape((-1, )) train = train.drop(columns = ['TARGET', 'SK_ID_CURR']) # Create a lgb training set train_set = lgb.Dataset(train, label = train_labels) # Find default hyperparameters model = lgb.LGBMClassifier() params = model.get_params() # Number of estimators will be selected through early stopping del params['n_estimators'], params['silent'] # Early stoppping with 5 fold cross validation cv_results = lgb.cv(params, train_set, num_boost_round = 10000, metrics = 'auc', early_stopping_rounds = 100, seed = RSEED, nfold = 5) print('Cross Validation ROC AUC: {:.5f} with std: {:.5f}.'.format(cv_results['auc-mean'][-1], cv_results['auc-stdv'][-1])) print('Number of estimators trained: {}'.format(len(cv_results['auc-mean']))) return cv_results cv_results_baseline = cross_validate(app_base_train) def make_submission(cv_results, train, test): """Make a submission dataframe for the Kaggle competition for a given dataset.""" # Extract the labels train_labels = np.array(train['TARGET'].astype(np.int32)).reshape((-1, )) train = train.drop(columns = ['TARGET', 'SK_ID_CURR']) test_ids = list(test['SK_ID_CURR']) test = test.drop(columns = ['TARGET','SK_ID_CURR']) # Make model with optimal number of estimators and train on 
training data model = lgb.LGBMClassifier(n_estimators = len(cv_results['auc-mean']), random_state=RSEED) model.fit(train, train_labels) # Make predictions on the testing data preds = model.predict_proba(test)[:, 1] submission = pd.DataFrame({'SK_ID_CURR': test_ids, 'TARGET': preds}) return submission submission_baseline = make_submission(cv_results_baseline, app_base_train, app_base_test) submission_baseline.to_csv('./data/submission_baseline.csv', index = False) ``` # Semi-Auto Feature ``` def agg_numeric(df_child, parent_var, df_col_name): """ Groups and aggregates the numeric values in a child dataframe by the parent variable. Parameters -------- df_child (dataframe): the child dataframe to calculate the statistics on parent_var (string): the parent variable used for grouping and aggregating df_col_name (string): the variable used to rename the columns Return -------- agg (dataframe): a dataframe with the statistics aggregated by the `parent_var` for all numeric columns. The aggregate function are 'count', 'mean', 'max', 'min', 'sum' Each observation of the parent variable will have one row in the dataframe with the parent variable as the index. The columns are also renamed using the `df_col_name`. Columns with all duplicate values are removed. """ # Remove id variables other than grouping variable # e.g. SK_ID_BUREAU for col in df_child: if col != parent_var and 'SK_ID' in col: df_child = df_child.drop(columns = col) # Only want the numeric variables parent_ids = df_child[parent_var].copy() numeric_df = df_child.select_dtypes('number').copy() numeric_df[parent_var] = parent_ids # Group by the specified variable and calculate the statistics agg = numeric_df.groupby(parent_var).agg(['count', 'mean', 'max', 'min', 'sum']) # Need to create new column names columns = [] # Iterate through the variables names for var in agg.columns.levels[0]: if var != parent_var: # Iterate through the stat names for stat in agg.columns.levels[1]: # Make a new column name for the variable and stat columns.append('%s_%s_%s' % (df_col_name, var, stat)) agg.columns = columns # Remove the columns with all redundant values _, idx = np.unique(agg, axis = 1, return_index=True) agg = agg.iloc[:, idx] return agg bureau_agg = agg_numeric(bureau, 'SK_ID_CURR', 'BUREAU') bureau_agg.head() app_clean_second = pd.merge(app_clean,bureau_agg,on='SK_ID_CURR',how='left') app_base_train_second = app_clean_second[app_clean_second['TARGET'].notnull()] app_base_test_second = app_clean_second[app_clean_second['TARGET'].isnull()] app_base_train_second.head() app_base_test_second.head() cv_results_second = cross_validate(app_base_train_second) submission_second = make_submission(cv_results_second, app_base_train_second, app_base_test_second) submission_second.to_csv('./data/submission_second.csv', index = False) ```
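To make the behaviour of `agg_numeric` concrete, here is a tiny sanity check on made-up data (the toy frame and its column names are not from the competition files, and it assumes `agg_numeric` from the cell above is already defined). Each parent id ends up with one row, and each numeric child column produces `count`/`mean`/`max`/`min`/`sum` columns prefixed with the supplied name.

```
# Toy check of agg_numeric on a hand-made child table (illustrative data only)
import pandas as pd

toy_child = pd.DataFrame({
    'SK_ID_CURR': [1, 1, 2],        # parent id used for grouping
    'SK_ID_BUREAU': [10, 11, 12],   # extra id column, dropped by agg_numeric
    'AMT': [100.0, 300.0, 50.0],    # a numeric column to aggregate
})
toy_agg = agg_numeric(toy_child, 'SK_ID_CURR', 'TOY')
print(toy_agg)
# Expect columns such as TOY_AMT_count, TOY_AMT_mean, TOY_AMT_max, TOY_AMT_min, TOY_AMT_sum
```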
# Genentech Cervical Cancer - Feature Selection https://www.kaggle.com/c/cervical-cancer-screening/ ``` # imports import sys # for stderr import numpy as np import pandas as pd import sklearn as skl from sklearn import metrics import matplotlib.pyplot as plt %matplotlib inline # settings %logstop %logstart -o 'cc_feature_selection.log' rotate plt.style.use('ggplot') # constants # plt.rcParams['figure.figsize'] = (10.0, 10.0) # pd.set_option('display.max_rows', 50) # pd.set_option('display.max_columns', 50) # versions import sys print(pd.datetime.now()) print('Python: '+sys.version) print('numpy: '+np.__version__) print('pandas: '+pd.__version__) print('sklearn: '+skl.__version__) from sqlalchemy import create_engine engine = create_engine('postgresql://paulperry:@localhost:5432/ccancer') from pyace import ace ``` ## Load ``` fdir = './features/' train_file = './input/patients_train.csv.gz' train = pd.read_csv(train_file) train.drop('patient_gender', axis=1, inplace=True) train.set_index('patient_id', inplace=True) train[:3] files =[ 'diag_code_0_1000.csv.gz', 'diag_code_1000_2000.csv.gz', 'diag_code_2000_3000.csv.gz', 'diag_code_3000_4000.csv.gz', 'diag_code_4000_5000.csv.gz', 'diag_code_5000_6000.csv.gz', 'diag_code_6000_7000.csv.gz', 'diag_code_7000_8000.csv.gz', 'diag_code_8000_9000.csv.gz', 'diag_code_9000_10000.csv.gz', 'diag_code_10000_11000.csv.gz', 'diag_code_11000_12000.csv.gz', 'diag_code_12000_13000.csv.gz', 'diag_code_13000_14000.csv.gz', 'diag_code_14000_15000.csv.gz', 'diag_code_15000_16000.csv.gz', ] len(files) # # # tab = pd.read_csv(fdir+files[0]) # tab.set_index('patient_id', inplace=True) # for f in files[1:]: # tab2 = pd.read_csv(fdir+f) # tab2.set_index('patient_id', inplace=True) # tab = tab.merge(tab2, left_index=True, right_index=True, how='left') # gc.collect() # print(f) ``` ## Run ``` nnn = 4 import datetime start = datetime.datetime.now() print(start) tab = pd.read_csv(fdir+files[nnn]) tab.set_index('patient_id', inplace=True) tab.shape dfall = pd.merge(train, tab, left_index=True, right_index=True, how='left') cat_cols = ['patient_age_group','patient_state','ethinicity','household_income','education_level'] ranks = ace(dfall, 'is_screener', cat_cols=[]) df_ranks = pd.DataFrame(ranks, index=dfall.columns, columns=['ace','mean']) df_ranks = df_ranks.sort_values(by='ace', ascending=False) top_ranks = df_ranks[df_ranks.ace > 0] top_ranks[:20] top_ranks.to_csv('diagnosis_ranks_'+str(nnn)+'.csv') import gc gc.collect() end = datetime.datetime.now() print('run time: '+str(end-start)+' at: '+str(end)) break top_ranks = pd.read_csv('diagnosis_ranks_15.csv') top_ranks.set_index('Unnamed: 0', inplace=True) top_ranks[:10] qlist = list(top_ranks[top_ranks.ace > 0.003891].index) for c in cat_cols: qlist.remove(c) qlist_str = "('"+qlist[0]+"'" for c in qlist[1:]: qlist_str=qlist_str+",'"+c+"'" qlist_str=qlist_str+')' qlist_str q = 'select * from diagnosis_code where diagnosis_code in '+qlist_str diag_codes = pd.read_sql_query(q, engine) diag_codes diag_codes.to_csv('diagnosis_top_codes.csv', mode='a', header=False) qlist tab[qlist].to_csv('diagnosis_top_'+str(nnn)+'.csv') ``` ## Merge feature values ``` diag_top_features = [ 'diagnosis_top_0.csv', 'diagnosis_top_3.csv', 'diagnosis_top_4.csv', 'diagnosis_top_6.csv', 'diagnosis_top_10.csv', 'diagnosis_top_12.csv', 'diagnosis_top_15.csv' ] dff = pd.read_csv(diag_top_features[0]) dff.set_index('patient_id', inplace=True) print(dff.shape) for f in diag_top_features[1:]: df2 = pd.read_csv(f) df2.set_index('patient_id', 
inplace=True) dff = dff.merge(df2, left_index=True, right_index=True, how='outer') gc.collect() print(f) gc.collect() dff.shape dff[:5] dff.columns big_table = pd.read_csv(fdir+'train_big_table.csv.gz') big_table.set_index('patient_id', inplace=True) big_table.shape big_table[:2] dff[:2] dff.columns big_table.columns bad_cols = ['CLINIC', 'INPATIENT', 'OTHER', 'OUTPATIENT', 'UNKNOWN', '0001', '0002', '0003', '0004', '0005', '0006', 'HX01', 'HX02', 'HX03', 'HX04', 'HX05', 'HXPR', 'pract_screen_pct', 'cbsa_pct', 'age_pct', 'state_pct', '632','650', u'57452', u'57454', u'57455', u'57456', u'81252', u'90696', u'G0143', u'S4020', u'S4023'] # take only a subset of the features, the rest I think is junk cols = list(big_table.columns) cols = [x for x in cols if x not in bad_cols] test_cols = list(cols) test_cols.remove('is_screener') bigt = big_table[cols].merge(dff, left_index=True, right_index=True, how='left') bigt.columns bigt.shape bigt.to_csv(fdir+'train_big_table.csv') big_table_encoded = pd.read_csv(fdir+'train_big_table_encoded.csv.gz') big_table_encoded.set_index('patient_id', inplace=True) big_table_encoded.shape big_table_encoded[:2] dffd = dff.copy() dffd[dffd > 0] = 1 dffd[:10] bigte = big_table_encoded[cols].merge(dffd,left_index=True, right_index=True, how='left') bigte.shape bigte[:10] bigte.to_csv(fdir+'train_big_table_encoded.csv') ``` ## Procedures ``` procs = pd.read_csv(fdir+'procedure/procedure_counts_selected.csv.gz') procs.shape procs.set_index('patient_id', inplace=True) procs[:2] print(bigt.shape) bigtp = bigt.merge(procs, left_index=True, right_index=True, how='left') bigtp.shape bigtp.to_csv(fdir+'train_big_table.csv') print(bigte.shape) bigtep = bigte.merge(procs, left_index=True, right_index=True, how='left') bigtep.shape bigtep.to_csv(fdir+'train_big_table_encoded.csv') ``` ## test_top ``` test_diagnosis_top = pd.read_csv('test_diagnosis_top.csv') test_diagnosis_top.shape #test_diagnosis_top.set_index('patient_id', inplace=True) test_diagnosis_top[:5] test_pivot = test_diagnosis_top.pivot(index='patient_id', columns='diagnosis_code', values='diagnosis_code_count') test_pivot[:5] test_pivot.shape test_big_table = pd.read_csv(fdir+'test_big_table.csv.gz') test_big_table.set_index('patient_id', inplace=True) test_big_table = test_big_table[test_cols] test_big_table.shape test_big_table[:4] test_big_table.info() test_bigt = test_big_table.merge(test_pivot, left_index=True, right_index=True, how='left') test_bigt.shape test_bigt[:5] test_bigt.to_csv(fdir+'test_big_table.csv') test_big_table_encoded = pd.read_csv(fdir+'test_big_table_encoded.csv.gz') test_big_table_encoded.set_index('patient_id', inplace=True) test_big_table_encoded = test_big_table_encoded[test_cols] test_big_table_encoded.shape test_big_table_encoded.info() test_big_table_encoded[:2] test_dffd = test_pivot.copy() test_dffd[test_dffd > 0] = 1 test_dffd[:10] test_bigte = test_big_table_encoded.merge(test_dffd,left_index=True, right_index=True, how='left') test_bigte.shape test_bigte[:10] test_bigte.to_csv(fdir+'test_big_table_encoded.csv') dff.shape test_pivot.shape sorted_cols = [u'401.9', u'462', u'496', u'585.3', u'616.0', u'616.10', u'620.2', u'622.10', u'622.11', u'623.5', u'625.3', u'625.9', u'626.0', u'626.2', u'626.4', u'626.8', u'646.83', u'648.93', u'650', u'795.00', u'V22.0', u'V22.1', u'V22.2', u'V24.2', u'V25.2', u'V27.0', u'V28.3', u'V70.0', u'V74.5'] dff[sorted_cols].to_csv('train_diagnosis_top.csv') test_pivot.to_csv('test_diagnosis_top.csv') test_big_table.shape, big_table.shape 
test_bigt.columns len(test_bigt.columns) len(big_table.columns) len(bigte.columns) test_bigt.drop('632_y', axis=1, inplace=True) test_bigt.rename(columns={'632_x':'632'}, inplace=True) test_bigte.drop('632_y', axis=1, inplace=True) test_bigte.rename(columns={'632_x':'632'}, inplace=True) ``` ## Check results ``` train_diagnosis_top = fdir+'train_diagnosis_top.csv.gz' train_diagnosis_top = pd.read_csv(train_diagnosis_top) train_diagnosis_top.set_index('patient_id', inplace=True) train_diagnosis_top[:3] train_diagnosis_top.shape test_diagnosis_top = fdir+'test_diagnosis_top.csv.gz' test_diagnosis_top = pd.read_csv(test_diagnosis_top) test_diagnosis_top.set_index('patient_id', inplace=True) test_diagnosis_top[:3] test_diagnosis_top.shape set(test_diagnosis_top.columns) - set(train_diagnosis_top.columns) pd.read_csv(fdir+'diagnosis_top_codes.csv') test_diagnosis_top.columns train_632 = pd.read_csv(fdir+'train_big_table.csv.gz') train_632.set_index('patient_id', inplace=True) train_632['632'][:5] train_diagnosis_top['632'] = train_632['632'] train_diagnosis_top.sort_index(axis=1, inplace=True) train_diagnosis_top.columns train_diagnosis_top.to_csv(fdir+'train_diagnosis_top.csv') train_632.columns ```
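Since the train and test top-diagnosis tables are built from separate queries, a quick way to finish the check above is to align both frames on their shared columns before modelling. This is only a suggested sketch reusing the `train_diagnosis_top` and `test_diagnosis_top` frames loaded above; the names of the aligned copies are arbitrary.

```
# Align train/test top-diagnosis features on a common, sorted column set
common_cols = sorted(set(train_diagnosis_top.columns) & set(test_diagnosis_top.columns))
train_aligned = train_diagnosis_top[common_cols]
test_aligned = test_diagnosis_top[common_cols]
print(train_aligned.shape, test_aligned.shape)
print(set(train_diagnosis_top.columns) - set(common_cols))  # columns only in train, if any
```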
``` import nltk import difflib import time import gc import itertools import multiprocessing import pandas as pd import numpy as np import xgboost as xgb import lightgbm as lgb import warnings warnings.filterwarnings('ignore') import matplotlib.pyplot as plt %matplotlib inline import seaborn as sns from sklearn.metrics import log_loss from sklearn.model_selection import train_test_split from models_utils_fe import * from models_utils_gbm import * src = '/media/w/1c392724-ecf3-4615-8f3c-79368ec36380/DS Projects/Kaggle/Quora/scripts/features/' feats_src = '/media/w/1c392724-ecf3-4615-8f3c-79368ec36380/DS Projects/Kaggle/Quora/data/features/uncleaned/' trans_src = '/media/w/1c392724-ecf3-4615-8f3c-79368ec36380/DS Projects/Kaggle/Quora/data/features/lemmatized_fullclean/transformations/' wmd = pd.read_csv(src + 'train_WMD_cleaned_stemmed.csv') wmd = wmd.astype('float32') wmd.replace(np.inf, 1000, inplace = True) skip_thought = pd.read_csv(src + 'train_skipthoughts_Alex_distances.csv') skip_thought = skip_thought.astype('float32') compression = pd.read_csv(src + 'train_LZMAcompression_distance.csv') compression = compression.astype('float32') edit = pd.read_csv(src + 'train_EDITdistance.csv') edit = edit.astype('float32') moments = pd.read_csv(src + 'train_doc2vec_moments.csv') moments = moments.astype('float32') networks_NER = pd.read_csv(src + 'train_networkfeats_NER.csv') networks_NER = networks_NER.astype('float32') xgb_feats = pd.read_csv(feats_src + '/the_1owl/owl_train.csv') y_train = xgb_feats[['is_duplicate']] lsaq1 = pd.DataFrame(np.load(trans_src + 'train_lsa50_CV1gram.npy')[0]) lsaq1.columns = ['{}_lsaCV1_q1'.format(i) for i in range(lsaq1.shape[1])] lsaq2 = pd.DataFrame(np.load(trans_src + 'train_lsa50_CV1gram.npy')[1]) lsaq2.columns = ['{}_lsaCV1_q2'.format(i) for i in range(lsaq2.shape[1])] svdq1 = pd.DataFrame(np.load(trans_src + 'train_svd50_CV1gram.npy')[0]) svdq1.columns = ['{}_svdCV1_q1'.format(i) for i in range(svdq1.shape[1])] svdq2 = pd.DataFrame(np.load(trans_src + 'train_svd50_CV1gram.npy')[1]) svdq2.columns = ['{}_svdCV1_q2'.format(i) for i in range(svdq2.shape[1])] X_train = pd.read_pickle('Xtrain_500bestCols.pkl') X_train = pd.concat([X_train, wmd, skip_thought, compression, edit, moments, networks_NER, lsaq1, lsaq2, svdq1, svdq2], axis = 1) del xgb_feats, wmd, skip_thought, compression, edit, moments, networks_NER, \ lsaq1, lsaq2, svdq1, svdq2 gc.collect() best_cols = [ 'min_pagerank_sp_network_weighted', 'norm_wmd', 'word_match', '1wl_tfidf_l2_euclidean', 'm_vstack_svd_q1_q1_euclidean', '1wl_tfidf_cosine', 'sk_bi_skew_q2vec', 'm_q1_q2_tf_svd0', 'sk_bi_skew_q1vec', 'skew_q2vec', 'trigram_tfidf_cosine', 'sk_uni_skew_q2vec', 'sk_bi_canberra_distance', 'question1_3', 'sk_uni_skew_q1vec', 'sk_uni_kur_q2vec', 'min_eigenvector_centrality_np_network_weighted', 'avg_world_len2', 'z_word_match', 'sk_uni_kur_q1vec', 'skew_doc2vec_pretrained_lemmat'] rescale = False X_bin = bin_numerical(X_train, best_cols, 0.1) X_grouped = group_featbyfeat(X_train, best_cols, 'mean') X_grouped2 = group_featbyfeat(X_train, best_cols, 'sum') X_combinations = feature_combinations(X_train, best_cols[:5]) X_additional = pd.concat([X_bin, X_grouped, X_grouped2, X_combinations], axis = 1) X_additional = drop_duplicate_cols(X_additional) X_additional.replace(np.inf, 999, inplace = True) X_additional.replace(np.nan, -999, inplace = True) if rescale: colnames = X_additional.columns X_additional = pd.DataFrame(MinMaxScaler().fit_transform(X_additional)) X_additional.columns = colnames X_train = 
pd.concat([X_train, X_additional], axis = 1) X_train = X_train.astype('float32') print('Final training data shape:', X_train.shape) del X_bin, X_grouped, X_grouped2, X_combinations, X_additional gc.collect() src = '/media/w/1c392724-ecf3-4615-8f3c-79368ec36380/DS Projects/Kaggle/Quora/scripts/features/' feats_src = '/media/w/1c392724-ecf3-4615-8f3c-79368ec36380/DS Projects/Kaggle/Quora/data/features/uncleaned/' trans_src = '/media/w/1c392724-ecf3-4615-8f3c-79368ec36380/DS Projects/Kaggle/Quora/data/features/lemmatized_fullclean/transformations/' X_train = pd.read_pickle('Xtrain_814colsBest.pkl', compression = 'bz2') xgb_feats = pd.read_csv(feats_src + '/the_1owl/owl_train.csv') y_train = xgb_feats[['is_duplicate']] del xgb_feats gc.collect() use_xgb = True if use_xgb: run_xgb(X_train, y_train) else: run_lgb(X_train, y_train) gbm = xgb.Booster(model_file = 'saved_models/XGB/XGB_500cols_experiments.txt') dtrain = xgb.DMatrix(X_train, label = y_train) mapper = {'f{0}'.format(i): v for i, v in enumerate(dtrain.feature_names)} importance = {mapper[k]: v for k, v in gbm.get_fscore().items()} importance = sorted(importance.items(), key=lambda x:x[1], reverse=True)[:20] df_importance = pd.DataFrame(importance, columns=['feature', 'fscore']) df_importance['fscore'] = df_importance['fscore'] / df_importance['fscore'].sum() plt.figure() df_importance.plot() df_importance.plot(kind='barh', x='feature', y='fscore', legend=False, figsize=(10, 18)) plt.title('XGBoost Feature Importance') plt.xlabel('relative importance') retain_cols = df_importance['feature'] X_train2 = X_train.loc[:, retain_cols] retain_cols.to_pickle('Colnames_best500features.pkl') ```
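As a follow-up, the saved column list can be reloaded in a later session to cut the feature matrix down to the retained features before retraining. A minimal sketch, assuming the pickle written above exists and `X_train` is the same frame as in this notebook:

```
# Reload the saved best-feature column list and subset the training matrix
import pandas as pd

retain_cols = pd.read_pickle('Colnames_best500features.pkl')
X_train_best = X_train.loc[:, retain_cols]
print('Reduced training data shape:', X_train_best.shape)
```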
# Homework 1 The maximum score of this homework is 100+10 points. Grading is listed in this table: | Grade | Score range | | --- | --- | | 5 | 85+ | | 4 | 70-84 | | 3 | 55-69 | | 2 | 40-54 | | 1 | 0-39 | Most exercises include tests which should pass if your solution is correct. However successful test do not guarantee that your solution is correct. The homework is partially autograded using many hidden tests. Test cells cannot be modified and empty cells cannot be deleted. Your solution should replace placeholder lines such as: ### YOUR CODE HERE raise NotImplementedError() Please do not add new cells, they will be ignored by the autograder. **VERY IMPORTANT** Before submitting your solution (pushing to the git repo), run your notebook with `Kernel -> Restart & Run All` and make sure it runs without exceptions. ## Submission GitHub Classroom will accept your last pushed version before the deadline. You do not need to send the homework to the instructor. ## Plagiarism When preparing their homework, students are reminded to pay special attention to Title 32, Sections 92-93 of Code of Studies (quoted below). Any content from external sources must be stated in the students own words AND accompanied by citations. Copying and pasting from an external source should be avoided and any text copied must be placed between quotation marks. Reports that violate these rules cannot receive a passing grade. "**Section 92** (1) The works of another person will be used as follows: a) if a work of another person is used in whole or in part (e.g. by copying, citation, translation from another language or presentation), the source and the name of the author will be indicated if this name is included in the source or – in case of orally presented works – may be clearly identified; b) the work of another person or any part of that will be used – up to a quantity reasonably corresponding to the nature and purpose of the student work – identified as quotations. (2) Instructors are entitled to review compliance with requirements in this article with computer programmes and databases. (3) The use of works of another person and the acknowledgement of use will be governed by applicable laws and the relevant rules of the specific discipline. **Section 93** (1) If a student fails to meet rules regarding use of works of another person in whole or in part, the student work will be considered as not assessable and the student will not be allowed to obtain the credit of the concerned subject in the specific term. (2) It will be deemed a disciplinary offence if a student – in breach of the rules regarding use of works of another person – submits or presents a work of another person fully or in a significant part verbatim (word for word) or in terms of its basic concepts or the combined version of several works of another person(s) as their own work. (3) Based on subsection (1) of Section 52/A. of the Higher Education Act, compliance with the rules regarding the use of works of another person in a master thesis may be reviewed up to five years following the issue of the degree certificate. In case of violation of the above rules, section 52/A of the Higher Education Act will apply." (BME Code of Studies, p.50) ## 1. Modified Levenshtein distance (20 points) Standard Levenshtein distance assigns an integer edit distance to any two strings measuring how difficult it would be to turn one string into the other. See [Wikipedia](https://en.wikipedia.org/wiki/Levenshtein_distance). 
Create a modified version of the Levenshtein distance that discounts letters that are close to each other on the English keyboard. The keyboard variable below contains the English keyboard organized into a table. Two letters are considered close to each other if both their row and column distances are at most 1. Close keys are at distance 0.5, others are at distance 1. This table lists a few examples: | | | distance | | ---- | ---- | ---- | | q | w | 0.5 | | q | e | 1 | | s | w | 0.5 | | f | t | 0.5 | | f | y | 1 | | f | f | 0 | Any letter outside the lowercase English alphabet (see the `keyboard` variable below) is not considered close, and you do not need to discount it. ``` keyboard = [ ['q', 'w', 'e', 'r', 't', 'y', 'u', 'i', 'o', 'p'], ['a', 's', 'd', 'f', 'g', 'h', 'j', 'k', 'l'], ['z', 'x', 'c', 'v', 'b', 'n', 'm'], ] keyboard_mapping = {} for i, row in enumerate(keyboard): for j, c in enumerate(row): keyboard_mapping[c] = (i, j) def keyboard_edit_distance(str1, str2): # YOUR CODE HERE raise NotImplementedError() assert keyboard_edit_distance("abc", "abc") == 0 assert keyboard_edit_distance("abc", "ab") == 1 assert keyboard_edit_distance("a", "s") == 0.5 ``` ## 2. Replace rare words (10 points) Write a function that takes a text and a number $N$ as parameters and replaces every word other than the most common $N$ in the text with a common symbol. The symbol by default is `__RARE__` but it can be redefined. Your code should split on spaces only. You can derive the function definition from the tests. ``` # YOUR CODE HERE raise NotImplementedError() assert replace_rare_words("a b a a a b b b c d d d", 2) == "a b a a a b b b __RARE__ __RARE__ __RARE__ __RARE__" assert replace_rare_words("a b a b b c", 2, rare_symbol="rare") == "a b a b b rare" ``` ## 3. MutableString (30 points) Python strings are immutable. Create a mutable string class. The internal representation should be mutable too. Implement the following features (see the tests below). - initialization from `str`, - initialization from text file (loads the file's content into the string), - assignment (i.e. modifying a character), - if the index is out of range, it should fill the blanks with spaces (see the tests below) - conversion to built-in `str` and `list`. The latter is a list of the characters. - addition with other `MutableString` instances and built-in strings, - multiplication with integers. Multiplying a string with 3 means repeating the string 3 times. - built-in `len` function, - comparison with strings, - substring containment with both built-in strings and other MutableString objects, - in-place upper and lowercasing, - shallow copying, - iteration. Please read all of the tests before writing your solution. ``` class MutableString(object): # YOUR CODE HERE raise NotImplementedError() ``` ### Briefly describe what internal representation you chose and why you chose it. What other possibilities did you think about?
(double-click on the text to edit it) YOUR ANSWER HERE ### MutableString tests ``` # initialization m = MutableString() assert m == "" and len(m) == 0 # initialization from file import tempfile import os with tempfile.NamedTemporaryFile(mode="w+b", delete=False) as tmpfile: tmpfile.write("abc".encode("utf8")) m = MutableString.from_file(tmpfile.name) assert m == "abc" os.remove(tmpfile.name) # iteration m = MutableString("abc") for c in m: print(c) # comparison m1 = MutableString("abc") assert m1 == "abc" m2 = MutableString("abc") assert id(m1) != id(m2) assert m1 == m2 # item setting tests m1 = MutableString("abc") assert m1[0] == "a" and m1[2] == "c" m1[1] = "d" assert(m1[1] == "d") # slicing m1 = MutableString("abc") assert m1[1:] == "bc" m1[2:4] = "ad" assert m1 == "abad" # conversions m1 = MutableString("abc d") assert(list(m1) == list("abc d")) assert(str(m1) == "abc d") # concatenation m1 = MutableString("abc") m2 = MutableString("def") assert m1 + m2 == "abcdef" assert m2 + m1 == "defabc" # multiplication m1 = MutableString("abc") m3 = m1 * 3 assert m3 == "abcabcabc" # operator= m1 = MutableString("abc") m2 = m1 m2[0] = "A" assert m1[1:] == "bc" # copy from copy import copy m1 = MutableString("abc") m2 = copy(m1) m2[0] = "A" assert m2 == "Abc" # concatenation with strings m1 = MutableString("abc") m2 = m1 + "def" assert m2 == "abcdef" m3 = "def" + m1 assert m3 == "defabc" # in place lowercasing and uppercasing m1 = MutableString("aBc") m1.to_upper() assert m1 == "ABC" m1 = MutableString("aBc") m1.to_lower() # containment test m1 = MutableString("abcdef") assert "bcd" in m1 m2 = MutableString("bcd") assert m2 in m1 ``` # Text generation ## 3.1 (Same as a laboratory exercise) Write a function that computes N-gram frequencies in a string. (0 point) ``` # YOUR CODE HERE raise NotImplementedError() assert(count_ngram_freqs("abcc", 1) == {"a": 1, "b": 1, "c": 2}) assert(count_ngram_freqs("abccab", 2) == {"ab": 2, "bc": 1, "cc": 1, "ca": 1}) ``` ## 3.2 Define a random text generator function (20 points). The function takes 4 arguments: 1. starting text (at least $N-1$ long), 2. target length: length of the output string, 3. n-gram frequency dictionary, 4. N, length of the n-grams. The function generates one character at a time given the last $N-1$ characters. The probability of `c` being generated after `ab` is defined as: $$ P(c | a b ) = \frac{\text{freq}(a b c)}{\text{freq}(a b)}, $$ where $\text{freq}(a b c)$ is obtained by counting how many times `abc` occurs in the training corpus (`count_ngram_freqs` function). If the generated text ends with a $N-1$-gram that does not occur in the training data, generate the next character from the full character or ngram distribution. ``` # YOUR CODE HERE raise NotImplementedError() toy_freqs = count_ngram_freqs("abcabcda", 3) gen = generate_text("abc", 5, toy_freqs, 3) assert(len(gen) == 5) assert(set(gen) <= set("abcd")) ``` ## 3.3 Test your solution on a small Wikipedia corpus (10 points). Collect a sample of at least 1 million characters from Wikipedia using the wikipedia module. ``` # YOUR CODE HERE raise NotImplementedError() ``` ## \*3.4 Smoothing (extra exercise, 10 points) Implement one or more smoothing methods such as Jelinek-Mercer smoothing or Katz's backoff. Train it on your Wikipedia corpus and generate an example. 
https://nlp.stanford.edu/~wcmac/papers/20050421-smoothing-tutorial.pdf ``` # YOUR CODE HERE raise NotImplementedError() ``` ## Code cleanness and PEP8 (10 points) This cell is here for technical reasons; you will receive feedback on your code quality here. You do not need to write anything here. YOUR ANSWER HERE
# Optimiztion with `mystic` ``` %matplotlib notebook ``` `mystic`: approximates that `scipy.optimize` interface ``` """ Example: - Minimize Rosenbrock's Function with Nelder-Mead. - Plot of parameter convergence to function minimum. Demonstrates: - standard models - minimal solver interface - parameter trajectories using retall """ # Nelder-Mead solver from mystic.solvers import fmin # Rosenbrock function from mystic.models import rosen # tools import pylab if __name__ == '__main__': # initial guess x0 = [0.8,1.2,0.7] # use Nelder-Mead to minimize the Rosenbrock function solution = fmin(rosen, x0, disp=0, retall=1) allvecs = solution[-1] # plot the parameter trajectories pylab.plot([i[0] for i in allvecs]) pylab.plot([i[1] for i in allvecs]) pylab.plot([i[2] for i in allvecs]) # draw the plot pylab.title("Rosenbrock parameter convergence") pylab.xlabel("Nelder-Mead solver iterations") pylab.ylabel("parameter value") pylab.legend(["x", "y", "z"]) pylab.show() ``` Diagnostic tools * Callbacks ``` """ Example: - Minimize Rosenbrock's Function with Nelder-Mead. - Dynamic plot of parameter convergence to function minimum. Demonstrates: - standard models - minimal solver interface - parameter trajectories using callback - solver interactivity """ # Nelder-Mead solver from mystic.solvers import fmin # Rosenbrock function from mystic.models import rosen # tools from mystic.tools import getch import pylab pylab.ion() # draw the plot def plot_frame(): pylab.title("Rosenbrock parameter convergence") pylab.xlabel("Nelder-Mead solver iterations") pylab.ylabel("parameter value") pylab.draw() return iter = 0 step, xval, yval, zval = [], [], [], [] # plot the parameter trajectories def plot_params(params): global iter, step, xval, yval, zval step.append(iter) xval.append(params[0]) yval.append(params[1]) zval.append(params[2]) pylab.plot(step,xval,'b-') pylab.plot(step,yval,'g-') pylab.plot(step,zval,'r-') pylab.legend(["x", "y", "z"]) pylab.draw() iter += 1 return if __name__ == '__main__': # initial guess x0 = [0.8,1.2,0.7] # suggest that the user interacts with the solver print("NOTE: while solver is running, press 'Ctrl-C' in console window") getch() plot_frame() # use Nelder-Mead to minimize the Rosenbrock function solution = fmin(rosen, x0, disp=1, callback=plot_params, handler=True) print(solution) # don't exit until user is ready getch() ``` **NOTE** IPython does not handle shell prompt interactive programs well, so the above should be run from a command prompt. An IPython-safe version is below. ``` """ Example: - Minimize Rosenbrock's Function with Powell's method. - Dynamic print of parameter convergence to function minimum. Demonstrates: - standard models - minimal solver interface - parameter trajectories using callback """ # Powell's Directonal solver from mystic.solvers import fmin_powell # Rosenbrock function from mystic.models import rosen iter = 0 # plot the parameter trajectories def print_params(params): global iter from numpy import asarray print("Generation %d has best fit parameters: %s" % (iter,asarray(params))) iter += 1 return if __name__ == '__main__': # initial guess x0 = [0.8,1.2,0.7] print_params(x0) # use Powell's method to minimize the Rosenbrock function solution = fmin_powell(rosen, x0, disp=1, callback=print_params, handler=False) print(solution) ``` * Monitors ``` """ Example: - Minimize Rosenbrock's Function with Powell's method. 
Demonstrates: - standard models - minimal solver interface - customized monitors """ # Powell's Directonal solver from mystic.solvers import fmin_powell # Rosenbrock function from mystic.models import rosen # tools from mystic.monitors import VerboseLoggingMonitor if __name__ == '__main__': print("Powell's Method") print("===============") # initial guess x0 = [1.5, 1.5, 0.7] # configure monitor stepmon = VerboseLoggingMonitor(1,1) # use Powell's method to minimize the Rosenbrock function solution = fmin_powell(rosen, x0, itermon=stepmon) print(solution) import mystic mystic.log_reader('log.txt') ``` * Solution trajectory and model plotting ``` import mystic mystic.model_plotter(mystic.models.rosen, 'log.txt', kwds='-d -x 1 -b "-2:2:.1, -2:2:.1, 1"') ``` Solver "tuning" and extension * Solver class interface ``` """ Example: - Solve 8th-order Chebyshev polynomial coefficients with DE. - Callable plot of fitting to Chebyshev polynomial. - Monitor Chi-Squared for Chebyshev polynomial. Demonstrates: - standard models - expanded solver interface - built-in random initial guess - customized monitors and termination conditions - customized DE mutation strategies - use of monitor to retrieve results information """ # Differential Evolution solver from mystic.solvers import DifferentialEvolutionSolver2 # Chebyshev polynomial and cost function from mystic.models.poly import chebyshev8, chebyshev8cost from mystic.models.poly import chebyshev8coeffs # tools from mystic.termination import VTR from mystic.strategy import Best1Exp from mystic.monitors import VerboseMonitor from mystic.tools import getch, random_seed from mystic.math import poly1d import pylab pylab.ion() # draw the plot def plot_exact(): pylab.title("fitting 8th-order Chebyshev polynomial coefficients") pylab.xlabel("x") pylab.ylabel("f(x)") import numpy x = numpy.arange(-1.2, 1.2001, 0.01) exact = chebyshev8(x) pylab.plot(x,exact,'b-') pylab.legend(["Exact"]) pylab.axis([-1.4,1.4,-2,8],'k-') pylab.draw() return # plot the polynomial def plot_solution(params,style='y-'): import numpy x = numpy.arange(-1.2, 1.2001, 0.01) f = poly1d(params) y = f(x) pylab.plot(x,y,style) pylab.legend(["Exact","Fitted"]) pylab.axis([-1.4,1.4,-2,8],'k-') pylab.draw() return if __name__ == '__main__': print("Differential Evolution") print("======================") # set range for random initial guess ndim = 9 x0 = [(-100,100)]*ndim random_seed(123) # draw frame and exact coefficients plot_exact() # configure monitor stepmon = VerboseMonitor(50) # use DE to solve 8th-order Chebyshev coefficients npop = 10*ndim solver = DifferentialEvolutionSolver2(ndim,npop) solver.SetRandomInitialPoints(min=[-100]*ndim, max=[100]*ndim) solver.SetGenerationMonitor(stepmon) solver.enable_signal_handler() solver.Solve(chebyshev8cost, termination=VTR(0.01), strategy=Best1Exp, \ CrossProbability=1.0, ScalingFactor=0.9, \ sigint_callback=plot_solution) solution = solver.Solution() # use monitor to retrieve results information iterations = len(stepmon) cost = stepmon.y[-1] print("Generation %d has best Chi-Squared: %f" % (iterations, cost)) # use pretty print for polynomials print(poly1d(solution)) # compare solution with actual 8th-order Chebyshev coefficients print("\nActual Coefficients:\n %s\n" % poly1d(chebyshev8coeffs)) # plot solution versus exact coefficients plot_solution(solution) from mystic.solvers import DifferentialEvolutionSolver print("\n".join([i for i in dir(DifferentialEvolutionSolver) if not i.startswith('_')])) ``` * Algorithm configurability * Termination 
conditions ``` from mystic.termination import VTR, ChangeOverGeneration, And, Or stop = Or(And(VTR(), ChangeOverGeneration()), VTR(1e-8)) from mystic.models import rosen from mystic.monitors import VerboseMonitor from mystic.solvers import DifferentialEvolutionSolver solver = DifferentialEvolutionSolver(3,40) solver.SetRandomInitialPoints([-10,-10,-10],[10,10,10]) solver.SetGenerationMonitor(VerboseMonitor(10)) solver.SetTermination(stop) solver.SetObjective(rosen) solver.SetStrictRanges([-10,-10,-10],[10,10,10]) solver.SetEvaluationLimits(generations=600) solver.Solve() print(solver.bestSolution) ``` * Solver population ``` from mystic.solvers import DifferentialEvolutionSolver from mystic.math import Distribution import numpy as np import pylab # build a mystic distribution instance dist = Distribution(np.random.normal, 5, 1) # use the distribution instance as the initial population solver = DifferentialEvolutionSolver(3,20) solver.SetSampledInitialPoints(dist) # visualize the initial population pylab.hist(np.array(solver.population).ravel()) pylab.show() ``` **EXERCISE:** Use `mystic` to find the minimum for the `peaks` test function, with the bound specified by the `mystic.models.peaks` documentation. **EXERCISE:** Use `mystic` to do a fit to the noisy data in the `scipy.optimize.curve_fit` example (the least squares fit). Constraints "operators" (i.e. kernel transformations) PENALTY: $\psi(x) = f(x) + k*p(x)$ CONSTRAINT: $\psi(x) = f(c(x)) = f(x')$ ``` from mystic.constraints import * from mystic.penalty import quadratic_equality from mystic.coupler import inner from mystic.math import almostEqual from mystic.tools import random_seed random_seed(213) def test_penalize(): from mystic.math.measures import mean, spread def mean_constraint(x, target): return mean(x) - target def range_constraint(x, target): return spread(x) - target @quadratic_equality(condition=range_constraint, kwds={'target':5.0}) @quadratic_equality(condition=mean_constraint, kwds={'target':5.0}) def penalty(x): return 0.0 def cost(x): return abs(sum(x) - 5.0) from mystic.solvers import fmin from numpy import array x = array([1,2,3,4,5]) y = fmin(cost, x, penalty=penalty, disp=False) assert round(mean(y)) == 5.0 assert round(spread(y)) == 5.0 assert round(cost(y)) == 4*(5.0) def test_solve(): from mystic.math.measures import mean def mean_constraint(x, target): return mean(x) - target def parameter_constraint(x): return x[-1] - x[0] @quadratic_equality(condition=mean_constraint, kwds={'target':5.0}) @quadratic_equality(condition=parameter_constraint) def penalty(x): return 0.0 x = solve(penalty, guess=[2,3,1]) assert round(mean_constraint(x, 5.0)) == 0.0 assert round(parameter_constraint(x)) == 0.0 assert issolution(penalty, x) def test_solve_constraint(): from mystic.math.measures import mean @with_mean(1.0) def constraint(x): x[-1] = x[0] return x x = solve(constraint, guess=[2,3,1]) assert almostEqual(mean(x), 1.0, tol=1e-15) assert x[-1] == x[0] assert issolution(constraint, x) def test_as_constraint(): from mystic.math.measures import mean, spread def mean_constraint(x, target): return mean(x) - target def range_constraint(x, target): return spread(x) - target @quadratic_equality(condition=range_constraint, kwds={'target':5.0}) @quadratic_equality(condition=mean_constraint, kwds={'target':5.0}) def penalty(x): return 0.0 ndim = 3 constraints = as_constraint(penalty, solver='fmin') #XXX: this is expensive to evaluate, as there are nested optimizations from numpy import arange x = arange(ndim) _x = constraints(x) 
assert round(mean(_x)) == 5.0 assert round(spread(_x)) == 5.0 assert round(penalty(_x)) == 0.0 def cost(x): return abs(sum(x) - 5.0) npop = ndim*3 from mystic.solvers import diffev y = diffev(cost, x, npop, constraints=constraints, disp=False, gtol=10) assert round(mean(y)) == 5.0 assert round(spread(y)) == 5.0 assert round(cost(y)) == 5.0*(ndim-1) def test_as_penalty(): from mystic.math.measures import mean, spread @with_spread(5.0) @with_mean(5.0) def constraint(x): return x penalty = as_penalty(constraint) from numpy import array x = array([1,2,3,4,5]) def cost(x): return abs(sum(x) - 5.0) from mystic.solvers import fmin y = fmin(cost, x, penalty=penalty, disp=False) assert round(mean(y)) == 5.0 assert round(spread(y)) == 5.0 assert round(cost(y)) == 4*(5.0) def test_with_penalty(): from mystic.math.measures import mean, spread @with_penalty(quadratic_equality, kwds={'target':5.0}) def penalty(x, target): return mean(x) - target def cost(x): return abs(sum(x) - 5.0) from mystic.solvers import fmin from numpy import array x = array([1,2,3,4,5]) y = fmin(cost, x, penalty=penalty, disp=False) assert round(mean(y)) == 5.0 assert round(cost(y)) == 4*(5.0) def test_with_mean(): from mystic.math.measures import mean, impose_mean @with_mean(5.0) def mean_of_squared(x): return [i**2 for i in x] from numpy import array x = array([1,2,3,4,5]) y = impose_mean(5, [i**2 for i in x]) assert mean(y) == 5.0 assert mean_of_squared(x) == y def test_with_mean_spread(): from mystic.math.measures import mean, spread, impose_mean, impose_spread @with_spread(50.0) @with_mean(5.0) def constrained_squared(x): return [i**2 for i in x] from numpy import array x = array([1,2,3,4,5]) y = impose_spread(50.0, impose_mean(5.0,[i**2 for i in x])) assert almostEqual(mean(y), 5.0, tol=1e-15) assert almostEqual(spread(y), 50.0, tol=1e-15) assert constrained_squared(x) == y def test_constrained_solve(): from mystic.math.measures import mean, spread @with_spread(5.0) @with_mean(5.0) def constraints(x): return x def cost(x): return abs(sum(x) - 5.0) from mystic.solvers import fmin_powell from numpy import array x = array([1,2,3,4,5]) y = fmin_powell(cost, x, constraints=constraints, disp=False) assert almostEqual(mean(y), 5.0, tol=1e-15) assert almostEqual(spread(y), 5.0, tol=1e-15) assert almostEqual(cost(y), 4*(5.0), tol=1e-6) if __name__ == '__main__': test_penalize() test_solve() test_solve_constraint() test_as_constraint() test_as_penalty() test_with_penalty() test_with_mean() test_with_mean_spread() test_constrained_solve() from mystic.coupler import and_, or_, not_ from mystic.constraints import and_ as _and, or_ as _or, not_ as _not if __name__ == '__main__': import numpy as np from mystic.penalty import linear_equality, quadratic_equality from mystic.constraints import as_constraint x = x1,x2,x3 = (5., 5., 1.) f = f1,f2,f3 = (np.sum, np.prod, np.average) k = 100 solver = 'fmin_powell' #'diffev' ptype = quadratic_equality # case #1: couple penalties into a single constraint p1 = lambda x: abs(x1 - f1(x)) p2 = lambda x: abs(x2 - f2(x)) p3 = lambda x: abs(x3 - f3(x)) p = (p1,p2,p3) p = [ptype(pi)(lambda x:0.) 
for pi in p] penalty = and_(*p, k=k) constraint = as_constraint(penalty, solver=solver) x = [1,2,3,4,5] x_ = constraint(x) assert round(f1(x_)) == round(x1) assert round(f2(x_)) == round(x2) assert round(f3(x_)) == round(x3) # case #2: couple constraints into a single constraint from mystic.math.measures import impose_product, impose_sum, impose_mean from mystic.constraints import as_penalty from mystic import random_seed random_seed(123) t = t1,t2,t3 = (impose_sum, impose_product, impose_mean) c1 = lambda x: t1(x1, x) c2 = lambda x: t2(x2, x) c3 = lambda x: t3(x3, x) c = (c1,c2,c3) k=1 solver = 'buckshot' #'diffev' ptype = linear_equality #quadratic_equality p = [as_penalty(ci, ptype) for ci in c] penalty = and_(*p, k=k) constraint = as_constraint(penalty, solver=solver) x = [1,2,3,4,5] x_ = constraint(x) assert round(f1(x_)) == round(x1) assert round(f2(x_)) == round(x2) assert round(f3(x_)) == round(x3) # etc: more coupling of constraints from mystic.constraints import with_mean, discrete @with_mean(5.0) def meanie(x): return x @discrete(list(range(11))) def integers(x): return x c = _and(integers, meanie) x = c([1,2,3]) assert x == integers(x) == meanie(x) x = c([9,2,3]) assert x == integers(x) == meanie(x) x = c([0,-2,3]) assert x == integers(x) == meanie(x) x = c([9,-200,344]) assert x == integers(x) == meanie(x) c = _or(meanie, integers) x = c([1.1234, 4.23412, -9]) assert x == meanie(x) and x != integers(x) x = c([7.0, 10.0, 0.0]) assert x == integers(x) and x != meanie(x) x = c([6.0, 9.0, 0.0]) assert x == integers(x) == meanie(x) x = c([3,4,5]) assert x == integers(x) and x != meanie(x) x = c([3,4,5.5]) assert x == meanie(x) and x != integers(x) c = _not(integers) x = c([1,2,3]) assert x != integers(x) and x != [1,2,3] and x == c(x) x = c([1.1,2,3]) assert x != integers(x) and x == [1.1,2,3] and x == c(x) c = _not(meanie) x = c([1,2,3]) assert x != meanie(x) and x == [1,2,3] and x == c(x) x = c([4,5,6]) assert x != meanie(x) and x != [4,5,6] and x == c(x) c = _not(_and(meanie, integers)) x = c([4,5,6]) assert x != meanie(x) and x != integers(x) and x != [4,5,6] and x == c(x) # etc: more coupling of penalties from mystic.penalty import quadratic_inequality p1 = lambda x: sum(x) - 5 p2 = lambda x: min(i**2 for i in x) p = p1,p2 p = [quadratic_inequality(pi)(lambda x:0.) for pi in p] p1,p2 = p penalty = and_(*p) x = [[1,2],[-2,-1],[5,-5]] for xi in x: assert p1(xi) + p2(xi) == penalty(xi) penalty = or_(*p) for xi in x: assert min(p1(xi),p2(xi)) == penalty(xi) penalty = not_(p1) for xi in x: assert bool(p1(xi)) != bool(penalty(xi)) penalty = not_(p2) for xi in x: assert bool(p2(xi)) != bool(penalty(xi)) ``` In addition to being able to generically apply information as a penalty, `mystic` provides the ability to construct constraints "operators" -- essentially applying kernel transformations that reduce optimizer search space to the space of solutions that satisfy the constraints. This can greatly accelerate convergence to a solution, as the space that the optimizer can explore is restricted. ``` """ Example: - Minimize Rosenbrock's Function with Powell's method. 
Demonstrates: - standard models - minimal solver interface - parameter constraints solver and constraints factory decorator - statistical parameter constraints - customized monitors """ # Powell's Directonal solver from mystic.solvers import fmin_powell # Rosenbrock function from mystic.models import rosen # tools from mystic.monitors import VerboseMonitor from mystic.math.measures import mean, impose_mean if __name__ == '__main__': print("Powell's Method") print("===============") # initial guess x0 = [0.8,1.2,0.7] # use the mean constraints factory decorator from mystic.constraints import with_mean # define constraints function @with_mean(1.0) def constraints(x): # constrain the last x_i to be the same value as the first x_i x[-1] = x[0] return x # configure monitor stepmon = VerboseMonitor(1) # use Powell's method to minimize the Rosenbrock function solution = fmin_powell(rosen, x0, constraints=constraints, itermon=stepmon) print(solution) ``` * Range (i.e. 'box') constraints Use `solver.SetStrictRange`, or the `bounds` keyword on the solver function interface. * Symbolic constraints interface ``` %%file spring.py "a Tension-Compression String" def objective(x): x0,x1,x2 = x return x0**2 * x1 * (x2 + 2) bounds = [(0,100)]*3 # with penalty='penalty' applied, solution is: xs = [0.05168906, 0.35671773, 11.28896619] ys = 0.01266523 from mystic.symbolic import generate_constraint, generate_solvers, solve from mystic.symbolic import generate_penalty, generate_conditions equations = """ 1.0 - (x1**3 * x2)/(71785*x0**4) <= 0.0 (4*x1**2 - x0*x1)/(12566*x0**3 * (x1 - x0)) + 1./(5108*x0**2) - 1.0 <= 0.0 1.0 - 140.45*x0/(x2 * x1**2) <= 0.0 (x0 + x1)/1.5 - 1.0 <= 0.0 """ pf = generate_penalty(generate_conditions(equations), k=1e12) if __name__ == '__main__': from mystic.solvers import diffev2 result = diffev2(objective, x0=bounds, bounds=bounds, penalty=pf, npop=40, gtol=500, disp=True, full_output=True) print(result[0]) equations = """ 1.0 - (x1**3 * x2)/(71785*x0**4) <= 0.0 (4*x1**2 - x0*x1)/(12566*x0**3 * (x1 - x0)) + 1./(5108*x0**2) - 1.0 <= 0.0 1.0 - 140.45*x0/(x2 * x1**2) <= 0.0 (x0 + x1)/1.5 - 1.0 <= 0.0 """ from mystic.symbolic import generate_constraint, generate_solvers, solve from mystic.symbolic import generate_penalty, generate_conditions ineql, eql = generate_conditions(equations) print("CONVERTED SYMBOLIC TO SINGLE CONSTRAINTS FUNCTIONS") print(ineql) print(eql) print("\nTHE INDIVIDUAL INEQUALITIES") for f in ineql: print(f.__doc__) print("\nGENERATED THE PENALTY FUNCTION FOR ALL CONSTRAINTS") pf = generate_penalty((ineql, eql)) print(pf.__doc__) x = [-0.1, 0.5, 11.0] print("\nPENALTY FOR {}: {}".format(x, pf(x))) ``` * Penatly functions ``` equations = """ 1.0 - (x1**3 * x2)/(71785*x0**4) <= 0.0 (4*x1**2 - x0*x1)/(12566*x0**3 * (x1 - x0)) + 1./(5108*x0**2) - 1.0 <= 0.0 1.0 - 140.45*x0/(x2 * x1**2) <= 0.0 (x0 + x1)/1.5 - 1.0 <= 0.0 """ "a Tension-Compression String" from spring import objective, bounds, xs, ys from mystic.penalty import quadratic_inequality def penalty1(x): # <= 0.0 return 1.0 - (x[1]**3 * x[2])/(71785*x[0]**4) def penalty2(x): # <= 0.0 return (4*x[1]**2 - x[0]*x[1])/(12566*x[0]**3 * (x[1] - x[0])) + 1./(5108*x[0]**2) - 1.0 def penalty3(x): # <= 0.0 return 1.0 - 140.45*x[0]/(x[2] * x[1]**2) def penalty4(x): # <= 0.0 return (x[0] + x[1])/1.5 - 1.0 @quadratic_inequality(penalty1, k=1e12) @quadratic_inequality(penalty2, k=1e12) @quadratic_inequality(penalty3, k=1e12) @quadratic_inequality(penalty4, k=1e12) def penalty(x): return 0.0 if __name__ == '__main__': from 
mystic.solvers import diffev2 result = diffev2(objective, x0=bounds, bounds=bounds, penalty=penalty, npop=40, gtol=500, disp=True, full_output=True) print(result[0]) ``` * "Operators" that directly constrain search space ``` """ Crypto problem in Google CP Solver. Prolog benchmark problem ''' Name : crypto.pl Original Source: P. Van Hentenryck's book Adapted by : Daniel Diaz - INRIA France Date : September 1992 ''' """ def objective(x): return 0.0 nletters = 26 bounds = [(1,nletters)]*nletters # with penalty='penalty' applied, solution is: # A B C D E F G H I J K L M N O P Q xs = [ 5, 13, 9, 16, 20, 4, 24, 21, 25, 17, 23, 2, 8, 12, 10, 19, 7, \ # R S T U V W X Y Z 11, 15, 3, 1, 26, 6, 22, 14, 18] ys = 0.0 # constraints equations = """ B + A + L + L + E + T - 45 == 0 C + E + L + L + O - 43 == 0 C + O + N + C + E + R + T - 74 == 0 F + L + U + T + E - 30 == 0 F + U + G + U + E - 50 == 0 G + L + E + E - 66 == 0 J + A + Z + Z - 58 == 0 L + Y + R + E - 47 == 0 O + B + O + E - 53 == 0 O + P + E + R + A - 65 == 0 P + O + L + K + A - 59 == 0 Q + U + A + R + T + E + T - 50 == 0 S + A + X + O + P + H + O + N + E - 134 == 0 S + C + A + L + E - 51 == 0 S + O + L + O - 37 == 0 S + O + N + G - 61 == 0 S + O + P + R + A + N + O - 82 == 0 T + H + E + M + E - 72 == 0 V + I + O + L + I + N - 100 == 0 W + A + L + T + Z - 34 == 0 """ var = list('ABCDEFGHIJKLMNOPQRSTUVWXYZ') # Let's say we know the vowels. bounds[0] = (5,5) # A bounds[4] = (20,20) # E bounds[8] = (25,25) # I bounds[14] = (10,10) # O bounds[20] = (1,1) # U from mystic.constraints import unique, near_integers, has_unique from mystic.symbolic import generate_penalty, generate_conditions pf = generate_penalty(generate_conditions(equations,var),k=1) from mystic.penalty import quadratic_equality @quadratic_equality(near_integers) @quadratic_equality(has_unique) def penalty(x): return pf(x) from numpy import round, hstack, clip def constraint(x): x = round(x).astype(int) # force round and convert type to int x = clip(x, 1,nletters) #XXX: hack to impose bounds x = unique(x, range(1,nletters+1)) return x if __name__ == '__main__': from mystic.solvers import diffev2 from mystic.monitors import Monitor, VerboseMonitor mon = VerboseMonitor(50) result = diffev2(objective, x0=bounds, bounds=bounds, penalty=pf, constraints=constraint, npop=52, ftol=1e-8, gtol=1000, disp=True, full_output=True, cross=0.1, scale=0.9, itermon=mon) print(result[0]) ``` Special cases * Integer and mixed integer programming ``` """ Eq 10 in Google CP Solver. Standard benchmark problem. """ def objective(x): return 0.0 bounds = [(0,10)]*7 # with penalty='penalty' applied, solution is: xs = [6., 0., 8., 4., 9., 3., 9.] 
Special cases

* Integer and mixed integer programming

```
"""
Eq 10 in Google CP Solver.

Standard benchmark problem.
"""
def objective(x):
    return 0.0

bounds = [(0,10)]*7
# with penalty='penalty' applied, solution is:
xs = [6., 0., 8., 4., 9., 3., 9.]
ys = 0.0

# constraints
equations = """
98527*x0 + 34588*x1 + 5872*x2 + 59422*x4 + 65159*x6 - 1547604 - 30704*x3 - 29649*x5 == 0.0
98957*x1 + 83634*x2 + 69966*x3 + 62038*x4 + 37164*x5 + 85413*x6 - 1823553 - 93989*x0 == 0.0
900032 + 10949*x0 + 77761*x1 + 67052*x4 - 80197*x2 - 61944*x3 - 92964*x5 - 44550*x6 == 0.0
73947*x0 + 84391*x2 + 81310*x4 - 1164380 - 96253*x1 - 44247*x3 - 70582*x5 - 33054*x6 == 0.0
13057*x2 + 42253*x3 + 77527*x4 + 96552*x6 - 1185471 - 60152*x0 - 21103*x1 - 97932*x5 == 0.0
1394152 + 66920*x0 + 55679*x3 - 64234*x1 - 65337*x2 - 45581*x4 - 67707*x5 - 98038*x6 == 0.0
68550*x0 + 27886*x1 + 31716*x2 + 73597*x3 + 38835*x6 - 279091 - 88963*x4 - 76391*x5 == 0.0
76132*x1 + 71860*x2 + 22770*x3 + 68211*x4 + 78587*x5 - 480923 - 48224*x0 - 82817*x6 == 0.0
519878 + 94198*x1 + 87234*x2 + 37498*x3 - 71583*x0 - 25728*x4 - 25495*x5 - 70023*x6 == 0.0
361921 + 78693*x0 + 38592*x4 + 38478*x5 - 94129*x1 - 43188*x2 - 82528*x3 - 69025*x6 == 0.0
"""

from mystic.symbolic import generate_constraint, generate_solvers, solve

cf = generate_constraint(generate_solvers(solve(equations)))


if __name__ == '__main__':

    from mystic.solvers import diffev2

    result = diffev2(objective, x0=bounds, bounds=bounds, constraints=cf,
                     npop=4, gtol=1, disp=True, full_output=True)
    print(result[0])
```

**EXERCISE:** Solve the `chebyshev8.cost` example exactly, by applying the knowledge that the last term in the chebyshev polynomial will always be one. Use `numpy.round` or `mystic.constraints.integers` to constrain solutions to the set of integers. Does using `mystic.suppressed` to suppress small numbers accelerate the solution?

**EXERCISE:** Replace the symbolic constraints in the following "Pressure Vessel Design" code with explicit penalty functions (i.e. use a compound penalty built with `mystic.penalty.quadratic_inequality`).
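The first exercise points at `numpy.round` and `mystic.constraints.integers` as ways to keep candidate solutions on the integers. The `chebyshev8.cost` setup is not reproduced in this notebook, so the sketch below only illustrates the rounding operator itself, with `rosen` as a placeholder objective. (The "Pressure Vessel Design" code referenced by the second exercise follows immediately after this sketch.)

```
# Sketch of an integer-enforcing constraints operator (placeholder objective).
import numpy as np
from mystic.solvers import diffev2
from mystic.models import rosen

def integer_constraint(x):
    # project each candidate solution onto the nearest integers
    return np.round(x).tolist()

result = diffev2(rosen, x0=[(0,5)]*3, bounds=[(0,5)]*3,
                 constraints=integer_constraint, npop=20, gtol=100,
                 disp=False, full_output=True)
print(result[0])
```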
``` "Pressure Vessel Design" def objective(x): x0,x1,x2,x3 = x return 0.6224*x0*x2*x3 + 1.7781*x1*x2**2 + 3.1661*x0**2*x3 + 19.84*x0**2*x2 bounds = [(0,1e6)]*4 # with penalty='penalty' applied, solution is: xs = [0.72759093, 0.35964857, 37.69901188, 240.0] ys = 5804.3762083 from mystic.symbolic import generate_constraint, generate_solvers, solve from mystic.symbolic import generate_penalty, generate_conditions equations = """ -x0 + 0.0193*x2 <= 0.0 -x1 + 0.00954*x2 <= 0.0 -pi*x2**2*x3 - (4/3.)*pi*x2**3 + 1296000.0 <= 0.0 x3 - 240.0 <= 0.0 """ pf = generate_penalty(generate_conditions(equations), k=1e12) if __name__ == '__main__': from mystic.solvers import diffev2 from mystic.math import almostEqual result = diffev2(objective, x0=bounds, bounds=bounds, penalty=pf, npop=40, gtol=500, disp=True, full_output=True) print(result[0]) ``` * Linear and quadratic constraints ``` """ Minimize: f = 2*x[0] + 1*x[1] Subject to: -1*x[0] + 1*x[1] <= 1 1*x[0] + 1*x[1] >= 2 1*x[1] >= 0 1*x[0] - 2*x[1] <= 4 where: -inf <= x[0] <= inf """ def objective(x): x0,x1 = x return 2*x0 + x1 equations = """ -x0 + x1 - 1.0 <= 0.0 -x0 - x1 + 2.0 <= 0.0 x0 - 2*x1 - 4.0 <= 0.0 """ bounds = [(None, None),(0.0, None)] # with penalty='penalty' applied, solution is: xs = [0.5, 1.5] ys = 2.5 from mystic.symbolic import generate_conditions, generate_penalty pf = generate_penalty(generate_conditions(equations), k=1e3) from mystic.symbolic import generate_constraint, generate_solvers, simplify cf = generate_constraint(generate_solvers(simplify(equations))) if __name__ == '__main__': from mystic.solvers import fmin_powell from mystic.math import almostEqual result = fmin_powell(objective, x0=[0.0,0.0], bounds=bounds, constraint=cf, penalty=pf, disp=True, full_output=True, gtol=3) print(result[0]) ``` **EXERCISE:** Solve the `cvxopt` "qp" example with `mystic`. Use symbolic constaints, penalty functions, or constraints operators. If you get it quickly, do all three methods. Let's look at how `mystic` gives improved [solver workflow](workflow.ipynb)
# Libraries

```
import sys
import os
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import en_core_web_sm
from spacy.matcher import Matcher
%matplotlib inline

# to import the Database class from the data_collection folder
module_path = os.path.abspath(os.path.join('../..')+'/data/data_collection')
if module_path not in sys.path:
    sys.path.append(module_path)

# now that the folder is in the path, ../data_collection/database.py can be imported
from storage_managers.database import Database
```

# Import reviews data and target feature

```
db = Database()

# get halal-reviews (reviews that include the word 'halal')
reviews_sql = '''SELECT * FROM reviews'''
reviews_df = db.select_df(reviews_sql)
print('- {} reviews containing the word halal were scraped'.format(reviews_df.shape[0]))

# get target restaurants-of-interest list
file_path = '/Users/wesamazaizeh/Desktop/Projects/halal_o_meter/src/features/target_feature/label_target.csv'
target_df = pd.read_csv(file_path, index_col=0)
target_df['halal'] = target_df['halal'].str.replace('FLASE', 'FALSE')
target_df['halal'] = target_df['halal'].apply(lambda x: True if x == 'TRUE' else False)
halal_frac = target_df['halal'].sum()/target_df.shape[0]
print('- {:.0f}% of the {} restaurants-of-interest are halal'.format(halal_frac*100, target_df.shape[0]))
```

# Feature Engineering

## 1. 'halal' in business name

```
# patch missing platform_ids and mismatch in target data
# import original businesses data
rest_sql = '''SELECT * FROM businesses WHERE url LIKE '%yelp%' '''
rest_df = db.select_df(rest_sql)

# drop Aya Kitchen
aya_id = 'y6BfLt9Gvrq2JsJvjkjdIQ'
reviews_df.drop(reviews_df[reviews_df['restaurant_id'] == aya_id].index, inplace=True)

# patch platform_id in target_df
target_df = target_df.merge(rest_df[['platform_id', 'url']], how='left', on='url')
target_df.drop('platform_id_x', inplace=True, axis=1)
target_df = target_df.rename(columns={'platform_id_y' : 'platform_id'})

# group reviews per restaurant
grouped_reviews_df = reviews_df.groupby('restaurant_id').agg(lambda x: ' '.join(x))  # combine review text
grouped_reviews_df['review_date'] = grouped_reviews_df['review_date'].apply(lambda x: x.split())  # make dates list
grouped_reviews_df['review_count'] = grouped_reviews_df['review_date'].apply(lambda x: len(x))  # count reviews per restaurant
grouped_reviews_df.head()

# merge restaurant name
grouped_reviews_df = grouped_reviews_df.merge(target_df[['platform_id', 'name', 'total_review_count', 'halal']],
                                              how='left', left_index=True, right_on='platform_id')
grouped_reviews_df.index = grouped_reviews_df['platform_id']
grouped_reviews_df.drop('platform_id', inplace=True, axis=1)
grouped_reviews_df = grouped_reviews_df.rename(columns={'name': 'restaurant_name', 'review_count' : 'halal_review_count'})

# add boolean column for 'halal' in the restaurant name
grouped_reviews_df['halal_in_name'] = grouped_reviews_df.apply(lambda row: True if 'halal' in row['restaurant_name'].lower() else False, axis=1)

plt.figure(figsize=(14,8))
g = sns.countplot(x='halal', hue='halal_in_name', data=grouped_reviews_df)
g.set_title('Distribution of halal_in_name feature', size=14)
g_labels = g.set_xticklabels(g.get_xticklabels(), rotation=45, horizontalalignment='right')
for p in g.patches:
    g.annotate('{:.0f}'.format(p.get_height()), (p.get_x()+0.3, p.get_height()),
               ha='center', va='bottom', color='black')
```
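To attach numbers to the countplot above, a quick cross-tabulation of the new flag against the label works well. This is a sketch that reuses the `grouped_reviews_df` built in the cell above:

```
# Cross-tabulate the halal label against the halal_in_name flag,
# and report how precise the name heuristic is on its own.
ct = pd.crosstab(grouped_reviews_df['halal'], grouped_reviews_df['halal_in_name'])
print(ct)

flagged = grouped_reviews_df[grouped_reviews_df['halal_in_name']]
if len(flagged):
    print('Precision of halal_in_name: {:.0%}'.format(flagged['halal'].mean()))
```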
## 2. Percentage of reviews including the word 'halal' out of all reviews

```
# calculate percentage of halal-containing reviews out of total reviews
grouped_reviews_df['halal_review_percent'] = grouped_reviews_df.apply(lambda row: row['halal_review_count']/row['total_review_count'], axis=1)

# plot distribution for halal vs non-halal restaurants
halal_target = grouped_reviews_df[grouped_reviews_df['halal']]['halal_review_percent']
non_halal_target = grouped_reviews_df[~grouped_reviews_df['halal']]['halal_review_percent']

fig = plt.figure(figsize=(14,8))
g1 = sns.distplot(halal_target*100, kde=False)
g2 = sns.distplot(non_halal_target*100, kde=False)
g1.set_title('Distribution of the percent of halal-reviews out of all reviews for restaurants-of-interest', size=14)
g1.set_xlabel('% of halal reviews', size=14)
g1.set_ylabel('Count', size=14)
plt.legend(labels=['Halal restaurants','Non-halal restaurants'], prop={'size': 10})
plt.show()

grouped_reviews_df[(grouped_reviews_df['halal_review_percent'] > 0.9) & (~grouped_reviews_df['halal'])]
```

- The higher share of halal-related reviews at some non-halal restaurants most likely comes from food carts and places with very few reviews overall, where reviewers compare the food to The Halal Guys or to halal street cuisine.

## 3. spaCy NLP to find text patterns

### 3.1. Tokenize and match the pattern [LEMMA=be, LOWER=halal]

```
# run nlp over the grouped review text and save the Doc objects to the dataframe
# (commented out because it is slow; the nlp object and the 'doc' column are
#  assumed to exist from a previous run)
# nlp = en_core_web_sm.load()
# grouped_reviews_df['doc'] = grouped_reviews_df['review_text'].apply(lambda x: nlp(x))

# initialize matcher
matcher = Matcher(nlp.vocab)

# specify match pattern
pattern = [{'LEMMA': 'be'}, {'LOWER': 'halal'}]
matcher.add('be_halal', None, pattern)

# match and print the first few sentences
c = 10
for i, doc in grouped_reviews_df['doc'].iteritems():
    match = matcher(doc)
    count = len(match)
    grouped_reviews_df.loc[i, 'be_halal_count'] = count
    grouped_reviews_df.loc[i, 'be_halal'] = True if count > 0 else False
    if c > 0:
        for match_id, start, end in match:
            print(doc[start:end].sent)
        c -= 1

# plot distribution of the number of matches found for halal vs. non-halal restaurants
halal_target = grouped_reviews_df[grouped_reviews_df['halal']]['be_halal_count']
non_halal_target = grouped_reviews_df[~grouped_reviews_df['halal']]['be_halal_count']

fig = plt.figure(figsize=(14,8))
g1 = sns.distplot(halal_target, kde=False, norm_hist=True)
g2 = sns.distplot(non_halal_target, kde=False, norm_hist=True)
g1.set_title('Distribution of the count of "is halal" in reviews', size=14)
g1.set_xlabel('Count of is_halal mentions', size=14)
plt.legend(labels=['Halal restaurants','Non-halal restaurants'], prop={'size': 10})
plt.show()

# barplot of categorical be_halal
plt.figure(figsize=(14,8))
g = sns.countplot(x='halal', hue='be_halal', data=grouped_reviews_df)
g.set_title('Distribution of "is halal" mentions in reviews', size=14)
g_labels = g.set_xticklabels(g.get_xticklabels(), rotation=45, horizontalalignment='right')
for p in g.patches:
    g.annotate('{:.0f}'.format(p.get_height()), (p.get_x()+0.3, p.get_height()),
               ha='center', va='bottom', color='black')
```
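One caveat with the [LEMMA=be, LOWER=halal] pattern is that it also fires on negated statements such as "is not halal" or "isn't halal". A possible refinement, sketched below with the same Matcher API and the existing 'doc' column, counts those negated matches separately (the `be_not_halal_count` column is an illustrative addition, not part of the original analysis):

```
# Sketch: count negated forms ("is not halal", "is n't halal") separately.
neg_matcher = Matcher(nlp.vocab)
neg_matcher.add('be_not_halal', None, [{'LEMMA': 'be'}, {'LOWER': 'not'}, {'LOWER': 'halal'}])
neg_matcher.add('be_not_halal', None, [{'LEMMA': 'be'}, {'LOWER': "n't"}, {'LOWER': 'halal'}])

for i, doc in grouped_reviews_df['doc'].iteritems():
    grouped_reviews_df.loc[i, 'be_not_halal_count'] = len(neg_matcher(doc))

print(grouped_reviews_df['be_not_halal_count'].describe())
```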
### 3.2. Match the patterns 'halal guys' / 'halal truck' and count their share of all mentions of 'halal'

```
# initialize matcher
matcher = Matcher(nlp.vocab)

# find all occurrences of 'halal'
pattern = [{'LOWER': 'halal'}]
matcher.add('halal', None, pattern)

for i, doc in grouped_reviews_df['doc'].iteritems():
    match = matcher(doc)
    grouped_reviews_df.loc[i, 'halal_match_count'] = len(match)

# reinitialize matcher
matcher = Matcher(nlp.vocab)

# match 'halal guys' and 'halal truck(s)'
pattern1 = [{'LOWER': 'halal'}, {'LOWER': 'guys'}]
pattern2 = [{'LOWER': 'halal'}, {'LOWER': 'truck'}]
pattern3 = [{'LOWER': 'halal'}, {'LOWER': 'trucks'}]
matcher.add('halal_guys', None, pattern1)
matcher.add('halal_guys', None, pattern2)
matcher.add('halal_guys', None, pattern3)

# match and print the first few sentences
c = 10
for i, doc in grouped_reviews_df['doc'].iteritems():
    match = matcher(doc)
    grouped_reviews_df.loc[i, 'halal_guys_count'] = len(match)
    if c > 0:
        for match_id, start, end in match:
            print(doc[start:end].sent)
        c -= 1

grouped_reviews_df['halal_guys_percent'] = grouped_reviews_df.apply(lambda row: row['halal_guys_count'] / row['halal_match_count'], axis=1)

# plot distribution of 'halal guys' mentions out of all 'halal' mentions for halal vs. non-halal restaurants
halal_target = grouped_reviews_df[grouped_reviews_df['halal']]['halal_guys_percent']
non_halal_target = grouped_reviews_df[~grouped_reviews_df['halal']]['halal_guys_percent']

fig = plt.figure(figsize=(14,8))
g1 = sns.distplot(halal_target*100, kde=False, norm_hist=True)
g2 = sns.distplot(non_halal_target*100, kde=False, norm_hist=True)
g1.set_title('Distribution of halal guys/halal mentions out of all reviews for restaurants-of-interest', size=14)
g1.set_xlabel('% of halal guys mentions out of all mentions of halal', size=14)
g1.set_ylabel('Count', size=14)
plt.legend(labels=['Halal restaurants','Non-halal restaurants'], prop={'size': 10})
plt.show()
```
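The ratio above assumes every restaurant has at least one token-level 'halal' match; if tokenization ever yields zero matches (for example when 'halal' only appears inside a hyphenated token), the ratio becomes undefined. A defensive variant, sketched here, guards against that case:

```
# Sketch: guard the ratio against a zero halal_match_count.
denom = grouped_reviews_df['halal_match_count'].replace(0, np.nan)
grouped_reviews_df['halal_guys_percent'] = (grouped_reviews_df['halal_guys_count'] / denom).fillna(0.0)
```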
### 3.3. Mention of 'halal chicken' vs. 'halal burger'

```
# initialize matcher
matcher = Matcher(nlp.vocab)

# contains 'halal chicken'
pattern1 = [{'LOWER': 'halal'}, {'LOWER': 'chicken'}]
matcher.add('chicken', None, pattern1)

for i, doc in grouped_reviews_df['doc'].iteritems():
    match = matcher(doc)
    grouped_reviews_df.loc[i, 'chicken'] = True if len(match) else False

# initialize matcher
matcher = Matcher(nlp.vocab)

# contains 'halal burger'
pattern2 = [{'LOWER': 'halal'}, {'LOWER': 'burger'}]
matcher.add('burger', None, pattern2)

for i, doc in grouped_reviews_df['doc'].iteritems():
    match = matcher(doc)
    grouped_reviews_df.loc[i, 'burger'] = True if len(match) else False

# initialize matcher
matcher = Matcher(nlp.vocab)

# contains 'CreekStone': a company with halal-certified steaks and beef, but these
# products are often cooked on the grill alongside non-halal ingredients
pattern2 = [{'LOWER': 'creekstone'}]
matcher.add('creekstone', None, pattern2)

for i, doc in grouped_reviews_df['doc'].iteritems():
    match = matcher(doc)
    grouped_reviews_df.loc[i, 'creekstone'] = True if len(match) else False

# barplot of chicken, burger and CreekStone
fig, axes = plt.subplots(ncols=3, nrows=1, figsize=(14,10))
columns = ['chicken', 'burger', 'creekstone']
for col, ax in zip(columns, axes.flat):
    g = sns.countplot(x='halal', hue=col, data=grouped_reviews_df, ax=ax)
    g_labels = g.set_xticklabels(g.get_xticklabels(), rotation=45, horizontalalignment='right')
    for p in g.patches:
        g.annotate('{:.0f}'.format(p.get_height()), (p.get_x()+0.3, p.get_height()),
                   ha='center', va='bottom', color='black')
    ax.set_xlabel('halal', size=14)
    ax.set_xticklabels(labels=ax.get_xticks(), rotation=45, ha='right')
plt.tight_layout()
plt.show()
```

- When combined with 'halal', the terms 'chicken' and 'burger' do not provide significant information.
- Consider trying more combinations with 'chicken', such as 'chicken over rice', or chicken as a DEP=compound noun.
- CreekStone is roughly twice as likely to be mentioned for a non-halal restaurant as for a halal one.

### 3.4. Mention of '100% halal'

```
# initialize matcher
matcher = Matcher(nlp.vocab)

# contains '100% halal'
pattern3 = [{'IS_DIGIT': True, 'LOWER': '100'}, {'IS_PUNCT': True}, {'LOWER': 'halal'}]
matcher.add('100%', None, pattern3)

for i, doc in grouped_reviews_df['doc'].iteritems():
    match = matcher(doc)
    grouped_reviews_df.loc[i, '100%'] = True if len(match) else False

plt.figure(figsize=(14,8))
g = sns.countplot(x='halal', hue='100%', data=grouped_reviews_df)
g.set_title('Mention of 100% halal', size=14)
g_labels = g.set_xticklabels(g.get_xticklabels(), rotation=45, horizontalalignment='right')
for p in g.patches:
    g.annotate('{:.0f}'.format(p.get_height()), (p.get_x()+0.3, p.get_height()),
               ha='center', va='bottom', color='black')
```

### 3.5. Mention of non-halal terms
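Section 3.5 was left without a code cell. A possible starting point is sketched below; the term list is illustrative only and was not part of the original analysis:

```
# Sketch for section 3.5: flag restaurants whose reviews mention clearly
# non-halal items. The term list is a placeholder, not a vetted vocabulary.
matcher = Matcher(nlp.vocab)
for term in ['pork', 'bacon', 'ham', 'alcohol', 'wine', 'beer']:
    matcher.add('non_halal', None, [{'LOWER': term}])

for i, doc in grouped_reviews_df['doc'].iteritems():
    match = matcher(doc)
    grouped_reviews_df.loc[i, 'non_halal_terms'] = True if len(match) else False

plt.figure(figsize=(14,8))
g = sns.countplot(x='halal', hue='non_halal_terms', data=grouped_reviews_df)
g.set_title('Mention of non-halal terms', size=14)
```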
## 4. Examine the term frequency in the labeling notes

```
import spacy.attrs

halal_dicts = []
non_halal_dicts = []

# count lemma frequencies over all labeling notes
# (commented out; the two examples below inspect individual notes instead)
# for i, note in target_df['note_1'].iteritems():
#     if note:
#         doc = nlp(str(note))
#         counts_dict = doc.count_by(spacy.attrs.IDS['LEMMA'])
#         halal_dicts.append(counts_dict)

doc = nlp(target_df['note_1'].iloc[704].lower())
counts_dict = doc.count_by(spacy.attrs.IDS['LEMMA'])

# print the human-readable lemma, its hash and its count
for lemma, count in counts_dict.items():
    human_readable_tag = doc.vocab[lemma].text
    print(human_readable_tag, lemma, count)

doc = nlp(target_df['note_1'].iloc[705].lower())
counts_dict = doc.count_by(spacy.attrs.IDS['LEMMA'])

# print the human-readable lemma, its hash and its count
for lemma, count in counts_dict.items():
    human_readable_tag = doc.vocab[lemma].text
    print(human_readable_tag, lemma, count)
```
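With the engineered columns in place, they can be collected into one table and screened against the label. The sketch below only assembles the features and checks simple correlations; no model is fit, and the column names are the ones created in the cells above:

```
# Sketch: assemble the engineered features and check how each correlates
# with the halal label.
feature_cols = ['halal_in_name', 'halal_review_percent', 'be_halal_count',
                'halal_guys_percent', 'chicken', 'burger', 'creekstone', '100%']
X = grouped_reviews_df[feature_cols].astype(float)
y = grouped_reviews_df['halal'].astype(int)

print(X.corrwith(y).sort_values(ascending=False))
```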
```
import pandas as pd
import numpy as np

import biogeme.database as db
import biogeme.biogeme as bio
import biogeme.models as models
import biogeme.messaging as message
from biogeme.expressions import Beta
```

# Intra-zonal trips

## Parameter estimation

Assignment of intra-zonal trips is not possible with common methods of transport modelling. A Logit regression based on zonal attributes is required.

```
input_path = '../input/'
output_path = '../output/'
model_path = '../model/'
```

### Model formulation

The Logit regression model consists of observable utility functions, one for each mode j:

> V_ij = ASC_ij + b_ac_i * AC_j + b_pop_i * POP + b_cars_i * CARS

with the explanatory variables

* AC: accessibility as average distance to and from PT stops in km, or binary for car usage
* POP: population density
* CARS: car ownership density
* ROADS: road density in km/km² -- not implemented
* INCOME: household income -- not implemented

Index i marks the demand group: I = {'commuting' (1), 'education' (2), 'shopping/medical' (3), 'official' (4), 'private' (6)}

```
# Load calibration data set
df = pd.read_csv(input_path + 'transport_demand/calibration_intra-cellular_trips_MiD2017.csv')
print(df.shape)

col_dict = {'mode_model': 'MODE',
            'purpose_vp': 'PURPOSE',
            'pop_density': 'POP',
            'car_density': 'CARS',
            'accessibility_rail': 'AC_RAIL',
            'accessibility_bus': 'AC_BUS',
            'accessibility_car': 'AC_CAR',
            'accessibility_walk': 'AC_NM'}
df.rename(columns=col_dict, inplace=True)

# Remove unused columns
df = df[[col for _, col in col_dict.items()]]

# Remove trips where the mode is car but the car availability is zero,
# because a chosen alternative that is unavailable breaks the MLE algorithm
mask = ((df['MODE']==6) & (df['AC_CAR']==0))
print('Share of car trips dropped: {}. New number of observations is {}'.format(
    len(df.loc[mask])/len(df.loc[df['MODE']==6]), len(df.loc[~mask])))
df = df.loc[~mask]
```

### Build the calibration model with Biogeme

```
database = db.Database('MiD', df.copy())
globals().update(database.variables)
database.getSampleSize()

# Define Betas
asc_rail = Beta('asc_rail', 0, None, None, 0)
asc_bus = Beta('asc_bus', 0, None, None, 0)
asc_car = Beta('asc_car', 0, None, None, 1)
asc_nm = Beta('asc_nm', 0, None, None, 0)

b_ac_rail = Beta('b_ac_rail', 0, None, None, 0)
b_pop_rail = Beta('b_pop_rail', 0, None, None, 0)
b_cars_rail = Beta('b_cars_rail', 0, None, None, 0)

b_ac_bus = Beta('b_ac_bus', 0, None, None, 0)
b_pop_bus = Beta('b_pop_bus', 0, None, None, 0)
b_cars_bus = Beta('b_cars_bus', 0, None, None, 0)

b_ac_car = Beta('b_ac_car', 0, None, None, 0)
b_pop_car = Beta('b_pop_car', 0, None, None, 0)
b_cars_car = Beta('b_cars_car', 0, None, None, 0)

b_ac_nm = Beta('b_ac_nm', 0, None, None, 0)
b_pop_nm = Beta('b_pop_nm', 0, None, None, 0)
b_cars_nm = Beta('b_cars_nm', 0, None, None, 0)

# Parameter for the nested logit structure
mu_pt = Beta('mu_pt', 1, 1, 10, 0)

# Utility functions
V_RAIL = asc_rail + b_ac_rail * AC_RAIL + b_pop_rail * POP + b_cars_rail * CARS
V_BUS = asc_bus + b_ac_bus * AC_BUS + b_pop_bus * POP + b_cars_bus * CARS
V_CAR = asc_car + b_ac_car * AC_CAR + b_pop_car * POP + b_cars_car * CARS
V_NM = asc_nm + b_ac_nm * AC_NM + b_pop_nm * POP + b_cars_nm * CARS

# Define level of verbosity
logger = message.bioMessage()
#logger.setSilent()
logger.setWarning()
#logger.setGeneral()
#logger.setDetailed()

# Map modes to utility functions
V = {1:V_RAIL, 2:V_RAIL, 4:V_BUS, 6:V_CAR, 7:V_NM}

# Map the availability of alternatives with MODE as key
# Except for the car, it is always one
av = {1:1, 2:1, 4:1, 6:AC_CAR, 7:1}
# Mode nests as tuples of (nest parameter, list of alternative IDs);
# an alternative missing from a nest has an alpha of zero
nests = ((mu_pt, [1, 2, 4]),  # PT
         (1, [6]),            # Car
         (1, [7]))            # Non-motorised

# Choose the logarithmic nested logit model
nl = models.lognested(V, av, nests, MODE)

# All purposes
model_nl = bio.BIOGEME(database, nl)
model_nl.modelName = 'NL'
results = model_nl.estimate()

# Write results to a file
writer = pd.ExcelWriter(input_path + 'estimation_results_inner_cell.xlsx', engine='xlsxwriter')
params = results.getEstimatedParameters()
for key, val in results.getGeneralStatistics().items():
    params.loc[key] = [val[0], val[1]] + ['' for i in range(len(params.columns)-2)]
params
params.to_excel(writer, sheet_name=model_nl.modelName)

# Run all purposes
results = []
for p in [1,2,3,4,6]:
    database = db.Database('MiD2017', df.copy())
    database.remove(PURPOSE!=p)
    print('Sample size for purpose {}: {}'.format(p, database.getSampleSize()))
    model = bio.BIOGEME(database, nl)   # Choose the model formulation
    model.modelName = 'NL_Fz' + str(p)  # Name it
    results.append(model.estimate())    # Estimation
    output = results[-1].getEstimatedParameters()
    # Add results to the Excel file
    for key, val in results[-1].getGeneralStatistics().items():
        output.loc[key] = [val[0], val[1]] + ['' for i in range(len(output.columns)-2)]
    output.to_excel(writer, sheet_name=model.modelName)
writer.save()
```
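Once the workbook has been written, the purpose-specific estimates can be lined up side by side. The sketch below assumes the file written above exists and that Biogeme's parameter table has a 'Value' column; other Biogeme versions may label the columns differently.

```
# Sketch: read the estimation workbook back and compare parameter values
# across the purpose-specific models (the 'Value' column name is assumed).
sheets = pd.read_excel(input_path + 'estimation_results_inner_cell.xlsx',
                       sheet_name=None, index_col=0)
comparison = pd.DataFrame({name: sheet['Value'] for name, sheet in sheets.items()})
print(comparison.loc[['asc_rail', 'asc_bus', 'mu_pt']])
```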