```
import pandas as pd
import numpy as np
import nibabel as nib
import nbimporter              # allows importing helpers defined in a notebook
from functions import *       # local helpers: count, main, mean_all, ...
import matplotlib.pyplot as plt
%matplotlib inline
import os
import subprocess
import time                    # used for timing in section 10
```
# 1- make structure for files
```
if not os.path.exists('/home/mahdi/Desktop/valid'):
    os.makedirs('/home/mahdi/Desktop/valid')
    for i in range(1, 82):
        os.makedirs('/home/mahdi/Desktop/valid/'+str(i))
        os.makedirs('/home/mahdi/Desktop/valid/'+str(i)+'/main_seg')
        os.makedirs('/home/mahdi/Desktop/valid/'+str(i)+'/sct_seg')
        os.makedirs('/home/mahdi/Desktop/valid/'+str(i)+'/my_seg/first_ref')
        os.makedirs('/home/mahdi/Desktop/valid/'+str(i)+'/my_seg/zero_ref')
        os.makedirs('/home/mahdi/Desktop/valid/'+str(i)+'/my_seg/mean_ref')
```
# 2- resize main data to valid folder
```
def resize(file_path: str, output_direction):
    if not (file_path.endswith(".nii") or file_path.endswith(".nii.gz")):
        raise ValueError(
            f"Nifti file path must end with .nii or .nii.gz, got {file_path}."
        )
    img = nib.load(file_path)
    img_data = img.get_fdata()
    # crop the in-plane FOV to 64x64, keep all slices and volumes
    img_data = img_data[23:87, 23:87, :, :]
    header = img.header
    # edit the header for the new shape
    header['dim'][1:5] = img_data.shape
    img_mask_affine = img.affine
    img_reshape = nib.Nifti1Image(img_data, affine=img_mask_affine, header=header)
    return nib.save(img_reshape, output_direction)

D7_dir = '/home/mahdi/Desktop/data_selection_D7'
n, name = count(D7_dir)
out_dir = '/home/mahdi/Desktop/valid'
n1, name1 = count(out_dir)
name1
name[10]
name1[10]
for i in range(n):
    resize(D7_dir+'/'+name[i][0], out_dir+'/'+name1[i][0]+'/'+'main_seg'+'/'+name[i][0])
```
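`count` comes from the local `functions` module (imported with the wildcard above) and is not defined in this notebook. From the call sites it appears to return the number of entries in a directory together with their names, indexed as `name[i][0]`. A minimal sketch under that assumption (the real helper may differ):

```
import os

def count(directory):
    """Hypothetical stand-in for functions.count:
    returns (number of entries, list of [name] lists)."""
    names = sorted(os.listdir(directory))
    # call sites index the result as name[i][0], so wrap each entry in a list
    return len(names), [[entry] for entry in names]
```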
# 3- make mean data
```
for j in range(20, 81):
    data_dir = out_dir+'/'+name1[j][0]+'/'+'main_seg'+'/'+name[j][0]
    out = '/home/mahdi/Desktop/valid/'+name1[j][0]+'/main_seg/mean.nii'
    # temporal mean of the 4-D series with SCT
    subprocess.Popen(['sct_maths', '-i', data_dir, '-mean', 't', '-o', out])
```
# 4- get result from my algo to related folder
### mean ref
```
maximum_intensity = 1705
model = '200epoch_with_val.h5'
for i in range(40, n):
    x = len(name[i][0]) - 7          # strip '.nii.gz' from the filename
    output_direction_mean = '/home/mahdi/Desktop/valid/'+name1[i][0]+'/my_seg/mean_ref/'+name[i][0][:x]+'_plus.'+name[i][0][-6:]
    input_direction = out_dir+'/'+name1[i][0]+'/'+'main_seg'+'/'+name[i][0]
    reference = '/home/mahdi/Desktop/valid/'+name1[i][0]+'/main_seg/mean.nii'
    main(input_direction, reference, output_direction_mean, maximum_intensity, model)
```
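`main` is imported from the local `functions` module (via `nbimporter`) and is not shown in this notebook. From the call sites, its `reference` argument is overloaded: either a volume index passed as a string ('0', the middle volume, etc.) or a path to a mean image. A hypothetical interface stub, only to document the assumed signature:

```
def main(input_direction, reference, output_direction, maximum_intensity, model):
    """Hypothetical signature of functions.main (the real implementation is not shown).

    input_direction   : path to the 4-D fMRI NIfTI to motion-correct
    reference         : volume index as a string (e.g. '0') or path to a mean image
    output_direction  : where the corrected series is written
    maximum_intensity : intensity used to normalise the data before inference
    model             : Keras model file (.h5) used for the correction
    """
    raise NotImplementedError("placeholder; see the functions module")
```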
### zero ref
```
for i in range(n):
    x = len(name[i][0]) - 7
    output_direction_mean = '/home/mahdi/Desktop/valid/'+name1[i][0]+'/my_seg/zero_ref/'+name[i][0][:x]+'_plus.'+name[i][0][-6:]
    input_direction = out_dir+'/'+name1[i][0]+'/'+'main_seg'+'/'+name[i][0]
    reference = '0'                   # first volume as the registration reference
    main(input_direction, reference, output_direction_mean, maximum_intensity, model)

# single-subject checks (i = 8)
i = 8
maximum_intensity = 1705
model = '200epoch_with_val.h5'
x = len(name[i][0]) - 7
output_direction_mean = '/home/mahdi/Desktop/'+name[i][0][:x]+'_plus_mid.'+name[i][0][-6:]
input_direction = out_dir+'/'+name1[i][0]+'/'+'main_seg'+'/'+name[i][0]
reference = '101'
main(input_direction, reference, output_direction_mean, maximum_intensity, model)
img = nib.load(input_direction)
img.shape

i = 8
maximum_intensity = 1705
model = '200epoch_with_val.h5'
x = len(name[i][0]) - 7
output_direction_mean = '/home/mahdi/Desktop/'+name[i][0][:x]+'_plus_zero_mean.'+name[i][0][-6:]
input_direction = out_dir+'/'+name1[i][0]+'/'+'main_seg'+'/'+name[i][0]
reference = '/home/mahdi/Desktop/valid/'+name1[i][0]+'/main_seg/mean.nii'
main(input_direction, reference, output_direction_mean, maximum_intensity, model)
name[8][0]
```
### mid ref
```
for i in range(n):
    x = len(name[i][0]) - 7
    output_direction_mean = '/home/mahdi/Desktop/valid/'+name1[i][0]+'/my_seg/first_ref/'+name[i][0][:x]+'_plus.'+name[i][0][-6:]
    input_direction = out_dir+'/'+name1[i][0]+'/'+'main_seg'+'/'+name[i][0]
    y = int(nib.load(input_direction).shape[3] / 2)   # middle volume index
    reference = str(y)
    main(input_direction, reference, output_direction_mean, maximum_intensity, model)

# single-subject check with a different model (i = 8)
i = 8
maximum_intensity = 1705
model = '200epoch_with_val_landa_0.01.h5'
x = len(name[i][0]) - 7
output_direction_mean = '/home/mahdi/Desktop/'+name[i][0][:x]+'_plus.'+name[i][0][-6:]
input_direction = out_dir+'/'+name1[i][0]+'/'+'main_seg'+'/'+name[i][0]
y = int(nib.load(input_direction).shape[3] / 2)
reference = str(y)
main(input_direction, reference, output_direction_mean, maximum_intensity, model)
```
# 5- resize sct data
```
for j in range(n):
    input_dir = '/home/mahdi/Desktop/valid/'+name1[j][0]+'/sct_seg/fmri_rmvol_moco.nii.gz'
    # write the crop back in place; use a separate variable so out_dir
    # ('/home/mahdi/Desktop/valid') is not overwritten for the later sections
    out_file = '/home/mahdi/Desktop/valid/'+name1[j][0]+'/sct_seg/fmri_rmvol_moco.nii.gz'
    resize(input_dir, out_file)
```
# 6- make TSNR data
### For main
```
for j in range(20, n):
    data_dir = out_dir+'/'+name1[j][0]+'/'+'main_seg'+'/'+name[j][0]
    out = '/home/mahdi/Desktop/valid/'+name1[j][0]+'/main_seg/tsnr.nii'
    subprocess.Popen(['sct_fmri_compute_tsnr', '-i', data_dir, '-o', out])
```
### For mine
```
for j in range(n):
    x = len(name[j][0]) - 7
    data_dir = '/home/mahdi/Desktop/valid/'+name1[j][0]+'/my_seg/mean_ref/'+name[j][0][:x]+'_plus.'+name[j][0][-6:]
    out = '/home/mahdi/Desktop/valid/'+name1[j][0]+'/my_seg/mean_ref/tsnr.nii'
    subprocess.Popen(['sct_fmri_compute_tsnr', '-i', data_dir, '-o', out])

for j in range(n):
    x = len(name[j][0]) - 7
    data_dir = '/home/mahdi/Desktop/valid/'+name1[j][0]+'/my_seg/zero_ref/'+name[j][0][:x]+'_plus.'+name[j][0][-6:]
    out = '/home/mahdi/Desktop/valid/'+name1[j][0]+'/my_seg/zero_ref/tsnr.nii'
    subprocess.Popen(['sct_fmri_compute_tsnr', '-i', data_dir, '-o', out])

for j in range(n):
    x = len(name[j][0]) - 7
    data_dir = '/home/mahdi/Desktop/valid/'+name1[j][0]+'/my_seg/first_ref/'+name[j][0][:x]+'_plus.'+name[j][0][-6:]
    out = '/home/mahdi/Desktop/valid/'+name1[j][0]+'/my_seg/first_ref/tsnr.nii'
    subprocess.Popen(['sct_fmri_compute_tsnr', '-i', data_dir, '-o', out])
```
### For sct
```
for j in range(n):
    data_dir = out_dir+'/'+name1[j][0]+'/'+'sct_seg'+'/'+'fmri_rmvol_moco.nii.gz'
    out = '/home/mahdi/Desktop/valid/'+name1[j][0]+'/sct_seg/tsnr.nii'
    subprocess.Popen(['sct_fmri_compute_tsnr', '-i', data_dir, '-o', out])
```
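For reference, `sct_fmri_compute_tsnr` computes the temporal signal-to-noise ratio voxel-wise as the temporal mean divided by the temporal standard deviation. A minimal NumPy equivalent (a sketch, not a replacement for the SCT tool):

```
import nibabel as nib
import numpy as np

def tsnr_map(fmri_path, out_path):
    """Voxel-wise tSNR = mean over time / std over time (sketch)."""
    img = nib.load(fmri_path)
    data = img.get_fdata()                       # shape (x, y, z, t)
    std = data.std(axis=3)
    tsnr = np.divide(data.mean(axis=3), std, out=np.zeros_like(std), where=std > 0)
    nib.save(nib.Nifti1Image(tsnr, img.affine), out_path)
```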
# 7- resize and copy segmentation file
```
def resize_seg(file_path: str, output_direction):
    if not (file_path.endswith(".nii") or file_path.endswith(".nii.gz")):
        raise ValueError(
            f"Nifti file path must end with .nii or .nii.gz, got {file_path}."
        )
    img = nib.load(file_path)
    img_data = img.get_fdata()
    # crop the 3-D segmentation to the same 64x64 in-plane FOV as the fMRI data
    img_data = img_data[23:87, 23:87, :]
    header = img.header
    # edit the header for the new shape
    header['dim'][1:4] = img_data.shape
    img_mask_affine = img.affine
    img_reshape = nib.Nifti1Image(img_data, affine=img_mask_affine, header=header)
    return nib.save(img_reshape, output_direction)
```
### resize csf part
```
for i in range(n):
    data_dir1 = out_dir+'/'+name1[i][0]+'/'+'mask_seg2_Dil_CSF.nii.gz'
    output1 = '/home/mahdi/Desktop/valid/'+name1[i][0]+'/sct_seg/csf_part.nii'
    output2 = '/home/mahdi/Desktop/valid/'+name1[i][0]+'/main_seg/csf_part.nii'
    output3_1 = '/home/mahdi/Desktop/valid/'+name1[i][0]+'/my_seg/zero_ref/csf_part.nii'
    output3_2 = '/home/mahdi/Desktop/valid/'+name1[i][0]+'/my_seg/first_ref/csf_part.nii'
    output3_3 = '/home/mahdi/Desktop/valid/'+name1[i][0]+'/my_seg/mean_ref/csf_part.nii'
    resize_seg(data_dir1, output1)
    resize_seg(data_dir1, output2)
    resize_seg(data_dir1, output3_1)
    resize_seg(data_dir1, output3_2)
    resize_seg(data_dir1, output3_3)
```
### resize spine part
```
for i in range(n):
    data_dir2 = out_dir+'/'+name1[i][0]+'/'+'tmp_t2s_seg_reg.nii.gz'
    output4 = '/home/mahdi/Desktop/valid/'+name1[i][0]+'/sct_seg/spine_part.nii'
    output5 = '/home/mahdi/Desktop/valid/'+name1[i][0]+'/main_seg/spine_part.nii'
    output6_1 = '/home/mahdi/Desktop/valid/'+name1[i][0]+'/my_seg/zero_ref/spine_part.nii'
    output6_2 = '/home/mahdi/Desktop/valid/'+name1[i][0]+'/my_seg/first_ref/spine_part.nii'
    output6_3 = '/home/mahdi/Desktop/valid/'+name1[i][0]+'/my_seg/mean_ref/spine_part.nii'
    resize_seg(data_dir2, output4)
    resize_seg(data_dir2, output5)
    resize_seg(data_dir2, output6_1)
    resize_seg(data_dir2, output6_2)
    resize_seg(data_dir2, output6_3)
```
# 8- edit all segmentation
# 9- create mask
### sct mask
```
for i in range(n):
    second_1 = '/home/mahdi/Desktop/valid/'+name1[i][0]+'/sct_seg/tsnr.nii'
    input1 = '/home/mahdi/Desktop/valid/'+name1[i][0]+'/sct_seg/csf_part.nii'
    output1 = '/home/mahdi/Desktop/valid/'+name1[i][0]+'/sct_seg/csf_mask.nii'
    # mask the tSNR map with the CSF / spine segmentations
    subprocess.Popen(['sct_maths', '-i', input1, '-mul', second_1, '-o', output1])
    input1 = '/home/mahdi/Desktop/valid/'+name1[i][0]+'/sct_seg/spine_part.nii'
    output1 = '/home/mahdi/Desktop/valid/'+name1[i][0]+'/sct_seg/spine_mask.nii'
    subprocess.Popen(['sct_maths', '-i', input1, '-mul', second_1, '-o', output1])
```
### main mask
```
for i in range(n):
    second_2 = '/home/mahdi/Desktop/valid/'+name1[i][0]+'/main_seg/tsnr.nii'
    input2 = '/home/mahdi/Desktop/valid/'+name1[i][0]+'/sct_seg/csf_part.nii'
    output2 = '/home/mahdi/Desktop/valid/'+name1[i][0]+'/main_seg/csf_mask.nii'
    subprocess.Popen(['sct_maths', '-i', input2, '-mul', second_2, '-o', output2])
    input2 = '/home/mahdi/Desktop/valid/'+name1[i][0]+'/sct_seg/spine_part.nii'
    output2 = '/home/mahdi/Desktop/valid/'+name1[i][0]+'/main_seg/spine_mask.nii'
    subprocess.Popen(['sct_maths', '-i', input2, '-mul', second_2, '-o', output2])
```
## my mask
### zero_ref
```
for i in range(40, n):
    second3_3 = '/home/mahdi/Desktop/valid/'+name1[i][0]+'/my_seg/zero_ref/tsnr.nii'
    input3_3 = '/home/mahdi/Desktop/valid/'+name1[i][0]+'/sct_seg/csf_part.nii'
    output3_3 = '/home/mahdi/Desktop/valid/'+name1[i][0]+'/my_seg/zero_ref/csf_mask.nii'
    subprocess.Popen(['sct_maths', '-i', input3_3, '-mul', second3_3, '-o', output3_3])
    input3_3 = '/home/mahdi/Desktop/valid/'+name1[i][0]+'/sct_seg/spine_part.nii'
    output3_3 = '/home/mahdi/Desktop/valid/'+name1[i][0]+'/my_seg/zero_ref/spine_mask.nii'
    subprocess.Popen(['sct_maths', '-i', input3_3, '-mul', second3_3, '-o', output3_3])
```
### mid_ref
```
for i in range(40, n):
    second3_3 = '/home/mahdi/Desktop/valid/'+name1[i][0]+'/my_seg/first_ref/tsnr.nii'
    input3_3 = '/home/mahdi/Desktop/valid/'+name1[i][0]+'/sct_seg/csf_part.nii'
    output3_3 = '/home/mahdi/Desktop/valid/'+name1[i][0]+'/my_seg/first_ref/csf_mask.nii'
    subprocess.Popen(['sct_maths', '-i', input3_3, '-mul', second3_3, '-o', output3_3])
    input3_3 = '/home/mahdi/Desktop/valid/'+name1[i][0]+'/sct_seg/spine_part.nii'
    output3_3 = '/home/mahdi/Desktop/valid/'+name1[i][0]+'/my_seg/first_ref/spine_mask.nii'
    subprocess.Popen(['sct_maths', '-i', input3_3, '-mul', second3_3, '-o', output3_3])
```
### mean_ref
```
for i in range(40, n):
    second3_3 = '/home/mahdi/Desktop/valid/'+name1[i][0]+'/my_seg/mean_ref/tsnr.nii'
    input3_3 = '/home/mahdi/Desktop/valid/'+name1[i][0]+'/sct_seg/csf_part.nii'
    output3_3 = '/home/mahdi/Desktop/valid/'+name1[i][0]+'/my_seg/mean_ref/csf_mask.nii'
    subprocess.Popen(['sct_maths', '-i', input3_3, '-mul', second3_3, '-o', output3_3])
    input3_3 = '/home/mahdi/Desktop/valid/'+name1[i][0]+'/sct_seg/spine_part.nii'
    output3_3 = '/home/mahdi/Desktop/valid/'+name1[i][0]+'/my_seg/mean_ref/spine_mask.nii'
    subprocess.Popen(['sct_maths', '-i', input3_3, '-mul', second3_3, '-o', output3_3])
```
# 10-Tsnr mean and compare result
```
csf_main1 = []
csf_sct1 = []
csf_mine1 = []
csf_mine2 = []
csf_mine3 = []
spine_main1 = []
spine_mine1 = []
spine_mine2 = []
spine_mine3 = []
spine_sct1 = []
start_time = time.time()
for i in range(n):
    output_sct_csf = '/home/mahdi/Desktop/valid/'+name1[i][0]+'/sct_seg/csf_mask.nii'
    output_main_csf = '/home/mahdi/Desktop/valid/'+name1[i][0]+'/main_seg/csf_mask.nii'
    output_mine_zeroref_csf = '/home/mahdi/Desktop/valid/'+name1[i][0]+'/my_seg/zero_ref/csf_mask.nii'
    output_mine_midref_csf = '/home/mahdi/Desktop/valid/'+name1[i][0]+'/my_seg/first_ref/csf_mask.nii'
    output_mine_meanref_csf = '/home/mahdi/Desktop/valid/'+name1[i][0]+'/my_seg/mean_ref/csf_mask.nii'
    output_sct_spine = '/home/mahdi/Desktop/valid/'+name1[i][0]+'/sct_seg/spine_mask.nii'
    output_main_spine = '/home/mahdi/Desktop/valid/'+name1[i][0]+'/main_seg/spine_mask.nii'
    output_mine_zeroref_spine = '/home/mahdi/Desktop/valid/'+name1[i][0]+'/my_seg/zero_ref/spine_mask.nii'
    output_mine_midref_spine = '/home/mahdi/Desktop/valid/'+name1[i][0]+'/my_seg/first_ref/spine_mask.nii'
    output_mine_meanref_spine = '/home/mahdi/Desktop/valid/'+name1[i][0]+'/my_seg/mean_ref/spine_mask.nii'
    csf_main = mean_all(output_main_csf)
    csf_main1.append(csf_main)
    csf_sct = mean_all(output_sct_csf)
    csf_sct1.append(csf_sct)
    csf_mine = mean_all(output_mine_zeroref_csf)
    csf_mine1.append(csf_mine)
    csf_mine_mid = mean_all(output_mine_midref_csf)
    csf_mine2.append(csf_mine_mid)
    csf_mine_mean = mean_all(output_mine_meanref_csf)
    csf_mine3.append(csf_mine_mean)
    spine_main = mean_all(output_main_spine)
    spine_main1.append(spine_main)
    spine_sct = mean_all(output_sct_spine)
    spine_sct1.append(spine_sct)
    spine_mine = mean_all(output_mine_zeroref_spine)
    spine_mine1.append(spine_mine)
    spine_mine_mid = mean_all(output_mine_midref_spine)
    spine_mine2.append(spine_mine_mid)
    spine_mine_mean = mean_all(output_mine_meanref_spine)
    spine_mine3.append(spine_mine_mean)
csf_main = np.mean(csf_main1)
csf_sct = np.mean(csf_sct1)
csf_mine_zero = np.mean(csf_mine1)
csf_mine_mid = np.mean(csf_mine2)
csf_mine_mean = np.mean(csf_mine3)
spine_main = np.mean(spine_main1)
spine_sct = np.mean(spine_sct1)
spine_mine_zero = np.mean(spine_mine1)
spine_mine_mid = np.mean(spine_mine2)
spine_mine_mean = np.mean(spine_mine3)
print("--- %s seconds ---" % (time.time() - start_time))
pd.DataFrame([csf_main, csf_sct, csf_mine_zero, csf_mine_mid, csf_mine_mean,
              spine_main, spine_sct, spine_mine_zero, spine_mine_mid, spine_mine_mean],
             index=['main_csf_tsnr', 'sct_csf_tsnr', 'my_csf_tsnr_first', 'my_csf_tsnr_mid', 'my_csf_tsnr_mean',
                    'main_spine_tsnr', 'sct_spine_tsnr', 'my_spine_tsnr_first', 'my_spine_tsnr_mid', 'my_spine_tsnr_mean'])
# scratch cells
(4.038673+7.104279)/2
# print(p.shape)   # `p` is not defined in this notebook, so this line is left commented out
64*64
final = np.array([csf_main1, csf_sct1, csf_mine1, csf_mine2, csf_mine3,
                  spine_main1, spine_sct1, spine_mine1, spine_mine2, spine_mine3])
pd.DataFrame(final,
             index=['csf_nomoco', 'csf_sct', 'csf_myalgo_zero_ref', 'csf_myalgo_mid_ref', 'csf_myalgo_mean_ref',
                    'spine_nomoco', 'spine_sct', 'spine_myalgo_zero_ref', 'spine_myalgo_mid_ref', 'spine_myalgo_mean_ref']
             ).to_csv('/home/mahdi/Desktop/result/tsnr_result.csv')
```
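`mean_all` also comes from the local `functions` module and is not shown here. Given how it is used (one summary value per masked tSNR map), a plausible minimal version averages the nonzero voxels of the image; this is an assumption, not the author's implementation:

```
import nibabel as nib
import numpy as np

def mean_all(mask_path):
    """Hypothetical stand-in for functions.mean_all:
    mean of the nonzero voxels of a masked tSNR map."""
    data = nib.load(mask_path).get_fdata()
    nonzero = data[data != 0]
    return float(nonzero.mean()) if nonzero.size else 0.0
```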
# 11- calculate Dvars
### for main
```
for j in range(n):
    data_dir = out_dir+'/'+name1[j][0]+'/'+'main_seg'+'/'+name[j][0]
    out = '/home/mahdi/Desktop/valid/'+name1[j][0]+'/main_seg'
    subprocess.Popen(['fsl_motion_outliers', '-i', data_dir, '-s', out+'/Dvars', '-o', out+'/e'])
```
### for sct
```
for j in range(n):
    data_dir = out_dir+'/'+name1[j][0]+'/'+'sct_seg'+'/'+'fmri_rmvol_moco.nii.gz'
    out = '/home/mahdi/Desktop/valid/'+name1[j][0]+'/sct_seg'
    subprocess.Popen(['fsl_motion_outliers', '-i', data_dir, '-s', out+'/Dvars', '-o', out+'/e'])
```
### for mine
```
for j in range(n):
    x = len(name[j][0]) - 7
    data_dir = '/home/mahdi/Desktop/valid/'+name1[j][0]+'/my_seg/mean_ref/'+name[j][0][:x]+'_plus.'+name[j][0][-6:]
    out = '/home/mahdi/Desktop/valid/'+name1[j][0]+'/my_seg/mean_ref'
    subprocess.Popen(['fsl_motion_outliers', '-i', data_dir, '-s', out+'/Dvars', '-o', out+'/e'])

for j in range(n):
    x = len(name[j][0]) - 7
    data_dir = '/home/mahdi/Desktop/valid/'+name1[j][0]+'/my_seg/zero_ref/'+name[j][0][:x]+'_plus.'+name[j][0][-6:]
    out = '/home/mahdi/Desktop/valid/'+name1[j][0]+'/my_seg/zero_ref'
    subprocess.Popen(['fsl_motion_outliers', '-i', data_dir, '-s', out+'/Dvars', '-o', out+'/e'])

for j in range(n):
    x = len(name[j][0]) - 7
    data_dir = '/home/mahdi/Desktop/valid/'+name1[j][0]+'/my_seg/first_ref/'+name[j][0][:x]+'_plus.'+name[j][0][-6:]
    out = '/home/mahdi/Desktop/valid/'+name1[j][0]+'/my_seg/first_ref'
    subprocess.Popen(['fsl_motion_outliers', '-i', data_dir, '-s', out+'/Dvars', '-o', out+'/e'])
```
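DVARS, as written out by `fsl_motion_outliers -s`, measures how much the image intensity changes from one volume to the next: the root-mean-square over voxels of the temporal difference. A rough NumPy sketch of the unnormalised metric (the FSL tool additionally standardises and thresholds it):

```
import nibabel as nib
import numpy as np

def dvars(fmri_path):
    """Unnormalised DVARS: RMS over voxels of the volume-to-volume difference."""
    data = nib.load(fmri_path).get_fdata()            # (x, y, z, t)
    diff = np.diff(data, axis=3)                      # temporal difference
    return np.sqrt((diff ** 2).mean(axis=(0, 1, 2)))  # one value per transition
```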
# 12- Dvars
### main
```
main_Dvars = []
for j in range(n):
    data_dir = out_dir+'/'+name1[j][0]+'/'+'main_seg'+'/Dvars'
    main_Dvars.append(np.mean(pd.read_csv(data_dir)))
mean_main_Dvars = np.mean(main_Dvars)
pd.DataFrame(main_Dvars)
#pd.DataFrame(main_Dvars).to_csv('/home/mahdi/Desktop/result/main_Dvars.csv')
```
### sct
```
sct_Dvars = []
for j in range(n):
    data_dir = out_dir+'/'+name1[j][0]+'/'+'sct_seg'+'/Dvars'
    sct_Dvars.append(np.mean(pd.read_csv(data_dir)))
mean_sct_Dvars = np.mean(sct_Dvars)
pd.DataFrame(sct_Dvars)
#pd.DataFrame(sct_Dvars).to_csv('/home/mahdi/Desktop/result/sct_Dvars.csv')
```
## my algo
### zero ref
```
my_zero_Dvars = []
for j in range(n):
    data_dir = out_dir+'/'+name1[j][0]+'/my_seg/zero_ref/Dvars'
    my_zero_Dvars.append(np.mean(pd.read_csv(data_dir)))
mean_my_zero_Dvars = np.mean(my_zero_Dvars)
pd.DataFrame(my_zero_Dvars)
#pd.DataFrame(my_zero_Dvars).to_csv('/home/mahdi/Desktop/result/my_zero_Dvars.csv')
```
### mid_ref
```
my_mid_Dvars = []
for j in range(n):
    data_dir = out_dir+'/'+name1[j][0]+'/my_seg/first_ref/Dvars'
    my_mid_Dvars.append(np.mean(pd.read_csv(data_dir)))
mean_my_mid_Dvars = np.mean(my_mid_Dvars)
pd.DataFrame(my_mid_Dvars)
#pd.DataFrame(my_mid_Dvars).to_csv('/home/mahdi/Desktop/result/my_mid_Dvars.csv')
```
### mean ref
```
my_mean_Dvars = []
for j in range(n):
    data_dir = out_dir+'/'+name1[j][0]+'/my_seg/mean_ref/Dvars'
    my_mean_Dvars.append(np.mean(pd.read_csv(data_dir)))
mean_my_mean_Dvars = np.mean(my_mean_Dvars)
#pd.DataFrame(my_mean_Dvars)
#pd.DataFrame(my_mean_Dvars).to_csv('/home/mahdi/Desktop/result/my_mean_Dvars.csv')
```
## Dvars result
```
pd.DataFrame([mean_main_Dvars, mean_sct_Dvars, mean_my_zero_Dvars, mean_my_mid_Dvars, mean_my_mean_Dvars],
             index=['mean_main_Dvars', 'mean_sct_Dvars', 'mean_my_zero_Dvars', 'mean_my_mid_Dvars', 'mean_my_mean_Dvars'])
final_Dvars = np.array([main_Dvars, sct_Dvars, my_zero_Dvars, my_mid_Dvars, my_mean_Dvars])
final_Dvars = final_Dvars[:, :, 0]
pd.DataFrame(final_Dvars,
             index=['main_Dvars', 'sct_Dvars', 'myalgo_zero_Dvars', 'myalgo_mid_Dvars', 'myalgo_mean_Dvars']
             ).to_csv('/home/mahdi/Desktop/result/Dvars_result.csv')
```
### Life Expectancy Linear Regression
- Preprocessing
- Outlier processing
- Missing data preprocessing
- Scaler<br><br>
- Model
- Linear Regression
- Decision Tree Regressor
- XGBoost Regressor
- RandomForest Regressor<br><br>
- Cross Validation
- KFold<br><br>
- OLS
- RMSE
- R-squared
- P-value<br><br>
- Feature Extraction
- PCA
- KMeans
```
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import MinMaxScaler, StandardScaler, RobustScaler
from sklearn.tree import DecisionTreeRegressor
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score, KFold
from sklearn.metrics import mean_squared_error
from sklearn.decomposition import PCA
import statsmodels.api as sm
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from xgboost import XGBRegressor
from xgboost import plot_importance

# preprocessing
original = pd.read_csv("../datas/life_expectancy_data_fillna.csv")

def add_feature(original, filename=None):
    path = "../datas/worldbank_"
    original.columns = [cols.upper() for cols in original.columns.tolist()]
    if filename is not None:
        df = pd.read_csv(f"{path}{filename}.csv").groupby('Country Code').mean()
        df.drop(columns=['2016', '2017', '2018', '2019', '2020'], axis=1, inplace=True)
        col_name = filename.upper()
        original[col_name] = [df.loc[original['COUNTRYCODE'][i]][str(original['YEAR'][i])]
                              for i in range(len(original))]
    return original

def preprocessing(data):
    # add GDP per capita data
    data = add_feature(data, "gdppercap")
    # fill NaN with GDP / POPULATION
    data["GDPPERCAP"].fillna(data["GDP"] / data["POPULATION"], inplace=True)
    data.columns = [cols.upper() for cols in data.columns.tolist()]
    if 'STATUS' in data.columns.tolist():
        data = pd.get_dummies(data, columns=['STATUS'], drop_first=True)
    return data

# corr
def get_top_features(data, drop_n=None):
    if drop_n is None:
        drop_n = len(data.columns)
    # correlation of the remaining features with LIFE_EXPECTANCY
    corr_matrix = data.drop(['COUNTRYCODE', 'ISO3166', 'COUNTRY', 'YEAR', 'REGION', 'INCOMEGROUP'], axis=1).corr()
    # sort features by the strength of their correlation with LIFE_EXPECTANCY
    top_corr = abs(corr_matrix['LIFE_EXPECTANCY']).sort_values(ascending=False)[1:drop_n]
    top_features = top_corr.index.tolist()
    return top_features

# lower fence, upper fence (Tukey 1.5 * IQR rule, per region)
def get_fence(data, top_features):
    region = data['REGION'].unique().tolist()
    fence = {}
    for r in region:
        fence[r] = {}
        for f in top_features:
            q1 = np.percentile(data[data['REGION'] == r][f].values, 25)
            q3 = np.percentile(data[data['REGION'] == r][f].values, 75)
            iqr = q3 - q1
            upper_fence = ((iqr * 1.5) + q3).round(3)
            lower_fence = (q1 - (iqr * 1.5)).round(3)
            fence[r][f] = [lower_fence, upper_fence]
    return fence

# outlier processing
def drop_outlier(data, fence, top_features):
    region = data['REGION'].unique().tolist()
    drop_list, target_idx = [], []
    for r in region:
        target_df = data[data['REGION'] == r]
        for f in top_features:
            drop_idx = target_df[(target_df[f] < fence[r][f][0]) |
                                 (target_df[f] > fence[r][f][1])].index.tolist()
            drop_list.append(drop_idx)
    # indices to drop
    target_idx = set([idx for lst in drop_list for idx in lst])
    data = data.drop(target_idx, axis=0)
    return data

# split into X, y
def original_sortion_xy(data=None):
    if data is None:
        data = pd.read_csv("../datas/life_expectancy_data_fillna.csv")
    data.columns = [cols.upper() for cols in data.columns.tolist()]
    if 'STATUS' in data.columns.tolist():
        data = pd.get_dummies(data, columns=['STATUS'])
    X = data.drop(['COUNTRYCODE', 'ISO3166', 'COUNTRY', 'YEAR', 'LIFE_EXPECTANCY', 'REGION', 'INCOMEGROUP'], axis=1)
    y = data['LIFE_EXPECTANCY']
    return X, y

# RandomForest Regressor
def rf_regressor_score(data, count):
    X, y = original_sortion_xy(data)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=13)
    model = RandomForestRegressor()
    model.fit(X_train, y_train)
    model_score = model.score(X_test, y_test)
    # RMSE on the held-out test set
    model_rmse = np.sqrt(mean_squared_error(y_test, model.predict(X_test)))
    return model_score, model_rmse

# cross validation
def cross_validation(X, y, count, n_split=5):
    kfold = KFold(n_splits=n_split, shuffle=True, random_state=13)
    model = RandomForestRegressor()
    cv_scores = cross_val_score(model, X, y, cv=kfold)
    # keep the full array of fold scores so both the mean and the std are meaningful
    cv_score = cv_scores.mean()
    cv_std = cv_scores.std()
    rfcv_rmse_score = np.sqrt(cv_scores).mean()   # sqrt of the fold R^2 scores, as in the original notebook
    rfcv_rmse_std = np.sqrt(cv_scores).std()
    return cv_score, cv_std, rfcv_rmse_score, rfcv_rmse_std
```
### Preprocessing
- #### Outlier processing<br>
- Features are added cumulatively in order of their correlation with the target, and the outliers of each added feature are removed with per-region IQR fences (the fence rule is summarized after this list); each configuration is then evaluated with a
- RandomForest Regressor
- RMSE
- R-squared
- Cross Validation Score
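For reference, the fences computed by `get_fence` and applied by `drop_outlier` in the loop below follow the standard Tukey rule, per region and per feature: $\mathrm{IQR} = Q_3 - Q_1$, $\text{lower} = Q_1 - 1.5\,\mathrm{IQR}$, $\text{upper} = Q_3 + 1.5\,\mathrm{IQR}$; any row that falls outside either fence for one of the selected features is dropped.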
```
data = preprocessing(original)
top_features = get_top_features(data)
count = len(top_features)
start = 2
eval_df = pd.DataFrame()
for num in range(start, count):
    data = preprocessing(original)
    top_features = get_top_features(data, num)
    fence = get_fence(data, top_features)
    data = drop_outlier(data, fence, top_features)
    X, y = original_sortion_xy(data)
    rf_score, rf_rmse = rf_regressor_score(data, num)
    cv_score, cv_std, rfcv_rmse_score, rfcv_rmse_std = cross_validation(X, y, num)
    res = {'RF R-squared': rf_score.round(3),
           'RF RMSE': rf_rmse.round(3),
           'CV Score': cv_score.round(3),
           'CV Std': cv_std.round(3),
           'RF + CV RMSE score': rfcv_rmse_score.round(3),
           'RF + CV RMSE std': rfcv_rmse_std.round(3)}
    # DataFrame.append was removed in pandas 2.0; use pd.concat there instead
    eval_df = eval_df.append(res, ignore_index=True)
    print('num:', num)
    print('rf_score, rf_rmse, cv_score, cv_std, rfcv_rmse_score, rfcv_rmse_std:',
          rf_score, rf_rmse, cv_score, cv_std, rfcv_rmse_score, rfcv_rmse_std)
eval_df = eval_df.sort_values('RF R-squared', ascending=False)
eval_df
```
### Modeling
#### 1. Linear Regression
- Linear Regression original<br><br>
- $R^2$ Score 91
- RMSE 2.72
```
# fit datas
def fit_datas(X, y):
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=13)
    model = LinearRegression()
    model.fit(X_train, y_train)
    pred_tr = model.predict(X_train)
    pred_test = model.predict(X_test)
    return (X_train, X_test), (y_train, y_test), (pred_tr, pred_test)

# Linear Regression
def linear_regression():
    X, y = original_sortion_xy()
    X_tuple, y_tuple, pred_tuple = fit_datas(X, y)
    lm = sm.OLS(y_tuple[1], X_tuple[1]).fit()
    rmse_tr = np.sqrt(mean_squared_error(y_tuple[0], pred_tuple[0]))
    rmse_test = np.sqrt(mean_squared_error(y_tuple[1], pred_tuple[1]))
    print('Linear Regression Raw RMSE of train data:', rmse_tr)
    print('Linear Regression Raw RMSE of test data:', rmse_test)
    print('\n', lm.summary())
    # return np.sqrt(np.mean(np.square(y_tuple[1] - pred_tuple[1])))

linear_regression()
```
- Linear Regression: Feature Selection<br><br>
- Drop Features: 'Schooling', 'GDP'
- Add Feature: 'GDP per Capita'
- $R^2$ Score 92 (+0.3% vs. the original model)
- RMSE 2.67
```
def linear_regression_fs(data):
    original = add_feature(data, "gdppercap")
    original["GDPPERCAP"].fillna(original["GDP"] / original["POPULATION"], inplace=True)
    X, y = original_sortion_xy(original)
    # Linear Regression helper fit_datas(X, y)
    X_tuple, y_tuple, pred_tuple = fit_datas(X, y)
    lm = sm.OLS(y_tuple[1], X_tuple[1]).fit()
    rmse_tr = np.sqrt(mean_squared_error(y_tuple[0], pred_tuple[0]))
    rmse_test = np.sqrt(mean_squared_error(y_tuple[1], pred_tuple[1]))
    print('Linear Regression Feature Selection RMSE of train data:', rmse_tr)
    print('Linear Regression Feature Selection RMSE of test data:', rmse_test)
    print('\n', lm.summary())

# split X, y + DROP (Schooling, GDP) + ADD (GDP per Capita)
original = pd.read_csv("../datas/life_expectancy_data_fillna.csv")
a = linear_regression_fs(original)
```
- Linear Regression: Scaling<br><br>
- $R^2$ Score 92 (no change)
```
original = pd.read_csv("../datas/life_expectancy_data_fillna.csv")

# Applying a scaler to X leaves the accuracy essentially unchanged;
# only the coef / std err columns of the OLS summary come out a bit cleaner.
def linear_regression_mm(data):
    original = add_feature(data, "gdppercap")
    original["GDPPERCAP"].fillna(original["GDP"] / original["POPULATION"], inplace=True)
    X, y = original_sortion_xy(original)
    mm = MinMaxScaler()
    X_mm = mm.fit_transform(X)
    X_tuple, y_tuple, pred_tuple = fit_datas(X_mm, y)
    lm = sm.OLS(y_tuple[1], X_tuple[1]).fit()
    rmse_tr = np.sqrt(mean_squared_error(y_tuple[0], pred_tuple[0]))
    rmse_test = np.sqrt(mean_squared_error(y_tuple[1], pred_tuple[1]))
    print('Linear Regression MinMax Scaler RMSE of train data:', rmse_tr)
    print('Linear Regression MinMax Scaler RMSE of test data:', rmse_test)
    print('\n', lm.summary())

linear_regression_mm(original)
```
#### 2. Decision Tree Regressor
- $R^2$ Score 94.2 (+2.2 vs. Linear Regression)
- RMSE 2.72
```
# DecisionTreeRegressor
def decision_tree_regressor(data):
    original = add_feature(data, "gdppercap")
    original["GDPPERCAP"].fillna(original["GDP"] / original["POPULATION"], inplace=True)
    X, y = original_sortion_xy(original)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=13)
    model = DecisionTreeRegressor()
    model.fit(X_train, y_train)
    print("Decision Tree Regressor R-squared :", model.score(X_test, y_test))
    # compute RMSE from this model's own predictions
    rmse_tr = np.sqrt(mean_squared_error(y_train, model.predict(X_train)))
    rmse_test = np.sqrt(mean_squared_error(y_test, model.predict(X_test)))
    print('Decision Tree regressor RMSE of train data:', rmse_tr)
    print('Decision Tree regressor RMSE of test data:', rmse_test)
    lm = sm.OLS(y_test, X_test).fit()
    print('\n', lm.summary())

decision_tree_regressor(original)
```
#### 3. XGBoost Regressor
- $R^2$ Score 96.8 (+4.8 vs. Linear Regression)
- cross validation mean: 96.6%
- RMSE 2.72
```
def xgb_regressor(data):
    original = add_feature(data, "gdppercap")
    original["GDPPERCAP"].fillna(original["GDP"] / original["POPULATION"], inplace=True)
    X, y = original_sortion_xy(original)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=13)
    model = XGBRegressor()
    model.fit(X_train, y_train)
    pred_train = model.predict(X_train)
    pred_test = model.predict(X_test)
    # evaluate predictions
    r2 = model.score(X_test, y_test)
    print("R-squared :", r2)
    rmse_tr = np.sqrt(mean_squared_error(y_train, pred_train))
    rmse_test = np.sqrt(mean_squared_error(y_test, pred_test))
    print('XGB RMSE of train data:', rmse_tr)
    print('XGB RMSE of test data:', rmse_test)
    lm = sm.OLS(y_test, X_test).fit()
    print('\n', lm.summary())
    return model

model = xgb_regressor(original)
plot_importance(model)
plt.show()

# xgboost cross validation
def xgb_cv(data, n_splits=5):
    original = add_feature(data, "gdppercap")
    original["GDPPERCAP"].fillna(original["GDP"] / original["POPULATION"], inplace=True)
    X, y = original_sortion_xy(original)
    kfold = KFold(n_splits=n_splits, shuffle=True, random_state=13)
    model = XGBRegressor()
    accuracy = cross_val_score(model, X, y, cv=kfold)
    print('Cross Validation : ', accuracy)
    print('Cross Validation Mean :', accuracy.mean())
    print('Cross Validation std :', accuracy.std())
    # hold-out split so the RMSE and OLS summary below have fitted predictions to use
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=13)
    model.fit(X_train, y_train)
    rmse_tr = np.sqrt(mean_squared_error(y_train, model.predict(X_train)))
    rmse_test = np.sqrt(mean_squared_error(y_test, model.predict(X_test)))
    print('XGB RMSE of train data:', rmse_tr)
    print('XGB RMSE of test data:', rmse_test)
    lm = sm.OLS(y_test, X_test).fit()
    print('\n', lm.summary())

xgb_cv(original)
```
#### 4. Random Forest Regressor
- $R^2$ 97.3 (+5.3 vs. Linear Regression)
- cross validation mean: 97.0%
- RMSE 2.72
```
# RandomForest Regressor
def rf_regressor(data):
    original = add_feature(data, filename="gdppercap")
    original["GDPPERCAP"].fillna(original["GDP"] / original["POPULATION"], inplace=True)
    X, y = original_sortion_xy(original)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=13)
    model = RandomForestRegressor()
    model.fit(X_train, y_train)
    print("Random Forest Regressor R-squared :", model.score(X_test, y_test))
    rmse_tr = np.sqrt(mean_squared_error(y_train, model.predict(X_train)))
    rmse_test = np.sqrt(mean_squared_error(y_test, model.predict(X_test)))
    print('Random Forest Regressor RMSE of train data:', rmse_tr)
    print('Random Forest Regressor RMSE of test data:', rmse_test)
    lm = sm.OLS(y_test, X_test).fit()
    print(lm.summary(), '\n')
    # also return the fitted model so the scatter plot below uses this model
    return model, X_test, y_test

rf_regressor(original)

# RandomForest Regressor cross validation
def rf_regressor_cv(data, n_splits=5):
    original = add_feature(data, "gdppercap")
    original["GDPPERCAP"].fillna(original["GDP"] / original["POPULATION"], inplace=True)
    X, y = original_sortion_xy(original)
    kfold = KFold(n_splits=n_splits, shuffle=True, random_state=13)
    model = RandomForestRegressor()
    accuracy = cross_val_score(model, X, y, cv=kfold)
    print('Cross Validation : ', accuracy)
    print('Cross Validation Mean :', accuracy.mean())
    print('Cross Validation std :', accuracy.std())
    # hold-out split so the RMSE below has fitted predictions to use
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=13)
    model.fit(X_train, y_train)
    rmse_tr = np.sqrt(mean_squared_error(y_train, model.predict(X_train)))
    rmse_test = np.sqrt(mean_squared_error(y_test, model.predict(X_test)))
    print('Random Forest Regressor RMSE of train data:', rmse_tr)
    print('Random Forest Regressor RMSE of test data:', rmse_test)

rf_regressor_cv(original)

model, X_test, y_test = rf_regressor(original)
y_pred = model.predict(X_test)
plt.scatter(y_test, y_pred)
plt.xlabel("Life expectancy")
plt.ylabel("Predicted Life expectancy")
plt.title("Random Forest accuracy results");
```
### Linear Regression model predict
- Linear Regression
```
def get_settings(original):
    data = preprocessing(original)
    top_features = get_top_features(data, drop_n=5)
    fence = get_fence(data, top_features)
    data = drop_outlier(data, fence, top_features)
    data = data.drop(['COUNTRYCODE', 'ISO3166', 'YEAR', 'INCOMEGROUP'], axis=1)
    return data

original = pd.read_csv("../datas/life_expectancy_data_fillna.csv")

def model_pred(model, pred_data):
    data = get_settings(original)
    top_features = get_top_features(original)
    X = data[top_features]
    y = data['LIFE_EXPECTANCY']
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=13)
    model.fit(X_train, y_train)
    y_pred = model.predict(pred_data)
    return y_pred

# Sample data: most recent row for Korea, built with the same frame and
# feature list that model_pred uses internally
data = get_settings(original)
top_features = get_top_features(original)
korea = data[data['COUNTRY'] == 'Korea, Rep.'][-1:]
pred_kor = korea[top_features]
model_pred(LinearRegression(), pred_kor)
```
### Feature Extraction
#### 1. PCA
- based on REGION
- scree plot
- biplot
- PCA1 score<br><br>
- based on COUNTRY
- PCA1 score
```
# data processing
original = pd.read_csv("../datas/life_expectancy_data_fillna.csv")
data = get_settings(original)

# standard scaler
def fit_scaler(data):
    original_ss = StandardScaler().fit_transform(data)
    ss_df = pd.DataFrame(original_ss, index=data.index, columns=data.columns)
    return ss_df

def fit_pca(data, n_component=2):
    # standard scaler
    ss_df = fit_scaler(data)
    pca = PCA(n_components=n_component)
    pca_res = pca.fit_transform(ss_df)
    pc_values = np.arange(pca.n_components_) + 1
    pca_var = pca.explained_variance_ratio_
    # build a DataFrame holding the score of each principal component
    pca_df = pd.DataFrame(pca_res, index=ss_df.index,
                          columns=[f"pca{num+1}" for num in range(n_component)])
    return pca_df, pc_values, pca_var

# pca scree plot
def show_screeplot(data, n_component=2):
    target = data['LIFE_EXPECTANCY']
    data = data.drop(['LIFE_EXPECTANCY'], axis=1)
    # fit pca
    pca_df, pc_values, pca_var = fit_pca(data, n_component)
    # explained_variance_ratio_ (pca_var): proportion of variance captured by each component
    plt.plot(pc_values, pca_var, 'ro-', linewidth=2)
    plt.title('Scree Plot')
    plt.xlabel('Principal Component')               # number of principal components
    plt.ylabel('Proportion of Variance Explained')  # share of the variance (eigenvalue)
    plt.show()
    return pca_df

# n_component : number of regions
data = data.groupby('REGION').mean().round(3)
n_component = len(data.index)
show_screeplot(data, n_component)

from pca import pca

def show_biplot(data, n_component=2, feature_num=5):
    data = data.drop(['LIFE_EXPECTANCY'], axis=1)
    model = pca(n_components=n_component)
    # fit scaler
    ss_df = fit_scaler(data)
    # pca
    results = model.fit_transform(ss_df)
    fig, ax = model.biplot(n_feat=feature_num, legend=False)

n_component = 2
feature_num = 5   # number of top features to display
show_biplot(data, n_component, feature_num)

# Regions / countries ranked by their first-principal-component score
def sortion_pca1(n_component=7):
    pca_df, pc_values, pca_var = fit_pca(data, n_component)
    pca_1 = pca_df[['pca1']]
    pca_1 = pca_1.sort_values(by='pca1', ascending=False)
    pca_1 = pca_1.reset_index()
    pca_1.index = pca_1.index + 1
    pca_1.index.name = 'Ranking'
    return pca_1

# region ranking
original = pd.read_csv("../datas/life_expectancy_data_fillna.csv")
# data processing
data = get_settings(original)
# n_component : number of regions
data = data.groupby('REGION').mean().round(3)
n_component = len(data.index)
pca1_df = pd.DataFrame(sortion_pca1(n_component))
pca1_df

# country ranking
original = pd.read_csv("../datas/life_expectancy_data_fillna.csv")
# data processing
data = get_settings(original)
# n_component : number of countries (the PCA itself keeps the default number of
# components, since it cannot exceed the number of features)
data = data.groupby('COUNTRY').mean().round(3)
n_component = len(data.index)
pca1_df = pd.DataFrame(sortion_pca1())
pca1_df
```
|
github_jupyter
|
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import MinMaxScaler, StandardScaler, RobustScaler
from sklearn.tree import DecisionTreeRegressor
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score, KFold
from sklearn.metrics import mean_squared_error
from sklearn.decomposition import PCA
import statsmodels.api as sm
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
from xgboost import XGBRegressor
from xgboost import plot_importance
# preprocessing
original = pd.read_csv("../datas/life_expectancy_data_fillna.csv")
def add_feature(original, filename=None):
path = "../datas/worldbank_"
original.columns = [cols.upper() for cols in original.columns.tolist()]
if not filename == None:
df = pd.read_csv(f"{path}{filename}.csv").groupby('Country Code').mean()
df.drop(columns=['2016', '2017','2018','2019','2020'], axis=1, inplace=True)
col_name = filename.upper()
original[col_name] = [df.loc[original['COUNTRYCODE'][i]][str(original['YEAR'][i])]
for i in range(len(original))]
return original
def preprocessing(data):
# GDP per capita ๋ฐ์ดํฐ ์ถ๊ฐ
data = add_feature(data, "gdppercap")
# Nan๊ฐ GDP/POP์ผ๋ก ๋์ฒด
data["GDPPERCAP"].fillna(data["GDP"] / data["POPULATION"], inplace=True)
data.columns = [cols.upper() for cols in original.columns.tolist()]
if 'STATUS' in data.columns.tolist():
data = pd.get_dummies(original, columns=['STATUS'], drop_first=True)
return data
# corr
def get_top_features(data, drop_n=None):
if drop_n is None:
drop_n = len(data.columns)
# LIFE_EXPECTANCY์ ๋ํ ๋๋จธ์ง feature๋ค์ ์๊ด๊ด๊ณ
corr_matrix = data.drop(['COUNTRYCODE','ISO3166','COUNTRY','YEAR', 'REGION','INCOMEGROUP'], axis=1).corr()
corr_matrix['LIFE_EXPECTANCY'].sort_values(ascending=False)
# LIFE_EXPECTANCY์ ๋์ ์๊ด๊ด๊ณ๋ฅผ ๊ฐ์ง๋ ํผ์ฒ ์ ์ ๋ ฌ
top_corr = abs(corr_matrix['LIFE_EXPECTANCY']).sort_values(ascending=False)[1:drop_n]
top_features = top_corr.index.tolist()
return top_features
# lower fence, upper fence
def get_fence(data, top_features):
region = data['REGION'].unique().tolist()
fence = {}
for r in region:
fence[r] = {}
for i, f in enumerate(top_features):
q1 = np.percentile(original[data['REGION'] == r][top_features[i]].values, 25)
q3 = np.percentile(original[data['REGION'] == r][top_features[i]].values, 75)
iqr = q3 - q1
upper_fence = ((iqr * 1.5) + q3).round(3)
lower_fence = (q1 - (iqr * 1.5)).round(3)
fence[r][f] = [lower_fence, upper_fence]
return fence
# outlier processing
def drop_outlier(data, fence, top_features):
region = data['REGION'].unique().tolist()
drop_list, target_idx = [], []
for r in region:
target_df = data[data['REGION'] == r]
for f in top_features:
drop_idx = target_df[(target_df[f] < fence[r][f][0]) |
(target_df[f] > fence[r][f][1])].index.tolist()
drop_list.append(drop_idx)
# ์ ๊ฑฐ ๋์ ์ธ๋ฑ์ค
target_idx = set([idx for lst in drop_list for idx in lst])
data = data.drop(target_idx, axis=0)
return data
# sortion X, y
def original_sortion_xy(data=None):
if data is None:
data = pd.read_csv("../datas/life_expectancy_data_fillna.csv")
data.columns = [cols.upper() for cols in data.columns.tolist()]
if 'STATUS' in data.columns.tolist():
original = pd.get_dummies(data, columns=['STATUS'])
X = original.drop(['COUNTRYCODE','ISO3166','COUNTRY','YEAR','LIFE_EXPECTANCY','REGION','INCOMEGROUP'], axis=1)
y = original['LIFE_EXPECTANCY']
return X, y
# RandomForest Regressor
def rf_regressor_score(data, count):
X, y = original_sortion_xy(data)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=13)
model = RandomForestRegressor()
model.fit(X_train, y_train)
model_score = model.score(X_test, y_test)
model_rmse = np.sqrt(model.score(X_test, y_test)).mean()
return model_score, model_rmse
# cross validation
def cross_validation(X, y, count, n_split=5):
kfold = KFold(n_splits=n_split, shuffle=True, random_state=13)
model = RandomForestRegressor()
cv_score = cross_val_score(model, X, y, cv=kfold)
cv_score = cv_score.mean()
cv_std = cv_score.std()
rfcv_rmse_score = np.sqrt(cv_score).mean()
rfcv_rmse_std = np.sqrt(cv_score).std()
return cv_score, cv_std, rfcv_rmse_score, rfcv_rmse_std
data = preprocessing(original)
top_features = get_top_features(data)
count = len(top_features)
start = 2
eval_df = pd.DataFrame()
for num in range(start, count):
data = preprocessing(original)
top_features = get_top_features(data, num)
fence = get_fence(data, top_features)
data = drop_outlier(data, fence, top_features)
X, y = original_sortion_xy(data)
rf_score, rf_rmse = rf_regressor_score(data, num)
cv_score, cv_std, rfcv_rmse_score, rfcv_rmse_std = cross_validation(X, y, num)
res = {'RF R-squared': rf_score.round(3),
'RF RMSE': rf_rmse.round(3),
'CV Score': cv_score.round(3),
'CV Std': cv_std.round(3),
'RF + CV RMSE score': rfcv_rmse_score.round(3),
'RF + CV RMSE std': rfcv_rmse_std.round(3)}
eval_df = eval_df.append(res, ignore_index=True)
print('num:', num)
print('rf_score, rf_rmse, cv_score, cv_std, rfcv_rmse_score, rfcv_rmse_std:', rf_score, rf_rmse, cv_score, cv_std, rfcv_rmse_score, rfcv_rmse_std)
eval_df = eval_df.sort_values('RF R-squared', ascending=False)
eval_df
# fit datas
def fit_datas(X, y):
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=13)
model = LinearRegression()
model.fit(X_train, y_train)
pred_tr = model.predict(X_train)
pred_test = model.predict(X_test)
return (X_train, X_test), (y_train, y_test), (pred_tr, pred_test)
# Linear Regression
def linear_regression():
X, y = original_sortion_xy()
X_tuple, y_tuple, pred_tuple = fit_datas(X, y)
lm = sm.OLS(y_tuple[1], X_tuple[1]).fit()
rmse_tr = np.sqrt(mean_squared_error(y_tuple[0], pred_tuple[0]))
rmse_test = np.sqrt(mean_squared_error(y_tuple[1], pred_tuple[1]))
print('Linear Regression Raw RMSE of train data:', rmse_tr)
print('Linear Regression Raw RMSE of test data:', rmse_test)
print('\n', lm.summary())
# return np.sqrt(np.mean(np.square(y_tuple[1] - pred_tuple[1])))
linear_regression()
def linear_regression_fs(data):
original = add_feature(data, "gdppercap")
original["GDPPERCAP"].fillna(original["GDP"] / original["POPULATION"], inplace=True)
X, y = original_sortion_xy(original)
    # Linear Regression: reuse the fit_datas(X, y) helper
X_tuple, y_tuple, pred_tuple = fit_datas(X, y)
lm = sm.OLS(y_tuple[1], X_tuple[1]).fit()
rmse_tr = np.sqrt(mean_squared_error(y_tuple[0], pred_tuple[0]))
rmse_test = np.sqrt(mean_squared_error(y_tuple[1], pred_tuple[1]))
print('Linear Regression Feature Selection RMSE of train data:', rmse_tr)
print('Linear Regression Feature Selection RMSE of test data:', rmse_test)
print('\n', lm.summary())
# split X and y + DROP (Schooling, GDP) + ADD (GDP per Capita)
original = pd.read_csv("../datas/life_expectancy_data_fillna.csv")
a = linear_regression_fs(original)
original = pd.read_csv("../datas/life_expectancy_data_fillna.csv")
# Applying a scaler to X leaves the accuracy essentially unchanged;
# the only difference is that the coef and std err values come out a bit cleaner.
def linear_regression_mm(data):
original = add_feature(data, "gdppercap")
original["GDPPERCAP"].fillna(original["GDP"] / original["POPULATION"], inplace=True)
X, y = original_sortion_xy(original)
mm = MinMaxScaler()
X_mm = mm.fit_transform(X)
X_tuple, y_tuple, pred_tuple = fit_datas(X_mm, y)
lm = sm.OLS(y_tuple[1], X_tuple[1]).fit()
rmse_tr = np.sqrt(mean_squared_error(y_tuple[0], pred_tuple[0]))
rmse_test = np.sqrt(mean_squared_error(y_tuple[1], pred_tuple[1]))
print('Linear Regression MinMax Scaler RMSE of train data:', rmse_tr)
print('Linear Regression MinMax Scaler RMSE of test data:', rmse_test)
print('\n', lm.summary())
linear_regression_mm(original)
# DecisionTreeRegressor
def decision_tree_regressor(data):
original = add_feature(data, "gdppercap")
original["GDPPERCAP"].fillna(original["GDP"] / original["POPULATION"], inplace=True)
X, y = original_sortion_xy(original)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=13)
model = DecisionTreeRegressor()
model.fit(X_train, y_train)
print("Decision Tree Regressor R-squared :", model.score(X_test, y_test))
    rmse_tr = np.sqrt(mean_squared_error(y_train, model.predict(X_train)))
    rmse_test = np.sqrt(mean_squared_error(y_test, model.predict(X_test)))
print('Decision Tree regressor RMSE of train data:', rmse_tr)
print('Decision Tree regressor RMSE of test data:', rmse_test)
lm = sm.OLS(y_test, X_test).fit()
print('\n', lm.summary())
decision_tree_regressor(original)
def xgb_regressor(data):
original = add_feature(data, "gdppercap")
original["GDPPERCAP"].fillna(original["GDP"] / original["POPULATION"], inplace=True)
X, y = original_sortion_xy(original)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=13)
model = XGBRegressor()
model.fit(X_train, y_train, eval_metric="error", verbose=True)
pred_test = model.predict(X_test)
predictions = [round(value) for value in pred_test]
# evaluate predictions
r2 = model.score(X_test, y_test)
print("R-squared :", r2)
    rmse_tr = np.sqrt(mean_squared_error(y_train, model.predict(X_train)))
    rmse_test = np.sqrt(mean_squared_error(y_test, pred_test))
print('XGB RMSE of train data:', rmse_tr)
print('XGB RMSE of test data:', rmse_test)
lm = sm.OLS(y_test, X_test).fit()
print('\n', lm.summary())
return model
model = xgb_regressor(original)
plot_importance(model)
plt.show()
# xgboost cross validation
def xgb_cv(data, n_splits=5):
original = add_feature(data, "gdppercap")
original["GDPPERCAP"].fillna(original["GDP"] / original["POPULATION"], inplace=True)
    X, y = original_sortion_xy(original)
    kfold = KFold(n_splits=n_splits, shuffle=True, random_state=13)
model = XGBRegressor()
accuracy = cross_val_score(model, X, y, cv=kfold)
print('Cross Validation : ', accuracy)
print('Cross Validation Mean :', accuracy.mean())
print('Cross Validation std :', accuracy.std())
    # cross-validated RMSE via negative MSE scoring
    neg_mse = cross_val_score(model, X, y, cv=kfold, scoring='neg_mean_squared_error')
    rmse_cv = np.sqrt(-neg_mse)
    print('XGB cross-validated RMSE mean:', rmse_cv.mean())
    print('XGB cross-validated RMSE std :', rmse_cv.std())
xgb_cv(original)
# RandomForest Regressor
def rf_regressor(data):
    original = add_feature(data, "gdppercap")
    original["GDPPERCAP"].fillna(original["GDP"] / original["POPULATION"], inplace=True)
X, y = original_sortion_xy(original)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=13)
model = RandomForestRegressor()
model.fit(X_train, y_train)
print("Random Forest Regressor R-squared :", model.score(X_test, y_test))
    rmse_tr = np.sqrt(mean_squared_error(y_train, model.predict(X_train)))
    rmse_test = np.sqrt(mean_squared_error(y_test, model.predict(X_test)))
print('Random Forest Regressor RMSE of train data:', rmse_tr)
print('Random Forest Regressor RMSE of test data:', rmse_test)
lm = sm.OLS(y_test, X_test).fit()
print(lm.summary(), '\n')
    return model, X_test, y_test
rf_regressor(original)
# RandomForest Regressor cross validation
def rf_regressor_cv(data, n_splits=5):
original = add_feature(data, "gdppercap")
original["GDPPERCAP"].fillna(original["GDP"] / original["POPULATION"], inplace=True)
    X, y = original_sortion_xy(original)
    kfold = KFold(n_splits=n_splits, shuffle=True, random_state=13)
model = RandomForestRegressor()
accuracy = cross_val_score(model, X, y, cv=kfold)
print('Cross Validation : ', accuracy)
print('Cross Validation Mean :', accuracy.mean())
print('Cross Validation std :', accuracy.std())
    # cross-validated RMSE via negative MSE scoring
    neg_mse = cross_val_score(model, X, y, cv=kfold, scoring='neg_mean_squared_error')
    rmse_cv = np.sqrt(-neg_mse)
    print('Random Forest cross-validated RMSE mean:', rmse_cv.mean())
    print('Random Forest cross-validated RMSE std :', rmse_cv.std())
rf_regressor_cv(original)
model, X_test, y_test = rf_regressor(original)
y_pred = model.predict(X_test)
plt.scatter(y_test, y_pred)
plt.xlabel("Life expectancy")
plt.ylabel("Predicted Life expectancy")
plt.title("Random Forest accuracy results");
def get_settings(original):
data = preprocessing(original)
top_features = get_top_features(data, drop_n=5)
fence = get_fence(data, top_features)
data = drop_outlier(data, fence, top_features)
data = data.drop(['COUNTRYCODE', 'ISO3166', 'YEAR', 'INCOMEGROUP'], axis=1)
return data
original = pd.read_csv("../datas/life_expectancy_data_fillna.csv")
def model_pred(model, pred_data):
data = get_settings(original)
top_features = get_top_features(original)
X = data[top_features]
y = data['LIFE_EXPECTANCY']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state=13)
model = model
model.fit(X_train, y_train)
y_pred = model.predict(pred_data)
return y_pred
# Sample data
korea = data[data['COUNTRY'] == 'Korea, Rep.'][-1:]
pred_kor = korea[top_features]
model_pred(LinearRegression(), pred_kor)
# data processing
original = pd.read_csv("../datas/life_expectancy_data_fillna.csv")
data = get_settings(original)
# standard scaler
def fit_scaler(data):
original_ss = StandardScaler().fit_transform(data)
ss_df = pd.DataFrame(original_ss, index=data.index, columns=data.columns)
return ss_df
def fit_pca(data, n_component=2):
# standard scaler
ss_df = fit_scaler(data)
pca = PCA(n_components=n_component)
pca_res = pca.fit_transform(ss_df)
pc_values = np.arange(pca.n_components_) + 1
pca_var = pca.explained_variance_ratio_
    # build a dataframe whose columns are the principal-component scores
pca_df = pd.DataFrame(pca_res, index=ss_df.index,
columns=[f"pca{num+1}" for num in range(n_component)])
return pca_df, pc_values, pca_var
# pca scree plot
def show_screeplot(data, n_component=2):
target = data['LIFE_EXPECTANCY']
data = data.drop(['LIFE_EXPECTANCY'], axis=1)
# fit pca
pca_df, pc_values, pca_var = fit_pca(data, n_component)
    # explained_variance_ratio_ (pca_var): proportion of variance explained by each principal component
plt.plot(pc_values, pca_var, 'ro-', linewidth=2)
plt.title('Scree Plot')
    plt.xlabel('Principal Component')  # principal component index
    plt.ylabel('Proportion of Variance Explained')  # explained variance ratio per component
plt.show()
return pca_df
# n_component: number of regions
data = data.groupby('REGION').mean().round(3)
n_component = len(data.index)
show_screeplot(data, n_component)
from pca import pca
def show_biplot(data, n_component=2, feature_num=5):
data = data.drop(['LIFE_EXPECTANCY'], axis=1)
model = pca(n_components=n_component)
# fit scaler
ss_df = fit_scaler(data)
# pca
results = model.fit_transform(ss_df)
fig, ax = model.biplot(n_feat=feature_num, legend=False)
n_component = 2
feature_num = 5  # number of top features to show in the biplot
show_biplot(data, n_component, feature_num)
# Sorting regions/countries by the first principal component puts the region/country with the largest PC1 score at the top.
def sortion_pca1(n_component=7):
pca_df, pc_values, pca_var = fit_pca(data, n_component)
pca_1 = pca_df[['pca1']]
pca_1 = pca_1.sort_values(by='pca1', ascending=False)
pca_1 = pca_1.reset_index()
pca_1.index = pca_1.index+1
pca_1.index.name = 'Ranking'
return pca_1
# region ranking
original = pd.read_csv("../datas/life_expectancy_data_fillna.csv")
# data processing
data = get_settings(original)
# n_component: number of regions
data = data.groupby('REGION').mean().round(3)
n_component = len(data.index)
pca1_df = pd.DataFrame(sortion_pca1(n_component))
pca1_df
# country ranking
original = pd.read_csv("../datas/life_expectancy_data_fillna.csv")
# data processing
data = get_settings(original)
# n_component: number of countries
data = data.groupby('COUNTRY').mean().round(3)
n_component = len(data.index)
pca1_df = pd.DataFrame(sortion_pca1(n_component))
pca1_df
| 0.523908 | 0.836888 |
<a href="https://colab.research.google.com/github/Shuvo31/Fake-News-Detection/blob/master/fakenewsdetection.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
import numpy as np
import pandas as pd
import re
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
import nltk
nltk.download('stopwords')
#printing the stopwords in English
print(stopwords.words('english'))
#loading the news dataset
news_dataset = pd.read_csv('/content/train.csv')
news_dataset.shape
# print the first 5 rows of the dataframe
news_dataset.head()
# counting the number of missing values in the dataset
news_dataset.isnull().sum()
# replacing the null values with empty string
news_dataset = news_dataset.fillna('')
# merging the author name and news title
news_dataset['content'] = news_dataset['author']+' '+news_dataset['title']
print(news_dataset['content'])
# separating the data & label
X = news_dataset.drop(columns='label', axis=1)
Y = news_dataset['label']
print(X)
print(Y)
#stemming
port_stem = PorterStemmer()
def stemming(content):
stemmed_content = re.sub('[^a-zA-Z]',' ',content)
stemmed_content = stemmed_content.lower()
stemmed_content = stemmed_content.split()
stemmed_content = [port_stem.stem(word) for word in stemmed_content if not word in stopwords.words('english')]
stemmed_content = ' '.join(stemmed_content)
return stemmed_content
news_dataset['content'] = news_dataset['content'].apply(stemming)
print(news_dataset['content'])
#separating the data and label
X = news_dataset['content'].values
Y = news_dataset['label'].values
print(X)
print(Y)
Y.shape
# converting the textual data to numerical data
vectorizer = TfidfVectorizer()
vectorizer.fit(X)
X = vectorizer.transform(X)
print(X)
#splitting the dataset
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.2, stratify=Y, random_state=2)
#logistics regression
model = LogisticRegression()
model.fit(X_train, Y_train)
# accuracy score on the training data
X_train_prediction = model.predict(X_train)
training_data_accuracy = accuracy_score(X_train_prediction, Y_train)
print('Accuracy score of the training data : ', training_data_accuracy)
X_test_prediction = model.predict(X_test)
test_data_accuracy = accuracy_score(X_test_prediction, Y_test)
print('Accuracy score of the test data : ', test_data_accuracy)
#making a predictive system
X_new = X_test[3]
prediction = model.predict(X_new)
print(prediction)
if (prediction[0]==0):
print('The news is Real')
else:
print('The news is Fake')
print(Y_test[3])
```
| 0.327453 | 0.839339 |
# Scalable GP Regression (w/ KISS-GP)
## Introduction
For 2-4D functions, SKI (or KISS-GP) can work very well out-of-the-box on larger datasets (100,000+ data points).
Kernel interpolation for scalable structured Gaussian processes (KISS-GP) was introduced in this paper:
http://proceedings.mlr.press/v37/wilson15.pdf
One thing to watch out for with multidimensional SKI: you can't use as fine-grained a grid, because the inducing grid is a Cartesian product over dimensions and its size therefore grows exponentially with the number of dimensions. If you have a high-dimensional problem, you may want to try one of the other scalable regression methods.
This is the same as [the standard KISSGP 1D notebook](../04_Scalable_GP_Regression_1D/KISSGP_Regression_1D.ipynb), but applied to more dimensions.
```
import math
import torch
import gpytorch
from matplotlib import pyplot as plt
%matplotlib inline
```
### Set up train data
Here we're learning a simple sin function - but in 2 dimensions
```
# We make an nxn grid of training points spaced every 1/(n-1) on [0,1]x[0,1]
n = 40
train_x = torch.zeros(pow(n, 2), 2)
for i in range(n):
for j in range(n):
train_x[i * n + j][0] = float(i) / (n-1)
train_x[i * n + j][1] = float(j) / (n-1)
# True function is sin( 2*pi*(x0+x1))
train_y = torch.sin((train_x[:, 0] + train_x[:, 1]) * (2 * math.pi)) + torch.randn_like(train_x[:, 0]).mul(0.01)
```
## The model
As with the 1D case, applying SKI to a multidimensional kernel is as simple as wrapping that kernel with a `GridInterpolationKernel`. You'll want to be sure to set `num_dims` though!
SKI has only one hyperparameter that you need to worry about: the grid size. For 1D functions, a good starting place is to use as many grid points as training points. (Don't worry - the grid points are really cheap to use!). You can use the `gpytorch.utils.grid.choose_grid_size` helper to get a good starting point.
If you want, you can also explicitly determine the grid bounds of the SKI approximation using the `grid_bounds` argument. However, it's easier if you don't use this argument - then GPyTorch automatically chooses the best bounds for you.
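As a quick illustration (not from the original notebook), explicitly fixing the grid bounds might look like the sketch below; the exact `grid_bounds` format of one `(min, max)` pair per input dimension is an assumption based on the `GridInterpolationKernel` signature.
```
# Hypothetical sketch: pin the SKI grid to the unit square instead of letting
# GPyTorch choose the bounds from the training inputs.
covar_module = gpytorch.kernels.GridInterpolationKernel(
    gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel(ard_num_dims=2)),
    grid_size=30,                          # grid points per dimension
    num_dims=2,
    grid_bounds=[(0.0, 1.0), (0.0, 1.0)],  # one (min, max) pair per input dimension
)
```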
```
class GPRegressionModel(gpytorch.models.ExactGP):
def __init__(self, train_x, train_y, likelihood):
super(GPRegressionModel, self).__init__(train_x, train_y, likelihood)
# SKI requires a grid size hyperparameter. This util can help with that
grid_size = gpytorch.utils.grid.choose_grid_size(train_x)
self.mean_module = gpytorch.means.ConstantMean()
self.covar_module = gpytorch.kernels.GridInterpolationKernel(
gpytorch.kernels.ScaleKernel(
gpytorch.kernels.RBFKernel(ard_num_dims=2),
), grid_size=grid_size, num_dims=2
)
def forward(self, x):
mean_x = self.mean_module(x)
covar_x = self.covar_module(x)
return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)
likelihood = gpytorch.likelihoods.GaussianLikelihood()
model = GPRegressionModel(train_x, train_y, likelihood)
```
## Train the model hyperparameters
```
# Find optimal model hyperparameters
model.train()
likelihood.train()
# Use the adam optimizer
optimizer = torch.optim.Adam([
{'params': model.parameters()}, # Includes GaussianLikelihood parameters
], lr=0.1)
# "Loss" for GPs - the marginal log likelihood
mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model)
def train():
training_iterations = 30
for i in range(training_iterations):
optimizer.zero_grad()
output = model(train_x)
loss = -mll(output, train_y)
loss.backward()
print('Iter %d/%d - Loss: %.3f' % (i + 1, training_iterations, loss.item()))
optimizer.step()
%time train()
```
## Make predictions with the model
```
# Set model and likelihood into evaluation mode
model.eval()
likelihood.eval()
# Generate nxn grid of test points spaced on a grid of size 1/(n-1) in [0,1]x[0,1]
n = 10
test_x = torch.zeros(int(pow(n, 2)), 2)
for i in range(n):
for j in range(n):
test_x[i * n + j][0] = float(i) / (n-1)
test_x[i * n + j][1] = float(j) / (n-1)
with torch.no_grad(), gpytorch.fast_pred_var():
observed_pred = likelihood(model(test_x))
pred_labels = observed_pred.mean.view(n, n)
# Calculate absolute error
test_y_actual = torch.sin(((test_x[:, 0] + test_x[:, 1]) * (2 * math.pi))).view(n, n)
delta_y = torch.abs(pred_labels - test_y_actual).detach().numpy()
# Define a plotting function
def ax_plot(f, ax, y_labels, title):
im = ax.imshow(y_labels)
ax.set_title(title)
f.colorbar(im)
# Plot our predictive means
f, observed_ax = plt.subplots(1, 1, figsize=(4, 3))
ax_plot(f, observed_ax, pred_labels, 'Predicted Values (Likelihood)')
# Plot the true values
f, observed_ax2 = plt.subplots(1, 1, figsize=(4, 3))
ax_plot(f, observed_ax2, test_y_actual, 'Actual Values (Likelihood)')
# Plot the absolute errors
f, observed_ax3 = plt.subplots(1, 1, figsize=(4, 3))
ax_plot(f, observed_ax3, delta_y, 'Absolute Error Surface')
```
| 0.872619 | 0.969149 |
# Introduction
In previous work, Martyn identified an excessive number of transport reactions in Matteo's version of the model. He looked into these reactions by hand and concluded which should be removed and which should be kept. He provided this in an Excel file, stored in '../databases/Transports allowed to remain_Martyn_Bennett'.
Here I will look into this a bit more and remove unnecessary transport reactions from the working model.
```
import cameo
import pandas as pd
import cobra.io
import escher
model = cobra.io.read_sbml_model('../model/p-thermo.xml')
#generate a list of transport reactions
transport =[]
for rct in model.reactions:
if rct.id[-1:] in 't':
transport.append(rct)
else:
continue
len(transport)
```
So the model currently contains 152 transport reactions. I will try to remove the reactions Martyn recommended removing and, based on their effect on the biomass prediction, decide whether they should indeed be removed or not.
Again, we have the problem that the file Martyn provided uses reaction IDs that are numerical and not BiGG-compliant, which makes the script a bit more complex. The transport reactions don't have KEGG IDs, so we cannot map the IDs that way. Instead, we will use the information about which metabolite is being transported to find the new metabolite ID and hence the new transport reaction ID.
```
matteo = cobra.io.read_sbml_model('../databases/g-thermo-Matteo.xml')
#first convert the list of transport reactions to keep from martyns file to the model IDs.
transport_keep = ['M_14_e_out','M_29_e_out','M_1754_e_out','M_7_e_out','M_11_e_out','M_71_e_out','M_280_e_out','M_Biomass_e_out','M_154_e_out','M_320_e_out','M_222_e_out','M_31_e_out','M_214_e_out','M_38_e_out','M_204_e_out','M_79_e_out','M_229_e_out','M_21_e_out','M_3200_e_out','M_163_e_out','M_1_e_out']
len(transport_keep)
#dataframe of all metabolites and kegg IDs in working model
#make lists for all metabolite names from the working model
model_met_ID = []
model_met_name = []
model_met_kegg = []
for met in model.metabolites:
model_met_ID.append(met.id)
model_met_name.append(met.name)
try:
model_met_kegg.append(met.notes['KEGG'])
except KeyError:
model_met_kegg.append('--')
#make into a dataframe
model_met_df = pd.DataFrame({'Model ID' : model_met_ID, 'Model name' : model_met_name, 'Model Kegg':model_met_kegg})
model_met_df[0:5]
#make list of metabolites that should be transported
transported_mets = []
for rct in transport_keep:
met = rct[2:-4]
try:
met_kegg = matteo.metabolites.get_by_id(met).notes['KEGG']
met_id_model = model_met_df.loc[model_met_df['Model Kegg'] == met_kegg,'Model ID'].values[0]
transported_mets.append(met_id_model[:-2])
except KeyError:
print (met)
```
So we've been able to map all metabolites to the new ID system in the working model. Now I will gather these into a 'should be' transported list.
```
transported_mets_rct =[]
for met in transported_mets:
rct = met.upper() + 't'
transported_mets_rct.append(rct)
len(transported_mets_rct)
#now test what happens when we remove all transports except these martyn identified:
with model:
for rct in transport:
with model:
if rct.id in transported_mets_rct:
continue
else:
model.remove_reactions(rct)
biomass = model.optimize().objective_value
if biomass > 0.75:
print ('removing', rct, 'gives biomass', biomass)
elif biomass <0.72:
print ('removing', rct, 'gives biomass', biomass)
```
The above analysis shows that removing each of these reactions individually already decreases biomass formation. For some of them you would not expect any effect, so this is something to look into further later.
I will also check what happens when we remove the reactions cumulatively, i.e. removing them one after another within a single model context rather than testing each removal in isolation. (Here I will not include the reactions that individually kill biomass already.)
```
additional_trans = ['CLt', 'ASN__Lt', 'PYDX5Pt', 'QH2t', 'GTHRDt','BIOMASSt','THMTPt']
more_transport = transported_mets_rct
for rct in additional_trans:
more_transport.append(rct)
len(more_transport)
with model:
for rct in transport:
if rct.id in more_transport:
continue
else:
model.remove_reactions(rct)
biomass = model.optimize().objective_value
if biomass > 0.75 or biomass <0.70:
print ('removing', rct, 'gives biomass', biomass)
else:
continue
more_transport.append('NADPt')
```
From each pass, one can see which metabolite is the first to kill biomass formation. That metabolite's transport can then be added to the list of transports to keep, and the pass re-run to find the total list of transports to maintain.
The only other transport that totally kills biomass is NADPt. This is odd, as NADP would normally not be supplied to a cell; keep this in mind for further analysis.
With the total list of transports to maintain, we can remove all the others from the actual model.
```
additional_trans = ['CLt', 'ASN__Lt', 'PYDX5Pt', 'QH2t', 'GTHRDt','BIOMASSt','THMTPt', 'NADPt']
tot_transport = transported_mets_rct
for rct in additional_trans:
tot_transport.append(rct)
len(tot_transport)
for rct in transport:
if rct.id in tot_transport:
continue
else:
model.remove_reactions(rct)
model.optimize().objective_value
#save&commit
cobra.io.write_sbml_model(model,'../model/p-thermo.xml')
# what about other sugars?? e.g. xylose etc. See figure of which sugars geobacillus grows on!
```
| 0.1443 | 0.86681 |
### Master Telefónica Big Data & Analytics
# **Evaluation Test for Topic 4:**
## **Topic Modelling.**
Date: 2016/04/10
To take this test you need to have the virtual machine updated with the most recent version of MLlib.
To update it, follow the steps below:
### Steps to update MLlib:
1. Enter the VM as root:
`vagrant ssh`
`sudo bash`
Go to `/usr/local/bin`
2. Download the latest version of Spark from inside the VM with
`wget http://www-eu.apache.org/dist/spark/spark-1.6.1/spark-1.6.1-bin-hadoop2.6.tgz`
3. Unpack it:
`tar xvf spark-1.6.1-bin-hadoop2.6.tgz` (and delete the tgz)
4. The following is a patch, but it is enough to make things work:
Save a copy of spark-1.3: `mv spark-1.3.1-bin-hadoop2.6/ spark-1.3.1-bin-hadoop2.6_old`
Create a link to spark-1.6: `ln -s spark-1.6.1-bin-hadoop2.6/ spark-1.3.1-bin-hadoop2.6`
## Libraries
You can use this space to import all the libraries you need for the exam.
## 0. Acquiring a corpus.
Download the contents of the `reuters` corpus from `nltk`.
import nltk
nltk.download()
Select the `reuters` identifier.
```
#nltk.download()
mycorpus = nltk.corpus.reuters
```
To avoid memory-overload or processing-time problems, you can reduce the size of the corpus by modifying the value of the variable n_docs below.
```
n_docs = 500000
filenames = mycorpus.fileids()
fn_train = [f for f in filenames if f[0:5]=='train']
corpus_text = [mycorpus.raw(f) for f in fn_train]
# Reduced dataset:
n_docs = min(n_docs, len(corpus_text))
corpus_text = [corpus_text[n] for n in range(n_docs)]
print 'Loaded {0} files'.format(len(corpus_text))
```
Next we will load the data into an RDD.
```
corpusRDD = sc.parallelize(corpus_text, 4)
print "\nRDD created with {0} elements".format(corpusRDD.count())
```
## 1. Exercises
#### **Exercise 1**: (0.6 pts) Data preprocessing.
Prepare the data for applying a topic modelling algorithm in `pyspark`. To do so, apply the following steps:
1. *Tokenization*: convert each text to utf-8 and transform the string into a list of tokens.
2. Homogenization: convert all words to lowercase and remove all non-alphanumeric tokens.
3. Cleaning: remove all stopwords, using the stopword list available in NLTK for the English language.
Save the result in the variable `corpus_tokensRDD`.
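The following is a minimal sketch of one possible solution, not the official answer; it assumes `corpusRDD` from the cell above and that the NLTK `punkt` tokenizer and English stopword list have already been downloaded.
```
import nltk
from nltk.corpus import stopwords

stopwords_en = set(stopwords.words('english'))

def clean_tokens(text):
    # tokenization + homogenization + cleaning
    tokens = nltk.word_tokenize(text.lower())
    return [t for t in tokens if t.isalnum() and t not in stopwords_en]

corpus_tokensRDD = corpusRDD.map(clean_tokens)
```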
#### **Exercise 2**: (0.6 pts) Stemming
Apply a *stemming* procedure to the corpus, using NLTK's `SnowballStemmer`. Save the result in `corpus_stemRDD`.
#### **Exercise 3**: (0.6 pts) Vectorization
At this point each document of the corpus is a list of tokens.
Compute a new RDD containing, for each document, a list of tuples. The key of each tuple will be a token and its value the number of times that token occurs in the document.
Print a sample of 20 tuples from one of the documents of the corpus.
#### **Exercise 4**: (0.8 pts) Computing the token dictionary
Build, from `corpus_wcRDD`, a new dictionary with all the tokens of the corpus. The result will be a Python dictionary named `wcDict`, whose keys are the tokens and whose values are the number of occurrences of each token in the whole corpus:
`wcDict = {token1: value1, token2: value2, ...}`
Print the number of occurrences of the token `interpret`.
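A possible sketch (again, not the official solution), assuming `corpus_wcRDD` holds one list of `(token, count)` tuples per document as described in Exercise 3:
```
# flatten per-document counts and sum them per token across the whole corpus
wcDict = dict(
    corpus_wcRDD.flatMap(lambda doc: doc)
                .reduceByKey(lambda a, b: a + b)
                .collect()
)
print wcDict['interpret']
```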
#### **Exercise 5**: (0.6 pts) Number of tokens.
Determine the total number of tokens in the dictionary. Print the result.
#### **Exercise 6**: (0.8 pts) Overly frequent terms:
Determine the 5 most frequent tokens of the corpus. Print the result.
#### **Exercise 7**: (0.8 pts) Number of documents containing the most frequent token.
Determine in what percentage of documents the most frequent token appears.
#### **Exercise 8**: (1 pt) Term filtering.
Remove the two most frequent terms from the corpus. Save the result in a new RDD named corpus_wcRDD2, with the same structure as corpus_wcRDD (that is, each document as a list of tuples).
#### **Exercise 9**: (0.8 pts) Token list and inverse dictionary.
Determine the list of tokens of the whole corpus, and build an inverse dictionary, `invD`, whose keys are the tokens and whose values are consecutive integers from 0 up to the total number of tokens, i.e.
invD = {token0: 0, token1: 1, token2: 2, ...}
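One way this might be built (a sketch; the variable name `token_list` is illustrative, and `n_tokens` is defined here because the code provided in Exercise 10 uses it):
```
token_list = sorted(wcDict.keys())
invD = {token: n for n, token in enumerate(token_list)}
n_tokens = len(token_list)
```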
#### **Exercise 10**: (0.6 pts) LDA algorithm.
To apply the LDA algorithm, the `(token, value)` tuples of `wcRDD` must be replaced by tuples of the form `(token_id, value)`, substituting each token by an integer identifier.
The following code takes care of this process:
```
# Compute RDD replacing tokens by token_ids
corpus_sparseRDD = corpus_wcRDD2.map(lambda x: [(invD[t[0]], t[1]) for t in x])
# Convert each list of tuples into a Vectors.sparse object.
corpus_sparseRDD = corpus_sparseRDD.map(lambda x: Vectors.sparse(n_tokens, x))
corpus4lda = corpus_sparseRDD.zipWithIndex().map(lambda x: [x[1], x[0]]).cache()
```
Apply the LDA algorithm with 4 topics to the corpus obtained in `corpus4lda`, using `topicConcentration = 2.0` and `docConcentration = 3.0`. (Note that these input parameters must be of type float.)
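A sketch of how the training call might look with MLlib's LDA (the parameter values come from the statement above; the variable name `ldaModel` is illustrative):
```
from pyspark.mllib.clustering import LDA

ldaModel = LDA.train(corpus4lda, k=4,
                     docConcentration=3.0, topicConcentration=2.0)
# topicsMatrix() returns an (n_tokens x 4) matrix of per-topic token weights
topics = ldaModel.topicsMatrix()
```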
#### **Exercise 11**: (0.8 pts) Main tokens.
Print the two highest-weight tokens of each topic. (You must print the token text, not its index.)
#### **Exercise 12**: (0.8 pts) Weights of a token.
Print the weight of the token `bank` in each topic.
#### **Test 13**: (0.6 pts) Indicate which of the following statements are guaranteed to be true:
1. In LSI, each document is assigned to a single topic.
2. According to the LDA model, all tokens of a document have been generated by the same topic.
3. LSI decomposes the input data matrix into the product of 3 square matrices.
4. If the rank of the input matrix of an LSI model equals the number of topics, the SVD decomposition of the LSI model is exact (it is not an approximation).
#### **Test 14**: (0.6 pts) Indicate which of the following statements are guaranteed to be true:
1. In an LDA model, the Dirichlet distribution is used to generate token probability distributions.
2. If a word appears in few documents of the corpus, its IDF is higher.
3. The result of lemmatizing a word is a word.
4. The result of stemming a word is a word.
| 0.328853 | 0.864196 |
```
import cvxpy as cp
import matplotlib.pyplot as plt
import numpy as np
import torch
from resalloc.fungible import AllocationProblem
from resalloc.fungible import utilities
from latexify import latexify
latexify()
device = 'cpu'
torch.manual_seed(0)
np.random.seed(0)
USE_DOUBLES = False
SOLVE_WITH_MOSEK = True
def _matching(col, prev_cols):
return torch.stack([col == pcol for pcol in prev_cols]).any(axis=0)
def make_problem(n_jobs, n_resources, device):
resource_limits = torch.zeros(n_resources, device=device)
scale = n_jobs
for i in range(n_resources):
resource_limits[i] = torch.tensor(np.random.uniform(low=0.1, high=1) * scale, device=device).float()
scale = scale / 1.5
throughput_matrix = torch.zeros((n_jobs, n_resources), device=device)
mids = np.linspace(0.3, 1., n_resources)
prev_cols = [torch.zeros(n_jobs, device=device)]
for i in range(n_resources):
col = torch.tensor(np.random.uniform(low=0.1, high=mids[i], size=n_jobs), device=device).float()
match_at = _matching(col, prev_cols)
while match_at.any():
subcol = torch.tensor(np.random.uniform(low=0.1, high=mids[i], size=match_at.sum()), device=device).float()
col[match_at] = subcol
match_at = _matching(col, prev_cols)
throughput_matrix[:, i] = col
prev_cols.append(col)
utility_fn = utilities.Log()
alloc_problem = AllocationProblem(
throughput_matrix,
resource_limits=resource_limits,
utility_function=utility_fn
)
return alloc_problem
def make_cp_problem(prob):
X = cp.Variable(prob.A.shape, nonneg=True)
A_param = prob.A.cpu().numpy()
R_param = prob.resource_limits.cpu().numpy()[1:]
throughput = cp.sum(cp.multiply(A_param, X), axis=1)
utility = cp.sum(prob.utility_fn.cvxpy_utility(throughput))
resource_used = cp.sum(X[:, 1:], axis=0)
problem = cp.Problem(
cp.Maximize(utility),
[
cp.sum(X, axis=1) <= 1,
resource_used <= R_param
],
)
return problem
alloc_problem = make_problem(int(1e6), 4, 'cpu')
print(alloc_problem.resource_limits)
cvxpy_problem = make_cp_problem(alloc_problem)
def solve_w_mosek(cvxpy_problem, alloc_problem):
cvxpy_problem.solve(cp.MOSEK,
mosek_params={
'MSK_DPAR_INTPNT_CO_TOL_REL_GAP': 1e-3,
'MSK_DPAR_INTPNT_CO_TOL_DFEAS': 1e-3,
'MSK_DPAR_INTPNT_CO_TOL_MU_RED': 1e-3,
'MSK_DPAR_INTPNT_CO_TOL_PFEAS': 1e-3
})
print(cvxpy_problem._solve_time)
util = alloc_problem.utility(alloc_problem.make_feasible(torch.tensor(cvxpy_problem.variables()[0].value, device=alloc_problem.A.device))) / alloc_problem.n_jobs
print(util)
return cvxpy_problem._solve_time, util
def solve_w_ours(alloc_problem, verbose=False):
_, stats = alloc_problem.solve(max_iter=25, verbose=verbose)
print(stats.solve_time)
util = alloc_problem.utility(alloc_problem.make_feasible(alloc_problem.X)) / alloc_problem.n_jobs
print(alloc_problem.utility(alloc_problem.make_feasible(alloc_problem.X)))
return stats.solve_time, util
from tqdm.auto import tqdm
def benchmark(jobs_and_resources, n_trials, device, cvxpy=True, verbose=False):
torch.manual_seed(0)
np.random.seed(0)
times_ours = []
times_mosek = []
utils_ours = []
utils_mosek = []
for n_jobs, n_resources in tqdm(jobs_and_resources):
t_ours = []
u_ours = []
t_mosek = []
u_mosek = []
for trial_num in tqdm(range(n_trials)):
alloc_problem = make_problem(n_jobs, n_resources, device)
cvxpy_problem = make_cp_problem(alloc_problem)
t, u = solve_w_ours(alloc_problem, verbose)
t_ours.append(t)
u_ours.append(u)
if cvxpy:
print('mosek ...')
t_m, u_m = solve_w_mosek(cvxpy_problem, alloc_problem)
t_mosek.append(t_m)
u_mosek.append(u_m)
times_ours.append(np.array(t_ours))
times_mosek.append(np.array(t_mosek))
utils_ours.append(np.array(u_ours))
utils_mosek.append(np.array(u_mosek))
return map(np.stack, [times_ours, times_mosek, utils_ours, utils_mosek])
jobs = list(map(int, [1e2, 1e3, 1e4, 1e5, 1e6]))
matrix = [(j, 4) for j in jobs]
jobs_t_ours_cuda, _, jobs_u_ours_cuda, _ = benchmark(matrix, 5, 'cuda', cvxpy=False)
np.save('jobs_t_ours_cuda', jobs_t_ours_cuda)
np.save('jobs_u_ours_cuda', jobs_u_ours_cuda)
jobs_t_ours_cpu, jobs_t_mosek, jobs_u_ours_cpu, jobs_u_mosek = benchmark(matrix, 5, 'cpu', cvxpy=True)
np.save('jobs_t_ours_cpu', jobs_t_ours_cpu)
np.save('jobs_t_mosek', jobs_t_mosek)
np.save('jobs_u_ours_cpu', jobs_u_ours_cpu)
np.save('jobs_u_mosek', jobs_u_mosek)
resources = [2, 4, 8, 16]
matrix = [(int(1e6), r) for r in resources]
r_t_ours_cuda, _, r_u_ours_cuda, _ = benchmark(matrix, 5, 'cuda', cvxpy=False)
np.save('r_t_ours_cuda', r_t_ours_cuda)
np.save('r_u_ours_cuda', r_u_ours_cuda)
r_t_ours_cpu, r_t_mosek, r_u_ours_cpu, r_u_mosek = benchmark(matrix, 5, 'cpu', cvxpy=True, verbose=True)
np.save('r_t_ours_cpu', r_t_ours_cpu)
np.save('r_t_mosek', r_t_mosek)
np.save('r_u_ours_cpu', r_u_ours_cpu)
np.save('r_u_mosek', r_u_mosek)
import latexify
latexify.latexify()
import matplotlib.pyplot as plt
jobs_t_ours_cuda = np.load('jobs_t_ours_cuda.npy')
jobs_u_ours_cuda = np.load('jobs_u_ours_cuda.npy')
jobs_t_ours_cpu = np.load('jobs_t_ours_cpu.npy')
jobs_t_mosek = np.load('jobs_t_mosek.npy')
jobs_u_ours_cpu = np.load('jobs_u_ours_cpu.npy')
jobs_u_mosek = np.load('jobs_u_mosek.npy')
fig, axs = plt.subplots(2, 1, sharex=True)
fig.set_size_inches((5.4, 7.2/1.1))
axs[0].plot(jobs, jobs_t_ours_cpu.mean(axis=1), label='price discovery (cpu)')
axs[0].plot(jobs, jobs_t_ours_cuda.mean(axis=1), label='price discovery (gpu)')
axs[0].plot(jobs, jobs_t_mosek.mean(axis=1), label='mosek')
axs[0].set_xscale('log')
axs[0].set_yscale('log')
axs[0].set_ylabel('seconds')
axs[0].legend()
axs[1].bar(jobs,
jobs_u_ours_cpu.mean(axis=1) - jobs_u_mosek.mean(axis=1),
           width=np.array(jobs) * 10, color='gray')  # one width per bar
#axs[1].plot(jobs, jobs_u_ours_cpu.mean(axis=1) - jobs_u_mosek.mean(axis=1))
axs[1].axhline(0, linestyle='--', color='k')
axs[1].set_ylim(bottom=-0.001)
axs[1].set_xscale('log')
axs[1].set_ylabel('$(U(X) - U(X_{\\textnormal{msk}}))/n$')
axs[1].set_xlabel('number of jobs')
axs[1].ticklabel_format(style='sci', axis='y', scilimits=(0,0))
plt.tight_layout()
plt.savefig('v_mosek_jobs.pdf')
import latexify
latexify.latexify()
import matplotlib.pyplot as plt
jobs_t_ours_cpu = np.load('jobs_t_ours_cpu.npy')
jobs_t_mosek = np.load('jobs_t_mosek.npy')
jobs_u_ours_cpu = np.load('jobs_u_ours_cpu.npy')
jobs_u_mosek = np.load('jobs_u_mosek.npy')
fig, axs = plt.subplots(2, 1)
fig.set_size_inches((5.4, 7.2/1.1))
axs[0].plot(jobs, jobs_t_ours_cpu.mean(axis=1), label='price discovery (cpu)')
axs[0].plot(jobs, jobs_t_ours_cuda.mean(axis=1), label='price discovery (gpu)')
axs[0].plot(jobs, jobs_t_mosek.mean(axis=1), label='mosek')
axs[0].set_xscale('log')
axs[0].set_yscale('log')
axs[0].set_ylabel('seconds')
axs[0].legend()
#plt.show()
plt.tight_layout()
plt.savefig('v_mosek_jobs.pdf')
import latexify
latexify.latexify()
import matplotlib.pyplot as plt
fig, axs = plt.subplots(2, 1)
fig.set_size_inches((5.4, 7.2/1.1))
axs[0].plot(resources, r_t_ours_cuda.mean(axis=1), label='price discovery (gpu)')
axs[0].plot(resources, r_t_ours_cpu.mean(axis=1), label='price discovery (cpu)')
axs[0].plot(resources, r_t_mosek.mean(axis=1), label='mosek')
axs[0].set_xscale('log', basex=2)
axs[0].set_yscale('log')
axs[0].set_ylabel('seconds')
axs[0].legend(loc='upper left')
axs[0].set_xlabel('number of resources')
#plt.show()
jobs = list(map(int, [1e2, 1e3, 1e4, 1e5, 1e6]))
axs[1].plot(jobs, jobs_t_ours_cuda.mean(axis=1), label='price discovery (gpu)')
axs[1].plot(jobs, jobs_t_ours_cpu.mean(axis=1), label='price discovery (cpu)')
axs[1].plot(jobs, jobs_t_mosek.mean(axis=1), label='mosek')
axs[1].set_xscale('log')
axs[1].set_yscale('log')
axs[1].set_ylabel('seconds')
axs[1].legend()
axs[1].set_xlabel('number of jobs')
plt.tight_layout()
plt.savefig('v_mosek.pdf')
```
| 0.399577 | 0.363379 |
```
%load_ext autoreload
%autoreload 2
```
# Final Evaluation
```
import os
import sys
import json
import pickle
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
import matplotlib.patches as patches
```
## Load data
```
## here I'm loading predictions from the pickled predictions file
preds_path = '../data/predictions/predictions-final.pickle'
with open(preds_path, 'rb') as pickle_file:
preds = pickle.load(pickle_file)
## load ground truths (val_gt.json)
with open('../data/predictions/val_gt.json') as json_file:
val_gt = json.load(json_file)
## extract image ids
img_ids = {}
for vgt in val_gt['images']:
img_ids[vgt['im_name']] = vgt['id']
```
### fix the dictionary
```python
cutoff = 1000
anno_dict2 = {}
for img in list(anno_dict.keys()):
bboxes = anno_dict[img]
bboxes2 = []
for bbox in bboxes:
if np.prod(bbox[2:]) > cutoff:
bboxes2.append(bbox)
if len(bboxes2) > 0:
anno_dict2[img] = bboxes2
## get_annons
bboxes = []
for bb in anno_train[0, i][0][0][2]:
if bb[0] > 0: # class_label = 1 means it is a person
bboxes.append(bb[1:5]) # bbox format = [x, y, w, h]
## keep only images with persons
if bboxes != []:
d[img_name] = bboxes
```
```
val_gt['annotations'][0]
## intersect with our predictions (441 images)
val_imgs = val_gt['images']
val_imgs = [val_img['im_name'] for val_img in val_imgs]
pred_imgs = list(preds.keys())
imgs = list(set(val_imgs) & set(pred_imgs))
len(imgs)
## subset val_gt and create test_gt
cutoff = 100
test_gt = {}
test_gt['categories'] = val_gt['categories']
test_gt['images'] = []
test_gt['annotations'] = []
for img in val_gt['images']:
if img['im_name'] in imgs:
test_gt['images'].append(img)
test_ids = []
for img in imgs:
if img in list(img_ids.keys()):
test_ids.append(img_ids[img])
for anno in val_gt['annotations']:
if anno['image_id'] in test_ids:
## add area to anno (needed for default pycocotools eval)
bbox = anno['bbox']
area = int(bbox[2]) * int(bbox[3])
anno['area'] = area
## cutoff small boxes
if area > cutoff and anno['category_id'] > 0:
test_gt['annotations'].append(anno)
## check
test_imgs = test_gt['images']
test_imgs = [test_img['im_name'] for test_img in test_imgs]
test_imgs[:5], len(test_imgs)
test_gt['images'][0]
## GT bboxes
img_name = 'munster_000000_000019_leftImg8bit.png'
idx = test_imgs.index(img_name)
test1_img = test_gt['images'][idx]
img_name = test1_img['im_name']
img_id = test1_img['id']
bboxes = []
for anno in test_gt['annotations']:
if anno['image_id'] == img_id and anno['category_id'] >= 1:
bboxes.append(anno['bbox'])
# DT bboxes
bboxes_dt = np.round(preds[img_name]['boxes'])
bboxes_dt = bboxes_dt.tolist()
img = Image.open('../data/predictions/' + img_name)
plt.rcParams['figure.figsize'] = [12, 8]
fig, ax = plt.subplots()
ax.imshow(img);
for bbox in bboxes_dt:
x1, y1, x2, y2 = bbox
w, h = x2 - x1, y2 - y1
rect = patches.Rectangle(
(x1, y1), w, h,
linewidth=1, edgecolor='r', facecolor='none')
ax.add_patch(rect)
# bbox = [x, y, w, h]
for bbox in bboxes:
rect = patches.Rectangle(
(bbox[0], bbox[1]), bbox[2], bbox[3],
linewidth=1, edgecolor='g', facecolor='none')
ax.add_patch(rect)
plt.title('DTs in red, GTs in green')
plt.show()
## we can save the test_gt as json
with open('../data/predictions/test_gt.json', 'w', encoding='utf-8') as json_file:
json.dump(test_gt, json_file, ensure_ascii=False, indent=4)
## see loadRes() and loadNumpyAnnotations() from COCO Class
## we need to provide [imageID, x1, y1, w, h, score, class] for each bbox:
test_dt = []
for img in imgs:
bboxes = preds[img]['boxes']
scores = preds[img]['scores']
for i, bbox in enumerate(bboxes):
x1, y1, x2, y2 = int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3])
w, h = x2 - x1, y2 - y1
data = [img_ids[img], x1, y1, w, h, scores[i], 1]
test_dt.append(data)
test_dt = np.array(test_dt)
## check
np.round(test_dt[0])
## check
# should be a lot more detections than gt bboxes
len(test_dt), len(test_gt['annotations'])
print('About %.2f times more detected bboxes than there are gt bboxes'
% (len(test_dt) / len(test_gt['annotations'])))
```
# DEBUG
What I did:
* Converted CityPersons scripts in `eval_script` from Python2 to Python3 using `2to3 . -w .` in `./eval_script`
* Compared the code against the original; one open question: why is `gt['vis_ratio']` divided by 100?
# Evaluate
```
## Citypersons average miss rate measures
module_path = os.path.abspath(os.path.join('../src/eval_script/'))
if module_path not in sys.path:
sys.path.append(module_path)
from coco import COCO
from eval_MR_multisetup import COCOeval
annType = 'bbox'
annFile = '../data/predictions/test_gt.json'
resFile = test_dt
res_file_path = '../data/predictions/results.txt'
res_file = open(res_file_path, 'w')
for id_setup in range(0, 4):
cocoGt = COCO(annFile)
cocoDt = cocoGt.loadRes(resFile)
imgIds = sorted(cocoGt.getImgIds())
cocoEval = COCOeval(cocoGt, cocoDt, annType)
cocoEval.params.imgIds = imgIds
cocoEval.evaluate(id_setup)
cocoEval.accumulate()
cocoEval.summarize(id_setup, res_file)
res_file.close()
## making the printout nicer..
print('Results: ')
res_file = open(res_file_path,'r')
lines = res_file.readlines()
res_file.close()
lines = [line.replace('10000000000.00', 'inf') for line in lines]
lines = [line.replace('10000000000', 'inf') for line in lines]
lines = [line.strip() for line in lines]
for line in lines:
new = ''
for elt in line.split(' '):
if elt:
new += elt + ' '
print(new)
## rewrite as a row to add it to the benchmark table
results = [line.split('=')[-1] for line in lines]
results.insert(0, ' ✗ ')
results.insert(0, ' Our FasterRCNN ')
results = [('**' + result.strip() + '**') for result in results]
results = ' | '.join(results)
results = ' | ' + results + ' | '
results
```
### Benchmark ###
| Method | External training data | MR (Reasonable) | MR (Reasonable_small) | MR (Reasonable_occ=heavy) | MR (All) |
|:----------------------:|:----------------------:|:---------------:|:---------------------:|:-------------------------:|:--------:|
| [APD-pretrain](https://arxiv.org/abs/1910.09188) | ✓ | 7.31% | 10.81% | 28.07% | 32.71% |
| [Pedestron](https://arxiv.org/abs/2003.08799) | ✓ | 7.69% | 9.16% | 27.08% | 28.33% |
| [APD](https://arxiv.org/abs/1910.09188) | ✗ | 8.27% | 11.03% | 35.45% | 35.65% |
| YT-PedDet | ✗ | 8.41% | 10.60% | 37.88% | 37.22% |
| STNet | ✗ | 8.92% | 11.13% | 34.31% | 29.54% |
| [MGAN](https://arxiv.org/abs/1910.06160) | ✗ | 9.29% | 11.38% | 40.97% | 38.86% |
| DVRNet | ✗ | 11.17% | 15.62% | 42.52% | 40.99% |
| [HBA-RCNN](https://arxiv.org/abs/1911.11985) | ✗ | 11.26% | 15.68% | 39.54% | 38.77% |
| [OR-CNN](https://arxiv.org/abs/1807.08407) | ✗ | 11.32% | 14.19% | 51.43% | 40.19% |
| [AdaptiveNMS](http://openaccess.thecvf.com/content_CVPR_2019/papers/Liu_Adaptive_NMS_Refining_Pedestrian_Detection_in_a_Crowd_CVPR_2019_paper.pdf) | ✗ | 11.40% | 13.64% | 46.99% | 38.89% |
| [Repulsion Loss](http://arxiv.org/abs/1711.07752) | ✗ | 11.48% | 15.67% | 52.59% | 39.17% |
| [Cascade MS-CNN](https://arxiv.org/abs/1906.09756) | ✗ | 11.62% | 13.64% | 47.14% | 37.63% |
| [Adapted FasterRCNN](http://202.119.95.70/cache/12/03/openaccess.thecvf.com/f36bf52f1783160552c75ae3cd300e84/Zhang_CityPersons_A_Diverse_CVPR_2017_paper.pdf) | ✗ | 12.97% | 37.24% | 50.47% | 43.86% |
| [MS-CNN](https://arxiv.org/abs/1607.07155) | ✗ | 13.32% | 15.86% | 51.88% | 39.94% |
| **Our FasterRCNN** | **✗** | **24.73%** | **47.35%** | **64.74%** | **52.72%** |
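For context, the MR numbers above are log-average miss rates (lower is better): the geometric mean of the miss rate sampled at nine FPPI points spaced evenly in log-space between 10^-2 and 10^0. The CityPersons eval script computes this internally; the function below is only an illustrative sketch of that reduction, assuming you already have parallel `fppi` and `miss_rate` arrays for one evaluation setup.
```
import numpy as np

def log_average_miss_rate(fppi, miss_rate, n_points=9):
    """Geometric mean of the miss rate sampled at n_points FPPI values
    spaced evenly in log-space on [1e-2, 1e0] (Caltech/CityPersons-style MR)."""
    fppi = np.asarray(fppi)
    miss_rate = np.asarray(miss_rate)
    samples = []
    for ref in np.logspace(-2.0, 0.0, n_points):
        idx = np.where(fppi <= ref)[0]
        # take the miss rate at the largest FPPI not exceeding the reference point
        samples.append(miss_rate[idx[-1]] if idx.size else miss_rate[0])
    samples = np.clip(samples, 1e-10, None)  # guard against log(0)
    return float(np.exp(np.mean(np.log(samples))))
```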
```
## Test using default pycocotools measures
module_path = os.path.abspath(os.path.join('../src/pycocotools/'))
if module_path not in sys.path:
sys.path.append(module_path)
from coco import COCO
from cocoeval import COCOeval
annType = 'bbox'
annFile = '../data/predictions/test_gt.json'
resFile = test_dt
cocoGt=COCO(annFile)
cocoDt = cocoGt.loadRes(resFile)
imgIds = sorted(cocoGt.getImgIds())
cocoEval = COCOeval(cocoGt, cocoDt, annType)
cocoEval.params.imgIds = imgIds
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
```
## TODOs:
1. Why did we get worse results here compared to the results on the cloud, using `evaluate(model, data_loader_test, device=device)` when testing on the same set?
Update:
```
Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.461
Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.754
Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.492
Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.087
Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.392
Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.616
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.095
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.417
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.550
Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.340
Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.506
Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.663
```
# Hello World
Python is a general-purpose, versatile and popular programming language. It's great as a first language because it is concise and easy to read, and it is very valuable because it can be used for everything from web development to software development and scientific applications. In fact, Python is the world's [fastest growing](https://www.geeksforgeeks.org/python-fastest-growing-programming-language/) and most popular programming language used by software engineers, analysts, data scientists, and machine learning engineers alike!
Below we will write our first line of code! We will make Python "come alive" by sending a greeting to the world on the screen. To do this, we will use a command called `print`. This command tells Python to print the input text to the screen. Let's begin!
```
# command Python to print the string "Hello World"
print("Hello World")
```
Amazing! You just wrote your first line of Python code. Pretty easy right?
Python can also be used as a calculator. Let's use Python to do some simple addition and subtraction below.
```
# command Python to add 3 + 19
3 + 19
# command Python to subtract 91 - 28
91 - 28
```
We can also do multiplication and division with Python. The symbol for multiplication is `*` and the symbol for division is `/`.
Let's use Python to do some multiplication and division below!
```
# command Python to multiply 6 x 13
6*13
# command Python to divide 98 / 4
98/4
```
Easy right?
Now let's say we wanted to add the result of 6 x 13 to the result of 98 / 4. Python follows the standard order of operations (PEMDAS), so we could do it this way:
```
(6*13) + 98/4
```
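If you're curious about that order of operations, try adding parentheses to force Python to do the addition before the division and compare the result with the one above:
```
# command Python to add 6 x 13 and 98 first, then divide the total by 4
(6*13 + 98) / 4
```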
But what if we wanted to save ourselves some time? We already typed those equations earlier, so instead of typing the same equations over and over, we can store the results in *variables*.
Variables are words or symbols that are used to represent some value. They're kind of like boxes that you can store things in. Variables in Python are used very similarly to variables that you have probably seen in math class. For example, if you assign x = 5, what is 9 + x?
Exactly - 14! Let's see some examples of how this works in code.
```
# assign the value of 6*13 to the variable result1
result1 = 6*13
# assign the value of 98/4 to the variable result2
result2 = 98/4
```
You might notice that after running the above cell, nothing was printed to the screen. This is because our code commanded Python only to assign the values to the variable names, not to print to the screen. This is a good reminder that Python is very literal, and will only do **exactly what we command it to do**.
If we want to see the values assigned to each variable, we can use the `print` command we learned above to print them out.
```
# print the value stored in the variable result1
print(result1)
# print the value stored in the variable result2
print(result2)
```
Cool! Now we've seen that the `print` function in Python can accept different inputs, such as a string (as in `"Hello World"`), or a variable name (as in `result1`). We see that when we use the variable name `result1`, it prints the *value* of that variable, not the variable name itself. We will talk more about how we differentiate strings from variable names in the next lesson.
Now, let's revisit our initial objective. We wanted to add the result of 6 x 13 to the result of 98 / 4, but this time let's use our variables that contain each of our values.
```
# command Python to add the result of 6 x 13 to the result of 98 / 4 using our variable names
result1 + result2
```
Fantastic! You've just solved your first set of problems using Python code! You may not realize it, but in just a few minutes, you've already learned how to:
* Use built-in Python functions (`print`)
* Use mathematical operators to perform calculations (`+ - * /`)
* Assign values to variables
* Use variables in mathematical equations
Now let's continue to practice these skills with your partners!
```
class Node:
def __init__(self, value):
self.value = value
self.next = None
self.prev = None
class circulardoublyLinkedlist:
def __init__(self):
self.head = None
self.tail = None
def __iter__(self):
node = self.head
while node:
yield node
node = node.next
if node == self.tail.next:
break
# creation of circular doubly linked list
def createCDLL(self, nodeValue):
if self.head is not None and self.tail is not None:
print("Linked list is not empty")
else:
newNode = Node(nodeValue)
newNode.next = newNode
newNode.prev = newNode
self.head = newNode
self.tail = newNode
return "The CDLL is created successfully"
# insertion of circular doubly linked list
def insertCDLL(self, value, location):
if self.head == None:
return "Linked list is empty"
newNode = Node(value)
if location == 0:
newNode.next = self.head
newNode.prev = self.tail
self.head.prev = newNode
self.tail.next = newNode
self.head = newNode
elif location == -1:
newNode.prev = self.tail
newNode.next = self.head
self.head.prev = newNode
self.tail.next = newNode
self.tail = newNode
else:
index = 0
tempNode = self.head
while index < location - 1:
index += 1
tempNode = tempNode.next
newNode.prev = tempNode
newNode.next = tempNode.next
tempNode.next.prev = newNode
tempNode.next = newNode
return "The node is successfully inserted"
# traverse the circular doubly linked list
def traverseCDLL(self):
if self.head is None:
return "Linked list is empty"
tempNode = self.head
while tempNode:
print(tempNode.value)
tempNode = tempNode.next
if tempNode == self.head:
break
# reverse traversal circular doubly linked list
def reverseCDLL(self):
if self.head is None:
return "Linked list is empty"
tempNode = self.tail
while tempNode:
print(tempNode.value)
tempNode = tempNode.prev
if tempNode == self.tail:
break
# search the node in circular doubly linked list
def searchCDLL(self, value):
        if self.head is None:
return "Linked list is empty"
tempNode = self.head
index = 0
while tempNode:
if tempNode.value == value:
print(f"index: {index} / value: {value}")
break
tempNode = tempNode.next
index += 1
if tempNode == self.head:
return "There is no that value"
# deletion the node in circular doubly linked list
def deleteCDLL(self, location):
if self.head is None:
return "Linked list is empty"
if location == 0:
if self.head == self.tail:
self.head.prev = None
self.head.next = None
self.head = None
self.tail = None
else:
self.head = self.head.next
self.head.prev = self.tail
self.tail.next = self.head
elif location == -1:
if self.head == self.tail:
self.head.prev = None
self.head.next = None
self.head = None
self.tail = None
else:
self.tail.prev.next = self.head
self.tail = self.tail.prev
self.head.prev = self.tail
else:
tempNode = self.head
index = 0
while index < location-1:
index += 1
tempNode = tempNode.next
tempNode.next.next.prev = tempNode
tempNode.next = tempNode.next.next
# delete the entire circular doubly linked list
def deleteentireCDLL(self):
if self.head is None:
return "Linked list is empty"
tempNode = self.head
while tempNode.next != self.head:
tempNode.prev = None
tempNode = tempNode.next
self.head = None
self.tail = None
print("THE CDLL has been successfully deleted")
circularDLL = circulardoublyLinkedlist()
print("---- 1. create CDLL ----")
circularDLL.createCDLL(5)
print([node.value for node in circularDLL])
print("---- 2. insert CDLL ----")
circularDLL.insertCDLL(0,0)
circularDLL.insertCDLL(1,-1)
circularDLL.insertCDLL(2,2)
print([node.value for node in circularDLL])
print("---- 3. traverse CDLL ----")
circularDLL.traverseCDLL()
print("---- 4. reverse CDLL ----")
circularDLL.reverseCDLL()
print([node.value for node in circularDLL])
print("---- 5. search CDLL ----")
circularDLL.searchCDLL(2)
print([node.value for node in circularDLL])
print("---- 6. delete CDLL ----")
circularDLL.deleteCDLL(-1)
circularDLL.deleteCDLL(0)
print("---- 7. delete entire CDLL")
circularDLL.deleteentireCDLL()
print([node.value for node in circularDLL])
```
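As a quick sanity check of the wrap-around links, a short snippet like the one below (using the class defined above) can be run after the demo; it simply asserts that the head and tail point at each other:
```
cdll = circulardoublyLinkedlist()
cdll.createCDLL(10)
cdll.insertCDLL(20, -1)
assert cdll.head.prev is cdll.tail  # head wraps back to tail
assert cdll.tail.next is cdll.head  # tail wraps forward to head
print([node.value for node in cdll])
```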
# 1. Introduction
These exercises are intended to stimulate discussion, and some might be
set as term projects. Alternatively, preliminary attempts can be made
now, and these attempts can be reviewed after the completion of the
book.
**1.1** Define in your own words: (a) intelligence, (b) artificial intelligence,
(c) agent, (d) rationality, (e) logical reasoning.
**1.2** Read Turing's original paper on AI @Turing:1950. In the paper, he
discusses several objections to his proposed enterprise and his test for
intelligence. Which objections still carry weight? Are his refutations
valid? Can you think of new objections arising from developments since
he wrote the paper? In the paper, he predicts that, by the year 2000, a
computer will have a 30% chance of passing a five-minute Turing Test
with an unskilled interrogator. What chance do you think a computer
would have today? In another 50 years?
**1.3** Every year the Loebner Prize is awarded to the program that comes
closest to passing a version of the Turing Test. Research and report on
the latest winner of the Loebner prize. What techniques does it use? How
does it advance the state of the art in AI?
**1.4** Are reflex actions (such as flinching from a hot stove) rational? Are
they intelligent?
**1.5** There are well-known classes of problems that are intractably difficult
for computers, and other classes that are provably undecidable. Does
this mean that AI is impossible?
**1.6** Suppose we extend Evans's *SYSTEM* program so that it can score 200 on a standard
IQ test. Would we then have a program more intelligent than a human?
Explain.
**1.7** The neural structure of the sea slug *Aplysia* has been
widely studied (first by Nobel Laureate Eric Kandel) because it has only
about 20,000 neurons, most of them large and easily manipulated.
Assuming that the cycle time for an *Aplysia* neuron is
roughly the same as for a human neuron, how does the computational
power, in terms of memory updates per second, compare with the high-end
computer described in (Figure [computer-brain-table](#/))?
**1.8** How could introspection - reporting on one's inner thoughts - be inaccurate?
Could I be wrong about what I'm thinking? Discuss.
**1.9** To what extent are the following computer systems instances of
artificial intelligence:
- Supermarket bar code scanners.
- Web search engines.
- Voice-activated telephone menus.
- Internet routing algorithms that respond dynamically to the state of
the network.
**1.10** To what extent are the following computer systems instances of
artificial intelligence:
- Supermarket bar code scanners.
- Voice-activated telephone menus.
- Spelling and grammar correction features in Microsoft Word.
- Internet routing algorithms that respond dynamically to the state of the network.
**1.11** Many of the computational models of cognitive activities that have been
proposed involve quite complex mathematical operations, such as
convolving an image with a Gaussian or finding a minimum of the entropy
function. Most humans (and certainly all animals) never learn this kind
of mathematics at all, almost no one learns it before college, and
almost no one can compute the convolution of a function with a Gaussian
in their head. What sense does it make to say that the "vision system"
is doing this kind of mathematics, whereas the actual person has no idea
how to do it?
**1.12** Some authors have claimed that perception and motor skills are the most
important part of intelligence, and that "higher level" capacities are
necessarily parasitic - simple add-ons to these underlying facilities.
Certainly, most of evolution and a large part of the brain have been
devoted to perception and motor skills, whereas AI has found tasks such
as game playing and logical inference to be easier, in many ways, than
perceiving and acting in the real world. Do you think that AI's
traditional focus on higher-level cognitive abilities is misplaced?
**1.13** Why would evolution tend to result in systems that act rationally? What
goals are such systems designed to achieve?
**1.14** Is AI a science, or is it engineering? Or neither or both? Explain.
**1.15** "Surely computers cannot be intelligent - they can do only what their
programmers tell them." Is the latter statement true, and does it imply
the former?
**1.16** "Surely animals cannot be intelligent - they can do only what their genes
tell them." Is the latter statement true, and does it imply the former?
**1.17** "Surely animals, humans, and computers cannot be intelligent - they can do
only what their constituent atoms are told to do by the laws of
physics." Is the latter statement true, and does it imply the former?
**1.18** Examine the AI literature to discover whether the following tasks can
currently be solved by computers:
- Playing a decent game of table tennis (Ping-Pong).
- Driving in the center of Cairo, Egypt.
- Driving in Victorville, California.
- Buying a week's worth of groceries at the market.
- Buying a week's worth of groceries on the Web.
- Playing a decent game of bridge at a competitive level.
- Discovering and proving new mathematical theorems.
- Writing an intentionally funny story.
- Giving competent legal advice in a specialized area of law.
- Translating spoken English into spoken Swedish in real time.
- Performing a complex surgical operation.
**1.19** For the currently infeasible tasks, try to find out what the
difficulties are and predict when, if ever, they will be overcome.
**1.20** Various subfields of AI have held contests by defining a standard task
and inviting researchers to do their best. Examples include the DARPA
Grand Challenge for robotic cars, the International Planning
Competition, the Robocup robotic soccer league, the TREC information
retrieval event, and contests in machine translation and speech
recognition. Investigate five of these contests and describe the
progress made over the years. To what degree have the contests advanced
the state of the art in AI? To what degree do they hurt the field by
drawing energy away from new ideas?
This notebook is part of the `nbsphinx` documentation: https://nbsphinx.readthedocs.io/.
# Pre-Executing Notebooks
Automatically executing notebooks during the Sphinx build process is an important feature of `nbsphinx`.
However, there are a few use cases where pre-executing a notebook and storing the outputs might be preferable.
Storing any output will, by default, stop ``nbsphinx`` from executing the notebook.
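As a reminder (and worth double-checking against the nbsphinx configuration reference), this default can also be overridden globally with the `nbsphinx_execute` option in `conf.py`:
```
# in conf.py
# 'auto' (default): execute only notebooks that have no stored outputs
# 'always': always execute; 'never': never execute
nbsphinx_execute = 'auto'
```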
## Long-Running Cells
If you are doing some very time-consuming computations, it might not be feasible to re-execute the notebook every time you build your Sphinx documentation.
So just do it once -- when you happen to have the time -- and then just keep the output.
```
import time
%time time.sleep(60 * 60)
6 * 7
```
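One convenient way to pre-execute such a notebook and store its outputs in place is `nbconvert`; run the cell below from a notebook (or drop the leading `!` in a terminal). The filename is just a placeholder:
```
!jupyter nbconvert --to notebook --execute --inplace my-long-running-notebook.ipynb
```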
## Rare Libraries
You might have created results with a library that's hard to install and therefore you have only managed to install it on one very old computer in the basement, so you probably cannot run this whenever you build your Sphinx docs.
```
from a_very_rare_library import calculate_the_answer
calculate_the_answer()
```
## Exceptions
If an exception is raised during the Sphinx build process, it is stopped (the build process, not the exception!).
If you want to show your audience what an exception looks like, you have two choices:
1. Allow errors -- either generally or on a per-notebook or per-cell basis -- see [Ignoring Errors](allow-errors.ipynb) ([per cell](allow-errors-per-cell.ipynb)).
1. Execute the notebook beforehand and save the results, like it's done in this example notebook:
```
1 / 0
```
## Client-specific Outputs
When `nbsphinx` executes notebooks,
it uses the `nbconvert` module to do so.
Certain Jupyter clients might produce output
that differs from what `nbconvert` would produce.
To preserve those original outputs,
the notebook has to be executed and saved
before running Sphinx.
For example,
the JupyterLab help system shows the help text as cell outputs,
while executing with `nbconvert` doesn't produce any output.
```
sorted?
```
## Interactive Input
If your code asks for user input,
it probably doesn't work when executed by Sphinx/`nbsphinx`.
You'll probably get an error like this:
StdinNotImplementedError: raw_input was called, but this frontend does not support input requests.
In this case, you can run the notebook interactively,
provide the desired inputs and then save the notebook including its cell outputs.
```
name = input('What... is your name?')
quest = input('What... is your quest?')
color = input('What... is your favorite color?')
```
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import pylab as pl
import seaborn as sn
churn_df = pd.read_csv('data/ChurnData.csv')
churn_df.head()
churn_df.info()
churn_df.columns
churn_df = churn_df[['tenure', 'age', 'address', 'income', 'ed', 'employ', 'equip', 'callcard', 'wireless', 'churn']]
churn_df['churn'] = churn_df['churn'].astype('int')
churn_df.head()
ax = churn_df[churn_df['churn'] == 1].plot(kind = 'scatter', x = 'income', y = 'age', color = 'red', title = 'Company_Left')
churn_df[churn_df['churn'] == 0].plot(kind = 'scatter', x = 'income', y = 'age', color = 'blue', title = 'Not_left_The_Company', ax = ax)
plt.show()
x = np.array(churn_df[['tenure', 'age', 'address', 'income', 'ed', 'employ', 'equip', 'callcard', 'wireless']])  # 'churn' is the target, so it must not be included in the features
x[0:5]
y = np.array(churn_df['churn'])
y[0:5]
from sklearn import preprocessing
x = preprocessing.StandardScaler().fit(x).transform(x)
x[0:5]
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, train_size = 0.8, random_state = 4)
x_train.shape
x_test.shape
y_train.shape
y_test.shape
from sklearn.linear_model import LogisticRegression
regr = LogisticRegression(solver = 'lbfgs', max_iter = 1000, C = 0.01)
regr.fit(x_train, y_train)
y_test[0:5]
yhat = regr.predict(x_test)
yhat[0:5]
a_1 = regr.score(x_train, y_train)
a_2 = regr.score(x_test, y_test)
a_3 = regr.score(x_test, yhat)  # note: scoring a model against its own predictions always gives 1.0
## jaccard_similarity_score was deprecated and renamed to jaccard_score in newer scikit-learn releases
from sklearn.metrics import classification_report, accuracy_score, jaccard_similarity_score
print(classification_report(y_test, yhat))
a_4 = accuracy_score(y_test, yhat)
a_5 = jaccard_similarity_score(y_test, yhat)
from sklearn import neighbors
knn = neighbors.KNeighborsClassifier(metric = 'manhattan', n_neighbors = 1)
knn.fit(x_train, y_train)
y_test
y_pred = knn.predict(x_test)
y_pred
b_1 = knn.score(x_train, y_train)
b_1
b_2 = knn.score(x_test, y_test)
b_2
b_3 = knn.score(x_test, y_pred)
b_3
b_4 = accuracy_score(y_test, y_pred)
b_4
b_5 = jaccard_similarity_score(y_test, y_pred)
b_5
ax = churn_df[churn_df['churn'] == 1][0:10].plot(kind = 'scatter', x = 'tenure', y = 'age', color = 'k', label = 'Left')
churn_df[churn_df['churn'] == 0][0:10].plot(kind = 'scatter', x = 'tenure', y = 'age', color = 'red', label = 'Retained', ax =ax)
from sklearn import svm
clf = svm.SVC(kernel = 'poly', gamma = 'auto')
clf.fit(x_train, y_train)
y_test
yhat_1 = clf.predict(x_test)
yhat_1
c_1 = clf.score(x_train, y_train)
c_1
c_2 = clf.score(x_test, y_test)
c_2
c_3 = clf.score(x_test, yhat_1)
c_3
c_4 = accuracy_score(y_test, yhat_1)
c_4
c_5 = jaccard_similarity_score(y_test, yhat_1)
c_5
from sklearn import tree
clf_1 = tree.DecisionTreeClassifier()
clf_1.fit(x_train, y_train)
y_test
ypred_1 = clf_1.predict(x_test)
ypred_1
d_1 = clf_1.score(x_train, y_train)
d_1
d_2 = clf_1.score(x_test, y_test)
d_2
d_3 = clf_1.score(x_test, ypred_1)  # score the decision tree (clf_1), not the SVM (clf)
d_3
d_4 = accuracy_score(y_test, ypred_1)
d_4
d_5 = jaccard_similarity_score(y_test, ypred_1)
d_5
from sklearn.ensemble import RandomForestClassifier
cl = RandomForestClassifier(n_estimators = 1000)
cl.fit(x_train, y_train)
y_test[0:10]
yhat_2 = cl.predict(x_test)
yhat_2[0:10]
e_1 = cl.score(x_train, y_train)
e_1
e_2 = cl.score(x_test, y_test)
e_2
e_3 = cl.score(x_test, yhat_2)
e_3
e_4 = accuracy_score(y_test, yhat_2)
e_4
e_5 = jaccard_similarity_score(y_test, yhat_2)
e_5
from sklearn.naive_bayes import GaussianNB, MultinomialNB, BernoulliNB
gsn = GaussianNB()
gsn.fit(x_train, y_train)
y_test[0:10]
ypred_2 = gsn.predict(x_test)
ypred_2[0:10]
f_1 = gsn.score(x_train, y_train)
f_1
f_2 = gsn.score(x_test, y_test)
f_2
f_3 = gsn.score(x_test, ypred_2)
f_3
f_4 = accuracy_score(y_test, ypred_2)
f_4
f_5 = jaccard_similarity_score(y_test, ypred_2)
f_5
## MultinomialNB requires non-negative feature values, so it cannot be fit on the
## standardized (negative-valued) data; its scores are recorded as NaN below
mul = MultinomialNB()
# mul.fit(x_train, y_train)  # would raise ValueError on the scaled features
g_1 = np.nan
g_1
g_2 = np.nan
g_2
g_3 = np.nan
g_3
g_4 = np.nan
g_4
g_5 = np.nan
g_5
ber = BernoulliNB()
ber.fit(x_train, y_train)
y_test[0:10]
yhat_2 = ber.predict(x_test)
yhat_2[0:10]
h_1 = ber.score(x_train, y_train)
h_1
h_2 = ber.score(x_test, y_test)
h_2
h_3 = ber.score(x_test, yhat_2)
h_3
h_4 = accuracy_score(y_test, yhat_2)
h_4
h_5 = jaccard_similarity_score(y_test, yhat_2)
h_5
df = pd.DataFrame({'Training Score' : [a_1, b_1, c_1, d_1, e_1, f_1, g_1, h_1],
'Testing Score' : [a_2, b_2, c_2, d_2, e_2, f_2, g_2, h_2],
'Predicted Score' : [a_3, b_3, c_3, d_3, e_3, f_3, g_3, h_3],
'Accuracy Score' : [a_4, b_4, c_4, d_4, e_4, f_4, g_4, h_4],
                   'Jaccard Similarity Score' : [a_5, b_5, c_5, d_5, e_5, f_5, g_5, h_5]}, index = ['Logistic', 'KNN', 'SVM', 'Decision_Tree', 'Random_Forest', 'GaussianNB', 'MultinomialNB', 'BernoulliNB'])
df
```
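The comparison above relies on a single train/test split, which can be noisy for a dataset of this size. As an optional extension (not part of the original notebook), a k-fold cross-validation sketch like the following gives more stable estimates; it reuses the scaled feature matrix `x`, the labels `y`, and the estimator classes already imported above:
```
from sklearn.model_selection import cross_val_score
models = {'Logistic': LogisticRegression(solver='lbfgs', max_iter=1000, C=0.01),
          'KNN': neighbors.KNeighborsClassifier(metric='manhattan', n_neighbors=1),
          'SVM': svm.SVC(kernel='poly', gamma='auto'),
          'Decision_Tree': tree.DecisionTreeClassifier(),
          'Random_Forest': RandomForestClassifier(n_estimators=100)}
for name, model in models.items():
    scores = cross_val_score(model, x, y, cv=5)
    print(f'{name}: mean accuracy = {scores.mean():.3f} (+/- {scores.std():.3f})')
```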
```
import imagecrawler as ic
import mediummapper as mm
import imagedownload as id
import moveimages as mv
import yaml
```
## Download images metadata
You use a crawler with a URL to retrieve the metadata of all images contained in the page. The metadata is stored in a SQLite DB.
Crawlers are supported for:
- [Getty Search Gateway](https://search.getty.edu/gateway/landing)
- [Cornell University Digital Library](https://digital.library.cornell.edu/)
- [Libary of Congress](https://www.loc.gov/)
- [Eastman Museum](https://collections.eastman.org/collections)
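A quick way to inspect what the crawlers have written to the SQLite DB is to list its tables. Note that the database filename below is only an assumption; adjust it to wherever your crawlers actually write:
```
import sqlite3
# NOTE: 'images.db' is only a guess at the database file name used by the crawlers
conn = sqlite3.connect('images.db')
print(conn.execute("SELECT name FROM sqlite_master WHERE type='table'").fetchall())
conn.close()
```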
```
#Download metadata from Getty
getty_crawler = ic.GettyCrawler()
getty_crawler.saves_pages_img_data("https://search.getty.edu/gateway/search?q=&cat=type&types=%22Photographs%22&rows=50&srt=a&dir=s&dsp=0&img=0&pg=",1,10)
#Download metadata from Cornell
cornell_crawler = ic.CornellCrawler()
cornell_crawler.saves_pages_img_data("https://digital.library.cornell.edu/?f[type_tesim][]=cyanotypes&page=",1,5)
#Download metadata from Library of Congress
congress_crawler = ic.CongressCrawler()
congress_crawler.saves_pages_img_data("https://www.loc.gov/pictures/search/?va=exact&q=Cyanotypes.&fa=displayed%3Aanywhere&fi=format&sg=true&op=EQUAL&sp=",1,5)
#Download metadata from Eastman
eastman_crawler = ic.EastmanCrawler()
eastman_crawler.saves_pages_img_data("https://collections.eastman.org/collections/20331/photography/objects/list?filter=date%3A1602%2C1990&page=",1,10)
```
## Standardize Photographic Processes descriptions
There is code in ``mediummapper.py`` to map the source descriptions to the predefined descriptions in ``config.yaml``
```
mapper = mm.MediumMapper()
```
Run the cell below. If the results are OK, move to the next cell. Otherwise, adjust the ```propose_mapping``` method in ```mediummapper.py``` and repeat the previous cell.
```
mapper.show_undefined()
mapper.update_mediums() #Updates mediums in DB
```
## Download images to disk
```
with open("ppi/config.yaml", "r") as yamlfile:
config = yaml.load(yamlfile, Loader=yaml.FullLoader)
for medium in config['allowed_processes']:
print(medium)
id.download_imgs(max=10,medium = medium)
```
## Prepare images for Deep Learning
Images must be MANUALLY cropped, as exemplified below (we only want to keep "relevant" information):
<img src = "ppi/images/GettyCrawler_49753.jpg" width="180" height="180">
<img src = "ppi/images/GettyCrawler_49753_crop.jpg" width="180" height="180">
## Move images into respective Process folder
This will help when building the model with Keras
```
mv.move_images(balanced = False)
```
```
OUTPUT_BUCKET_FOLDER = "gs://<GCS_BUCKET_NAME>/outbrain-click-prediction/output/"
DATA_BUCKET_FOLDER = "gs://<GCS_BUCKET_NAME>/outbrain-click-prediction/data/"
from pyspark.sql.types import *
import pyspark.sql.functions as F
```
## Loading data
```
truncate_day_from_timestamp_udf = F.udf(lambda ts: int(ts / 1000 / 60 / 60 / 24), IntegerType())
events_schema = StructType(
[StructField("display_id", IntegerType(), True),
StructField("uuid_event", StringType(), True),
StructField("document_id_event", IntegerType(), True),
StructField("timestamp_event", IntegerType(), True),
StructField("platform_event", IntegerType(), True),
StructField("geo_location_event", StringType(), True)]
)
events_df = spark.read.schema(events_schema).options(header='true', inferschema='false', nullValue='\\N') \
.csv(DATA_BUCKET_FOLDER + "events.csv") \
.withColumn('day_event', truncate_day_from_timestamp_udf('timestamp_event')) \
.alias('events')
promoted_content_schema = StructType(
[StructField("ad_id", IntegerType(), True),
StructField("document_id_promo", IntegerType(), True),
StructField("campaign_id", IntegerType(), True),
StructField("advertiser_id", IntegerType(), True)]
)
promoted_content_df = spark.read.schema(promoted_content_schema).options(header='true', inferschema='false', nullValue='\\N') \
.csv(DATA_BUCKET_FOLDER+"promoted_content.csv") \
.alias('promoted_content')
clicks_train_schema = StructType(
[StructField("display_id", IntegerType(), True),
StructField("ad_id", IntegerType(), True),
StructField("clicked", IntegerType(), True)]
)
clicks_train_df = spark.read.schema(clicks_train_schema).options(header='true', inferschema='false', nullValue='\\N') \
.csv(DATA_BUCKET_FOLDER+"clicks_train.csv") \
.alias('clicks_train')
clicks_train_joined_df = clicks_train_df \
.join(promoted_content_df, on='ad_id', how='left') \
.join(events_df, on='display_id', how='left')
clicks_train_joined_df.createOrReplaceTempView('clicks_train_joined')
# hold out 20% of display_ids from days 0-10 and all display_ids from days 11-12 for validation
validation_display_ids_df = clicks_train_joined_df.select('display_id','day_event').distinct() \
.sampleBy("day_event", fractions={0: 0.2, 1: 0.2, 2: 0.2, 3: 0.2, 4: 0.2, \
5: 0.2, 6: 0.2, 7: 0.2, 8: 0.2, 9: 0.2, 10: 0.2, \
11: 1.0, 12: 1.0}, seed=0)
validation_display_ids_df.createOrReplaceTempView("validation_display_ids")
validation_set_df = spark.sql('''SELECT display_id, ad_id, uuid_event, day_event, timestamp_event,
document_id_promo, platform_event, geo_location_event FROM clicks_train_joined t
WHERE EXISTS (SELECT display_id FROM validation_display_ids
WHERE display_id = t.display_id)''')
validation_set_gcs_output = "validation_set.parquet"
validation_set_df.write.parquet(OUTPUT_BUCKET_FOLDER+validation_set_gcs_output, mode='overwrite')
validation_set_df.take(5)
```
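As an optional sanity check (not part of the original notebook), the written Parquet output can be read back to confirm the row count:
```
validation_set_check_df = spark.read.parquet(OUTPUT_BUCKET_FOLDER + validation_set_gcs_output)
print(validation_set_check_df.count())
```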
**Simulation Name:** Name of folder <br>
**Trial Number:** In name of folder <br>
**Single Peptide Charge:** List out all of the amino acids and sum up the charge <br>
**Number of Peptides:** Count the number of peptides: look in packmol.inp file <br>
**Multiple Peptide Charge:** (Single Peptide Charge) * (Number of Peptides) <br>
**Number of Ions Added:** `grep NA mol.gro | wc -l | awk '{print $1}'` <br>
**New System Charge:** (Multiple Peptide Charge) + (Number of Ions Added), since each added Na+ ion carries a +1 charge <br>
**Number of CBD Molecules Added:** Count the number of CBD molecules: look in packmol.inp file <br>
 `tail -n 50821 em.gro | grep CBD | wc -l | awk '{print $1/53}'` <br>
**Number of Waters:** `grep SOL em.gro | wc -l | awk '{print $1/3}'` (a rough Python equivalent of these counts is sketched below) <br>
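A rough Python equivalent of the `grep`-based counts above (a sketch only; the paths and the residue-name matching are assumptions about the GROMACS `.gro` files used here):
```
def count_matching_lines(path, residue):
    """Count atom lines whose text contains the given residue name."""
    with open(path) as f:
        return sum(1 for line in f if residue in line)

num_ions = count_matching_lines('mol.gro', 'NA')        # one NA atom per sodium ion
num_waters = count_matching_lines('em.gro', 'SOL') / 3  # each SOL water has 3 atoms (OW, HW1, HW2)
print(num_ions, num_waters)
```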
```
# Peptide sequence (ACE/NME are capping groups); a list is used so repeated residues are counted
peptide = ['ACE', 'SER', 'LEU', 'SER', 'LEU', 'HIS', 'GLN', 'LYS', 'LEU', 'VAL', 'PHE', 'PHE', 'SER', 'GLU', 'ASP',
           'VAL', 'SER', 'LEU', 'GLY', 'NME']
# Charged residues at neutral pH
charges = {'GLU': -1, 'ASP': -1, 'ARG': +1, 'LYS': +1}
net_charge = sum(charges.get(res, 0) for res in peptide)
net_charge  # -1: one LYS (+1) versus one GLU and one ASP (-1 each)
import pandas as pd
simulations = pd.DataFrame(columns = ['Simulation Name', 'Trial Number',
'Single Peptide Charge', 'Number of Peptides',
'Multiple Peptide Charge', 'Number of Ions Added',
'New System Charge', 'Number of THC/CBD Molecules Added',
'Number of Waters', 'Concentration [mg/kg] of THC/CBD'])
def addSim(dataframe, trial, simName, trialNum, numOfPep, numOfIons, numOfTHC_CBD, numOfWater):
    """Record one simulation's bookkeeping as row `trial` of the dataframe."""
    # Grow the dataframe with empty rows until row `trial` exists
    while trial >= len(dataframe):
        dataframe.loc[len(dataframe)] = [None] * len(dataframe.columns)
    dataframe.loc[trial, 'Simulation Name'] = simName
    dataframe.loc[trial, 'Trial Number'] = trialNum
    dataframe.loc[trial, 'Single Peptide Charge'] = -1
    dataframe.loc[trial, 'Number of Peptides'] = numOfPep
    dataframe.loc[trial, 'Multiple Peptide Charge'] = dataframe.loc[trial, 'Single Peptide Charge'] * numOfPep
    dataframe.loc[trial, 'Number of Ions Added'] = numOfIons
    # Each added ion is Na+ (charge +1), so it is added to the total peptide charge
    dataframe.loc[trial, 'New System Charge'] = dataframe.loc[trial, 'Multiple Peptide Charge'] + numOfIons
    dataframe.loc[trial, 'Number of THC/CBD Molecules Added'] = numOfTHC_CBD
    dataframe.loc[trial, 'Number of Waters'] = numOfWater
    kgWater = 18.01528/(6.02214*(10.0**23))*numOfWater/1000.0      # total mass of water in kg
    mgTHC_CBD = 314.464/(6.02214*(10.0**23))*numOfTHC_CBD*1000.0   # total mass of THC/CBD in mg
    dataframe.loc[trial, 'Concentration [mg/kg] of THC/CBD'] = mgTHC_CBD/kgWater
    return dataframe
simulations = addSim(simulations, 0, 'trial1_1-1_swp-cbd', 1, 5, 5, 5, 16360)
simulations = addSim(simulations, 1, 'trial2_1-1_swp-cbd', 2, 5, 5, 5, 16360)
simulations = addSim(simulations, 2, 'trial3_1-1_swp-cbd', 3, 5, 5, 5, 16360)
simulations = addSim(simulations, 3, 'trial1_1-2_swp-cbd', 1, 5, 0, 10, 16258)
simulations = addSim(simulations, 4, 'trial2_1-2_swp-cbd', 2, 5, 0, 10, 16258)
simulations = addSim(simulations, 5, 'trial3_1-2_swp-cbd', 3, 5, 0, 10, 16258)
simulations = addSim(simulations, 6, 'trial1_1-3_swp-cbd', 1, 5, 0, 15, 16185)
simulations = addSim(simulations, 7, 'trial2_1-3_swp-cbd', 2, 5, 0, 15, 16185)
simulations = addSim(simulations, 8, 'trial3_1-3_swp-cbd', 3, 5, 0, 15, 16185)
simulations = addSim(simulations, 9, 'trial1_1-4_swp-cbd', 1, 5, 0, 20, 16076)
simulations = addSim(simulations, 10, 'trial2_1-4_swp-cbd', 2, 5, 0, 20, 16076)
simulations = addSim(simulations, 11, 'trial3_1-4_swp-cbd', 3, 5, 0, 20, 16076)
simulations = addSim(simulations, 12, 'trial1_1-5_swp-cbd', 1, 5, 0, 25, 15990)
simulations = addSim(simulations, 13, 'trial2_1-5_swp-cbd', 2, 5, 0, 25, 15990)
simulations = addSim(simulations, 14, 'trial3_1-5_swp-cbd', 3, 5, 0, 25, 15990)
simulations = addSim(simulations, 15, 'trial1_1-1_swp-thc', 1, 5, 5, 5, 16382)
simulations = addSim(simulations, 16, 'trial2_1-1_swp-thc', 2, 5, 5, 5, 16382)
simulations = addSim(simulations, 17, 'trial3_1-1_swp-thc', 3, 5, 5, 5, 16382)
simulations = addSim(simulations, 18, 'trial1_swp', 1, 5, 5, 0, 16467)
simulations = addSim(simulations, 19, 'trial2_swp', 2, 5, 5, 0, 16467)
simulations = addSim(simulations, 20, 'trial3_swp', 3, 5, 5, 0, 16467)
simulations
#/Users/prguser/Emily
export_csv = simulations.to_csv(r'Simulations.csv', index=False, header=True)  # don't forget the '.csv' extension in the path
```
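As a quick sanity check of the concentration column, we can re-derive the first row by hand (5 CBD molecules in 16360 waters):
```
AVOGADRO = 6.02214e23
kg_water = 18.01528 / AVOGADRO * 16360 / 1000.0   # ~4.9e-22 kg of water
mg_cbd = 314.464 / AVOGADRO * 5 * 1000.0          # ~2.6e-18 mg of CBD
print(mg_cbd / kg_water)                          # ~5.3e3 mg/kg, which should match the first row above
```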
# Machine learning tutorial: R edition
I developed this tutorial for a presentation I was giving to the University of Guelph Integrative Biology R users group. The topic was an introduction to the implementation of machine learning algorithms in R.
### Who can benefit from this?
This tutorial is a good first step for someone looking to learn the steps needed for exploring data, cleaning data, and training/evaluating some basic machine learning algorithms. It is also a useful resource for someone who is comfortable doing data science in other languages such as python and wants to learn how to apply their data science skills in R. As a fun exercise you could compare this code to the python code in the book listed below.
Data and code come from chapter 2 of this book: https://github.com/ageron/handson-ml
Here I have 'translated' (and heavily abridged) the code from python into R so that it can be used as a good intro example for how to implement some machine learning algorithms. The workflow isn't exactly the same as the book but the data arrives cleaned at roughly the same point.
I've chosen this dataset because:
1. It is freely avaliable online so we won't get sued.
2. It is 'medium' sized. Not so small that it feels like a toy, but not so big as to be cumbersome.
3. There are a reasonable number of predictor columns, so it isn't too much to take in and understand what they all mean.
The columns are as follows; their names are pretty self-explanatory:
longitude
latitude
housing_median_age
total_rooms
total_bedrooms
population
households
median_income
median_house_value
ocean_proximity
Each row pertains to a group of houses (I forget if this is by block or postal code but the important bit is they are medians because it is a bunch of houses in close proximity grouped together).
## Step 1. Load in the data.
If you missed the email link, download 'housing.csv' from here:
https://github.com/ageron/handson-ml/tree/master/datasets/housing
Then adjust the following code to your directory of choice.
```
library(tidyverse)
library(reshape2)
housing = read.csv('../housing.csv')
```
First thing I always do is use the head command to make sure the data isn't weird and looks how I expected.
```
head(housing)
```
Next I always call summary, just to see if the #s are #s and the categoricals are categoricals.
```
summary(housing)
```
So from that summary we can see a few things we need to do before actually running algorithms.
1. NA's in total_bedrooms need to be addressed. These must be given a value
2. We will split the ocean_proximity into binary columns. Most machine learning algorithms in R can handle categoricals in a single column, but we will cater to the lowest common denominator and do the splitting.
3. Make the total_bedrooms and total_rooms into mean_number_bedrooms and mean_number_rooms columns, as these are likely more accurate depictions of the houses in a given group.
```
par(mfrow=c(2,5))
colnames(housing)
```
Let's take a gander at the variables
```
ggplot(data = melt(housing), mapping = aes(x = value)) +
geom_histogram(bins = 30) + facet_wrap(~variable, scales = 'free_x')
```
Things I see from this:
1. There are some housing blocks with old age homes in them.
2. The median house value has some weird cap applied to it causing there to be a blip at the rightmost point on the hist. There are most definitely houses in the bay area worth more than 500,000... even in the 90s when this data was collected!
3. We should standardize the scale of the data for any non-tree-based methods, as some of the variables range from 0-10 while others go up to 500,000.
4. We need to think about how the cap on housing prices can affect our prediction... may be worth removing the capped values and only working with the data we are confident in.
## Step 2. Clean the data
### Impute missing values
Fill median for total_bedrooms which is the only column with missing values. The median is used instead of mean because it is less influenced by extreme outliers. Note this may not be the best, as these could be actual buildings with no bedrooms (warehouses or something). We don't know... but imputation is often the best of a bad job
```
housing$total_bedrooms[is.na(housing$total_bedrooms)] = median(housing$total_bedrooms , na.rm = TRUE)
```
### Fix the total columns - make them means
```
housing$mean_bedrooms = housing$total_bedrooms/housing$households
housing$mean_rooms = housing$total_rooms/housing$households
drops = c('total_bedrooms', 'total_rooms')
housing = housing[ , !(names(housing) %in% drops)]
head(housing)
```
### Turn categoricals into booleans
Below I do the following:
1. Get a list of all the categories in the 'ocean_proximity' column
2. Make a new empty dataframe of all 0s, where each category is its own column
3. Use a for loop to populate the appropriate columns of the dataframe
4. Drop the original column from the dataframe.
This is an example of me coding R with a python accent... I would love comments about how to do this more cleanly in R!
Fun follow-up task: can you turn this into a function that could be used to split any categorical column?
```
categories = unique(housing$ocean_proximity)
#split the categories off
cat_housing = data.frame(ocean_proximity = housing$ocean_proximity)
for(cat in categories){
cat_housing[,cat] = rep(0, times= nrow(cat_housing))
}
head(cat_housing) #see the new columns on the right
for(i in 1:length(cat_housing$ocean_proximity)){
cat = as.character(cat_housing$ocean_proximity[i])
cat_housing[,cat][i] = 1
}
head(cat_housing)
cat_columns = names(cat_housing)
keep_columns = cat_columns[cat_columns != 'ocean_proximity']
cat_housing = select(cat_housing,one_of(keep_columns))
tail(cat_housing)
```
## Scale the numerical variables
Note here I scale every one of the numericals except for 'median_house_value' as this is what we will be working to predict. The x values are scaled so that coefficients in things like support vector machines are given equal weight, but the y value scale doesn't affect the learning algorithms in the same way (and we would just need to re-scale the predictions at the end, which is another hassle).
```
colnames(housing)
drops = c('ocean_proximity','median_house_value')
housing_num = housing[ , !(names(housing) %in% drops)]
head(housing_num)
scaled_housing_num = scale(housing_num)
head(scaled_housing_num)
```
## Merge the altered numerical and categorical dataframes
```
cleaned_housing = cbind(cat_housing, scaled_housing_num, median_house_value=housing$median_house_value)
head(cleaned_housing)
```
## Step 3. Create a test set of data
We pull this subsection from the main dataframe and put it to the side to not be looked at prior to testing our models. Don't look at it, as snooping the test data introduces a bias to your work!
This is the data we use to validate our model, when we train a machine learning algorithm the goal is usually to make an algorithm that predicts well on data it hasn't seen before. To assess this feature, we pull a set of data to validate the models as accurate/inaccurate once we have completed the training process.
```
set.seed(1738) # Set a random seed so that same sample can be reproduced in future runs
sample = sample.int(n = nrow(cleaned_housing), size = floor(.8*nrow(cleaned_housing)), replace = F)
train = cleaned_housing[sample, ] #just the samples
test = cleaned_housing[-sample, ] #everything but the samples
```
I like to use little sanity checks like the ones below to make sure the manipulations have done what I want.
With big dataframes you need to find ways to be sure that don't involve looking at the whole thing at every step!
Note that the train data below has all the columns we want, and also that the index is jumbled (so we did take a random sample). The second check makes sure that the combined length of the train and test dataframes equals the length of the dataframe they were split from, which shows we didn't lose data or make any up by accident!
```
head(train)
nrow(train) + nrow(test) == nrow(cleaned_housing)
```
## Step 4. Test some predictive models.
We start here with just a simple linear model using 3 of the available predictors: median income, mean rooms and population. This serves as an entry point to introduce the topic of cross validation and a basic model. We want a model that makes good predictions on data that it has not seen before. A model that explains the variation in the data it was trained on well, but does not generalize to external data, is referred to as being overfit. You may think "that's why we split off some test data!" but we don't want to repeatedly assess against our test set, as then the model can just become overfit to that set of data, thus moving the problem rather than solving it.
So here we do cross validation to test the model using the training data itself. Our K is 5, which means the training data is split into 5 equal portions. One of the 5 folds is put to the side (as a mini test data set) and then the model is trained using the other 4 portions. After that, predictions are made on the fold that was withheld, the process is repeated for each of the 5 folds, and the prediction errors from the 5 iterations are averaged. This gives us a rough understanding of how well the model predicts on external data!
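The error metric used throughout the rest of this tutorial is the root-mean-squared error (RMSE); as a reminder (notation mine), for predictions $\hat{y}_i$ and observed values $y_i$:
$$\text{RMSE} = \sqrt{\frac{1}{n}\sum_{i=1}^{n}(\hat{y}_i - y_i)^2}$$
Taking the square root puts the error back into the units of the target (dollars of median house value), which is why the code below takes the square root of the cross-validation error estimate.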
```
library('boot')
?cv.glm # note the K option for K fold cross validation
glm_house = glm(median_house_value~median_income+mean_rooms+population, data=cleaned_housing)
k_fold_cv_error = cv.glm(cleaned_housing , glm_house, K=5)
k_fold_cv_error$delta
```
The first component is the raw cross-validation estimate of prediction error.
The second component is the adjusted cross-validation estimate.
```
glm_cv_rmse = sqrt(k_fold_cv_error$delta)[1]
glm_cv_rmse #off by about $83,000... it is a start
names(glm_house) #what parts of the model are callable?
glm_house$coefficients
```
Since we scaled the inputs we can say that of the three we looked at, median income had the biggest effect on housing price... but I'm always very careful and google lots before interpreting coefficients!
### Random forest model
```
library('randomForest')
?randomForest
names(train)
set.seed(1738)
train_y = train[,'median_house_value']
train_x = train[, names(train) !='median_house_value']
head(train_y)
head(train_x)
#some people like weird r format like this... I find it causes headaches
#rf_model = randomForest(median_house_value~. , data = train, ntree =500, importance = TRUE)
rf_model = randomForest(train_x, y = train_y , ntree = 500, importance = TRUE)
names(rf_model) #these are all the different things you can call from the model.
rf_model$importance
```
The percentage increase in mean squared error (%IncMSE) is a measure of feature importance. It is defined as the increase in the mean squared error of the predictions when the given variable is shuffled (permuted), thereby acting as a metric of that variable's importance to the performance of the model. So a higher number == a more important predictor.
### The out-of-bag (oob) error estimate
In random forests, there is no need for cross-validation or a separate test set to get an unbiased estimate of the test set error. It is estimated internally, during the run, as follows:
Each tree is constructed using a different bootstrap sample from the original data. About one-third of the cases are left out of the bootstrap sample and not used in the construction of the kth tree.
```
oob_prediction = predict(rf_model) #leaving out a data source forces OOB predictions
#you may have noticed that this is available using the $mse in the model options.
#but this way we learn stuff!
train_mse = mean(as.numeric((oob_prediction - train_y)^2))
oob_rmse = sqrt(train_mse)
oob_rmse
```
So even using a random forest of only 500 decision trees we are able to predict the median price of a house in a given district to within about $49,000 of the actual median house price. This can serve as our benchmark moving forward as we try other models.
How well does the model predict on the test data?
```
test_y = test[,'median_house_value']
test_x = test[, names(test) !='median_house_value']
y_pred = predict(rf_model , test_x)
test_mse = mean(((y_pred - test_y)^2))
test_rmse = sqrt(test_mse)
test_rmse
```
Well that looks great! Our model scored roughly the same on the training and testing data, suggesting that it is not overfit and that it makes good predictions.
## Step 5. Next Steps
So above we have covered the basics of cleaning data and getting a machine learning algorithm up and running in R. But I've on purpose left some room for improvement.
The obvious way to improve the model is to provide it with better data. Recall our columns:
longitude
latitude
housing_median_age
total_rooms
total_bedrooms
population
households
median_income
median_house_value
ocean_proximity
### Suggestions on ways to improve the results
Why not use your R skills to build new data! One suggestion would be to take the longitude and latitude and work with these data. You could try to find things like 'distance to closest city with 1 million people' or other location based stats. This is called feature engineering and data scientists get paid big bucks to do it effectively!
You may also wish to branch out and try some other models to see if they improve over the random forest benchmark we have set. Note this is not an exhaustive list but a starting point
Tree based methods:
gradient boosting - library(gbm)
extreme gradient boosting - library(xgboost)
Other fun methods:
support vector machines - library(e1071)
neural networks - library(neuralnet)
### Hyperparameters and Grid search
When tuning models the next thing to worry about is the hyperparameters. All this means is the different options you pass into a model when you initialize it, i.e. the hyperparameter in our random forest model was ntree = x; we chose x = 500, but we could have tried x = 2500, x = 1500, x = 100000, etc.
Grid search is a common method to find the best combination of hyperparameters (as there are often more than the 1 we see in the random forest example!). Essentially this is where you make every combination of a set of parameters and run a cross validation on each set, seeing which set gives the best predictions. An alternative is random search. When the number of hyperparameters is high then the computational load of a full grid search may be too much, so a random search takes a subset of the combinations and finds the best one in the random sample (sounds like a crapshoot but it actually works well!). These methods can be implemented easily using a for loop or two... there are also packages available to help with these tasks.
Here we exit the scope of what I can cover in a short tutorial; look at the R package 'caret', which has great functions for streamlining things like grid searches for the best parameters. http://caret.r-forge.r-project.org/
## Have you made a sweet model that predicts well or taught you something?
If so, you can submit the script to kaggle here:
You can post a script or your own kernel (or fork this document and make a better version) up for the world to enjoy! I promise to upvote you if you do.
### Making your own models? Go forth with the train and test dataframes in hand to make your machine learn something!
I have also followed up on this kernel with a few sequels. Take a look at some of the other kernels produced using this dataset! [Here I expand the model through the use of gradient boosting algorithms (also in r)](https://www.kaggle.com/camnugent/gradient-boosting-and-parameter-tuning-in-r) and [here I engineer some new features and increase the prediction accuracy even more (I did this one in python).](https://www.kaggle.com/camnugent/geospatial-feature-engineering-and-visualization)
Also the code from this notebook has been refactored and made more R-like [here!](https://www.kaggle.com/karlcottenie/introduction-to-machine-learning-in-r-tutorial)
# Grover's search algorithm
## Search problems
A lot of the problems that computers solve are types of _search problems_. You've probably already searched the web using a search engine, which is a program that builds a database from websites and allows you to search through it. We can think of a database as a program that takes an address as input, and outputs the data at that address. A phone book is one example of a database; each entry in the book contains a name and number. For example, we might ask the database to give us the data at the 3441<sup>st</sup> address, and it will return the 3441<sup>st</sup> name and number in the book.

We call this process of providing an input and reading the output "querying the database". Often in computer science, we consider databases to be black boxes, which means we're not allowed to see how they work; we'll just assume they're magical processes that do exactly as they promise. We call magical processes like these "oracles".
If we have someone's name and we're trying to find their phone number, this is easy if the book is sorted alphabetically by name. We can use an algorithm called _binary search_.
<!-- ::: q-block -->
### Example: Binary search
<!-- ::: q-carousel -->
<!-- ::: div -->

Binary search is a very efficient classical algorithm for searching sorted databases. You've probably used something similar when searching for a specific page in a book (or even using a physical phone book). Let's say we want to find Evelina's phone number.
<!-- ::: -->
<!-- ::: div -->

First, we check the middle item in the database and see if it's higher or lower than the item we're searching for.
<!-- ::: -->
<!-- ::: div -->

In this case 'H' comes after 'E'. Since the list is sorted, we know that the address of the entry we're looking for has to be lower than 7. We can ignore any addresses larger than 6 and repeat this algorithm on the reduced list.
<!-- ::: -->
<!-- ::: div -->

This time, the middle entry's name begins with 'D', which comes before 'E'. Now we know our entry must have an address higher than 3.
<!-- ::: -->
<!-- ::: div -->

Each step halves the size of the list we're working on, so the search space _shrinks_ exponentially.
<!-- ::: -->
<!-- ::: div -->

Which means that even with very large databases, we can find entries quickly.
<!-- ::: -->
<!-- ::: -->
<!-- ::: -->
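Here's a minimal Python sketch of binary search (the list of names below is made up for illustration; a real phone book entry would also carry the number):
```
def binary_search(entries, target):
    """Return the address (index) of `target` in an alphabetically sorted list, or None."""
    low, high = 0, len(entries) - 1
    while low <= high:
        mid = (low + high) // 2      # query the middle item
        if entries[mid] == target:
            return mid
        elif entries[mid] < target:
            low = mid + 1            # target must be at a higher address
        else:
            high = mid - 1           # target must be at a lower address
    return None

names = ['Alice', 'Bob', 'Charlie', 'Dana', 'Evelina', 'Farid', 'Grace', 'Hannah']
print(binary_search(names, 'Evelina'))  # 4
```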
<!-- ::: q-block.exercise -->
### Quick quiz
<!-- ::: q-quiz(goal="intro-grover-0") -->
The maximum number of database queries needed grows logarithmically (base 2) with the number of entries in the database.
<!-- ::: .question -->
Using binary search, what's the largest number of queries we'd need to search a sorted database with 1024 entries?
<!-- ::: -->
<!-- ::: .option(correct) -->
1. 10
<!-- ::: -->
<!-- ::: .option -->
2. 1
<!-- ::: -->
<!-- ::: .option -->
3. 100
<!-- ::: -->
<!-- ::: -->
*Hint: how many times do you need to halve the database to be left with only one item?*
<!-- ::: -->
Since binary search grows [logarithmically](gloss:logarithm) with the size of the database, there isn't much room for improvement from a quantum computer. But we don't always have the convenience of searching sorted lists. What if we were instead given a phone number, and we wanted to find the name associated with that number?
This is a lot more difficult, as phone books aren't usually sorted by number. If we assume the phone numbers are ordered randomly in the list, there's no way of homing in on our target as we did last time. The best we can do with a classical computer is randomly pick an input address, see if it contains the phone number we're looking for, and repeat until we happen upon the correct entry. For this reason, a lot of work goes into [indexing](gloss:index) databases to improve search times.
When the database is completely disordered like this, we say it's _unstructured_. And the quantum algorithm we'll learn about on this page is an algorithm for unstructured search.
<!-- ::: q-block.exercise -->
### Unstructured search
<!-- ::: q-quiz(goal="intro-grover-1") -->
<!-- ::: .question -->
If we search an unstructured database by randomly choosing inputs, how many inputs would we need to check on average before we find the entry we're looking for?
<!-- ::: -->
<!-- ::: .option(correct) -->
1. Half the possible inputs.
<!-- ::: -->
<!-- ::: .option -->
2. All the possible inputs.
<!-- ::: -->
<!-- ::: .option -->
3. Three-quarters of the possible inputs.
<!-- ::: -->
<!-- ::: -->
***
<!-- ::: q-quiz(goal="intro-grover-2") -->
<!-- ::: .question -->
Using random guessing, how does the average number of database queries needed grow with the number of entries in the database?
<!-- ::: -->
<!-- ::: .option(correct) -->
1. Linearly.
<!-- ::: -->
<!-- ::: .option -->
2. Logarithmically.
<!-- ::: -->
<!-- ::: .option -->
3. Quadratically.
<!-- ::: -->
<!-- ::: .option -->
4. Exponentially.
<!-- ::: -->
<!-- ::: -->
<!-- ::: -->
It may seem that we can't possibly do better than random guessing here; we don't have any idea where the correct entry will be in the database, and each incorrect query only rules out one entry.
For classical computers, our intuition is correct, but if our database can input and output quantum superpositions, it turns out we can do better than random guessing! On this page we will learn about our first quantum algorithm: Grover's quantum search algorithm. When searching any database (structured or unstructured), Grover's algorithm grows with the _square root_ of the number of inputs, which for unstructured search is a [quadratic](gloss:quadratic) improvement over the best classical algorithm.

## Beyond black boxes
Search algorithms can search databases of collected information such as phone books, but they can also do more than that. If we can make a problem _look_ like a database search problem, then we can use a search algorithm to solve it. For example, let's consider the problem of solving a [sudoku](gloss:sudoku). If someone claims to have solved a sudoku, you can check if it's solved pretty quickly: You check along each row, check along each column, check each square, and you're finished. In this sense, _you_ are the database, and the person that gave you the solution is querying you. They are trying to find the input that returns the information "yes this is a valid solution".
In fact, we can present a lot of computational problems as "find the input that results in a certain output".

<!-- vale QiskitTextbook.Acronyms = NO -->
One example of a problem we can solve like this is the Boolean satisfiability problem (known as 'SAT').
## SAT problems
SAT problems are widely studied in computer science, and lots of other computing problems can be converted to SAT problems. On this page we will use Grover's algorithm to solve a simple SAT problem, and you can use the skills you learn here to apply quantum search algorithms to other problems.
A solution to a SAT problem is a string of bits, which makes it easy to map to a quantum circuit. The problem itself is essentially a bunch of conditions (we call them clauses) that rule out different combinations of bit values. For example, if we had three bits, one of the clauses might be "You can't have the zeroth bit `ON` _and_ the first bit `OFF`", which would rule out the combinations `101` and `001` as valid solutions.
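As a tiny illustration of that example clause (a sketch only; remember that in these bit strings the rightmost character is the zeroth bit):
```
def clause_satisfied(bits):
    """Example clause: you can't have bit 0 ON and bit 1 OFF."""
    bit0, bit1 = bits[-1] == '1', bits[-2] == '1'
    return not (bit0 and not bit1)

ruled_out = [b for b in ['000', '001', '010', '011', '100', '101', '110', '111']
             if not clause_satisfied(b)]
print(ruled_out)  # ['001', '101']
```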
Here's a file that encodes a _"[3-SAT](gloss:3-sat)"_ problem, which is a SAT problem where every clause refers to exactly 3 bits, and one of these bit conditions in each clause must be satisfied.
<!-- ::: q-block -->
### Example 3-SAT problem
Here is an example of a 3-SAT problem, stored in a file format called "DIMACS CNF". These files are very simple and are just one way of storing SAT problems.
$\cssId{_dimacs-c}{\texttt{c example DIMACS-CNF 3-SAT}}$ <br>
$\cssId{_dimacs-problem}{\texttt{p cnf 3 5}}$ <br>
$\texttt{-1 -2 -3 0}$<br>
$\cssId{_dimacs-clause-1}{\texttt{1 -2 3 0}}$ <br>
$\texttt{1 2 -3 0}$<br>
$\cssId{_dimacs-clause-3}{\texttt{1 -2 -3 0}}$ <br>
$\cssId{_dimacs-clause-4}{\texttt{-1 2 3 0}}$ <br>
<!-- ::: -->
Like with the sudoku, it's easy to check if a bit string is a valid solution to a SAT problem; we just look at each clause in turn and see if our string disobeys any of them. In this course, we won't worry about how we do this in a quantum circuit. Just remember we have efficient classical algorithms for checking SAT solutions, and for now we'll just use Qiskit's built-in tools to build a circuit that does this for us.
We've saved this file under `examples/3sat.dimacs` (relative to the code we're running).
```
with open('examples/3sat.dimacs', 'r', encoding='utf8') as f:
dimacs = f.read()
print(dimacs) # let's check the file is as promised
```
And we can use Qiskit's circuit library to build a circuit that does the job of the oracle we described above (we'll keep calling this circuit the 'oracle' even though it's no longer magic and all-powerful).
```
from qiskit.circuit.library import PhaseOracle
oracle = PhaseOracle.from_dimacs_file('examples/3sat.dimacs')
oracle.draw()
```
The circuit above acts similarly to the databases we described before. The input to this circuit is a string of 3 bits, and the output given depends on whether the input string is a solution to the SAT problem or not.
The result of this checking computation will still be either `True` or `False`, but the behaviour of this circuit is slightly different to how you might expect. To use this circuit with Grover's algorithm, we want the oracle to change the phase of the output state by 180ยฐ (i.e. multiply by -1) if the state is a solution. This is why Qiskit calls the class '`PhaseOracle`'.
$$
U_\text{oracle}|x\rangle = \bigg\{
\begin{aligned}
\phantom{-}|x\rangle & \quad \text{if $x$ is not a solution} \\
-|x\rangle & \quad \text{if $x$ is a solution} \\
\end{aligned}
$$
For example, the only solutions to this problem are `000`, `011`, and `101`, so the circuit above has this matrix:
$$
U_\text{oracle} =
\begin{bmatrix}
-1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\
0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 \\
0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 \\
0 & 0 & 0 & -1 & 0 & 0 & 0 & 0 \\
0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 \\
0 & 0 & 0 & 0 & 0 & -1 & 0 & 0 \\
0 & 0 & 0 & 0 & 0 & 0 & 1 & 0 \\
0 & 0 & 0 & 0 & 0 & 0 & 0 & 1 \\
\end{bmatrix}
$$
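We can double-check those three solutions with a quick classical brute-force pass over all eight bit strings (a sketch only; the clauses are copied from the DIMACS file above, and variable 1 is mapped to qubit 0 so the strings are printed in Qiskit's bit ordering):
```
clauses = [(-1, -2, -3), (1, -2, 3), (1, 2, -3), (1, -2, -3), (-1, 2, 3)]

def is_solution(bits):  # bits[0] corresponds to variable 1 (qubit 0)
    return all(any((lit > 0) == bits[abs(lit) - 1] for lit in clause)
               for clause in clauses)

solutions = []
for i in range(8):
    bits = [bool(i >> q & 1) for q in range(3)]
    if is_solution(bits):
        solutions.append(''.join('1' if b else '0' for b in reversed(bits)))
print(solutions)  # ['000', '011', '101']
```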
To summarise:
1. There are problems for which it's easy to check if a proposed solution is correct.
2. We can convert an algorithm that checks solutions into a quantum circuit that changes the phase of solution states
3. We can then use Grover's algorithm to work out which states have their phases changed.
In this sense, the database or oracle _is the problem_ to be solved.

## Overview of Grover's algorithm
Now we understand the problem, we finally come to Grover's algorithm. Grover's algorithm has three steps:
1. The first step is to create an equal superposition of every possible input to the oracle. If our qubits all start in the state $|0\rangle$, we can create this superposition by applying a H-gate to each qubit. We'll call this equal superposition state '$|s\rangle$'.
2. The next step is to run the oracle circuit ($U_\text{oracle}$) on these qubits. On this page, we'll use the circuit (`oracle`) Qiskit created for us above, but we could use any circuit or hardware that changes the phases of solution states.
3. The final step is to run a circuit called the 'diffusion operator' or 'diffuser' ($U_s$) on the qubits. We'll go over this circuit when we explore Grover's algorithm in the next section, but it's a remarkably simple circuit that is the same for any oracle.
We then need to repeat steps 2 & 3 a few times depending on the size of the circuit. Note that step #2 is the step in which we query the oracle, so the number of times we do this is roughly proportional to the square root of the number of possible inputs. If we repeat 2 & 3 enough times, then when we measure, we'll have a high chance of measuring a solution to the oracle.

```
from qiskit import QuantumCircuit
init = QuantumCircuit(3)
init.h([0,1,2])
init.draw()
```
Next, we can again use Qiskit's tools to create a circuit that does steps 2 & 3 for us.
```
# steps 2 & 3 of Grover's algorithm
from qiskit.circuit.library import GroverOperator
grover_operator = GroverOperator(oracle)
```
And we can combine this into a circuit that performs Grover's algorithm. Here, we won't repeat steps 2 & 3 as this is a small problem and doing them once is enough.
```
qc = init.compose(grover_operator)
qc.measure_all()
qc.draw()
```
Finally, let's run this on a simulator and see what results we get:
```
# Simulate the circuit
from qiskit import Aer, transpile
sim = Aer.get_backend('aer_simulator')
t_qc = transpile(qc, sim)
counts = sim.run(t_qc).result().get_counts()
# plot the results
from qiskit.visualization import plot_histogram
plot_histogram(counts)
```
We have a high probability of measuring one of the 3 solutions to the SAT problem.
<!-- ::: q-block.exercise -->
### Quick quiz
<!-- ::: q-quiz(goal="intro-grover-3") -->
<!-- ::: .question -->
Which of these bit strings is a solution to the SAT problem solved by this quantum circuit?
<!-- ::: -->
<!-- ::: .option(correct) -->
1. `011`
<!-- ::: -->
<!-- ::: .option -->
2. `001`
<!-- ::: -->
<!-- ::: .option -->
3. `010`
<!-- ::: -->
<!-- ::: .option -->
3. `110`
<!-- ::: -->
<!-- ::: -->
<!-- ::: -->
## How does Grover's algorithm work?
We've learnt about search problems, and seen Grover's algorithm used to solve one. But how, and why, does this work?
<!-- ::: q-block -->
### Visualising Grover's algorithm
<!-- ::: q-carousel -->
<!-- ::: div -->
Grover's algorithm has a nice geometric explanation. We've seen that we can represent quantum states through vectors. With search problems like these, there are only two vectors we care about: the solutions, and everything else. We'll call the superposition of all solution states '$|✓\rangle$', so for the SAT problem above:
$$|✓\rangle = \tfrac{1}{\sqrt{3}}(|000\rangle + |011\rangle + |101\rangle)$$
and we'll call the superposition of every other state '$|✗\rangle$':
$$|✗\rangle = \tfrac{1}{\sqrt{5}}(|001\rangle + |010\rangle + |100\rangle + |110\rangle + |111\rangle)$$
<!-- ::: -->
<!-- ::: div -->
**The plane**

Since the two vectors $|✓\rangle$ and $|✗\rangle$ don't share any elements, they are perpendicular, so we can draw them at right angles on a 2D plane. These will be our y- and x-axes, respectively.
<!-- ::: -->
<!-- ::: div -->
**Step 1**

Let's plot the states of our quantum computer on this plane at different points in the algorithm. The first state we'll plot is $|s\rangle$. This is the state _after_ step 1 (the initialisation step). This state is an equal superposition of all computational basis states. Since any possible state is either a solution or not a solution, we can write $|s\rangle$ as some combination of $|✗\rangle$ and $|✓\rangle$, so it sits in between them on our plane.
$$|s\rangle = a|✗\rangle + b|✓\rangle$$
<!-- ::: -->
<!-- ::: div -->
**Step 1**

For difficult problems, we'd expect there to be lots of possible inputs, but only a small number of solutions. In this case, $|s\rangle$ would be much closer to $|✗\rangle$ than $|✓\rangle$ (i.e. the angle, $\theta$, between them is small), so it's unlikely that measuring our qubits would give us one of the computational basis states that make up $|✓\rangle$. Our goal is to end up with the computer in a state as close to $|✓\rangle$ as possible.
<!-- ::: -->
<!-- ::: div -->
**Step 2**

Next we pass our qubits through the circuit $U_\text{oracle}$. We saw above that, by definition, $U_\text{oracle}$ flips the phase of all solution states. In our diagram, this is a reflection through the vector $|✗\rangle$. I.e.:
$$a|✗\rangle + b|✓\rangle \xrightarrow{\text{oracle}} a|✗\rangle - b|✓\rangle$$
<!-- ::: -->
<!-- ::: div -->
**Step 3**

We've just seen that we can reflect through the vector $|✗\rangle$, so is there another vector we could reflect through that would move our state closer to $|✓\rangle$? The answer is 'yes': we can reflect through the vector $|s\rangle$. It may not be obvious at first how we can create a circuit that does this, but it's a relatively simple operation that we'll cover later on this page.
<!-- ::: -->
<!-- ::: div -->
**Finish (or repeat)**

Now our state vector is closer to $|✓\rangle$ than before, which means we have a higher chance of measuring one of our solution states. If there is only one solution, we need to repeat steps 2 & 3 ~$\sqrt{N}$ times to have the highest probability of measuring that solution.
<!-- ::: -->
<!-- ::: -->
<!-- ::: -->
<!-- ::: q-block -->
### How many times do we need to query the oracle?
<!-- ::: q-carousel -->
<!-- ::: div -->

To work this out, we'll have to work out how much each iteration rotates our state towards $|✓\rangle$. Let's say we're somewhere in the middle of our algorithm: the state of our computer ($|\psi\rangle$) is an angle $\phi$ from the starting state $|s\rangle$. The angle between $|\psi\rangle$ and $|✗\rangle$ is $\theta + \phi$.
<!-- ::: -->
<!-- ::: div -->

The oracle reflects the state vector of our computer around $|✗\rangle$, so the angle between our new, reflected state vector ($|\psi'\rangle$) and $|✗\rangle$ will also be $\theta + \phi$.
<!-- ::: -->
<!-- ::: div -->

Next we reflect through $|s\rangle$. The angle between the state of our computer ($|\psi'\rangle$) and $|s\rangle$ is $2\theta + \phi$.
<!-- ::: -->
<!-- ::: div -->

So, after one iteration, we know the angle between the state of our computer and $|s\rangle$ is also $2\theta + \phi$.
<!-- ::: -->
<!-- ::: div -->

Which means each iteration rotates the state of our computer towards $|✓\rangle$ by $2\theta$.
<!-- ::: -->
<!-- ::: div -->

Now we just need to work out how many lots of $2\theta$ fit into a right angle, and this will be roughly the number of iterations needed to rotate $|s\rangle$ into $|✓\rangle$.
<!-- ::: -->
<!-- ::: div -->

So what's the angle $\theta$ in terms of $N$? With a bit of trigonometry, we know that $\sin(\theta)$ is equal to the $|✓\rangle$ component of $|s\rangle$, divided by the length of $|s\rangle$ (which is 1). If there's only one solution state, then $|s\rangle = \tfrac{1}{\sqrt{N}}(|0\rangle + |1\rangle \dots + |✓\rangle \dots + |N-1\rangle)$. So $\sin(\theta) = \tfrac{1}{\sqrt{N}}$.
<!-- ::: -->
<!-- ::: div -->

Finally, for difficult problems, $\theta$ will be very small, which means we can use the small angle approximation to say $\theta \approx \tfrac{1}{\sqrt{N}}$ radians.
<!-- ::: -->
<!-- ::: div -->

Since, for small $\theta$, we want to rotate $|s\rangle$ around $\pi/2$ radians, this means we need to do roughly $\tfrac{\pi}{2}\div\tfrac{2}{\sqrt{N}} = \tfrac{\pi}{4}\sqrt{N}$ iterations. Since we query the oracle once per iteration, the number of oracle queries needed is proportional to $\sqrt{N}$, if there is exactly one solution.
<!-- ::: -->
<!-- ::: -->
<!-- ::: -->
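Putting that counting argument into a few lines of code (a sketch with made-up numbers: 10 input bits and a single solution):
```
from math import asin, sqrt, floor, pi
N = 2**10  # hypothetical search-space size (10 input bits)
M = 1      # number of solutions
theta = asin(sqrt(M / N))             # angle between |s> and the x-axis
iterations = floor(pi / (4 * theta))  # ~ (pi/4)*sqrt(N) for small theta
print(iterations)  # 25
```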
<!-- ::: q-block.exercise -->
### Quick quiz
<!-- ::: q-quiz(goal="intro-grover-4") -->
<!-- ::: .question -->
For an oracle with many possible inputs and exactly one solution, $\theta \approx \tfrac{1}{\sqrt{N}}$. What approximate value would $\theta$ have if there were _two_ solutions?
<!-- ::: -->
<!-- ::: .option -->
1. $\theta \approx \tfrac{2}{\sqrt{N}}$
<!-- ::: -->
<!-- ::: .option(correct) -->
2. $\theta \approx \tfrac{\sqrt{2}}{\sqrt{N}}$
<!-- ::: -->
<!-- ::: .option -->
3. $\theta \approx \tfrac{1}{\sqrt{2N}}$
<!-- ::: -->
<!-- ::: -->
<!-- ::: -->
## Circuits for Grover's algorithm
To round off the chapter, we'll create a simple circuit from scratch that implements Grover's algorithm, and show it works. We'll use two qubits, and we'll start by creating an oracle circuit.
```
from qiskit import QuantumCircuit
```
### The oracle
To keep things simple, we're not going to solve a real problem here. For this demonstration, we'll create a circuit that flips the phase of the state $|11\rangle$ and leaves everything else unchanged. Fortunately, we already know of a two-qubit gate that does exactly that!
```
oracle = QuantumCircuit(2)
oracle.cz(0,1) # invert phase of |11>
oracle.draw()
```
Here's a short function to show the matrix representation of this circuit:
```
def display_unitary(qc, prefix=""):
"""Simulates a simple circuit and display its matrix representation.
Args:
qc (QuantumCircuit): The circuit to compile to a unitary matrix
prefix (str): Optional LaTeX to be displayed before the matrix
Returns:
None (displays matrix as side effect)
"""
from qiskit import Aer
from qiskit.visualization import array_to_latex
sim = Aer.get_backend('aer_simulator')
# Next, we'll create a copy of the circuit and work on
# that so we don't change anything as a side effect
qc = qc.copy()
# Tell the simulator to save the unitary matrix of this circuit
qc.save_unitary()
unitary = sim.run(qc).result().get_unitary()
display(array_to_latex(unitary, prefix=prefix))
display_unitary(oracle, "U_\\text{oracle}=")
```
<!-- ::: q-block.exercise -->
### Try it
Can you create 3 more oracle circuits that instead target the other 3 computational basis states ($|00\rangle$, $|01\rangle$ and $|10\rangle$)? Use `display_unitary` to check your answer.
_Hint:_ Try to create circuits that transform $|11\rangle$ to and from the basis state you're targeting, can you then use these circuits with the `cz` gate?
[Try in IBM Quantum Lab](https://quantum-computing.ibm.com/lab)
<!-- ::: -->
### Creating the diffuser
Next we'll create a diffuser for two qubits. Remember that we want to do a reflection around the state $|s\rangle$, so let's see if we can use the tools we already have to build a circuit that does this reflection.
We've already seen that the `cz` gate does a reflection around $|11\rangle$ (up to a global phase), so if we know the transformation that maps $|s\rangle \rightarrow |11\rangle$, we can:
1. Do the transformation $|s\rangle \rightarrow |11\rangle$
2. Reflect around $|11\rangle$ (i.e the `cz` gate)
3. Do the transformation $|11\rangle \rightarrow |s\rangle$
We know that we can create the state $|s\rangle$ from the state $|00\rangle$ by applying a H-gate to each qubit. Since the H-gate is its own inverse, applying H-gates to each qubit also does $|s\rangle \rightarrow |00\rangle$.
```
diffuser = QuantumCircuit(2)
diffuser.h([0, 1])
diffuser.draw()
```
Now we need to work out how we transform $|00\rangle \rightarrow |11\rangle$.
<!-- ::: q-block.exercise -->
### Quick quiz
<!-- ::: q-quiz(goal="intro-grover-5") -->
<!-- ::: .question -->
Which of these gates transforms $|0\rangle \rightarrow |1\rangle$?
<!-- ::: -->
<!-- ::: .option(correct) -->
1. `x`
<!-- ::: -->
<!-- ::: .option -->
2. `z`
<!-- ::: -->
<!-- ::: .option -->
3. `h`
<!-- ::: -->
<!-- ::: .option -->
3. `s`
<!-- ::: -->
<!-- ::: -->
<!-- ::: -->
So applying an X-gate to each qubit will do the transformation $|00\rangle \rightarrow |11\rangle$. Let's do that:
```
diffuser.x([0,1])
diffuser.draw()
```
Now we have the transformation $|s\rangle \rightarrow |11\rangle$, we can apply our `cz` gate and reverse the transformation.
```
diffuser.cz(0,1)
diffuser.x([0,1])
diffuser.h([0,1])
diffuser.draw()
```
### Putting it together
We now have two circuits, `oracle` and `diffuser`, so we can put this together into a circuit that performs Grover's algorithm. Remember the three steps:
1. Initialise the qubits to the state $|s\rangle$
2. Perform the oracle
3. Perform the diffuser
```
grover = QuantumCircuit(2)
grover.h([0,1]) # initialise |s>
grover = grover.compose(oracle)
grover = grover.compose(diffuser)
grover.measure_all()
grover.draw()
```
And when we simulate, we can see a 100% probability of measuring $|11\rangle$, which was the solution to our oracle!
```
from qiskit import Aer
sim = Aer.get_backend('aer_simulator')
sim.run(grover).result().get_counts()
```
<!-- ::: q-block.exercise -->
### Try it
Try replacing the oracle in this circuit with the different oracles you created above. Do you get the expected result?
[Try in IBM Quantum Lab](https://quantum-computing.ibm.com/lab)
<!-- ::: -->
## SAT problems are hard

Random guessing grows linearly with the number of entries in the database, which isn't actually too bad (although we know we can do much better), but we usually measure how algorithms grow by their input length in _bits_. So how do these two connect? Each extra variable (bit) in our SAT problem _doubles_ the number of possible inputs (i.e. entries in our database), so the search space grows exponentially with the number of bits.
$$\cssId{Big-N}{N} = 2^\cssId{lil-n}{n}$$
Since random guessing grows linearly with $N$, the running time will grow by roughly $2^n$.
<!-- ::: q-block.exercise -->
### Quick quiz
<!-- ::: q-quiz(goal="intro-grover-6") -->
<!-- ::: .question -->
How does the running time of Grover's algorithm grow with the number of input bits (when there is only one solution)?
<!-- ::: -->
<!-- ::: .option -->
1. $\sqrt{n}$
<!-- ::: -->
<!-- ::: .option -->
2. $2^n$
<!-- ::: -->
<!-- ::: .option(correct) -->
3. $\sqrt{2^n}$
<!-- ::: -->
<!-- ::: .option -->
3. $\sqrt{2^{n/2}}$
<!-- ::: -->
<!-- ::: -->
<!-- ::: -->
## Making use of structure
So far, we've treated SAT problems as if they're completely unstructured, but unlike the unsorted phonebook, we _do_ have some clues that will help us in our search. A SAT problem isn't a black box, but a set of individual clauses, and we can use these clauses to home in on a correct answer. We won't get anything nearly as efficient as binary search, but it's still much better than random guessing. One (classical) algorithm that uses the structure of SAT problems is Schöning's algorithm.

Like random guessing, Schöning's algorithm chooses an input at random and checks if it works. But unlike random guessing, it doesn't just throw this string away. Instead, it picks an unsatisfied clause and toggles a bit in the string to satisfy that clause. Annoyingly, this new string might unsatisfy a different, previously-satisfied clause, but on average it's beneficial to keep toggling bits in this manner a few times. If the initial guess was close enough, there's a fair chance we'll stumble upon the correct solution. If not, then after some number of steps, the computer starts again with a new completely random guess. It turns out for 3-SAT (although not (>3)-SAT), this algorithm grows with roughly $1.3334^n$, which not only beats random guessing, but also beats Grover's algorithm!

It may not be obvious at first glance, but we can actually combine Grover's and Schöning's algorithms to get something even better than either individually. If you create a circuit that carries out the bit-toggling part of Schöning's algorithm, you can use this as the oracle and use Grover's algorithm to find the best "initial guess". We won't go into it in this course, but it's a fun project to investigate!
# Problem Statement
Predicting the costs of used cars given the data collected from various sources and distributed across various locations in India.
## Import libraries
```
#Importing all libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import tree
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import LabelEncoder
import warnings
warnings.simplefilter('ignore')
from pandas import set_option
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import AdaBoostRegressor
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import RandomizedSearchCV
from scipy.stats import randint as sp_randint
from sklearn.neighbors import KNeighborsRegressor
import copy as cp
import re
warnings.filterwarnings('ignore')
```
## Importing the Datasets
```
#Importing the Datasets
df_train = pd.read_excel("Data_Train.xlsx")
df_test = pd.read_excel("Data_Test.xlsx")
```
## Performing EDA - Exploratory Data Analysis
```
#Identifying the number of features in the Datasets
df_train.shape , df_test.shape
#Identifying the features in the Datasets
print(list(df_train.columns))
print(list(df_test.columns))
#Identifying the data types of features provided in train and test set
print("\nTraining Set : \n","\n", df_train.dtypes)
print("\nTest Set : \n","\n",df_test.dtypes)
#Identifying the number of empty/null cells or NaNs by feature
print(df_train.isnull().sum())
print()
print(df_test.isnull().sum())
#Check statistics for train data
df_train.describe(include = 'all')
#Check statistics for test data
df_test.describe(include = 'all')
```
## Data Cleaning
```
#Appending the test and train DataFrames into one DataFrame
df = df_train.append(df_test, ignore_index=True, sort=False)
#Removing electric vehicles
df = df[df['Fuel_Type'] != 'Electric']
len(df)
#Adding car age relative to 2020
df['Car_Age'] = 2020 - df['Year']
#Removing units from Mileage, Engine and Power
df['Mileage'] = df['Mileage'].apply(lambda x : str(x).split(' ')[0]).astype(float)
df['Engine'] = df['Engine'].apply(lambda x : str(x).split(" ")[0]).astype(float)
df['Power'] = df['Power'].replace('null bhp','0 bhp').apply(lambda x : str(x).split(' ')[0]).astype(float)
#Setting Seats to 5 where the value is null
df['Seats'] = df['Seats'].fillna(5)
#Creating a column of company followed by car model ---> Car_Brand1
df['Car_Brand1'] = df['Name'].apply(lambda x: ' '.join(x.split(' ')[:2]))
#Substituting Engine and Power null values with their median (per Car_Brand1 group)
df['Engine'] = df.groupby(['Car_Brand1']).transform(lambda x: x.fillna(x.median()))['Engine']
df['Power'] = df.groupby(['Car_Brand1']).transform(lambda x: x.fillna(x.median()))['Power']
#Creating a column of company ---> Car_Brand2
df['Car_Brand2'] = df['Name'].apply(lambda x: x.split(' ')[0])
df.head()
#Changing categorical variables to numbers
df_obj = df.select_dtypes(exclude=['int64','float64'])
df_num = df.select_dtypes(include=['int64','float64'])
df_encoded = df_obj.apply(LabelEncoder().fit_transform)
df_2 = df_num.join(df_encoded)
df_obj.shape, df_num.shape, df_encoded.shape, df_2.shape
df_2['Mileage'].replace(0.00, np.nan, inplace= True) #As Mileage can't be 0.00
df_2['Seats'].replace(0.00, np.nan, inplace= True) #As Seats can't be 0.00
#Dropping Name and Year because we have Car_Age, Car_Brand1 and Car_Brand2
df_2.drop(columns=['Name','Year'], axis = 1, inplace=True)
df_2['Price'] = df_2['Price'].fillna(0.00)
#Attend to missing values
df_2['Mileage']=df_2['Mileage'].fillna(df_2['Mileage'].median())
df_2['Seats']=df_2['Seats'].fillna(5)
#Convert Seats and Engine features to int
df_2['Seats']=df_2['Seats'].astype(int)
df_2['Engine']=df_2['Engine'].astype(int)
df_2.head()
df_2.isnull().sum()
```
### OneHotEncoding
```
# importing one hot encoder from sklearn
from sklearn.preprocessing import OneHotEncoder
#One-hot encoding categorical variables
#Note: the categorical_features argument was removed in scikit-learn 0.22, so this requires an older version
onehotencoder = OneHotEncoder(categorical_features = [7,8,9,10,11,12])
df_2 = onehotencoder.fit_transform(df_2).toarray()
df_2 = pd.DataFrame(df_2)
df_2.head()
#Dividing into training and test datasets
train_df = df_2[df_2[280]!=0.0]# 280 = Price
test_df = df_2[df_2[280]==0.0]
test_df.drop(columns=[280], axis = 1, inplace=True)
train_df.shape, test_df.shape
#No of null values for each feature
print(train_df.isnull().sum(),'\n',test_df.isnull().sum())
### Scaling/Normalization of Features
#sc = StandardScaler()
#test_df_arr_scld = sc.fit_transform(test_df)
#test_df_2=pd.DataFrame(test_df_arr_scld, columns=test_df.columns)
test_df_2 = test_df.copy()
#train_df_arr_scld = sc.fit_transform(train_df)
#train_df_2=pd.DataFrame(train_df_arr_scld, columns=train_df.columns)
train_df_2 = train_df.copy()
train_df_2.head()
test_df_2.head()
```
## Model Building, Predicting and Evaluation
```
#dividing dataset into X and Y
train_y = train_df_2[280]
train_df_2.drop(columns=[280], axis = 1, inplace=True)
train_x = train_df_2
train_x.columns #column 280 (Price) was dropped, so the last column label is 281
print(train_y)
#Train Test Split on the Train dataset
seed = 15
test_size = 0.3
X_train, X_val, Y_train, Y_val = train_test_split(train_x, train_y, test_size = test_size, random_state = seed)
Y_train.isnull().sum()
```
## RandomForestRegressor without any hyperparameters, i.e. default parameters
```
reg = RandomForestRegressor()
reg.fit(X_train, Y_train)
Y_train_pred = reg.predict(X_train)
Y_val_pred = reg.predict(X_val)
Y_test_pridiction = reg.predict(test_df_2)#this line predicts the price values of the test dataset
train_RMSE=np.sqrt(mean_squared_error(Y_train,Y_train_pred))
val_RMSE=np.sqrt(mean_squared_error(Y_val,Y_val_pred))
print('RandomForestRegressor Train RMSE: ', train_RMSE)
print('RandomForestRegressor Validation RMSE: ', val_RMSE)
print('Score for Train Data: ', reg.score(X_train,Y_train))
print('Score for Validation Data: ', reg.score(X_val,Y_val))
#Using Cross Validation
scores = cross_val_score(reg, train_x, train_y, cv=10)
scores
print("Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
#test_df_arr_scld = sc.inverse_transform(df_2)
#test_df_2=pd.DataFrame(test_df_arr_scld, columns=test_df_2.columns)
```
#### Writing Result in 'Output_RandomForestRegressor.xlsx'
```
df_test['Price'] = Y_test_pridiction
df_test.head()
df_sub = pd.DataFrame(data=df_test)
writer = pd.ExcelWriter('Output_RandomForestRegressor.xlsx', engine='xlsxwriter')
df_sub.to_excel(writer,sheet_name='Sheet1', index=False)
writer.save()
```
## RandomForestRegressor with hyperparameters
```
#Finding optimal parameters via randomized search
reg = RandomForestRegressor()
param_dist = {"max_features": sp_randint(1, 10),
"min_samples_split": sp_randint(2, 10),
"max_depth": [2,3,4,5,6,7,8,9,10],
"min_samples_leaf": sp_randint(2, 10),
"n_estimators" : sp_randint(1, 40)}
n_iter_search = 40
random_search = RandomizedSearchCV(reg, param_distributions=param_dist, cv=10,
n_iter=n_iter_search)
random_search.fit(train_x,train_y)
random_search.best_params_
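# Alternatively, the tuned model can be built directly from the search result:
#   reg = RandomForestRegressor(**random_search.best_params_)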
reg = RandomForestRegressor(n_estimators=35,min_samples_split=15,max_features=7,max_depth=9,min_samples_leaf=2)
reg.fit(X_train, Y_train)
reg_temp = cp.deepcopy(reg) #After all analysis, this turns out to be the model with highest accuracy, hence keeping a copy of it
Y_train_pred = reg.predict(X_train)
Y_val_pred = reg.predict(X_val)
Y_test_pridiction = reg.predict(test_df_2)#this line predicts the price values of the test dataset
train_RMSE=np.sqrt(mean_squared_error(Y_train,Y_train_pred))
val_RMSE=np.sqrt(mean_squared_error(Y_val,Y_val_pred))
print('RandomForestRegressor Train RMSE: ', train_RMSE)
print('RandomForestRegressor Validation RMSE: ', val_RMSE)
print('Score for Train Data: ', reg.score(X_train,Y_train))
print('Score for Validation Data: ', reg.score(X_val,Y_val))
scores = cross_val_score(reg, train_x, train_y, cv=10)
scores
print("Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
```
#### Writing Result in 'Output_RandomForestRegressor_hyperparameters.xlsx'
```
df_test['Price'] = Y_test_pridiction
df_test.head()
df_sub = pd.DataFrame(data=df_test)
writer = pd.ExcelWriter('Output_RandomForestRegressor_hyperparameters.xlsx', engine='xlsxwriter')
df_sub.to_excel(writer,sheet_name='Sheet1', index=False)
writer.save()
```
## KNNRegressor without any hyperparameters
```
reg = KNeighborsRegressor()
reg.fit(X_train, Y_train)
Y_train_pred = reg.predict(X_train)
Y_val_pred = reg.predict(X_val)
Y_test_pridiction = reg.predict(test_df_2)#this line predicts the price values of the test dataset
train_RMSE=np.sqrt(mean_squared_error(Y_train,Y_train_pred))
val_RMSE=np.sqrt(mean_squared_error(Y_val,Y_val_pred))
print('KNeighborsRegressor Train RMSE: ', train_RMSE)
print('KNeighborsRegressor Validation RMSE: ', val_RMSE)
print('Score for Train Data: ', reg.score(X_train,Y_train))
print('Score for Validation Data: ', reg.score(X_val,Y_val))
scores = cross_val_score(reg, train_x, train_y, cv=10)
scores
print("Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
```
#### Writing Result in 'Output_KNNRegressor.xlsx'
```
df_test['Price'] = Y_test_pridiction
df_test.head()
df_sub = pd.DataFrame(data=df_test)
writer = pd.ExcelWriter('Output_KNNRegressor.xlsx', engine='xlsxwriter')
df_sub.to_excel(writer,sheet_name='Sheet1', index=False)
writer.save()
```
## KNNRegressor with hyperparameters
```
k_range = range(5,15)
for k in k_range:
reg = KNeighborsRegressor(k)
reg.fit(X_train,Y_train)
Y_train_pred = reg.predict(X_train)
Y_val_pred = reg.predict(X_val)
    Y_test_pridiction = reg.predict(test_df_2)#this line predicts the price values of the test dataset
train_RMSE=np.sqrt(mean_squared_error(Y_train,Y_train_pred))
val_RMSE=np.sqrt(mean_squared_error(Y_val,Y_val_pred))
print("For K value : ",k)
print("---------------------------------------------------")
print('KNeighborsRegressor Train RMSE: ', train_RMSE)
print('KNeighborsRegressor Validation RMSE: ', val_RMSE)
    print('KNeighborsRegressor: Train RMSE - Validation RMSE: ', train_RMSE-val_RMSE)
print("\n")
print('Score for Train Data: ', reg.score(X_train,Y_train))
print('Score for Validation Data: ', reg.score(X_val,Y_val))
print("\n")
reg = KNeighborsRegressor(7)
reg.fit(X_train, Y_train)
Y_train_pred = reg.predict(X_train)
Y_val_pred = reg.predict(X_val)
Y_test_pridiction = reg.predict(test_df_2)#this line predicts the price values of the test dataset
train_RMSE=np.sqrt(mean_squared_error(Y_train,Y_train_pred))
val_RMSE=np.sqrt(mean_squared_error(Y_val,Y_val_pred))
print('KNeighborsRegressor Train RMSE: ', train_RMSE)
print('KNeighborsRegressor Validation RMSE: ', val_RMSE)
print('Score for Train Data: ', reg.score(X_train,Y_train))
print('Score for Validation Data: ', reg.score(X_val,Y_val))
scores = cross_val_score(reg, train_x, train_y, cv=10)
scores
print("Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
```
#### Writing Result in 'Output_KNNRegressor_hyperparameters.xlsx'
```
df_test['Price'] = Y_test_pridiction
df_test.head()
df_sub = pd.DataFrame(data=df_test)
writer = pd.ExcelWriter('Output_KNNRegressor_hyperparameters.xlsx', engine='xlsxwriter')
df_sub.to_excel(writer,sheet_name='Sheet1', index=False)
writer.save()
```
## Decision Tree Regressor
```
#X_train, X_test, Y_train, Y_test = train_test_split(features_final, span_new['price'], test_size=0.33, random_state=42)
reg = tree.DecisionTreeRegressor(max_depth=3)
reg.fit(X_train,Y_train)
Y_train_pred = reg.predict(X_train)
Y_val_pred = reg.predict(X_val)
Y_test_pridiction = reg.predict(test_df_2)#this line predicts the price values of the test dataset
train_RMSE=np.sqrt(mean_squared_error(Y_train,Y_train_pred))
val_RMSE=np.sqrt(mean_squared_error(Y_val,Y_val_pred))
print('Decision Tree Regressor Train RMSE: ', train_RMSE)
print('Decision Tree Regressor Validation RMSE: ', val_RMSE)
print('Score for Train Data: ', reg.score(X_train,Y_train))
print('Score for Validation Data: ', reg.score(X_val,Y_val))
trknn_scores=[]
teknn_scores= []
rmse_scores=[]
for i in np.arange(1,20,1):
reg = tree.DecisionTreeRegressor(max_depth=i,random_state=42)
reg.fit(X_train,Y_train)
Y_train_pred = reg.predict(X_train)
Y_val_pred = reg.predict(X_val)
Y_test_pridiction = reg.predict(test_df_2)
train_scores = reg.score(X_train,Y_train)
val_scores = reg.score(X_val,Y_val)
# The Root mean squared error
trknn_scores.append(train_scores)
teknn_scores.append(val_scores)
rmse_scores.append(np.sqrt(mean_squared_error(Y_val, Y_val_pred)))
from sklearn import tree
from sklearn.metrics import mean_squared_error
#X_train, X_test, y_train, y_test = train_test_split(features_final, span_new['price'], test_size=0.33, random_state=42)
reg = tree.DecisionTreeRegressor(max_depth=8,random_state=42)
reg.fit(X_train,Y_train)
Y_train_pred = reg.predict(X_train)
Y_val_pred = reg.predict(X_val)
Y_test_pridiction = reg.predict(test_df_2)
train_RMSE=np.sqrt(mean_squared_error(Y_train,Y_train_pred))
val_RMSE=np.sqrt(mean_squared_error(Y_val,Y_val_pred))
print('Decision Tree Regressor Train RMSE: ', train_RMSE)
print('Decision Tree Regressor Validation RMSE: ', val_RMSE)
print('Score for Train Data: ', reg.score(X_train,Y_train))
print('Score for Validation Data: ', reg.score(X_val,Y_val))
scores = cross_val_score(reg, train_x, train_y, cv=10)
scores
print("Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
```
#### Writing Result in 'Output_Decision_Tree_Regressor.xlsx'
```
df_test['Price'] = Y_test_pridiction
df_test.head()
df_sub = pd.DataFrame(data=df_test)
writer = pd.ExcelWriter('Output_Decision_Tree_Regressor.xlsx', engine='xlsxwriter')
df_sub.to_excel(writer,sheet_name='Sheet1', index=False)
writer.save()
```
## Linear Regression Model
```
# Linear Regression Sklearn
from sklearn.linear_model import LinearRegression
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_val = sc.transform(X_val)
reg = LinearRegression()
reg.fit(X_train,Y_train)
Y_train_pred = reg.predict(X_train)
Y_val_pred = reg.predict(X_val)
Y_test_pridiction = reg.predict(sc.transform(test_df_2)) #scale the test set with the same scaler before predicting
train_RMSE=np.sqrt(mean_squared_error(Y_train,Y_train_pred))
val_RMSE=np.sqrt(mean_squared_error(Y_val,Y_val_pred))
print('Linear Regression Train RMSE: ', train_RMSE)
print('Linear Regression Validation RMSE: ', val_RMSE)
print('Score for Train Data: ', reg.score(X_train,Y_train))
print('Score for Validation Data: ', reg.score(X_val,Y_val))
scores = cross_val_score(reg, train_x, train_y, cv=10)
scores
```
#### Writing Result in 'Output_Linear_Regression.xlsx'
```
df_test['Price'] = Y_test_pridiction
df_test.head()
df_sub = pd.DataFrame(data=df_test)
writer = pd.ExcelWriter('Output_Linear_Regression.xlsx', engine='xlsxwriter')
df_sub.to_excel(writer,sheet_name='Sheet1', index=False)
writer.save()
print("Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
```
### Conclusion
We observe that, among the models implemented, RandomForestRegressor performs well on the provided dataset.
# MNIST Convolutional Neural Network - 2nd model
Gaetano Bonofiglio, Veronica Iovinella
This time we are going to implement a model similar to the one used by Dan Ciresan, Ueli Meier and Jurgen Schmidhuber in 2012. The model should reach an error of about 0.23% and it is quite similar to the previous one we implemented from the Keras documentation. The network was not only one of the best for MNIST, ranking second best at the time, but also performed very well on NIST SD 19 and NORB.
We are also going to use Keras checkpoints because of the many epochs required by the model and we're going to integrate some of the most recent techniques, like dropout.
Again for this notebook we are going to use **TensorFlow** with **Keras**.
```
import tensorflow as tf
# We don't really need to import TensorFlow here since it's handled by Keras,
# but we do it in order to output the version we are using.
tf.__version__
```
We are using TensorFlow-GPU 0.12.1 on Python 3.5.2, running on Windows 10 with Cuda 8.0.
We have 3 machines with the same environment and 3 different GPUs, respectively with 384, 1024 and 1664 Cuda cores.
## Imports
```
import os.path
from IPython.display import Image
from util import Util
u = Util()
import numpy as np
# Explicit random seed for reproducibility
np.random.seed(1337)
from keras.callbacks import ModelCheckpoint
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.utils import np_utils
from keras import backend as K
from keras.datasets import mnist
```
## Definitions
```
batch_size = 512
nb_classes = 10
nb_epoch = 800
# checkpoint path
checkpoints_filepath_tanh = "checkpoints/02_MNIST_tanh_weights.best.hdf5"
checkpoints_filepath_relu = "checkpoints/02_MNIST_relu_weights.best.hdf5"
# model image path
model_image_path = 'images/model_02_MNIST.png' # saving only relu
# input image dimensions
img_rows, img_cols = 28, 28
# number of convolutional filters to use
nb_filters1 = 20
nb_filters2 = 40
# size of pooling area for max pooling
pool_size1 = (2, 2)
pool_size2 = (3, 3)
# convolution kernel size
kernel_size1 = (4, 4)
kernel_size2 = (5, 5)
# dense layer size
dense_layer_size1 = 150
# dropout rate
dropout = 0.15
```
## Data load
```
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
u.plot_images(X_train[0:9], y_train[0:9])
if K.image_dim_ordering() == 'th':
X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
```
## Model definition
The model is structurally similar to the previous one, with 2 convolutional layers and 1 fully connected layer.
However, there are major differences in values and sizes, there is one more intermediate max pooling layer, and the activation function is a scaled hyperbolic tangent, as described in the [paper](http://people.idsia.ch/~ciresan/data/cvpr2012.pdf). However, since Rectified Linear Units started spreading after 2015, we are going to compare two different CNNs, one using tanh (as in the paper) and the other using relu.
**1x29x29-20C4-MP2-40C5-MP3-150N-10N DNN**.
<img src="images/cvpr2012.PNG" alt="1x29x29-20C4-MP2-40C5-MP3-150N-10N DNN" style="width: 400px;"/>
The paper doesn't seem to use any dropout layer to avoid overfitting, so we're going to use a dropout of 0.15, much lower than we used before.
It is also worth mentioning that the authors of the paper have their own methods to avoid overfitting, like dataset expansion by adding translations, rotations and deformations to the images of the training set.
```
model_tanh = Sequential()
model_relu = Sequential()
def initialize_network_with_activation_function(model, activation, checkpoints_filepath):
model.add(Convolution2D(nb_filters1, kernel_size1[0], kernel_size1[1],
border_mode='valid',
input_shape=input_shape, name='covolution_1_' + str(nb_filters1) + '_filters'))
model.add(Activation(activation, name='activation_1_' + activation))
model.add(MaxPooling2D(pool_size=pool_size1, name='max_pooling_1_' + str(pool_size1) + '_pool_size'))
model.add(Convolution2D(nb_filters2, kernel_size2[0], kernel_size2[1]))
model.add(Activation(activation, name='activation_2_' + activation))
model.add(MaxPooling2D(pool_size=pool_size2, name='max_pooling_1_' + str(pool_size2) + '_pool_size'))
model.add(Dropout(dropout))
model.add(Flatten())
model.add(Dense(dense_layer_size1, name='fully_connected_1_' + str(dense_layer_size1) + '_neurons'))
model.add(Activation(activation, name='activation_3_' + activation))
model.add(Dropout(dropout))
model.add(Dense(nb_classes, name='output_' + str(nb_classes) + '_neurons'))
model.add(Activation('softmax', name='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='adadelta',
metrics=['accuracy', 'precision', 'recall', 'mean_absolute_error'])
# loading weights from checkpoints
if os.path.exists(checkpoints_filepath):
model.load_weights(checkpoints_filepath)
initialize_network_with_activation_function(model_tanh, 'tanh', checkpoints_filepath_tanh)
initialize_network_with_activation_function(model_relu, 'relu', checkpoints_filepath_relu)
Image(u.maybe_save_network(model_relu, model_image_path), width=300)
```
## Training and evaluation
Using non-verbose output for training, since we already get some information from the checkpoint callback.
```
# checkpoint
checkpoint_tanh = ModelCheckpoint(checkpoints_filepath_tanh, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
callbacks_list_tanh = [checkpoint_tanh]
# training
print('training tanh model')
history_tanh = model_tanh.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
verbose=0, validation_data=(X_test, Y_test), callbacks=callbacks_list_tanh)
# evaluation
print('evaluating tanh model')
score = model_tanh.evaluate(X_test, Y_test, verbose=1)
print('Test score:', score[0])
print('Test accuracy:', score[1])
print('Test error:', (1-score[2])*100, '%')
u.plot_history(history_tanh)
u.plot_history(history_tanh, metric='loss', loc='upper left')
# checkpoint
checkpoint_relu = ModelCheckpoint(checkpoints_filepath_relu, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
callbacks_list_relu = [checkpoint_relu]
# training
print('training relu model')
history_relu = model_relu.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
verbose=0, validation_data=(X_test, Y_test), callbacks=callbacks_list_relu)
# evaluation
print('evaluating relu model')
score = model_relu.evaluate(X_test, Y_test, verbose=1)
print('Test score:', score[0])
print('Test accuracy:', score[1])
print('Test error:', (1-score[2])*100, '%')
u.plot_history(history_relu)
u.plot_history(history_relu, metric='loss', loc='upper left')
```
## Inspecting the result
```
# The predict_classes function outputs the highest probability class
# according to the trained classifier for each input example.
predicted_classes_tanh = model_tanh.predict_classes(X_test)
predicted_classes_relu = model_relu.predict_classes(X_test)
# Check which items we got right / wrong
correct_indices_tanh = np.nonzero(predicted_classes_tanh == y_test)[0]
incorrect_indices_tanh = np.nonzero(predicted_classes_tanh != y_test)[0]
correct_indices_relu = np.nonzero(predicted_classes_relu == y_test)[0]
incorrect_indices_relu = np.nonzero(predicted_classes_relu != y_test)[0]
```
### Examples of correct predictions (tanh)
```
u.plot_images(X_test[correct_indices_tanh[:9]], y_test[correct_indices_tanh[:9]],
predicted_classes_tanh[correct_indices_tanh[:9]])
```
### Examples of incorrect predictions (tanh)
```
u.plot_images(X_test[incorrect_indices_tanh[:9]], y_test[incorrect_indices_tanh[:9]],
predicted_classes_tanh[incorrect_indices_tanh[:9]])
```
### Examples of correct predictions (relu)
```
u.plot_images(X_test[correct_indices_relu[:9]], y_test[correct_indices_relu[:9]],
predicted_classes_relu[correct_indices_relu[:9]])
```
### Examples of incorrect predictions (relu)
```
u.plot_images(X_test[incorrect_indices_relu[:9]], y_test[incorrect_indices_relu[:9]],
predicted_classes_relu[incorrect_indices_relu[:9]])
```
### Confusion matrix (tanh)
```
u.plot_confusion_matrix(y_test, nb_classes, predicted_classes_tanh)
```
### Confusion matrix (relu)
```
u.plot_confusion_matrix(y_test, nb_classes, predicted_classes_relu)
```
## Results
We experimented with 2 CNN models, identical in every aspect except the activation function: one used "tanh" and the other "relu". After 800 epochs, each running in about 2 seconds (on a GTX 970), we observed that both models resulted in overfitting. Looking at the graphs, we noticed that a good number of epochs to choose, in order to save time, is about 50. In particular, in this case relu showed better behaviour than tanh, which starts to degrade after about 40 epochs, in contrast to the 56 epochs of relu. Although the results obtained after 800 epochs have higher precision (as we will see in the next notebook), the time it takes to train the network is not worth the increase.
# Creative LTA - Round 2
## **Interview Questions**
### Category 7 - Data Visualization
***
**PLEASE BE BRIEF AND CONCISE IN YOUR ANSWERS:**
***1. They have given you a time-series dataset in which observation values by country for vaccination rates are reported with various levels of disaggregation, e.g. for DPT there are values by wealth quintile, while for MMR there are not. In addition, the series are reported at different time resolutions, some monthly, others quarterly, and others annually. Please propose, with text and visual examples, how you would approach this dataset in order to maximize its usability and interpretability.***
**Time-series dataset**
We have created an example time-series dataset with observation values by province for Kenya. The series have been created at different time resolutions and levels of disaggregation. For MMR there are vaccination rate values monthly, while for DPT there are both rates and wealth quintiles quarterly.
```
import numpy as np
import pandas as pd
import geopandas as gpd
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
sns.set()
data = gpd.read_file('./gadm36_KEN_shp/gadm36_KEN_1.shp')
data.columns = map(str.lower, data.columns)
data = data[['gid_1', 'name_1', 'geometry']]
data.rename(columns={'gid_1': 'iso_1', 'name_1': 'province'}, inplace= True)
data = pd.concat([data,pd.DataFrame(columns=['vaccine_type', 'rate', 'wealth_quintile', 'month', 'quarter', 'year'])], sort=True)
years = np.arange(2000, 2019, 1)
quarters = np.arange(4)+1
months = np.arange(12)+1
vaccine_dic = {'DPT': ['quarter', quarters], 'MMR': ['month', months]}
data_1 = pd.DataFrame(columns=list(data.columns))
for vaccine in vaccine_dic.keys():
data_vaccine = pd.DataFrame(columns=list(data.columns))
for n, iso in enumerate(data['iso_1'].unique()):
data_iso = data.iloc[n:n+1]
nrows = len(years)*len(vaccine_dic[vaccine][1])-1
data_iso = data_iso.append([data_iso]*nrows,ignore_index=True, sort=True)
data_iso[vaccine_dic[vaccine][0]] = np.tile(vaccine_dic[vaccine][1], len(years))
data_iso['year'] = np.repeat(years, len(vaccine_dic[vaccine][1]))
data_vaccine = pd.concat([data_vaccine, data_iso])
rate = np.random.uniform(low=0.0, high=100, size=(len(data_vaccine),)).round(2)
data_vaccine['rate'] = rate
if vaccine == 'DPT':
data_vaccine['wealth_quintile'] = pd.qcut(rate, 5, labels=False)+1
data_vaccine['vaccine_type'] = vaccine
data_1 = pd.concat([data_1, data_vaccine])
data_1 = data_1[['geometry', 'iso_1', 'province', 'vaccine_type', 'rate', 'wealth_quintile', 'month', 'quarter', 'year']]
```
#### **Inspecting the data:**
- For DPT we have vaccination rates and wealth quintiles with a quarterly time resolution:
```
data_1[data_1['vaccine_type'] == 'DPT'].iloc[:4]
```
- For MMR we have vaccination rates with a monthly time resolution:
```
data_1[data_1['vaccine_type'] == 'MMR'].iloc[:12]
```
#### **Visual examples**
**Comparing two vaccines**
In the following figure, the first panel shows monthly vaccination rates for MMR across the entire population, while the second panel shows the quarterly distribution of DPT vaccinations by wealth quintile.
```
year = 2016
province = 'Baringo'
dpt = data_1[(data_1['vaccine_type'] == 'DPT') & (data_1['year'] == year)].copy()
result = dpt.groupby(['year', 'quarter', 'wealth_quintile']).size().reset_index(name='counts')
values = []
for i in result['wealth_quintile'].unique():
values.append(list(result[result['wealth_quintile'] == i]['counts']))
mmr = data_1[(data_1['vaccine_type'] == 'MMR') & (data_1['year'] == year) & (data_1['province'] == province)].copy()
# Plot the simple time series
my_ts = plt.figure()
my_ts.set_size_inches(10,5) # Specify the output size
ax1 = my_ts.add_subplot(211) # Add an axis frame object to the plot (i.e. a pannel)
ax2 = my_ts.add_subplot(212)
categories = ['Quintile 1','Quintile 2','Quintile 3','Quintile 4','Quintile 5']
locations = ['Quarter 1', 'Quarter 2', 'Quarter 3', 'Quarter 4'] # the x locations for the groups
width = 0.35 # the width of the bars
for i in range(len(values)):
if i == 0:
ax2.bar(locations, values[i], width, label=categories[i], edgecolor ='k')
past_values = np.array(values[i])
else:
ax2.bar(locations, values[i], width, bottom=past_values, label=categories[i], edgecolor ='k')
past_values = np.array(values[i]) + past_values
ax2.set_title('DPT wealth quintile distribution in Kenya'+' through '+str(year))
ax2.set_ylabel('counts')
ax2.legend()
ax1.plot(mmr['month'], mmr['rate'], label='MMR')
ax1.set_title('MMR rates in '+ province+' through '+str(year))
ax1.set_xlabel('months')
ax1.set_ylabel('Rate [%]')
ax1.legend()
plt.tight_layout(pad=1.08, h_pad=None, w_pad=None, rect=None)
my_ts.savefig('DPY_MMR.png',dpi=300)
```
If we want to compare the vaccination rates of two vaccines that have different temporal resolutions, we have to resample the data to the coarser resolution. In this particular case we resample the MMR monthly data by taking the mean of each quarter. In addition we can also resample quarterly data to yearly for both vaccines.
```
mmr = data_1[data_1['vaccine_type'] == 'MMR'].copy()
months = np.arange(12)+1
quarters = np.repeat(np.arange(4)+1, 3)
dic = dict(zip(months, quarters))
mmr['quarter'] = mmr['month'].apply(lambda x: dic[x])
mmr['quarter_rate'] = mmr.groupby(['quarter', 'year'])['rate'].transform(np.mean)
mmr['quarter_rate_error'] = mmr.groupby(['quarter', 'year'])['rate'].transform(np.std)
mmr['year_rate'] = mmr.groupby(['year'])['rate'].transform(np.mean)
mmr['year_rate_error'] = mmr.groupby(['year'])['rate'].transform(np.std)
mmr.iloc[:12]
```
In column `quarter_rate` we are now showing the vaccination rates of each quarter. In addition, we have also computed the standard deviation in column `quarter_rate_error` to have an estimate of the error.
```
dpt = data_1[data_1['vaccine_type'] == 'DPT'].copy()
dpt['year_rate'] = dpt.groupby(['year'])['rate'].transform(np.mean)
dpt['year_rate_error'] = dpt.groupby(['year'])['rate'].transform(np.std)
dpt.iloc[:4]
```
Now we are ready to compare the vaccination rates. As an example we can compare the vaccination rates of a given province in Kenya in a time span of three years.
```
dpt_compare = dpt[(dpt['province'] == 'Nairobi') & (dpt['year'] > 2015)].copy()
mmr_compare = mmr[(mmr['province'] == 'Nairobi') & (mmr['year'] > 2015)].copy()
mmr_compare = mmr_compare.groupby(['quarter', 'year'], as_index=False).first().sort_values('year')
```
Filtered DPT data.
- Quarterly:
```
dpt_compare['error'] = np.nan
dpt_result = dpt_compare[['province', 'vaccine_type', 'rate', 'error', 'quarter', 'year']].copy()
dpt_result
```
- Yearly:
```
dpt_yearly = dpt_compare.groupby(['year'], as_index=False).first()
dpt_yearly[['province', 'vaccine_type', 'year_rate', 'year_rate_error', 'year']].rename(columns={'year_rate': 'rate', 'year_rate_error': 'error'})
```
Filtered MMR data.
- Quarterly:
```
mmr_result = mmr_compare[['province', 'vaccine_type', 'quarter_rate', 'quarter_rate_error', 'quarter', 'year']].rename(columns={'quarter_rate': 'rate', 'quarter_rate_error': 'error'})
mmr_result
```
- Yearly:
```
mmr_yearly = mmr_compare.groupby(['year'], as_index=False).first()
mmr_yearly[['province', 'vaccine_type', 'year_rate', 'year_rate_error', 'year']].rename(columns={'year_rate': 'rate', 'year_rate_error': 'error'})
```
We have visualized these data as a line chart bounded by shaded bands:
```
lthick=2.0
xlabs = []
for n, year in enumerate(mmr_result.year.unique()):
for i in range(4):
xlabs.append(f"Q{i+1} {year}")
fig2 = plt.figure()
fig2.set_size_inches(20, 5)
ax1 = fig2.add_subplot(121)
ax1.plot(mmr_result.year+(mmr_result.quarter*3-1.5)/12, mmr_result.rate, 'r-', lw=lthick, label='MMR')
ax1.fill_between(mmr_result.year+(mmr_result.quarter*3-1.5)/12, mmr_result.rate-mmr_result.error, mmr_result.rate+mmr_result.error,
color='red', linewidth=0.1, alpha=0.3)
ax1.plot(dpt_result.year+(dpt_result.quarter*3-1.5)/12, dpt_result.rate, 'b-', lw=lthick, label='DPT')
ax1.set_ylabel('Rate [%]')
ax1.legend()
ax1.set_xticks(list(mmr_result.year+(mmr_result.quarter*3-1.5)/12))
ax1.set_xticklabels(xlabs)
fig2.savefig('compare.png',dpi=300, bbox_inches='tight')
```
***2. Consider a case in which you have a dataset with a mix of dimensions and attributes, i.e. the dimensions are the axes of the hypercube that identify the observation values, while the attributes represent additional useful metadata about the values (singly or as an aggregate set). Using an imaginary dataset based on measures of stunting and wasting of children by age group, sex, country, and year, describe with text and visual examples how you would organize the dimensions, attributes and observation values both in tabular and chart formats.***
#### **Imaginary dataset**
We have created an imaginary dataset that fulfills the above-mentioned requirements.
```
data = gpd.read_file('./gadm36_KEN_shp/gadm36_KEN_1.shp')
data.columns = map(str.lower, data.columns)
data = data[['gid_1', 'name_1']]
data.rename(columns={'gid_1': 'iso_1', 'name_1': 'province'}, inplace= True)
data = pd.concat([data,pd.DataFrame(columns=['indicator_type', 'value', 'age_group', 'sex', 'year', 'attribute_singly'])], sort=True)
indicators = ['stunting', 'wasting']
data_2 = pd.DataFrame(columns=list(data.columns))
years = np.arange(2000, 2019, 1)
sex = ['male', 'female']
age_group = ['0 – 4 years old', '5 – 9 years old', '10 – 14 years old', '15 – 18 years old']
for indicator in indicators:
data_indicator = pd.DataFrame(columns=list(data.columns))
for n, iso in enumerate(data['iso_1'].unique()):
data_iso = data.iloc[n:n+1]
nrows = len(years)*len(sex)*len(age_group)-1
data_iso = data_iso.append([data_iso]*nrows,ignore_index=True, sort=True)
data_iso['sex'] = np.tile(np.concatenate([np.repeat('male', len(age_group)), np.repeat('female', len(age_group))]), len(years))
data_iso['age_group'] = np.tile(age_group, len(years)*len(sex))
data_iso['year'] = np.repeat(years, len(sex)*len(age_group))
data_indicator = pd.concat([data_indicator, data_iso])
value = np.random.uniform(low=0.0, high=100, size=(len(data_indicator),)).round(2)
data_indicator['value'] = value
#data_indicator['sex'] = np.random.choice(sex, len(data_indicator))
data_indicator['indicator_type'] = indicator
data_2 = pd.concat([data_2, data_indicator])
data_2['attribute_singly'] = np.core.defchararray.add(np.repeat('metadata ', len(data_2)), np.arange(len(data_2)).astype('str'))
data_2 = data_2[['iso_1', 'province', 'indicator_type', 'value', 'age_group', 'sex', 'year', 'attribute_singly']]
```
#### **Data arrangement**
```
data_2.head(8)
```
In column `attribute_singly` we could include any kind of attribute per value. In order to have additional useful metadata aggregated over a set of values, we can create another table. As an example, we could have various attributes related to each province.
Dataset with aggregated attribute per province:
```
data_3 = gpd.read_file('./gadm36_KEN_shp/gadm36_KEN_1.shp')
data_3.columns = map(str.lower, data_3.columns)
data_3 = data_3[['gid_1', 'geometry']]
data_3.rename(columns={'gid_1': 'iso_1'}, inplace= True)
data_3['attribute_aggregate'] = np.core.defchararray.add(np.repeat('metadata ', len(data_3)), data_3['iso_1'].unique())
data_3.head()
```
We can also add data from multiple sources into a single table by merging them together using a common column. In this particular case we can merge them by using the `iso_1` column.
```
pd.merge(data_2, data_3, how='left', on='iso_1').head(4)
```
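For the chart format, one possible view (a sketch based on the `data_2` frame built above; the particular slice shown is an illustrative choice, and it assumes matplotlib has been imported as `plt` earlier in the notebook) fixes every dimension except `year` and plots the observation values as a time series:
```
example_province = data_2['province'].iloc[0]
subset = data_2[
    (data_2['province'] == example_province)
    & (data_2['indicator_type'] == 'stunting')
    & (data_2['sex'] == 'female')
    & (data_2['age_group'] == age_group[0])
]
fig, ax = plt.subplots(figsize=(8, 4))
# one line per fixed combination of dimensions; the x-axis is the remaining dimension (year)
ax.plot(subset['year'].astype(int), subset['value'].astype(float), 'o-')
ax.set_xlabel('year')
ax.set_ylabel('stunting [%]')
ax.set_title(f"Stunting, female, {age_group[0]}, {example_province}")
plt.show()
```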
***3. Common query operations over multi-dimensional data sets include slicing, dicing, drilling-down, and rolling up. Please describe with text and visual examples how you would design a data dashboard capable of handling all such operations over a dataset consisting of educational enrollment, attainment, and dropout rates, by sex, grade level, country, and year.***
```
data = gpd.read_file('./gadm36_KEN_shp/gadm36_KEN_1.shp')
data.columns = map(str.lower, data.columns)
data = data[['gid_1', 'name_1']]
data.rename(columns={'gid_1': 'iso_1', 'name_1': 'province'}, inplace= True)
data = pd.concat([data,pd.DataFrame(columns=['indicator', 'rate', 'grade_level', 'sex', 'year'])], sort=True)
indicators = ['enrollment', 'attainment', 'dropout']
data_4 = pd.DataFrame(columns=list(data.columns))
years = np.arange(2000, 2019, 1)
sex = ['male', 'female']
grade_level = ['Early childhood education', 'Primary education', 'Secondary education']
for indicator in indicators:
data_indicator = pd.DataFrame(columns=list(data.columns))
for n, iso in enumerate(data['iso_1'].unique()):
data_iso = data.iloc[n:n+1]
nrows = len(years)*len(sex)*len(grade_level)-1
data_iso = data_iso.append([data_iso]*nrows,ignore_index=True, sort=True)
data_iso['sex'] = np.tile(np.concatenate([np.repeat('male', len(grade_level)), np.repeat('female', len(grade_level))]), len(years))
data_iso['grade_level'] = np.tile(grade_level, len(years)*len(sex))
data_iso['year'] = np.repeat(years, len(sex)*len(grade_level))
data_indicator = pd.concat([data_indicator, data_iso])
value = np.random.uniform(low=0.0, high=100, size=(len(data_indicator),)).round(2)
data_indicator['rate'] = value
#data_indicator['sex'] = np.random.choice(sex, len(data_indicator))
data_indicator['indicator'] = indicator
data_4 = pd.concat([data_4, data_indicator])
data_4 = data_4[['iso_1', 'province', 'indicator', 'rate', 'grade_level', 'sex', 'year']]
data_4.head(6)
```
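To make the four query operations concrete before designing the dashboard, here is a minimal sketch over the `data_4` frame built above; the particular filters and aggregation levels are illustrative assumptions:
```
# the generated 'rate' column is object-typed after the concat, so cast it first
cube = data_4.assign(rate=data_4['rate'].astype(float))
# Slicing: fix a single value of one dimension (one year), keep all other dimensions
slice_2010 = cube[cube['year'] == 2010]
# Dicing: fix several dimensions at once to obtain a sub-cube
dice = cube[
    (cube['indicator'] == 'dropout')
    & (cube['sex'] == 'female')
    & (cube['grade_level'] == 'Primary education')
]
# Rolling up: aggregate a dimension away (mean rate per indicator and year,
# averaged over provinces, sex and grade level)
roll_up = cube.groupby(['indicator', 'year'], as_index=False)['rate'].mean()
# Drilling down: re-introduce a finer dimension (split the yearly figures by grade level)
drill_down = cube.groupby(['indicator', 'year', 'grade_level'], as_index=False)['rate'].mean()
```
In the dashboard, each of these operations would map to a control: dimension filters for slicing and dicing, and aggregation-level selectors for rolling up and drilling down.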
# T003 · Molecular filtering: unwanted substructures
**Note:** This talktorial is a part of TeachOpenCADD, a platform that aims to teach domain-specific skills and to provide pipeline templates as starting points for research projects.
Authors:
- Maximilian Driller, CADD seminar, 2017, Charité/FU Berlin
- Sandra Krüger, CADD seminar, 2018, Charité/FU Berlin
__Talktorial T003__: This talktorial is part of the TeachOpenCADD pipeline described in the first TeachOpenCADD publication ([_J. Cheminform._ (2019), **11**, 1-7](https://jcheminf.biomedcentral.com/articles/10.1186/s13321-019-0351-x)), comprising talktorials T001-T010.
## Aim of this talktorial
There are some substructures we prefer not to include into our screening library. In this talktorial, we learn about different types of such unwanted substructures and how to find, highlight and remove them with RDKit.
### Contents in Theory
* Unwanted substructures
* Pan Assay Interference Compounds (PAINS)
### Contents in Practical
* Load and visualize data
* Filter for PAINS
* Filter for unwanted substructures
* Highlight substructures
* Substructure statistics
### References
* Pan Assay Interference compounds ([wikipedia](https://en.wikipedia.org/wiki/Pan-assay_interference_compounds), [_J. Med. Chem._ (2010), **53**, 2719-2740](https://pubs.acs.org/doi/abs/10.1021/jm901137j))
* Unwanted substructures according to Brenk *et al.* ([_Chem. Med. Chem._ (2008), **3**, 435-44](https://onlinelibrary.wiley.com/doi/full/10.1002/cmdc.200700139))
* Inspired by a Teach-Discover-Treat tutorial ([repository](https://github.com/sriniker/TDT-tutorial-2014/blob/master/TDT_challenge_tutorial.ipynb))
* RDKit ([repository](https://github.com/rdkit/rdkit), [documentation](https://www.rdkit.org/docs/index.html))
## Theory
### Unwanted substructures
Substructures can be unfavorable, e.g., because they are toxic or reactive, due to unfavorable pharmacokinetic properties, or because they likely interfere with certain assays.
Nowadays, drug discovery campaigns often involve [high throughput screening](https://en.wikipedia.org/wiki/High-throughput_screening). Filtering unwanted substructures can support assembling more efficient screening libraries, which can save time and resources.
Brenk *et al.* ([_Chem. Med. Chem._ (2008), **3**, 435-44](https://onlinelibrary.wiley.com/doi/full/10.1002/cmdc.200700139)) have assembled a list of unfavorable substructures to filter their libraries used to screen for compounds to treat neglected diseases. Examples of such unwanted features are nitro groups (mutagenic), sulfates and phosphates (likely resulting in unfavorable pharmacokinetic properties), 2-halopyridines and thiols (reactive). This list of undesired substructures was published in the above mentioned paper and will be used in the practical part of this talktorial.
### Pan Assay Interference Compounds (PAINS)
[PAINS](https://en.wikipedia.org/wiki/Pan-assay_interference_compounds) are compounds that often occur as hits in HTS even though they actually are false positives. PAINS show activity at numerous targets rather than one specific target. Such behavior results from unspecific binding or interaction with assay components. Baell *et al.* ([_J. Med. Chem._ (2010), **53**, 2719-2740](https://pubs.acs.org/doi/abs/10.1021/jm901137j)) focused on substructures interfering in assay signaling. They described substructures which can help to identify such PAINS and provided a list which can be used for substructure filtering.

Figure 1: Specific and unspecific binding in the context of PAINS. Figure taken from [Wikipedia](https://commons.wikimedia.org/wiki/File:PAINS_Figure.tif).
## Practical
### Load and visualize data
First, we import the required libraries, load our filtered dataset from **Talktorial T002** and draw the first molecules.
```
from pathlib import Path
import pandas as pd
from tqdm.auto import tqdm
from rdkit import Chem
from rdkit.Chem import PandasTools
from rdkit.Chem.FilterCatalog import FilterCatalog, FilterCatalogParams
# define paths
HERE = Path(_dh[-1])
DATA = HERE / "data"
# load data from Talktorial T2
egfr_data = pd.read_csv(
HERE / "../T002_compound_adme/data/EGFR_compounds_lipinski.csv",
index_col=0,
)
# Drop unnecessary information
print("Dataframe shape:", egfr_data.shape)
egfr_data.drop(columns=["molecular_weight", "n_hbd", "n_hba", "logp"], inplace=True)
egfr_data.head()
# Add molecule column
PandasTools.AddMoleculeColumnToFrame(egfr_data, smilesCol="smiles")
# Draw first 3 molecules
Chem.Draw.MolsToGridImage(
list(egfr_data.head(3).ROMol),
legends=list(egfr_data.head(3).molecule_chembl_id),
)
```
### Filter for PAINS
The PAINS filter is already implemented in RDKit ([documentation](http://rdkit.org/docs/source/rdkit.Chem.rdfiltercatalog.html)). Such pre-defined filters can be applied via the `FilterCatalog` class. Let's learn how it can be used.
```
# initialize filter
params = FilterCatalogParams()
params.AddCatalog(FilterCatalogParams.FilterCatalogs.PAINS)
catalog = FilterCatalog(params)
# search for PAINS
matches = []
clean = []
for index, row in tqdm(egfr_data.iterrows(), total=egfr_data.shape[0]):
molecule = Chem.MolFromSmiles(row.smiles)
entry = catalog.GetFirstMatch(molecule) # Get the first matching PAINS
if entry is not None:
# store PAINS information
matches.append(
{
"chembl_id": row.molecule_chembl_id,
"rdkit_molecule": molecule,
"pains": entry.GetDescription().capitalize(),
}
)
else:
# collect indices of molecules without PAINS
clean.append(index)
matches = pd.DataFrame(matches)
egfr_data = egfr_data.loc[clean] # keep molecules without PAINS
# NBVAL_CHECK_OUTPUT
print(f"Number of compounds with PAINS: {len(matches)}")
print(f"Number of compounds without PAINS: {len(egfr_data)}")
```
Let's have a look at the first 3 identified PAINS.
```
Chem.Draw.MolsToGridImage(
list(matches.head(3).rdkit_molecule),
legends=list(matches.head(3)["pains"]),
)
```
### Filter and highlight unwanted substructures
Some lists of unwanted substructures, like PAINS, are already implemented in RDKit. However, it is also possible to use an external list and get the substructure matches manually.
Here, we use the list provided in the supporting information from Brenk *et al.* ([_Chem. Med. Chem._ (2008), **3**, 435-44](https://onlinelibrary.wiley.com/doi/full/10.1002/cmdc.200700139)).
```
substructures = pd.read_csv(DATA / "unwanted_substructures.csv", sep=r"\s+")
substructures["rdkit_molecule"] = substructures.smarts.apply(Chem.MolFromSmarts)
print("Number of unwanted substructures in collection:", len(substructures))
# NBVAL_CHECK_OUTPUT
```
Let's have a look at a few substructures.
```
Chem.Draw.MolsToGridImage(
mols=substructures.rdkit_molecule.tolist()[2:5],
legends=substructures.name.tolist()[2:5],
)
```
Search our filtered dataframe for matches with these unwanted substructures.
```
# search for unwanted substructure
matches = []
clean = []
for index, row in tqdm(egfr_data.iterrows(), total=egfr_data.shape[0]):
molecule = Chem.MolFromSmiles(row.smiles)
match = False
for _, substructure in substructures.iterrows():
if molecule.HasSubstructMatch(substructure.rdkit_molecule):
matches.append(
{
"chembl_id": row.molecule_chembl_id,
"rdkit_molecule": molecule,
"substructure": substructure.rdkit_molecule,
"substructure_name": substructure["name"],
}
)
match = True
if not match:
clean.append(index)
matches = pd.DataFrame(matches)
egfr_data = egfr_data.loc[clean]
# NBVAL_CHECK_OUTPUT
print(f"Number of found unwanted substructure: {len(matches)}")
print(f"Number of compounds without unwanted substructure: {len(egfr_data)}")
```
### Highlight substructures
Let's have a look at the first 3 identified unwanted substructures. Since we have access to the underlying SMARTS patterns we can highlight the substructures within the RDKit molecules.
```
to_highlight = [
row.rdkit_molecule.GetSubstructMatch(row.substructure) for _, row in matches.head(3).iterrows()
]
Chem.Draw.MolsToGridImage(
list(matches.head(3).rdkit_molecule),
highlightAtomLists=to_highlight,
legends=list(matches.head(3).substructure_name),
)
```
### Substructure statistics
Finally, we want to find the most frequent substructure found in our data set. The Pandas `DataFrame` provides convenient methods to group containing data and to retrieve group sizes.
```
# NBVAL_CHECK_OUTPUT
groups = matches.groupby("substructure_name")
group_frequencies = groups.size()
group_frequencies.sort_values(ascending=False, inplace=True)
group_frequencies.head(10)
```
## Discussion
In this talktorial we learned two possibilities to perform a search for unwanted substructures with RDKit:
* The `FilterCatalog` class can be used to search for predefined collections of substructures, e.g., PAINS.
* The `HasSubstructMatch()` function to perform manual substructure searches.
Actually, PAINS filtering could also be implemented via manual substructure searches with `HasSubstructMatch()`. Furthermore, the substructures defined by Brenk *et al.* ([_Chem. Med. Chem._ (2008), **3**, 435-44](https://onlinelibrary.wiley.com/doi/full/10.1002/cmdc.200700139)) are already implemented as a `FilterCatalog`. Additional pre-defined collections can be found in the RDKit [documentation](http://rdkit.org/docs/source/rdkit.Chem.rdfiltercatalog.html).
So far, we have been using the `HasSubstructMatch()` function, which only yields one match per compound. With the `GetSubstructMatches()` function ([documentation](https://www.rdkit.org/docs/source/rdkit.Chem.rdchem.html)) we have the opportunity to identify all occurrences of a particular substructure in a compound.
In case of PAINS, we have only looked at the first match per molecule (`GetFirstMatch()`). If we simply want to filter out all PAINS this is enough. However, we could also use `GetMatches()` in order to see all critical substructures of a molecule.
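As a small illustration (a sketch reusing the `matches` table and the `catalog` object from above), one could enumerate every occurrence of a substructure and every PAINS entry for a single molecule:
```
# pick the first molecule from the unwanted-substructure matches above
example = matches.iloc[0]
# all occurrences of the matched substructure (not only the first one)
all_occurrences = example.rdkit_molecule.GetSubstructMatches(example.substructure)
print(f"{example.substructure_name}: {len(all_occurrences)} occurrence(s)")
# all PAINS entries matched by this molecule (instead of GetFirstMatch)
for entry in catalog.GetMatches(example.rdkit_molecule):
    print("PAINS entry:", entry.GetDescription())
```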
Detected substructures can be handled in two different fashions:
* Either, the substructure search is applied as a filter and the compounds are excluded from further testing to save time and money.
* Or, they can be used as warnings, since ~5 % of FDA-approved drugs were found to contain PAINS ([_ACS. Chem. Biol._ (2018), **13**, 36-44](https://pubs.acs.org/doi/10.1021/acschembio.7b00903)). In this case experts can judge manually, if an identified substructure is critical or not.
## Quiz
* Why should we consider removing "PAINS" from a screening library? What is the issue with these compounds?
* Can you find situations when some unwanted substructures would not need to be removed?
* How are the substructures we used in this tutorial encoded?
<a href="https://colab.research.google.com/github/michalwilk123/huggingface-bert-finetune-example-pl/blob/master/ProjektSI_modele_2021.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Creating a new BERT model with the fine-tuning technique
```
! pip install transformers datasets torch &> /dev/null
from datasets import load_dataset, Dataset, DatasetDict
```
# Dataset
```
dataset = load_dataset("imdb")
```
## Splitting the data into two polarized subsets
```
def split_polarity_based(dataset) -> tuple:
"""
    Split the dataset based on label. The expected output of this function
    is a tuple of two datasets: negative and positive (0 = neg, 1 = pos).
    The inner structure of the datasets is preserved.
"""
negative, positive = DatasetDict(), DatasetDict()
for k in dataset:
if k == 'unsupervised':
continue
negative[k] = dataset[k].filter(lambda dat: dat['label'] == 0)
positive[k] = dataset[k].filter(lambda dat: dat['label'] == 1)
return negative, positive
neg_dataset, pos_dataset = split_polarity_based(dataset)
```
We declare our tokenizer and model: DistilBERT, a 'slimmed-down' version of BERT that should be more than sufficient for our purposes.
Besides that, we have to train the model somehow and punch 'holes' into the test data for the model to fill in. For this purpose we declare
a tool called __DataCollatorForLanguageModeling__, which masks words in the sentences with probability __mlm_probability__.
The probability value used here follows the one suggested in the [original paper](https://arxiv.org/abs/1810.04805):
```
from transformers import (AutoTokenizer,
AutoModelForMaskedLM,
DataCollatorForLanguageModeling)
import torch
MODEL_NAME = "distilbert-base-uncased"
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
tokenizer = AutoTokenizer.from_pretrained(
MODEL_NAME, use_fast=True
)
model = AutoModelForMaskedLM.from_pretrained(MODEL_NAME).to(device)
data_collator = DataCollatorForLanguageModeling(
tokenizer=tokenizer,
mlm_probability=0.15
)
```
Now we have to turn text such as "Ala ma kota" into its token representation, e.g. 1, 10, 8.
Since the task of our models is __not__ classification (sentiment analysis) but filling in the blanks in sentences, the 'label' fields describing the sentiment (polarity) are no longer needed.
```
tokenized_negative = neg_dataset.map(
lambda dat:tokenizer(dat['text'], truncation=True),
batched=True, num_proc=4, remove_columns=["text", "label"]
)
tokenized_positive = pos_dataset.map(
lambda dat:tokenizer(dat['text'], truncation=True),
batched=True, num_proc=4, remove_columns=["text", "label"]
)
```
# Training the models
The next step is to actually train the model on our data!
For this we use the __Trainer__ tool.
```
from transformers import Trainer, TrainingArguments
training_args = TrainingArguments(
"modele-negatywne",
evaluation_strategy = "epoch",
learning_rate=2e-5,
weight_decay=0.01,
)
trainer = Trainer(
model=model,
args=training_args,
train_dataset=tokenized_negative['train'],
eval_dataset=tokenized_negative["test"],
data_collator=data_collator,
)
# Uncomment the line below to fine-tune on the data above
# Estimated training and evaluation time: ~2h
# trainer.train()
# Uncomment the lines below to save the trained model
# to a local folder
# model.save_pretrained("distilbert-imdb-negative")
# tokenizer.save_pretrained("distilbert-imdb-negative")
training_args = TrainingArguments(
"modele-pozytywne",
evaluation_strategy = "epoch",
learning_rate=2e-5,
weight_decay=0.01,
)
trainer = Trainer(
model=model,
args=training_args,
train_dataset=tokenized_positive['train'],
eval_dataset=tokenized_positive["test"],
data_collator=data_collator,
)
# Uncomment the line below to fine-tune on the data above
# Estimated training and evaluation time: ~2h
# trainer.train()
# Uncomment the lines below to save the trained model
# to a local folder
# model.save_pretrained("distilbert-imdb-positive")
# tokenizer.save_pretrained("distilbert-imdb-positive")
```
# Downloading the created negative model from the huggingface platform
* [Published positive model](https://huggingface.co/michalwilk123/distilbert-imdb-positive)
* [Published negative model](https://huggingface.co/michalwilk123/distilbert-imdb-negative)
So that we do not have to worry about downloading or locating the model every time, I have published it on the huggingface platform.
Below I download the published model, which makes the training above no longer necessary.
```
! pip install transformers datasets torch &> /dev/null
from transformers import AutoTokenizer, AutoModelForMaskedLM
import torch
# choose the device the model will run on:
# * cpu: on the processor
# * cuda: on graphics cards with the nVidia architecture
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
tokenizer = AutoTokenizer.from_pretrained("michalwilk123/distilbert-imdb-negative", use_fast=True)
model = AutoModelForMaskedLM.from_pretrained("michalwilk123/distilbert-imdb-negative").to(device)
```
# Testing the negative model
```
text = "This movie is " + tokenizer.mask_token + "!" # nasze zdanie ktรณre chcemy testowaฤ
inputs = tokenizer(text, return_tensors = "pt").to(device) # tworzymy listฤ tokenรณw ze zdania
# zapamiฤtujemy lokacje zamaskowanego wyrazu (tokenizer moลผe dodawaฤ dodatkowe specjalne tokeny)
mask_index = torch.where(inputs["input_ids"][0] == tokenizer.mask_token_id)
outputs = model(**inputs) # przerzucamy nasze zdanie przez model
mask_word = outputs.logits[0, mask_index, :]
top_10 = torch.topk(mask_word, 10, dim = 1)[1][0]
# wyลwietlamy 10 najbardziej prawdopodobnych wyrazรณw
for token in top_10:
word = tokenizer.decode(token)
new_sentence = text.replace(tokenizer.mask_token, word)
print(new_sentence)
```
# Downloading the created positive model from the huggingface platform
```
from transformers import AutoTokenizer, AutoModelForMaskedLM
import torch
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
tokenizer = AutoTokenizer.from_pretrained("michalwilk123/distilbert-imdb-positive", use_fast=True)
model = AutoModelForMaskedLM.from_pretrained("michalwilk123/distilbert-imdb-positive").to(device)
```
# Testing the positive model
We use the model in the following way:
we pass our tokenized text to the forward function of the trained model.
As the result of this function we obtain a tensor (essentially a list) containing
probability weights for every word in our vocabulary.
So, starting from a vocabulary with N words, we get a tensor of dimensions N x 1.
Its values reflect the __probability__ of choosing a given word at a given
position. However, this is not our traditional definition of probability,
because the values can fall outside the interval [0, 1].
```
text = "This movie is " + tokenizer.mask_token + "!"
inputs = tokenizer(text, return_tensors = "pt").to(device)
mask_index = torch.where(inputs["input_ids"][0] == tokenizer.mask_token_id)
outputs = model(**inputs)
mask_word = outputs.logits[0, mask_index, :]
top_10 = torch.topk(mask_word, 10, dim = 1)[1][0]
for token in top_10:
word = tokenizer.decode([token])
new_sentence = text.replace(tokenizer.mask_token, word)
print(new_sentence)
```
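As a small follow-up (a sketch assuming `mask_word`, `top_10` and `tokenizer` from the cell above are still in scope), the raw logits can be squashed into the [0, 1] range with softmax:
```
import torch.nn.functional as F
# softmax rescales the raw logits so they lie in [0, 1] and sum to 1 over the vocabulary
probabilities = F.softmax(mask_word, dim=-1)
for token in top_10:
    word = tokenizer.decode([token])
    print(f"{word}: {probabilities[0, token].item():.4f}")
```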
# Downloading and testing the classifier model
Here it is worth converting the result into our traditional definition of probability.
The model returns a tensor of weights produced by the logit function. To turn the logit values into a traditional probability between 0 and 1, we pass them through the __softmax__ function.
```
from transformers import AutoTokenizer, AutoModelForSequenceClassification
tokenizer = AutoTokenizer.from_pretrained("textattack/bert-base-uncased-imdb")
model = AutoModelForSequenceClassification.from_pretrained("textattack/bert-base-uncased-imdb").to(device)
from torch.nn import functional as F
negative_sen = "This movie is awful!"
positive_sen = "This movie is awesome!"
outputs_n = model(**tokenizer(negative_sen, return_tensors = "pt").to(device))
nch = F.softmax(outputs_n['logits'], dim=-1).tolist()
outputs_p = model(**tokenizer(positive_sen, return_tensors = "pt").to(device))
pch = F.softmax(outputs_p['logits'], dim=-1).tolist()
print(
"NO | N | P\n",
"1 | %.2f | %.2f\n" % (round(nch[0][0], 2), round(nch[0][1], 2)),
"2 | %.2f | %.2f\n" % (round(pch[0][0], 2), round(pch[0][1], 2)),
)
```
# Chi2_score() usage example
The purpose of this notebook is to provide an alternative to SelectKBest().
scikit-learn's method sklearn.SelectKBest(score_func=chi2) returns faulty results when chi2 is used as the scoring function, as described in bug #21455: https://github.com/scikit-learn/scikit-learn/issues/21455 . I discovered this using scikit-learn version 0.24.1, but as I understand it the bug is still present in the latest release of scikit-learn, 1.0.1, from October 2021.
Until the fix is developed, developers may use the method chi2_util.chi2_score(), as demonstrated below. This method is a wrapper around scipy.stats.chi2_contingency(), which is an alternative implementation of the chi-square test. Below I show how to use it.
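For readers who do not have the chi2_util module at hand, a minimal sketch of what such a wrapper could look like is shown below. This is an illustrative assumption of mine, not the actual chi2_util implementation, and it omits the cell-aggregation logic discussed later in the notebook.
```
# illustrative sketch only: a bare-bones chi2 scorer built on scipy.stats.chi2_contingency
import pandas as pd
from scipy.stats import chi2_contingency

def chi2_score_sketch(df, features, target):
    rows = {}
    for feature in features:
        contingency = pd.crosstab(df[feature], df[target])   # observed counts
        chi2, p, dof, expected = chi2_contingency(contingency)
        rows[feature] = {'chi2': chi2, 'p': p, 'dof': dof}
    # rank features by their chi2 statistic, highest first
    return pd.DataFrame(rows).T.sort_values('chi2', ascending=False)
```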
## prepare the environment
```
#imports
import pandas as pd
# read in the sample data
df = pd.read_csv('sample300.csv')
# optional: rename the data to stay in line with the story line
# https://ondata.blog/articles/dont-trust-data-science-ask-the-people/
# (renaming of the features is not needed)
df = df.rename(columns = {'A': 'education',
'E': 'expertise',
'label': 'success'})
label = 'success'
# here's how the data looks
df.head()
```
## calculate chi2 score
The result is the complete dataframe of features, sorted by rank.
```
import chi2_util
# what are our categorical feature columns? In this case, all columns except label
cat_feature_cols = list(set(df.columns) - set([label]))
result = chi2_util.chi2_score(df,
features = cat_feature_cols,
target = label,
alpha = 0.05,
deep = True)
result
```
## How to use this result table
Here are a few examples of what you can do.
```
# get the names of top 3 features
result.index[:3].tolist()
# get the chi2 scores for top 5 features
result['chi2'][:5]
# get the p-values for all features
result['p']
```
# And what happens if the chi2 conditions are not met?
For completeness of this demonstration, note that chi2_score
implements the following condition from chi-square theory: <b>at least 80% of cells must have an expected count
of 5 or more</b>. The method aggregates cells so that this condition is met.
As an experiment, what happens if this condition is not met?
By setting the parameter deep=False, we can disable the aggregation and force the chi2 computation over all cells as they are.
The literature states that this may lead to unpredictable results.
Indeed, we can see that the results below differ from those above:
```
chi2_util.chi2_score( df,
features = cat_feature_cols,
target = label,
alpha = 0.05,
deep = False)
```
We can see that feature B landed high. Why is this so? It is because it has many
categories with very small 'expected' and 'observed' counts (see below). They are not really meaningful,
because of their small practical impact; however, when we explicitly disabled the aggregation,
the chi2 computation took them into account. Hence feature B was unjustly ranked high.
This demonstrates that a strict implementation of the chi2 test conditions is important,
otherwise the results cannot be trusted.
```
df.pivot_table(values = 'D', columns = label, index = 'B', aggfunc = len).fillna(0)
```
# The versions
Should anything not work, it may be due to dependency versions, so compare your library versions with mine below:
```
pd.__version__
import sklearn
sklearn.__version__
import scipy
scipy.__version__
from platform import python_version
python_version()
import sklearn; sklearn.show_versions()
```
<a href="https://colab.research.google.com/gist/GEJ1/68a7525f6e38a074f1474db3e0f894d6/analisis_linkedin.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab" data-canonical-src="https://colab.research.google.com/assets/colab-badge.svg"></a>
# **TikTok data analysis**
LinkedIn lets us download our own data from the platform (e.g. the text of our posts, comments, messages, the people we follow or who follow us, etc.).
```
# Install the libraries we will need that are not pre-installed in the Colab environment
!pip install wordcloud
! pip install nltk
# Imports
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from wordcloud import WordCloud
# To get the list of "stopwords" so we can discard them
import nltk
from nltk.corpus import stopwords
# Build the list of punctuation marks
import string
# Unzip the file
# Replace 'nombre_del_archivo_que_les_dio_linkedin' with the name of the file you uploaded to Colab
# !unzip /content/nombre_del_archivo_que_les_dio_linkedin.zip
!unzip /content/Complete_LinkedInDataExport_12-03-2021.zip
from google.colab import files
files.upload()
df_shares = pd.read_csv('analisis_comments_tiktok.csv',encoding='utf-8')
df_shares
texto_de_publicaciones = df_shares['comment']
texto_de_publicaciones = [i for i in texto_de_publicaciones if type(i) == str]
# Get the list of Spanish stopwords (connectors, prepositions, etc.) thanks to nltk
nltk.download('stopwords')
stop_words = stopwords.words('spanish')
# Use set to drop duplicates
texto = [i for i in set(texto_de_publicaciones) if type(i) == str]
texto = ''.join(texto)
def limpiar_puntuacion_stopwords(texto):
"""
Funcion para limpiar el string
#Modificado de la siguiente fuente: https://antonio-fernandez-troyano.medium.com/nube-de-palabras-word-cloud-con-python-a-partir-de-varias-webs-111e94220822
Parameters
---------------
texto (str) -> Texto a limpiar
Returns
---------------
texto_limpio (str) -> Texto limpio luego de sacarle signos de puntuacion y stopwords
"""
puntuacion = []
for s in string.punctuation:
puntuacion.append(str(s))
sp_puntuacion = ["ยฟ", "ยก", "โ", "โ", "โฆ", ":", "โ", "ยป", "ยซ"]
puntuacion += sp_puntuacion
#Reemplazamos signos de puntuaciรณn por "":
for p in puntuacion:
texto_limpio = texto.lower().replace(p,"")
for p in puntuacion:
texto_limpio = texto_limpio.replace(p,"")
#Reemplazamos stop_words por "":
for stop in stop_words:
texto_limpio_lista = texto_limpio.split()
texto_limpio_lista = [i.strip() for i in texto_limpio_lista]
try:
while stop in texto_limpio_lista: texto_limpio_lista.remove(stop)
except:
print("Error")
pass
texto_limpio= " ".join(texto_limpio_lista)
return texto_limpio
# Clean the text
clean_texto = limpiar_puntuacion_stopwords(texto)
# Build the word cloud
word_cloud = WordCloud(height=800, width=800, background_color='white',max_words=100, min_font_size=5).generate(clean_texto)
# word_cloud.to_file("./img/ejemplo_sencillo.png") # save the generated image
plt.figure(figsize=(10,8))
plt.imshow(word_cloud)
plt.axis('off')
plt.tight_layout(pad=0)
plt.show()
word_cloud.to_file('wordcloud.png')
!pip install -q plotly==4.2.1
```
```
import os
import json
import pickle
import random
from collections import defaultdict, Counter
from indra.literature.adeft_tools import universal_extract_text
from indra.databases.hgnc_client import get_hgnc_name, get_hgnc_id
from adeft.discover import AdeftMiner
from adeft.gui import ground_with_gui
from adeft.modeling.label import AdeftLabeler
from adeft.modeling.classify import AdeftClassifier
from adeft.disambiguate import AdeftDisambiguator
from adeft_indra.ground.ground import AdeftGrounder
from adeft_indra.model_building.s3 import model_to_s3
from adeft_indra.model_building.escape import escape_filename
from adeft_indra.db.content import get_pmids_for_agent_text, get_pmids_for_entity, \
get_plaintexts_for_pmids
adeft_grounder = AdeftGrounder()
shortforms = ['AF']
model_name = ':'.join(sorted(escape_filename(shortform) for shortform in shortforms))
results_path = os.path.abspath(os.path.join('../..', 'results', model_name))
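# Mine candidate longform expansions for each shortform from a sample of texts that contain it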
miners = dict()
all_texts = {}
for shortform in shortforms:
pmids = get_pmids_for_agent_text(shortform)
if len(pmids) > 10000:
pmids = random.choices(pmids, k=10000)
text_dict = get_plaintexts_for_pmids(pmids, contains=shortforms)
text_dict = {pmid: text for pmid, text in text_dict.items() if len(text) > 5}
miners[shortform] = AdeftMiner(shortform)
miners[shortform].process_texts(text_dict.values())
all_texts.update(text_dict)
longform_dict = {}
for shortform in shortforms:
longforms = miners[shortform].get_longforms()
longforms = [(longform, count, score) for longform, count, score in longforms
if count*score > 2]
longform_dict[shortform] = longforms
combined_longforms = Counter()
for longform_rows in longform_dict.values():
combined_longforms.update({longform: count for longform, count, score
in longform_rows})
grounding_map = {}
names = {}
for longform in combined_longforms:
groundings = adeft_grounder.ground(longform)
if groundings:
grounding = groundings[0]['grounding']
grounding_map[longform] = grounding
names[grounding] = groundings[0]['name']
longforms, counts = zip(*combined_longforms.most_common())
pos_labels = []
list(zip(longforms, counts))
grounding_map, names, pos_labels = ground_with_gui(longforms, counts,
grounding_map=grounding_map,
names=names, pos_labels=pos_labels, no_browser=True, port=8890)
result = [grounding_map, names, pos_labels]
result
from adeft.disambiguate import load_disambiguator
d = load_disambiguator('AF')
grounding_dict, names, pos_labels = d.grounding_dict, d.names, d.pos_labels
grounding_map = grounding_dict['AF']
result = [grounding_map, names, pos_labels]
result
grounding_map, names, pos_labels = [{'atrial fibrillation': 'MESH:D001281',
'annulus fibrosus': 'MESH:D000070616',
'amniotic fluid': 'MESH:D000653',
'aflatoxin': 'CHEBI:CHEBI:22271',
'auranofin': 'CHEBI:CHEBI:2922',
'autofluorescence': 'MESH:D061848',
'aminofluorene': 'PUBCHEM:22817',
'anulus fibrosus': 'MESH:D000070616',
'antiferromagnetic': 'antiferromagnetic',
'antisecretory factor': 'MESH:C049628',
'aspergillus fumigatus': 'MESH:D001232',
'aqueous fraction': 'ungrounded',
'arcuate fasciculus': 'ungrounded',
'aminoflavone': 'MESH:C413760',
'activity function': 'ungrounded',
'amentoflavone': 'CHEBI:CHEBI:2631',
'ascofuranone': 'MESH:C006640',
'activity fraction': 'ungrounded',
'adventitial fibroblasts': 'ungrounded',
'atrial fibrillation flutter': 'MESH:D001281',
'af': 'ungrounded',
'albiflorin': 'CHEBI:CHEBI:132793',
'acicular ferrite': 'ungrounded',
'aortic flow': 'ungrounded',
'aggressive fibromatosis': 'MESH:D018222',
'antifouling': 'ungrounded',
'asialofetuin': 'MESH:C017029',
'atrial flutter': 'MESH:D001282',
'acetone fraction': 'ungrounded',
'anthocyanin fraction': 'ungrounded',
'alcohol fed': 'ungrounded',
'auto fluorescence': 'MESH:D061848',
'allantoic fluid': 'ungrounded',
'aggregation factor': 'MESH:C013065',
'adipocyte fraction': 'ungrounded',
'anhydro d fructose': 'ungrounded',
'actin filament': 'FPLX:Actin',
'acne fulminans': 'ungrounded',
'aerobic fitness': 'ungrounded',
'ascitic fluid': 'MESH:D001202',
'atrial fibrillation or flutter': 'MESH:D001281',
'abdominal fat': 'MESH:D050153',
'afferent facilitate': 'ungrounded',
'arginyl fructose': 'ungrounded',
'asthmatic fibroblasts': 'MESH:D005347',
'activity factor': 'ungrounded',
'f axysporum': 'ungrounded',
'accelerated fraction': 'ungrounded',
'attributable fraction': 'ungrounded',
'a trial fibrillation': 'MESH:D001281',
'access flap': 'ungrounded',
'adaptive flies': 'ungrounded',
'altered feedback': 'ungrounded',
'arial fibrillation': 'ungrounded',
'adherent fraction': 'ungrounded',
'alkaloid fraction': 'ungrounded',
'fak recombinant adenovirus': 'MESH:D000256',
'annulus fibrosis': 'MESH:D005355',
'allele frequency': 'MESH:D005787',
'arrival formula': 'ungrounded',
'adjacency filter': 'ungrounded',
'alcohol fermentation': 'ungrounded',
'adaptive function': 'ungrounded',
'acifluorfen': 'CHEBI:CHEBI:73172',
'amyloid fibrillation': 'ungrounded',
'angiogenic factor': 'ungrounded',
'aflatoxin b1': 'CHEBI:CHEBI:22271'},
{'MESH:D001281': 'Atrial Fibrillation',
'MESH:D050153': 'Abdominal Fat',
'CHEBI:CHEBI:73172': 'acifluorfen',
'FPLX:Actin': 'Actin',
'CHEBI:CHEBI:22271': 'aflatoxin',
'MESH:C013065': 'cell aggregation factors',
'MESH:D018222': 'Fibromatosis, Aggressive',
'CHEBI:CHEBI:132793': 'albiflorin',
'MESH:D005787': 'Gene Frequency',
'CHEBI:CHEBI:2631': 'amentoflavone',
'MESH:C413760': 'aminoflavone',
'PUBCHEM:22817': '1-Aminofluorene',
'MESH:D000653': 'Amniotic Fluid',
'MESH:D005355': 'Fibrosis',
'MESH:D000070616': 'Annulus Fibrosus',
'antiferromagnetic': 'antiferromagnetic',
'MESH:C049628': 'antisecretory factor',
'MESH:D001202': 'Ascitic Fluid',
'MESH:C006640': 'ascofuranone',
'MESH:C017029': 'asialofetuin',
'MESH:D001232': 'Aspergillus fumigatus',
'MESH:D005347': 'Fibroblasts',
'MESH:D001282': 'Atrial Flutter',
'CHEBI:CHEBI:2922': 'auranofin',
'MESH:D061848': 'Optical Imaging',
'MESH:D000256': 'Adenoviridae'},
['CHEBI:CHEBI:22271',
'CHEBI:CHEBI:2922',
'MESH:C049628',
'MESH:D000070616',
'MESH:D000653',
'MESH:D001281',
'MESH:D061848',
'PUBCHEM:22817']]
excluded_longforms = ['af']
grounding_dict = {shortform: {longform: grounding_map[longform]
for longform, _, _ in longforms if longform in grounding_map
and longform not in excluded_longforms}
for shortform, longforms in longform_dict.items()}
result = [grounding_dict, names, pos_labels]
if not os.path.exists(results_path):
os.mkdir(results_path)
with open(os.path.join(results_path, f'{model_name}_preliminary_grounding_info.json'), 'w') as f:
json.dump(result, f)
additional_entities = []
unambiguous_agent_texts = {}
labeler = AdeftLabeler(grounding_dict)
corpus = labeler.build_from_texts((text, pmid) for pmid, text in all_texts.items())
agent_text_pmid_map = defaultdict(list)
for text, label, id_ in corpus:
agent_text_pmid_map[label].append(id_)
entities = pos_labels + additional_entities
entity_pmid_map = {entity: set(get_pmids_for_entity(*entity.split(':', maxsplit=1),
major_topic=True))for entity in entities}
intersection1 = []
for entity1, pmids1 in entity_pmid_map.items():
for entity2, pmids2 in entity_pmid_map.items():
intersection1.append((entity1, entity2, len(pmids1 & pmids2)))
intersection2 = []
for entity1, pmids1 in agent_text_pmid_map.items():
for entity2, pmids2 in entity_pmid_map.items():
intersection2.append((entity1, entity2, len(set(pmids1) & pmids2)))
intersection1
intersection2
all_used_pmids = set()
for entity, agent_texts in unambiguous_agent_texts.items():
used_pmids = set()
for agent_text in agent_texts:
pmids = set(get_pmids_for_agent_text(agent_text))
new_pmids = list(pmids - all_texts.keys() - used_pmids)
text_dict = get_plaintexts_for_pmids(new_pmids, contains=agent_texts)
corpus.extend([(text, entity, pmid) for pmid, text in text_dict.items()])
used_pmids.update(new_pmids)
all_used_pmids.update(used_pmids)
for entity, pmids in entity_pmid_map.items():
new_pmids = list(set(pmids) - all_texts.keys() - all_used_pmids)
if len(new_pmids) > 10000:
new_pmids = random.choices(new_pmids, k=10000)
text_dict = get_plaintexts_for_pmids(new_pmids)
corpus.extend([(text, entity, pmid) for pmid, text in text_dict.items()])
%%capture
classifier = AdeftClassifier(shortforms, pos_labels=pos_labels, random_state=1729)
param_grid = {'C': [100.0], 'max_features': [10000]}
texts, labels, pmids = zip(*corpus)
classifier.cv(texts, labels, param_grid, cv=5, n_jobs=5)
classifier.stats
disamb = AdeftDisambiguator(classifier, grounding_dict, names)
disamb.dump(model_name, results_path)
print(disamb.info())
model_to_s3(disamb)
```
|
github_jupyter
|
import os
import json
import pickle
import random
from collections import defaultdict, Counter
from indra.literature.adeft_tools import universal_extract_text
from indra.databases.hgnc_client import get_hgnc_name, get_hgnc_id
from adeft.discover import AdeftMiner
from adeft.gui import ground_with_gui
from adeft.modeling.label import AdeftLabeler
from adeft.modeling.classify import AdeftClassifier
from adeft.disambiguate import AdeftDisambiguator
from adeft_indra.ground.ground import AdeftGrounder
from adeft_indra.model_building.s3 import model_to_s3
from adeft_indra.model_building.escape import escape_filename
from adeft_indra.db.content import get_pmids_for_agent_text, get_pmids_for_entity, \
get_plaintexts_for_pmids
adeft_grounder = AdeftGrounder()
shortforms = ['AF']
model_name = ':'.join(sorted(escape_filename(shortform) for shortform in shortforms))
results_path = os.path.abspath(os.path.join('../..', 'results', model_name))
miners = dict()
all_texts = {}
for shortform in shortforms:
pmids = get_pmids_for_agent_text(shortform)
if len(pmids) > 10000:
pmids = random.choices(pmids, k=10000)
text_dict = get_plaintexts_for_pmids(pmids, contains=shortforms)
text_dict = {pmid: text for pmid, text in text_dict.items() if len(text) > 5}
miners[shortform] = AdeftMiner(shortform)
miners[shortform].process_texts(text_dict.values())
all_texts.update(text_dict)
longform_dict = {}
for shortform in shortforms:
longforms = miners[shortform].get_longforms()
longforms = [(longform, count, score) for longform, count, score in longforms
if count*score > 2]
longform_dict[shortform] = longforms
combined_longforms = Counter()
for longform_rows in longform_dict.values():
combined_longforms.update({longform: count for longform, count, score
in longform_rows})
grounding_map = {}
names = {}
for longform in combined_longforms:
groundings = adeft_grounder.ground(longform)
if groundings:
grounding = groundings[0]['grounding']
grounding_map[longform] = grounding
names[grounding] = groundings[0]['name']
longforms, counts = zip(*combined_longforms.most_common())
pos_labels = []
list(zip(longforms, counts))
grounding_map, names, pos_labels = ground_with_gui(longforms, counts,
grounding_map=grounding_map,
names=names, pos_labels=pos_labels, no_browser=True, port=8890)
result = [grounding_map, names, pos_labels]
result
from adeft.disambiguate import load_disambiguator
d = load_disambiguator('AF')
grounding_dict, names, pos_labels = d.grounding_dict, d.names, d.pos_labels
grounding_map = grounding_dict['AF']
result = [grounding_map, names, pos_labels]
result
grounding_map, names, pos_labels = [{'atrial fibrillation': 'MESH:D001281',
'annulus fibrosus': 'MESH:D000070616',
'amniotic fluid': 'MESH:D000653',
'aflatoxin': 'CHEBI:CHEBI:22271',
'auranofin': 'CHEBI:CHEBI:2922',
'autofluorescence': 'MESH:D061848',
'aminofluorene': 'PUBCHEM:22817',
'anulus fibrosus': 'MESH:D000070616',
'antiferromagnetic': 'antiferromagnetic',
'antisecretory factor': 'MESH:C049628',
'aspergillus fumigatus': 'MESH:D001232',
'aqueous fraction': 'ungrounded',
'arcuate fasciculus': 'ungrounded',
'aminoflavone': 'MESH:C413760',
'activity function': 'ungrounded',
'amentoflavone': 'CHEBI:CHEBI:2631',
'ascofuranone': 'MESH:C006640',
'activity fraction': 'ungrounded',
'adventitial fibroblasts': 'ungrounded',
'atrial fibrillation flutter': 'MESH:D001281',
'af': 'ungrounded',
'albiflorin': 'CHEBI:CHEBI:132793',
'acicular ferrite': 'ungrounded',
'aortic flow': 'ungrounded',
'aggressive fibromatosis': 'MESH:D018222',
'antifouling': 'ungrounded',
'asialofetuin': 'MESH:C017029',
'atrial flutter': 'MESH:D001282',
'acetone fraction': 'ungrounded',
'anthocyanin fraction': 'ungrounded',
'alcohol fed': 'ungrounded',
'auto fluorescence': 'MESH:D061848',
'allantoic fluid': 'ungrounded',
'aggregation factor': 'MESH:C013065',
'adipocyte fraction': 'ungrounded',
'anhydro d fructose': 'ungrounded',
'actin filament': 'FPLX:Actin',
'acne fulminans': 'ungrounded',
'aerobic fitness': 'ungrounded',
'ascitic fluid': 'MESH:D001202',
'atrial fibrillation or flutter': 'MESH:D001281',
'abdominal fat': 'MESH:D050153',
'afferent facilitate': 'ungrounded',
'arginyl fructose': 'ungrounded',
'asthmatic fibroblasts': 'MESH:D005347',
'activity factor': 'ungrounded',
'f axysporum': 'ungrounded',
'accelerated fraction': 'ungrounded',
'attributable fraction': 'ungrounded',
'a trial fibrillation': 'MESH:D001281',
'access flap': 'ungrounded',
'adaptive flies': 'ungrounded',
'altered feedback': 'ungrounded',
'arial fibrillation': 'ungrounded',
'adherent fraction': 'ungrounded',
'alkaloid fraction': 'ungrounded',
'fak recombinant adenovirus': 'MESH:D000256',
'annulus fibrosis': 'MESH:D005355',
'allele frequency': 'MESH:D005787',
'arrival formula': 'ungrounded',
'adjacency filter': 'ungrounded',
'alcohol fermentation': 'ungrounded',
'adaptive function': 'ungrounded',
'acifluorfen': 'CHEBI:CHEBI:73172',
'amyloid fibrillation': 'ungrounded',
'angiogenic factor': 'ungrounded',
'aflatoxin b1': 'CHEBI:CHEBI:22271'},
{'MESH:D001281': 'Atrial Fibrillation',
'MESH:D050153': 'Abdominal Fat',
'CHEBI:CHEBI:73172': 'acifluorfen',
'FPLX:Actin': 'Actin',
'CHEBI:CHEBI:22271': 'aflatoxin',
'MESH:C013065': 'cell aggregation factors',
'MESH:D018222': 'Fibromatosis, Aggressive',
'CHEBI:CHEBI:132793': 'albiflorin',
'MESH:D005787': 'Gene Frequency',
'CHEBI:CHEBI:2631': 'amentoflavone',
'MESH:C413760': 'aminoflavone',
'PUBCHEM:22817': '1-Aminofluorene',
'MESH:D000653': 'Amniotic Fluid',
'MESH:D005355': 'Fibrosis',
'MESH:D000070616': 'Annulus Fibrosus',
'antiferromagnetic': 'antiferromagnetic',
'MESH:C049628': 'antisecretory factor',
'MESH:D001202': 'Ascitic Fluid',
'MESH:C006640': 'ascofuranone',
'MESH:C017029': 'asialofetuin',
'MESH:D001232': 'Aspergillus fumigatus',
'MESH:D005347': 'Fibroblasts',
'MESH:D001282': 'Atrial Flutter',
'CHEBI:CHEBI:2922': 'auranofin',
'MESH:D061848': 'Optical Imaging',
'MESH:D000256': 'Adenoviridae'},
['CHEBI:CHEBI:22271',
'CHEBI:CHEBI:2922',
'MESH:C049628',
'MESH:D000070616',
'MESH:D000653',
'MESH:D001281',
'MESH:D061848',
'PUBCHEM:22817']]
excluded_longforms = ['af']
grounding_dict = {shortform: {longform: grounding_map[longform]
for longform, _, _ in longforms if longform in grounding_map
and longform not in excluded_longforms}
for shortform, longforms in longform_dict.items()}
result = [grounding_dict, names, pos_labels]
if not os.path.exists(results_path):
os.mkdir(results_path)
with open(os.path.join(results_path, f'{model_name}_preliminary_grounding_info.json'), 'w') as f:
json.dump(result, f)
additional_entities = []
unambiguous_agent_texts = {}
labeler = AdeftLabeler(grounding_dict)
corpus = labeler.build_from_texts((text, pmid) for pmid, text in all_texts.items())
agent_text_pmid_map = defaultdict(list)
for text, label, id_ in corpus:
agent_text_pmid_map[label].append(id_)
entities = pos_labels + additional_entities
entity_pmid_map = {entity: set(get_pmids_for_entity(*entity.split(':', maxsplit=1),
major_topic=True))for entity in entities}
intersection1 = []
for entity1, pmids1 in entity_pmid_map.items():
for entity2, pmids2 in entity_pmid_map.items():
intersection1.append((entity1, entity2, len(pmids1 & pmids2)))
intersection2 = []
for entity1, pmids1 in agent_text_pmid_map.items():
for entity2, pmids2 in entity_pmid_map.items():
intersection2.append((entity1, entity2, len(set(pmids1) & pmids2)))
intersection1
intersection2
all_used_pmids = set()
for entity, agent_texts in unambiguous_agent_texts.items():
used_pmids = set()
for agent_text in agent_texts:
pmids = set(get_pmids_for_agent_text(agent_text))
new_pmids = list(pmids - all_texts.keys() - used_pmids)
text_dict = get_plaintexts_for_pmids(new_pmids, contains=agent_texts)
corpus.extend([(text, entity, pmid) for pmid, text in text_dict.items()])
used_pmids.update(new_pmids)
all_used_pmids.update(used_pmids)
for entity, pmids in entity_pmid_map.items():
new_pmids = list(set(pmids) - all_texts.keys() - all_used_pmids)
if len(new_pmids) > 10000:
new_pmids = random.choices(new_pmids, k=10000)
text_dict = get_plaintexts_for_pmids(new_pmids)
corpus.extend([(text, entity, pmid) for pmid, text in text_dict.items()])
%%capture
classifier = AdeftClassifier(shortforms, pos_labels=pos_labels, random_state=1729)
param_grid = {'C': [100.0], 'max_features': [10000]}
texts, labels, pmids = zip(*corpus)
classifier.cv(texts, labels, param_grid, cv=5, n_jobs=5)
classifier.stats
disamb = AdeftDisambiguator(classifier, grounding_dict, names)
disamb.dump(model_name, results_path)
print(disamb.info())
model_to_s3(disamb)
| 0.331444 | 0.13569 |
# Long-Form Question Answering
[](https://colab.research.google.com/github/deepset-ai/haystack/blob/master/tutorials/Tutorial12_LFQA.ipynb)
### Prepare environment
#### Colab: Enable the GPU runtime
Make sure you enable the GPU runtime to experience decent speed in this tutorial.
**Runtime -> Change Runtime type -> Hardware accelerator -> GPU**
<img src="https://raw.githubusercontent.com/deepset-ai/haystack/master/docs/_src/img/colab_gpu_runtime.jpg">
```
# Make sure you have a GPU running
!nvidia-smi
# Install the latest release of Haystack in your own environment
#! pip install farm-haystack
# Install the latest master of Haystack
!pip install --upgrade pip
!pip install git+https://github.com/deepset-ai/haystack.git#egg=farm-haystack[colab,faiss]
from haystack.utils import convert_files_to_dicts, fetch_archive_from_http, clean_wiki_text
from haystack.nodes import Seq2SeqGenerator
```
### Document Store
FAISS is a library for efficient similarity search and clustering of dense vectors.
The `FAISSDocumentStore` uses a SQL database (SQLite in-memory by default) under the hood
to store the document text and other metadata. The vector embeddings of the text are
indexed on a FAISS index that is later queried to search for answers.
The default flavour of `FAISSDocumentStore` is "Flat", but it can also be set to "HNSW" for
faster search at the expense of some accuracy. Just set the `faiss_index_factory_str` argument in the constructor.
For more info on which suits your use case: https://github.com/facebookresearch/faiss/wiki/Guidelines-to-choose-an-index
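For instance, a minimal sketch of the faster approximate configuration could look like the following (illustrative only; in practice you would create one document store or the other, and tune the index to your own recall/speed needs):
```
from haystack.document_stores import FAISSDocumentStore

# Illustrative alternative to the "Flat" index used below:
# "HNSW" trades a little accuracy for faster approximate search.
hnsw_document_store = FAISSDocumentStore(
    embedding_dim=128,
    faiss_index_factory_str="HNSW",
)
```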
```
from haystack.document_stores import FAISSDocumentStore
document_store = FAISSDocumentStore(embedding_dim=128, faiss_index_factory_str="Flat")
```
### Cleaning & indexing documents
Similarly to the previous tutorials, we download, convert and index some Game of Thrones articles into our DocumentStore.
```
# Let's first get some files that we want to use
doc_dir = "data/article_txt_got"
s3_url = "https://s3.eu-central-1.amazonaws.com/deepset.ai-farm-qa/datasets/documents/wiki_gameofthrones_txt.zip"
fetch_archive_from_http(url=s3_url, output_dir=doc_dir)
# Convert files to dicts
dicts = convert_files_to_dicts(dir_path=doc_dir, clean_func=clean_wiki_text, split_paragraphs=True)
# Now, let's write the dicts containing documents to our DB.
document_store.write_documents(dicts)
```
### Initialize Retriever and Reader/Generator
#### Retriever
**Here:** We use a `RetribertRetriever` and we invoke `update_embeddings` to index the embeddings of documents in the `FAISSDocumentStore`
```
from haystack.nodes import EmbeddingRetriever
retriever = EmbeddingRetriever(document_store=document_store,
embedding_model="yjernite/retribert-base-uncased",
model_format="retribert")
document_store.update_embeddings(retriever)
```
Before we blindly use the `RetribertRetriever`, let's empirically test it to make sure a simple search indeed finds the relevant documents.
```
from haystack.utils import print_documents
from haystack.pipelines import DocumentSearchPipeline
p_retrieval = DocumentSearchPipeline(retriever)
res = p_retrieval.run(
query="Tell me something about Arya Stark?",
params={"Retriever": {"top_k": 10}}
)
print_documents(res, max_text_len=512)
```
#### Reader/Generator
Similar to the previous tutorials, we now initialize our reader/generator.
Here we use a `Seq2SeqGenerator` with the *yjernite/bart_eli5* model (see: https://huggingface.co/yjernite/bart_eli5)
```
generator = Seq2SeqGenerator(model_name_or_path="yjernite/bart_eli5")
```
### Pipeline
With a Haystack `Pipeline` you can stick together your building blocks into a search pipeline.
Under the hood, `Pipelines` are Directed Acyclic Graphs (DAGs) that you can easily customize for your own use cases.
To speed things up, Haystack also comes with a few predefined Pipelines. One of them is the `GenerativeQAPipeline` that combines a retriever and a reader/generator to answer our questions.
You can learn more about `Pipelines` in the [docs](https://haystack.deepset.ai/docs/latest/pipelinesmd).
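If you prefer to customize the DAG instead of using a predefined pipeline, the same two nodes can be wired together by hand. This is a sketch assuming the generic `Pipeline` class and its `add_node` API from Haystack 1.x; it is meant to be equivalent to the `GenerativeQAPipeline` used below:
```
from haystack.pipelines import Pipeline

# Hand-built equivalent of GenerativeQAPipeline: Query -> Retriever -> Generator
custom_pipe = Pipeline()
custom_pipe.add_node(component=retriever, name="Retriever", inputs=["Query"])
custom_pipe.add_node(component=generator, name="Generator", inputs=["Retriever"])
# custom_pipe.run(query="Who is the father of Arya Stark?", params={"Retriever": {"top_k": 3}})
```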
```
from haystack.pipelines import GenerativeQAPipeline
pipe = GenerativeQAPipeline(generator, retriever)
```
## Voilà! Ask a question!
```
pipe.run(
query="Why did Arya Stark's character get portrayed in a television adaptation?",
params={"Retriever": {"top_k": 1}}
)
pipe.run(query="What kind of character does Arya Stark play?", params={"Retriever": {"top_k": 1}})
```
## About us
This [Haystack](https://github.com/deepset-ai/haystack/) notebook was made with love by [deepset](https://deepset.ai/) in Berlin, Germany
We bring NLP to the industry via open source!
Our focus: Industry specific language models & large scale QA systems.
Some of our other work:
- [German BERT](https://deepset.ai/german-bert)
- [GermanQuAD and GermanDPR](https://deepset.ai/germanquad)
- [FARM](https://github.com/deepset-ai/FARM)
Get in touch:
[Twitter](https://twitter.com/deepset_ai) | [LinkedIn](https://www.linkedin.com/company/deepset-ai/) | [Slack](https://haystack.deepset.ai/community/join) | [GitHub Discussions](https://github.com/deepset-ai/haystack/discussions) | [Website](https://deepset.ai)
By the way: [we're hiring!](https://www.deepset.ai/jobs)
|
github_jupyter
|
# Make sure you have a GPU running
!nvidia-smi
# Install the latest release of Haystack in your own environment
#! pip install farm-haystack
# Install the latest master of Haystack
!pip install --upgrade pip
!pip install git+https://github.com/deepset-ai/haystack.git#egg=farm-haystack[colab,faiss]
from haystack.utils import convert_files_to_dicts, fetch_archive_from_http, clean_wiki_text
from haystack.nodes import Seq2SeqGenerator
from haystack.document_stores import FAISSDocumentStore
document_store = FAISSDocumentStore(embedding_dim=128, faiss_index_factory_str="Flat")
# Let's first get some files that we want to use
doc_dir = "data/article_txt_got"
s3_url = "https://s3.eu-central-1.amazonaws.com/deepset.ai-farm-qa/datasets/documents/wiki_gameofthrones_txt.zip"
fetch_archive_from_http(url=s3_url, output_dir=doc_dir)
# Convert files to dicts
dicts = convert_files_to_dicts(dir_path=doc_dir, clean_func=clean_wiki_text, split_paragraphs=True)
# Now, let's write the dicts containing documents to our DB.
document_store.write_documents(dicts)
from haystack.nodes import EmbeddingRetriever
retriever = EmbeddingRetriever(document_store=document_store,
embedding_model="yjernite/retribert-base-uncased",
model_format="retribert")
document_store.update_embeddings(retriever)
from haystack.utils import print_documents
from haystack.pipelines import DocumentSearchPipeline
p_retrieval = DocumentSearchPipeline(retriever)
res = p_retrieval.run(
query="Tell me something about Arya Stark?",
params={"Retriever": {"top_k": 10}}
)
print_documents(res, max_text_len=512)
generator = Seq2SeqGenerator(model_name_or_path="yjernite/bart_eli5")
from haystack.pipelines import GenerativeQAPipeline
pipe = GenerativeQAPipeline(generator, retriever)
pipe.run(
query="Why did Arya Stark's character get portrayed in a television adaptation?",
params={"Retriever": {"top_k": 1}}
)
pipe.run(query="What kind of character does Arya Stark play?", params={"Retriever": {"top_k": 1}})
| 0.600423 | 0.937555 |
# 2.8 Appendix to "Performing Inference": Running Inference on the Training and Validation DataLoaders
In this file, we perform object detection with the trained SSD.
We run inference with the trained SSD on the VOC2012 training and validation datasets, and display both the inference results and the ground-truth annotation data.
Use this file when you want to check, for example, how close the trained SSD model's predictions are to the correct annotation data.
# Preparation
- Make sure the folder `utils` contains `ssd_model.py`, which brings together the code implemented in Sections 2.3 to 2.7
- Prepare the trained weight parameters
```
import cv2 # OpenCVใฉใคใใฉใช
import matplotlib.pyplot as plt
import numpy as np
import torch
%matplotlib inline
```
# Create the functions and classes for inference
```
def ssd_predict(img_index, img_list, dataset, net=None, dataconfidence_level=0.5):
"""
SSDใงไบๆธฌใใใ้ขๆฐใ
Parameters
----------
img_index: int
ใใผใฟใปใใๅ
ใฎไบๆธฌๅฏพ่ฑก็ปๅใฎใคใณใใใฏในใ
img_list: list
็ปๅใฎใใกใคใซใในใฎใชในใ
dataset: PyTorchใฎDataset
็ปๅใฎDataset
net: PyTorchใฎNetwork
ๅญฆ็ฟใใใSSDใใใใฏใผใฏ
dataconfidence_level: float
ไบๆธฌใง็บ่ฆใจใใ็ขบไฟกๅบฆใฎ้พๅค
Returns
-------
rgb_img, true_bbox, true_label_index, predict_bbox, pre_dict_label_index, scores
"""
# rgbใฎ็ปๅใใผใฟใๅๅพ
image_file_path = img_list[img_index]
img = cv2.imread(image_file_path) # [้ซใ][ๅน
][่ฒBGR]
height, width, channels = img.shape # ็ปๅใฎใตใคใบใๅๅพ
rgb_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# ๆญฃ่งฃใฎBBoxใๅๅพ
im, gt = dataset.__getitem__(img_index)
true_bbox = gt[:, 0:4] * [width, height, width, height]
true_label_index = gt[:, 4].astype(int)
# SSDใงไบๆธฌ
net.eval() # ใใใใฏใผใฏใๆจ่ซใขใผใใธ
x = im.unsqueeze(0) # ใใใใใๅ๏ผtorch.Size([1, 3, 300, 300])
detections = net(x)
# detectionsใฎๅฝขใฏใtorch.Size([1, 21, 200, 5]) โป200ใฏtop_kใฎๅค
# confidence_levelใๅบๆบไปฅไธใๅใๅบใ
predict_bbox = []
pre_dict_label_index = []
scores = []
detections = detections.cpu().detach().numpy()
# ๆกไปถไปฅไธใฎๅคใๆฝๅบ
find_index = np.where(detections[:, 0:, :, 0] >= dataconfidence_level)
detections = detections[find_index]
for i in range(len(find_index[1])): # ๆฝๅบใใ็ฉไฝๆฐๅใซใผใใๅใ
if (find_index[1][i]) > 0: # ่ๆฏใฏใฉในใงใชใใใฎ
sc = detections[i][0] # ็ขบไฟกๅบฆ
bbox = detections[i][1:] * [width, height, width, height]
lable_ind = find_index[1][i]-1 # find_indexใฏใใใใใๆฐใใฏใฉในใtopใฎtuple
# ๏ผๆณจ้๏ผ
# ่ๆฏใฏใฉในใ0ใชใฎใง1ใๅผใ
# ่ฟใๅคใฎใชในใใซ่ฟฝๅ
predict_bbox.append(bbox)
pre_dict_label_index.append(lable_ind)
scores.append(sc)
return rgb_img, true_bbox, true_label_index, predict_bbox, pre_dict_label_index, scores
def vis_bbox(rgb_img, bbox, label_index, scores, label_names):
"""
็ฉไฝๆคๅบใฎไบๆธฌ็ตๆใ็ปๅใง่กจ็คบใใใ้ขๆฐใ
Parameters
----------
rgb_img:rgbใฎ็ปๅ
ๅฏพ่ฑกใฎ็ปๅใใผใฟ
bbox: list
็ฉไฝใฎBBoxใฎใชในใ
label_index: list
็ฉไฝใฎใฉใใซใธใฎใคใณใใใฏใน
scores: list
็ฉไฝใฎ็ขบไฟกๅบฆใ
label_names: list
ใฉใใซๅใฎ้
ๅ
Returns
-------
ใชใใrgb_imgใซ็ฉไฝๆคๅบ็ตๆใๅ ใใฃใ็ปๅใ่กจ็คบใใใใ
"""
# ๆ ใฎ่ฒใฎ่จญๅฎ
num_classes = len(label_names) # ใฏใฉในๆฐ๏ผ่ๆฏใฎใใ๏ผ
colors = plt.cm.hsv(np.linspace(0, 1, num_classes)).tolist()
# ็ปๅใฎ่กจ็คบ
plt.figure(figsize=(10, 10))
plt.imshow(rgb_img)
currentAxis = plt.gca()
# BBoxๅใฎใซใผใ
for i, bb in enumerate(bbox):
# ใฉใใซๅ
label_name = label_names[label_index[i]]
color = colors[label_index[i]] # ใฏใฉในใใจใซๅฅใฎ่ฒใฎๆ ใไธใใ
# ๆ ใซใคใใใฉใใซใไพ๏ผperson;0.72ใ
if scores is not None:
sc = scores[i]
display_txt = '%s: %.2f' % (label_name, sc)
else:
display_txt = '%s: ans' % (label_name)
# ๆ ใฎๅบงๆจ
xy = (bb[0], bb[1])
width = bb[2] - bb[0]
height = bb[3] - bb[1]
# ้ทๆนๅฝขใๆ็ปใใ
currentAxis.add_patch(plt.Rectangle(
xy, width, height, fill=False, edgecolor=color, linewidth=2))
# ้ทๆนๅฝขใฎๆ ใฎๅทฆไธใซใฉใใซใๆ็ปใใ
currentAxis.text(xy[0], xy[1], display_txt, bbox={
'facecolor': color, 'alpha': 0.5})
class SSDPredictShow():
"""SSDใงใฎไบๆธฌใจ็ปๅใฎ่กจ็คบใใพใจใใฆ่กใใฏใฉใน"""
def __init__(self, img_list, dataset, eval_categories, net=None, dataconfidence_level=0.6):
self.img_list = img_list
self.dataset = dataset
self.net = net
self.dataconfidence_level = dataconfidence_level
self.eval_categories = eval_categories
def show(self, img_index, predict_or_ans):
"""
็ฉไฝๆคๅบใฎไบๆธฌใจ่กจ็คบใใใ้ขๆฐใ
Parameters
----------
img_index: int
ใใผใฟใปใใๅ
ใฎไบๆธฌๅฏพ่ฑก็ปๅใฎใคใณใใใฏในใ
predict_or_ans: text
'precit'ใใใใฏ'ans'ใงBBoxใฎไบๆธฌใจๆญฃ่งฃใฎใฉใกใใ่กจ็คบใใใใๆๅฎใใ
Returns
-------
ใชใใrgb_imgใซ็ฉไฝๆคๅบ็ตๆใๅ ใใฃใ็ปๅใ่กจ็คบใใใใ
"""
rgb_img, true_bbox, true_label_index, predict_bbox, pre_dict_label_index, scores = ssd_predict(img_index, self.img_list,
self.dataset,
self.net,
self.dataconfidence_level)
if predict_or_ans == "predict":
vis_bbox(rgb_img, bbox=predict_bbox, label_index=pre_dict_label_index,
scores=scores, label_names=self.eval_categories)
elif predict_or_ans == "ans":
vis_bbox(rgb_img, bbox=true_bbox, label_index=true_label_index,
scores=None, label_names=self.eval_categories)
```
# Run inference
```
from utils.ssd_model import make_datapath_list, VOCDataset, DataTransform, Anno_xml2list, od_collate_fn
# ใใกใคใซใในใฎใชในใใๅๅพ
rootpath = "./data/VOCdevkit/VOC2012/"
train_img_list, train_anno_list, val_img_list, val_anno_list = make_datapath_list(
rootpath)
# Datasetใไฝๆ
voc_classes = ['aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair',
'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant',
'sheep', 'sofa', 'train', 'tvmonitor']
color_mean = (104, 117, 123) # (BGR)ใฎ่ฒใฎๅนณๅๅค
input_size = 300 # ็ปๅใฎinputใตใคใบใ300ร300ใซใใ
train_dataset = VOCDataset(train_img_list, train_anno_list, phase="val", transform=DataTransform(
input_size, color_mean), transform_anno=Anno_xml2list(voc_classes))
val_dataset = VOCDataset(val_img_list, val_anno_list, phase="val", transform=DataTransform(
input_size, color_mean), transform_anno=Anno_xml2list(voc_classes))
from utils.ssd_model import SSD
# SSD300ใฎ่จญๅฎ
ssd_cfg = {
'num_classes': 21, # ่ๆฏใฏใฉในใๅซใใๅ่จใฏใฉในๆฐ
'input_size': 300, # ็ปๅใฎๅ
ฅๅใตใคใบ
'bbox_aspect_num': [4, 6, 6, 6, 4, 4], # ๅบๅใใDBoxใฎใขในใใฏใๆฏใฎ็จฎ้ก
'feature_maps': [38, 19, 10, 5, 3, 1], # ๅsourceใฎ็ปๅใตใคใบ
'steps': [8, 16, 32, 64, 100, 300], # DBOXใฎๅคงใใใๆฑบใใ
'min_sizes': [30, 60, 111, 162, 213, 264], # DBOXใฎๅคงใใใๆฑบใใ
'max_sizes': [60, 111, 162, 213, 264, 315], # DBOXใฎๅคงใใใๆฑบใใ
'aspect_ratios': [[2], [2, 3], [2, 3], [2, 3], [2], [2]],
}
# SSDใใใใฏใผใฏใขใใซ
net = SSD(phase="inference", cfg=ssd_cfg)
net.eval()
# SSDใฎๅญฆ็ฟๆธใฟใฎ้ใฟใ่จญๅฎ
net_weights = torch.load('./weights/ssd300_50.pth',
map_location={'cuda:0': 'cpu'})
#net_weights = torch.load('./weights/ssd300_mAP_77.43_v2.pth',
# map_location={'cuda:0': 'cpu'})
net.load_state_dict(net_weights)
# GPUใไฝฟใใใใ็ขบ่ช
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("ไฝฟ็จใใใคใน๏ผ", device)
print('ใใใใฏใผใฏ่จญๅฎๅฎไบ๏ผๅญฆ็ฟๆธใฟใฎ้ใฟใใญใผใใใพใใ')
# ็ตๆใฎๆ็ป
ssd = SSDPredictShow(img_list=train_img_list, dataset=train_dataset, eval_categories=voc_classes,
net=net, dataconfidence_level=0.6)
img_index = 0
ssd.show(img_index, "predict")
ssd.show(img_index, "ans")
# ็ตๆใฎๆ็ป
ssd = SSDPredictShow(img_list=val_img_list, dataset=val_dataset, eval_categories=voc_classes,
net=net, dataconfidence_level=0.6)
img_index = 0
ssd.show(img_index, "predict")
ssd.show(img_index, "ans")
```
That is all.
|
github_jupyter
|
import cv2 # OpenCVใฉใคใใฉใช
import matplotlib.pyplot as plt
import numpy as np
import torch
%matplotlib inline
def ssd_predict(img_index, img_list, dataset, net=None, dataconfidence_level=0.5):
"""
SSDใงไบๆธฌใใใ้ขๆฐใ
Parameters
----------
img_index: int
ใใผใฟใปใใๅ
ใฎไบๆธฌๅฏพ่ฑก็ปๅใฎใคใณใใใฏในใ
img_list: list
็ปๅใฎใใกใคใซใในใฎใชในใ
dataset: PyTorchใฎDataset
็ปๅใฎDataset
net: PyTorchใฎNetwork
ๅญฆ็ฟใใใSSDใใใใฏใผใฏ
dataconfidence_level: float
ไบๆธฌใง็บ่ฆใจใใ็ขบไฟกๅบฆใฎ้พๅค
Returns
-------
rgb_img, true_bbox, true_label_index, predict_bbox, pre_dict_label_index, scores
"""
# rgbใฎ็ปๅใใผใฟใๅๅพ
image_file_path = img_list[img_index]
img = cv2.imread(image_file_path) # [้ซใ][ๅน
][่ฒBGR]
height, width, channels = img.shape # ็ปๅใฎใตใคใบใๅๅพ
rgb_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# ๆญฃ่งฃใฎBBoxใๅๅพ
im, gt = dataset.__getitem__(img_index)
true_bbox = gt[:, 0:4] * [width, height, width, height]
true_label_index = gt[:, 4].astype(int)
# SSDใงไบๆธฌ
net.eval() # ใใใใฏใผใฏใๆจ่ซใขใผใใธ
x = im.unsqueeze(0) # ใใใใใๅ๏ผtorch.Size([1, 3, 300, 300])
detections = net(x)
# detectionsใฎๅฝขใฏใtorch.Size([1, 21, 200, 5]) โป200ใฏtop_kใฎๅค
# confidence_levelใๅบๆบไปฅไธใๅใๅบใ
predict_bbox = []
pre_dict_label_index = []
scores = []
detections = detections.cpu().detach().numpy()
# ๆกไปถไปฅไธใฎๅคใๆฝๅบ
find_index = np.where(detections[:, 0:, :, 0] >= dataconfidence_level)
detections = detections[find_index]
for i in range(len(find_index[1])): # ๆฝๅบใใ็ฉไฝๆฐๅใซใผใใๅใ
if (find_index[1][i]) > 0: # ่ๆฏใฏใฉในใงใชใใใฎ
sc = detections[i][0] # ็ขบไฟกๅบฆ
bbox = detections[i][1:] * [width, height, width, height]
lable_ind = find_index[1][i]-1 # find_indexใฏใใใใใๆฐใใฏใฉในใtopใฎtuple
# ๏ผๆณจ้๏ผ
# ่ๆฏใฏใฉในใ0ใชใฎใง1ใๅผใ
# ่ฟใๅคใฎใชในใใซ่ฟฝๅ
predict_bbox.append(bbox)
pre_dict_label_index.append(lable_ind)
scores.append(sc)
return rgb_img, true_bbox, true_label_index, predict_bbox, pre_dict_label_index, scores
def vis_bbox(rgb_img, bbox, label_index, scores, label_names):
"""
็ฉไฝๆคๅบใฎไบๆธฌ็ตๆใ็ปๅใง่กจ็คบใใใ้ขๆฐใ
Parameters
----------
rgb_img:rgbใฎ็ปๅ
ๅฏพ่ฑกใฎ็ปๅใใผใฟ
bbox: list
็ฉไฝใฎBBoxใฎใชในใ
label_index: list
็ฉไฝใฎใฉใใซใธใฎใคใณใใใฏใน
scores: list
็ฉไฝใฎ็ขบไฟกๅบฆใ
label_names: list
ใฉใใซๅใฎ้
ๅ
Returns
-------
ใชใใrgb_imgใซ็ฉไฝๆคๅบ็ตๆใๅ ใใฃใ็ปๅใ่กจ็คบใใใใ
"""
# ๆ ใฎ่ฒใฎ่จญๅฎ
num_classes = len(label_names) # ใฏใฉในๆฐ๏ผ่ๆฏใฎใใ๏ผ
colors = plt.cm.hsv(np.linspace(0, 1, num_classes)).tolist()
# ็ปๅใฎ่กจ็คบ
plt.figure(figsize=(10, 10))
plt.imshow(rgb_img)
currentAxis = plt.gca()
# BBoxๅใฎใซใผใ
for i, bb in enumerate(bbox):
# ใฉใใซๅ
label_name = label_names[label_index[i]]
color = colors[label_index[i]] # ใฏใฉในใใจใซๅฅใฎ่ฒใฎๆ ใไธใใ
# ๆ ใซใคใใใฉใใซใไพ๏ผperson;0.72ใ
if scores is not None:
sc = scores[i]
display_txt = '%s: %.2f' % (label_name, sc)
else:
display_txt = '%s: ans' % (label_name)
# ๆ ใฎๅบงๆจ
xy = (bb[0], bb[1])
width = bb[2] - bb[0]
height = bb[3] - bb[1]
# ้ทๆนๅฝขใๆ็ปใใ
currentAxis.add_patch(plt.Rectangle(
xy, width, height, fill=False, edgecolor=color, linewidth=2))
# ้ทๆนๅฝขใฎๆ ใฎๅทฆไธใซใฉใใซใๆ็ปใใ
currentAxis.text(xy[0], xy[1], display_txt, bbox={
'facecolor': color, 'alpha': 0.5})
class SSDPredictShow():
"""SSDใงใฎไบๆธฌใจ็ปๅใฎ่กจ็คบใใพใจใใฆ่กใใฏใฉใน"""
def __init__(self, img_list, dataset, eval_categories, net=None, dataconfidence_level=0.6):
self.img_list = img_list
self.dataset = dataset
self.net = net
self.dataconfidence_level = dataconfidence_level
self.eval_categories = eval_categories
def show(self, img_index, predict_or_ans):
"""
็ฉไฝๆคๅบใฎไบๆธฌใจ่กจ็คบใใใ้ขๆฐใ
Parameters
----------
img_index: int
ใใผใฟใปใใๅ
ใฎไบๆธฌๅฏพ่ฑก็ปๅใฎใคใณใใใฏในใ
predict_or_ans: text
'precit'ใใใใฏ'ans'ใงBBoxใฎไบๆธฌใจๆญฃ่งฃใฎใฉใกใใ่กจ็คบใใใใๆๅฎใใ
Returns
-------
ใชใใrgb_imgใซ็ฉไฝๆคๅบ็ตๆใๅ ใใฃใ็ปๅใ่กจ็คบใใใใ
"""
rgb_img, true_bbox, true_label_index, predict_bbox, pre_dict_label_index, scores = ssd_predict(img_index, self.img_list,
self.dataset,
self.net,
self.dataconfidence_level)
if predict_or_ans == "predict":
vis_bbox(rgb_img, bbox=predict_bbox, label_index=pre_dict_label_index,
scores=scores, label_names=self.eval_categories)
elif predict_or_ans == "ans":
vis_bbox(rgb_img, bbox=true_bbox, label_index=true_label_index,
scores=None, label_names=self.eval_categories)
from utils.ssd_model import make_datapath_list, VOCDataset, DataTransform, Anno_xml2list, od_collate_fn
# ใใกใคใซใในใฎใชในใใๅๅพ
rootpath = "./data/VOCdevkit/VOC2012/"
train_img_list, train_anno_list, val_img_list, val_anno_list = make_datapath_list(
rootpath)
# Datasetใไฝๆ
voc_classes = ['aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair',
'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant',
'sheep', 'sofa', 'train', 'tvmonitor']
color_mean = (104, 117, 123) # (BGR)ใฎ่ฒใฎๅนณๅๅค
input_size = 300 # ็ปๅใฎinputใตใคใบใ300ร300ใซใใ
train_dataset = VOCDataset(train_img_list, train_anno_list, phase="val", transform=DataTransform(
input_size, color_mean), transform_anno=Anno_xml2list(voc_classes))
val_dataset = VOCDataset(val_img_list, val_anno_list, phase="val", transform=DataTransform(
input_size, color_mean), transform_anno=Anno_xml2list(voc_classes))
from utils.ssd_model import SSD
# SSD300ใฎ่จญๅฎ
ssd_cfg = {
'num_classes': 21, # ่ๆฏใฏใฉในใๅซใใๅ่จใฏใฉในๆฐ
'input_size': 300, # ็ปๅใฎๅ
ฅๅใตใคใบ
'bbox_aspect_num': [4, 6, 6, 6, 4, 4], # ๅบๅใใDBoxใฎใขในใใฏใๆฏใฎ็จฎ้ก
'feature_maps': [38, 19, 10, 5, 3, 1], # ๅsourceใฎ็ปๅใตใคใบ
'steps': [8, 16, 32, 64, 100, 300], # DBOXใฎๅคงใใใๆฑบใใ
'min_sizes': [30, 60, 111, 162, 213, 264], # DBOXใฎๅคงใใใๆฑบใใ
'max_sizes': [60, 111, 162, 213, 264, 315], # DBOXใฎๅคงใใใๆฑบใใ
'aspect_ratios': [[2], [2, 3], [2, 3], [2, 3], [2], [2]],
}
# SSDใใใใฏใผใฏใขใใซ
net = SSD(phase="inference", cfg=ssd_cfg)
net.eval()
# SSDใฎๅญฆ็ฟๆธใฟใฎ้ใฟใ่จญๅฎ
net_weights = torch.load('./weights/ssd300_50.pth',
map_location={'cuda:0': 'cpu'})
#net_weights = torch.load('./weights/ssd300_mAP_77.43_v2.pth',
# map_location={'cuda:0': 'cpu'})
net.load_state_dict(net_weights)
# GPUใไฝฟใใใใ็ขบ่ช
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("ไฝฟ็จใใใคใน๏ผ", device)
print('ใใใใฏใผใฏ่จญๅฎๅฎไบ๏ผๅญฆ็ฟๆธใฟใฎ้ใฟใใญใผใใใพใใ')
# ็ตๆใฎๆ็ป
ssd = SSDPredictShow(img_list=train_img_list, dataset=train_dataset, eval_categories=voc_classes,
net=net, dataconfidence_level=0.6)
img_index = 0
ssd.show(img_index, "predict")
ssd.show(img_index, "ans")
# ็ตๆใฎๆ็ป
ssd = SSDPredictShow(img_list=val_img_list, dataset=val_dataset, eval_categories=voc_classes,
net=net, dataconfidence_level=0.6)
img_index = 0
ssd.show(img_index, "predict")
ssd.show(img_index, "ans")
| 0.678114 | 0.916297 |
# Randomized Benchmarking
*Copyright (c) 2021 Institute for Quantum Computing, Baidu Inc. All rights reserved.*
## Outline
**Note: the time and Quantum Hub credits needed to run this tutorial vary with the parameters you enter. Users typically need about half an hour and 100 credits to obtain reasonably reliable results. To get more credits, please contact us via [Quantum Hub](https://quantum-hub.baidu.com): first log in to [Quantum Hub](https://quantum-hub.baidu.com), go to the "Feedback" page, click "Get Credits", and fill in the required information. Submit your feedback and wait for a reply.**
In experiments, there are usually two ways to characterize the performance of a superconducting quantum computer: Quantum Process Tomography (QPT) and Randomized Benchmarking (RB) \[1\]. QPT can fully characterize a quantum gate, but characterizing and optimizing gates through QPT is highly complex and resource-intensive. Moreover, State Preparation And Measurement (SPAM) errors also affect process tomography. Randomized benchmarking, in contrast, benchmarks quantum gates with a randomization method; it is scalable, insensitive to SPAM errors, and characterizes a gate set with a single parameter. It is therefore a very efficient way to characterize errors on quantum hardware, especially as the number of qubits grows.
This tutorial demonstrates how to use Quanlse to run randomized benchmarking on one qubit of a user-defined noisy simulator and characterize the average error rate of a specific gate operation. The outline is as follows:
- Introduction
- Preparation
- Define the noisy simulator
- Run randomized benchmarking
- Summary
## Introduction
**Basic randomized benchmarking**
We typically select $m$ Clifford gates at random, apply them to the qubit in sequence, and append an $(m+1)$-th gate so that, in the ideal case, the whole sequence is equivalent to an identity unitary:

As shown above, $C_{i}$ denotes the $i$-th $(i = 1, 2, 3, \dots, m)$ randomly chosen Clifford gate. In the ideal case, i.e. without any noise, if the qubit starts in the state $|\psi\rangle$, then after this randomized benchmarking \[2\] sequence the final state must equal the initial state, i.e. the qubit remains in $|\psi\rangle$ with 100% probability. We take the probability that the final state equals the initial state as the measure of the sequence fidelity. In reality, however, the sequence fidelity decays exponentially because noise accumulates as the sequence length grows. If the noise is assumed to be gate- and time-independent, i.e. its distribution does not change with the gate or over time, the decay curve can be fitted with:
$$
\mathcal{F}^{(0)}=Ap_{\rm basic}^m+B,
$$
where $m$ is the number of Clifford gates applied. For more detailed background and theory on randomized benchmarking, readers can refer to \[3\].
As mentioned above, one advantage of randomized benchmarking is that it excludes state preparation and measurement errors: such errors are absorbed into the parameters $A$ and $B$ of the equation above and do not affect the decay parameter $p$. More concretely, if the density operator $\rho$ at the initial time and the measurement operator $\hat{E}$ are expanded in the Pauli operators $\hat{P}_j$:
$$
\rho=\sum_jx_j\hat{P}_j/d,
$$
$$
\hat{E}=\sum_j\tilde{e}_j\hat{P}_j,
$$
then the parameters are $A = \sum_{j\neq 0}\tilde{e}_jx_j$ and $B = \tilde{e}_0$, where $d\equiv{2^n}$ and $n$ is the number of qubits. Once the curve has been fitted successfully and the parameter $p_{\rm basic}$ obtained, the average Error-rate Per Clifford (EPC) of the Clifford gates on this hardware follows from:
$$
{\rm EPC}=\frac{(1-p_{\rm basic})(d-1)}{d}.
$$
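As a quick sanity check of this formula (with toy numbers, not results from this tutorial): for a single qubit $d = 2$, so a fitted decay parameter of $p_{\rm basic} = 0.998$ corresponds to an EPC of about $0.001$:
```
# Toy numbers only, not results from this tutorial
d = 2                    # single qubit, d = 2**1
p_basic = 0.998          # hypothetical fitted decay parameter
epc = (1 - p_basic) * (d - 1) / d
print(epc)               # ~0.001 (up to floating-point rounding)
```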
**Interleaved randomized benchmarking**
Interleaved randomized benchmarking is used to benchmark the average error rate of one specific quantum gate. Once the basic randomized benchmarking above has been carried out and the sequence-fidelity decay curve obtained, that curve can serve as the reference; comparing it with the decay curve obtained from interleaved randomized benchmarking yields the average error rate of a specific gate. We randomly select a series of Clifford gates and insert the target gate to be benchmarked after each Clifford gate, then design the final gate so that the ideal overall operation is again a unitary of identity form. The interleaved randomized benchmarking sequence shown below uses the Hadamard gate (H gate) as the target gate:

We then fit the sequence-fidelity decay curve with:
$$
\mathcal{F}^{(0)\prime}=A^{\prime}p_{\rm gate}^m+B^{\prime}.
$$
Finally, comparing with the curve obtained from the basic randomized benchmarking gives the average Error-rate Per Gate (EPG):
$$
r_{\rm gate}=\frac{(1-p_{\rm gate}/p_{\rm ref})(d-1)}{d}.
$$
This average error rate $r$, obtained from real experimental data, can be used to characterize how well the quantum gate performs.
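Continuing the toy numbers above (again purely illustrative, not taken from this tutorial): if the interleaved curve decays with $p_{\rm gate} = 0.996$ while the reference curve gives $p_{\rm ref} = 0.998$, the formula yields an EPG of roughly $0.001$. The `targetGateErrorRate` helper defined later in this notebook computes exactly this quantity.
```
# Toy numbers only, not results from this tutorial
d = 2
p_gate, p_ref = 0.996, 0.998     # hypothetical decay parameters
epg = (1 - p_gate / p_ref) * (d - 1) / d
print(epg)                       # ~0.001002
```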
Below we show how to use Quanlse to run randomized benchmarking on a single qubit of the quantum hardware.
## Preparation
First, we need to import the necessary packages and enter a token to access the cloud service:
```
# Import the necessary packages
from Quanlse.Utils.RandomizedBenchmarking import RB
from Quanlse.Utils.Functions import basis, tensor
from Quanlse.QOperation import FixedGate
from Quanlse.Simulator import PulseModel
from Quanlse.Scheduler.Superconduct import SchedulerSuperconduct
from Quanlse.Scheduler.Superconduct.GeneratorRBPulse import SingleQubitCliffordPulseGenerator
from math import pi
from scipy.optimize import curve_fit
from matplotlib import pyplot as plt
# Import Define class and set the token
# Please visit http://quantum-hub.baidu.com
from Quanlse import Define
Define.hubToken = ''
```
## Define the virtual quantum hardware simulator
To perform randomized benchmarking, we need to define a noisy virtual quantum device as our hardware platform and choose the qubit and the target gate to benchmark.
Quanlse supports user-defined multi-qubit noisy simulators; see [Multi-Qubit Noisy Simulator](https://quanlse.baidu.com/#/doc/tutorial-multi-qubit-noisy-simulator) for more details. Here, we use Quanlse to define a two-qubit noisy virtual quantum device in which each qubit is a three-level artificial-atom system, and we characterize the performance of the H gate acting on the first qubit:
```
# Define the basic parameters of the simulator
sysLevel = 3 # The number of energy levels of each qubit
qubitNum = 2 # The number of qubits simulator has
# Qubit frequency & anharmonicity
wq0 = 5.033 * (2 * pi) # The frequency for qubit 0, in 2 pi GHz
wq1 = 5.292 * (2 * pi) # The frequency for qubit 1, in 2 pi GHz
anharm0 = - 0.37612 * (2 * pi) # The anharmonicity for qubit 0, in 2 pi GHz
anharm1 = - 0.32974 * (2 * pi) # The anharmonicity for qubit 1, in 2 pi GHz
qubitFreq = {0: wq0, 1: wq1}
qubitAnharm = {0: anharm0, 1: anharm1}
# Coupling map between qubits
g01 = 0.002 * (2 * pi)
couplingMap = {(0, 1): g01}
# Taking T1 & T2 dissipation into consideration, in the unit of nanosecond
t1List = {0: 70270, 1: 59560}
t2List = {0: 43150, 1: 23790}
# Sampling time
dt = 1.
# Build a virtual QPU
model = PulseModel(subSysNum=qubitNum,
sysLevel=sysLevel,
couplingMap=couplingMap,
qubitFreq=qubitFreq,
dt=dt,
qubitAnharm=qubitAnharm,
T1=t1List, T2=t2List,
ampSigma=0.0001)
ham = model.createQHamiltonian()
# The initial state of this simulator
initialState = tensor(basis(3, 0), basis(3, 0))
# Decide the qubit we want to benchmark
targetQubitNum = 0
hamTarget = ham.subSystem(targetQubitNum)
# Decide one specific gate we want to benchmark
targetGate = FixedGate.H
```
With the definitions above in place, we are ready to run randomized benchmarking.
Since a large number of pulse sequences will be involved later, we need to instantiate a Quanlse superconducting scheduler `SchedulerSuperconduct()` to arrange the pulses in a customized way:
```
sche = SchedulerSuperconduct(dt=dt, ham=hamTarget, generator=SingleQubitCliffordPulseGenerator(hamTarget))
```
## Run randomized benchmarking
To call the `RB` module, we need to pass in some required parameters: the quantum device `model` we defined and the initial state `initialState` of its qubits, the index `targetQubitNum` of the qubit to benchmark, the list `size` of Clifford-gate counts, the number `width` of randomly generated sequences of the same length for each Clifford-gate count $m$, the scheduler `sche` to use, and the sampling step `dt`. To use interleaved randomized benchmarking, additionally pass `interleaved=True` and the target gate `targetGate`; to simulate the evolution of an open system, also set `isOpen=True`:
```
# Create a list to store the outcome
sizeSequenceFidelityBasic = []
sizeSequenceFidelityInterleaved = []
# Core parameters of an RB experiment
size = [1, 10, 20, 50, 75, 100, 125, 150, 175, 200]
width = 10
# Start RB experiment. First get a basicRB curve used for reference. Then implement the interleavedRB to benchmark our Hadamard gate
for i in size:
widthSequenceFidelityBasic = RB(model=model, targetQubitNum=targetQubitNum, initialState=initialState, size=i, width=width, sche=sche,
dt=dt, interleaved=False, isOpen=True)
sizeSequenceFidelityBasic.append(widthSequenceFidelityBasic)
print(sizeSequenceFidelityBasic)
for j in size:
widthSequenceFidelityInterleaved = RB(model=model, targetQubitNum=targetQubitNum, initialState=initialState, size=j, width=width,
targetGate=targetGate, sche=sche, dt=dt, interleaved=True, isOpen=True)
sizeSequenceFidelityInterleaved.append(widthSequenceFidelityInterleaved)
print(sizeSequenceFidelityInterleaved)
```
After both randomized benchmarking methods above have been run successfully and the large amount of experimental data plotted as decay curves, the EPC and EPG can be obtained through the curve fitting below:
```
# Define the fitting function
def fit(x, a, p, b):
"""
Define the fitting curve
"""
return a * (p ** x) + b
# Define the function of calculating the EPG(Error-rate Per Gate) with p_{gate} and p_{ref}
def targetGateErrorRate(pGate, pRef, dimension):
"""
Calculate the specific gate error rate
"""
return ((1 - (pGate / pRef)) * (dimension - 1)) / dimension
# Get the EPC(Error-rate Per Clifford) and p_{ref}
fitparaBasic, fitcovBasic = curve_fit(fit, size, sizeSequenceFidelityBasic, p0=[0.5, 1, 0.5], maxfev=500000,
bounds=[0, 1])
pfitBasic = fitparaBasic[1]
rClifford = (1 - pfitBasic) / 2
print('EPC =', rClifford)
# Get the parameter p_{gate}
fitparaInterleaved, fitcovInterleaved = curve_fit(fit, size, sizeSequenceFidelityInterleaved,
p0=[fitparaBasic[0], 1, fitparaBasic[2]], maxfev=500000,
bounds=[0, 1])
pfitInterleaved = fitparaInterleaved[1]
yfitBasic = fitparaBasic[0] * (pfitBasic ** size) + fitparaBasic[2]
yfitInterleaved = fitparaInterleaved[0] * (pfitInterleaved ** size) + fitparaInterleaved[2]
EPG = targetGateErrorRate(pfitInterleaved, pfitBasic, dimension=2)
print('EPG =', EPG)
```
We also plot the experimental data together with the fitted curves to visualize the decay:
```
# Plot the decay curve of our RB experiment
plt.figure(figsize=(18, 6), dpi=80)
plt.figure(1)
ax1 = plt.subplot(121)
ax1.plot(size, sizeSequenceFidelityBasic, '.b', label='experiment simulation data')
ax1.plot(size, yfitBasic, 'r', label='fitting curve')
plt.xlabel('$m$')
plt.ylabel('Sequence Fidelity')
plt.title('basic RB using Quanlse')
plt.legend()
ax2 = plt.subplot(122)
ax2.plot(size, sizeSequenceFidelityInterleaved, '.b', label='experiment simulation data')
ax2.plot(size, yfitInterleaved, 'r', label='fitting curve')
plt.xlabel('$m$')
plt.ylabel('Sequence Fidelity')
plt.title('interleaved RB using Quanlse')
plt.legend()
plt.show()
```
Here, $m$ denotes the number of Clifford gates in the sequence. As the figures show, with this workflow we can automatically generate high-fidelity pulses for the gate operations adapted to the target quantum hardware, schedule those pulses as their number grows significantly, and then run randomized benchmarking experiments on the hardware to obtain the decay curves above. The curves reflect the exponential decay of the sequence fidelity caused by the noise that accumulates as the number of gates (and hence pulses) increases.
## Summary
This tutorial has described how to use Quanlse to run randomized benchmarking on quantum hardware and characterize the average error rate of a specific gate. Users can follow the link [tutorial-randomized-benchmarking.ipynb](https://github.com/baidu/Quanlse/blob/main/Tutorial/CN/tutorial-randomized-benchmarking-cn.ipynb) to the corresponding GitHub page to obtain the related code. We encourage users to try parameters different from those in this tutorial to obtain better curve fits, and to develop more advanced variants of randomized benchmarking.
## References
\[1\] [Kelly, Julian, et al. "Optimal quantum control using randomized benchmarking." *Physical review letters* 112.24 (2014): 240504.](https://doi.org/10.1103/PhysRevLett.112.240504)
\[2\] [Magesan, Easwar, et al. "Efficient measurement of quantum gate error by interleaved randomized benchmarking." *Physical review letters* 109.8 (2012): 080505.](https://doi.org/10.1103/PhysRevLett.109.080505)
\[3\] [Magesan, Easwar, Jay M. Gambetta, and Joseph Emerson. "Scalable and robust randomized benchmarking of quantum processes." *Physical review letters* 106.18 (2011): 180504.](https://doi.org/10.1103/PhysRevLett.106.180504)
|
github_jupyter
|
# Import the necessary packages
from Quanlse.Utils.RandomizedBenchmarking import RB
from Quanlse.Utils.Functions import basis, tensor
from Quanlse.QOperation import FixedGate
from Quanlse.Simulator import PulseModel
from Quanlse.Scheduler.Superconduct import SchedulerSuperconduct
from Quanlse.Scheduler.Superconduct.GeneratorRBPulse import SingleQubitCliffordPulseGenerator
from math import pi
from scipy.optimize import curve_fit
from matplotlib import pyplot as plt
# Import Define class and set the token
# Please visit http://quantum-hub.baidu.com
from Quanlse import Define
Define.hubToken = ''
# Define the basic parameters of the simulator
sysLevel = 3 # The number of energy levels of each qubit
qubitNum = 2 # The number of qubits simulator has
# Qubit frequency & anharmonicity
wq0 = 5.033 * (2 * pi) # The frequency for qubit 0, in 2 pi GHz
wq1 = 5.292 * (2 * pi) # The frequency for qubit 1, in 2 pi GHz
anharm0 = - 0.37612 * (2 * pi) # The anharmonicity for qubit 0, in 2 pi GHz
anharm1 = - 0.32974 * (2 * pi) # The anharmonicity for qubit 1, in 2 pi GHz
qubitFreq = {0: wq0, 1: wq1}
qubitAnharm = {0: anharm0, 1: anharm1}
# Coupling map between qubits
g01 = 0.002 * (2 * pi)
couplingMap = {(0, 1): g01}
# Taking T1 & T2 dissipation into consideration, in the unit of nanosecond
t1List = {0: 70270, 1: 59560}
t2List = {0: 43150, 1: 23790}
# Sampling time
dt = 1.
# Build a virtual QPU
model = PulseModel(subSysNum=qubitNum,
sysLevel=sysLevel,
couplingMap=couplingMap,
qubitFreq=qubitFreq,
dt=dt,
qubitAnharm=qubitAnharm,
T1=t1List, T2=t2List,
ampSigma=0.0001)
ham = model.createQHamiltonian()
# The initial state of this simulator
initialState = tensor(basis(3, 0), basis(3, 0))
# Decide the qubit we want to benchmark
targetQubitNum = 0
hamTarget = ham.subSystem(targetQubitNum)
# Decide one specific gate we want to benchmark
targetGate = FixedGate.H
sche = SchedulerSuperconduct(dt=dt, ham=hamTarget, generator=SingleQubitCliffordPulseGenerator(hamTarget))
# Create a list to store the outcome
sizeSequenceFidelityBasic = []
sizeSequenceFidelityInterleaved = []
# Core parameters of an RB experiment
size = [1, 10, 20, 50, 75, 100, 125, 150, 175, 200]
width = 10
# Start RB experiment. First get a basicRB curve used for reference. Then implement the interleavedRB to benchmark our Hadamard gate
for i in size:
widthSequenceFidelityBasic = RB(model=model, targetQubitNum=targetQubitNum, initialState=initialState, size=i, width=width, sche=sche,
dt=dt, interleaved=False, isOpen=True)
sizeSequenceFidelityBasic.append(widthSequenceFidelityBasic)
print(sizeSequenceFidelityBasic)
for j in size:
widthSequenceFidelityInterleaved = RB(model=model, targetQubitNum=targetQubitNum, initialState=initialState, size=j, width=width,
targetGate=targetGate, sche=sche, dt=dt, interleaved=True, isOpen=True)
sizeSequenceFidelityInterleaved.append(widthSequenceFidelityInterleaved)
print(sizeSequenceFidelityInterleaved)
# Define the fitting function
def fit(x, a, p, b):
"""
Define the fitting curve
"""
return a * (p ** x) + b
# Define the function of calculating the EPG(Error-rate Per Gate) with p_{gate} and p_{ref}
def targetGateErrorRate(pGate, pRef, dimension):
"""
Calculate the specific gate error rate
"""
return ((1 - (pGate / pRef)) * (dimension - 1)) / dimension
# Get the EPC(Error-rate Per Clifford) and p_{ref}
fitparaBasic, fitcovBasic = curve_fit(fit, size, sizeSequenceFidelityBasic, p0=[0.5, 1, 0.5], maxfev=500000,
bounds=[0, 1])
pfitBasic = fitparaBasic[1]
rClifford = (1 - pfitBasic) / 2
print('EPC =', rClifford)
# Get the parameter p_{gate}
fitparaInterleaved, fitcovInterleaved = curve_fit(fit, size, sizeSequenceFidelityInterleaved,
p0=[fitparaBasic[0], 1, fitparaBasic[2]], maxfev=500000,
bounds=[0, 1])
pfitInterleaved = fitparaInterleaved[1]
yfitBasic = fitparaBasic[0] * (pfitBasic ** size) + fitparaBasic[2]
yfitInterleaved = fitparaInterleaved[0] * (pfitInterleaved ** size) + fitparaInterleaved[2]
EPG = targetGateErrorRate(pfitInterleaved, pfitBasic, dimension=2)
print('EPG =', EPG)
# Plot the decay curve of our RB experiment
plt.figure(figsize=(18, 6), dpi=80)
plt.figure(1)
ax1 = plt.subplot(121)
ax1.plot(size, sizeSequenceFidelityBasic, '.b', label='experiment simulation data')
ax1.plot(size, yfitBasic, 'r', label='fitting curve')
plt.xlabel('$m$')
plt.ylabel('Sequence Fidelity')
plt.title('basic RB using Quanlse')
plt.legend()
ax2 = plt.subplot(122)
ax2.plot(size, sizeSequenceFidelityInterleaved, '.b', label='experiment simulation data')
ax2.plot(size, yfitInterleaved, 'r', label='fitting curve')
plt.xlabel('$m$')
plt.ylabel('Sequence Fidelity')
plt.title('interleaved RB using Quanlse')
plt.legend()
plt.show()
| 0.822546 | 0.856872 |
# Evaluation on fixed-metre poetry
This Notebook contains the evaluation metrics for [`Rantanplan`](https://pypi.org/project/rantanplan/0.4.3/) v0.4.3
```
from datetime import datetime
print(f"Last run: {datetime.utcnow().strftime('%B %d %Y - %H:%M:%S')}")
```
## Setup
Installing dependencies and downloading necessary corpora using [`Averell`](https://pypi.org/project/averell/).
```
!pip install -q pandas numpy "spacy<2.3.0" spacy_affixes
!pip install -q --no-cache https://github.com/linhd-postdata/averell/archive/803685bd7e00cc7def6837f9843ab560085e8fca.zip
%%bash --out _
# pip install https://github.com/explosion/spacy-models/archive/es_core_news_md-2.2.5.zip
python -m spacy download es_core_news_md
python -m spacy_affixes download es
!pip install -q "rantanplan==0.4.3"
!averell list
%%bash
averell download 3 4 > /dev/null 2>&1
averell export 3 --granularity line
mv corpora/line.json sonnets.json
averell export 4 --granularity line
mv corpora/line.json adso.json
du -h *.json
```
Defining helper functions
```
import json
import math
import re
from io import StringIO
import numpy as np
import pandas as pd
def clean_text(string):
output = string.strip()
    # replacements = (("“", '"'), ("”", '"'), ("//", ""), ("«", '"'), ("»", '"'))
    replacements = (("“", ''), ("”", ''), ("//", ""), ("«", ''), ("»", ''))
for replacement in replacements:
output = output.replace(*replacement)
output = re.sub(r'(?is)\s+', ' ', output)
output = re.sub(r"(\w)-(\w)", r"\1\2", output) # "Villa-nueva" breaks Navarro-Colorado's system
return output
adso = pd.DataFrame.from_records(
json.load(open("adso.json"))
)[["line_text", "metrical_pattern"]].reset_index(drop=True)
adso.line_text = adso.line_text.apply(clean_text)
adso
sonnets = pd.DataFrame.from_records(
json.load(open("sonnets.json"))
).query("manually_checked == True")[["line_text", "metrical_pattern"]].reset_index(drop=True)
sonnets.line_text = sonnets.line_text.apply(clean_text)
sonnets
```
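As a quick illustration of what the normalization does (an ad-hoc example string, not a line from either corpus, and assuming the replacement tuples above target typographic quotes and guillemets):
```
# Typographic quotes/guillemets are dropped, whitespace is collapsed,
# and intra-word hyphens are removed ("Villa-nueva" -> "Villanueva").
print(clean_text('«Villa-nueva»,   “dijo”   él'))  # Villanueva, dijo él
```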
Importing `Rantanplan` main functions and warming up the cache.
```
from rantanplan.rhymes import analyze_rhyme
from rantanplan import get_scansion
%%time
get_scansion("Prueba")
pass
```
## Navarro-Colorado
Preparing corpora and measuring running times for Navarro-Colorado's scansion system.
```
!mkdir -p sonnets
!mkdir -p adso
!mkdir -p outputs
with open("adso/adso.txt", "w") as file:
file.write("\n".join(adso["line_text"].values))
with open("sonnets/sonnets.txt", "w") as file:
file.write("\n".join(sonnets["line_text"].values))
```
We built and pushed a Docker image with Navarro-Colorado's scansion system. Executing the next cells takes a very long time and produces very verbose output, which we omit. Alternatively, the files `./data/navarro_colorado_adso.xml` and `./data/navarro_colorado_sonnets.xml` contain the output of the last run.
```
%%bash --err adso_timing --out adso_output
time -p docker run -v $(pwd)/adso:/adso/data_in -v $(pwd)/outputs:/adso/data_out linhdpostdata/adso
cp outputs/adso.xml data/navarro_colorado_adso.xml
navarro_colorado_adso_times = dict(pair.split(" ") for pair in adso_timing.strip().split("\n")[-3:])
%%bash --err sonnets_timing --out sonnets_output
time -p docker run -v $(pwd)/sonnets:/adso/data_in -v $(pwd)/outputs:/adso/data_out linhdpostdata/adso
cp outputs/sonnets.xml data/navarro_colorado_sonnets.xml
navarro_colorado_sonnets_times = dict(pair.split(" ") for pair in sonnets_timing.strip().split("\n")[-3:])
```
Loading the outputs of the ADSO system into Pandas `DataFrame`s
```
from glob import glob
from xml.etree import ElementTree
def load_tei(filename):
lines = []
with open(filename, "r") as xml:
contents = xml.read()
tree = ElementTree.fromstring(contents)
tags = tree.findall(".//{http://www.tei-c.org/ns/1.0}l")
for tag in tags:
text = clean_text(tag.text)
lines.append((text, tag.attrib['met']))
return pd.DataFrame(lines, columns=["line_text", "metrical_pattern"])
navarro_colorado_adso = load_tei("outputs/adso.xml")
navarro_colorado_adso
navarro_colorado_sonnets = load_tei("outputs/sonnets.xml")
navarro_colorado_sonnets
```
### Accuracy on ADSO
```
correct = sum(navarro_colorado_adso.metrical_pattern == adso.metrical_pattern)
accuracy_navarro_colorado_adso = correct / adso.metrical_pattern.size
print(f"Navarro-Colorado on ADSO: {accuracy_navarro_colorado_adso:.4f} ({navarro_colorado_adso_times['real']}s)")
```
### Accuracy on Sonnets
```
correct = sum(navarro_colorado_sonnets.metrical_pattern == sonnets.metrical_pattern)
accuracy_navarro_colorado_sonnets = correct / sonnets.metrical_pattern.size
print(f"Navarro-Colorado on Sonnets: {accuracy_navarro_colorado_sonnets:.4f} ({navarro_colorado_sonnets_times['real']}s)")
```
---
## Gervás
Gervás was kind enough to run his system against the ADSO corpus and send us the results for evaluation. We are including the raw results and the transformation functions we used to evaluate its performance.
```
with open("data/gervas_adso.txt", "r") as file:
lines = file.read().split("\n")
gervas = pd.DataFrame.from_records(
[(lines[index-1], *lines[index].split(" ")) for index, line in enumerate(lines) if index % 2 != 0]
).drop([1, 6, 9, 10], axis=1).rename(columns={
0: "line_text",
2: "stress",
3: "indexed_metrical_pattern",
4: "length",
    5: "metrical_type",
7: "consonant_ending",
8: "asonant_ending",
})
def indexed2binary(df):
binary = ["-" for i in range(int(df["length"]))]
for pos in df["indexed_metrical_pattern"].split("'"):
binary[int(pos) - 1] = "+"
return "".join(binary)
gervas["metrical_pattern"] = gervas.apply(indexed2binary, axis=1)
gervas["line_text"] = gervas.line_text.apply(clean_text)
```
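To make the conversion concrete, here is `indexed2binary` applied to a hypothetical row (the positions and length are made up for illustration; the indexed notation marks the stressed syllable positions, separated by apostrophes):
```
# Hypothetical row: stresses on syllables 2, 6 and 10 of an 11-syllable line
example = pd.Series({"indexed_metrical_pattern": "2'6'10", "length": "11"})
print(indexed2binary(example))  # -+---+---+-
```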
Calculating overlap of verses evaluated by Gervás and those in ADSO
```
overlap_adso = list(set(gervas.line_text.tolist()) & set(adso.line_text.drop_duplicates().tolist()))
print(f"{len(overlap_adso)} lines from ADSO")
overlap_sonnets = list(set(gervas.line_text.tolist()) & set(sonnets.line_text.drop_duplicates().tolist()))
print(f"{len(overlap_sonnets)} lines from Sonnets")
```
### Accuracy on ADSO
```
gervas_metrical_patterns = (gervas[gervas.line_text.isin(overlap_adso)]
.drop_duplicates("line_text")
.sort_values("line_text")
.metrical_pattern
.values)
adso_metrical_patterns = (adso[adso.line_text.isin(overlap_adso)]
.drop_duplicates("line_text")
.sort_values("line_text")
.metrical_pattern
.values)
accuracy_gervas_adso = sum(gervas_metrical_patterns == adso_metrical_patterns) / len(adso_metrical_patterns)
print(f"Gervás on {len(overlap_adso)} ADSO lines: {accuracy_gervas_adso:.4f}")
```
Despite the difference in the number of lines, this value is considerably lower than the one originally reported by Gervás (0.8873).
### Accuracy on Sonnets
```
gervas_metrical_patterns = (gervas[gervas.line_text.isin(overlap_sonnets)]
.drop_duplicates("line_text")
.sort_values("line_text")
.metrical_pattern
.values)
sonnets_metrical_patterns = (sonnets[sonnets.line_text.isin(overlap_sonnets)]
.drop_duplicates("line_text")
.sort_values("line_text")
.metrical_pattern
.values)
accuracy_gervas_sonnets = sum(gervas_metrical_patterns == sonnets_metrical_patterns) / len(sonnets_metrical_patterns)
print(f"Gervás on {len(overlap_sonnets)} Sonnets lines: {accuracy_gervas_sonnets:.4f}")
```
---
## Rantanplan
Importing libraries. We will disable the cache so that subsequent calls made while timing the execution don't get affected.
```
import rantanplan.pipeline
from rantanplan import get_scansion
```
Measuring running times for Rantanplan on ADSO
```
adso_text = "\n".join(adso.line_text.values)
adso_lengths = [11] * adso.line_text.size
# disabling cache
rantanplan_adso_times = %timeit -o rantanplan.pipeline._load_pipeline = {}; get_scansion(adso_text, rhythmical_lengths=adso_lengths)
rantanplan_adso = get_scansion(adso_text, rhythmical_lengths=adso_lengths)
rantanplan_adso_stress = [line["rhythm"]["stress"] for line in rantanplan_adso]
```
Measuring running times for Rantanplan on Sonnets
```
sonnets_text = "\n".join(sonnets.line_text.values)
sonnets_lengths = [11] * sonnets.line_text.size
# disabling cache
rantanplan_sonnets_times = %timeit -o rantanplan.pipeline._load_pipeline = {}; rantanplan_sonnets = get_scansion(sonnets_text, rhythmical_lengths=sonnets_lengths)
rantanplan_sonnets = get_scansion(sonnets_text, rhythmical_lengths=sonnets_lengths)
rantanplan_sonnets_stress = [line["rhythm"]["stress"] for line in rantanplan_sonnets]
```
### Accuracy on ADSO
```
rantanplan_adso_stress = [line["rhythm"]["stress"] for line in rantanplan_adso]
accuracy_rantanplan_adso = sum(rantanplan_adso_stress == adso.metrical_pattern) / adso.metrical_pattern.size
print(f"Rantanplan on ADSO: {accuracy_rantanplan_adso:.4f} ({rantanplan_adso_times.average:.4f}s)")
```
### Accuracy on Sonnets
```
rantanplan_sonnets_stress = [line["rhythm"]["stress"] for line in rantanplan_sonnets]
accuracy_rantanplan_sonnets = sum(rantanplan_sonnets_stress == sonnets.metrical_pattern) / sonnets.metrical_pattern.size
print(f"Rantanplan on Sonnets: {accuracy_rantanplan_sonnets:.4f} ({rantanplan_sonnets_times.average:.4f}s)")
```
---
# Results
```
from IPython.display import display, HTML
```
## ADSO
```
display(HTML(
pd.DataFrame([
["Gervรกs", accuracy_gervas_adso, "N/A"],
["Navarro-Colorado", accuracy_navarro_colorado_adso, float(navarro_colorado_adso_times["real"])],
["Rantanplan", accuracy_rantanplan_adso, rantanplan_adso_times.average]
], columns=["Model", "Accuracy", "Time"]).to_html(index=False)
))
```
## Sonnets
```
display(HTML(
pd.DataFrame([
["Gervรกs", accuracy_gervas_sonnets, "N/A"],
["Navarro-Colorado", accuracy_navarro_colorado_sonnets, float(navarro_colorado_sonnets_times["real"])],
["Rantanplan", accuracy_rantanplan_sonnets, rantanplan_sonnets_times.average]
], columns=["Model", "Accuracy", "Time"]).to_html(index=False)
))
```
# Week 2. Data iteration
---
- Model-centric view: Take the data you have, and develop a model that does as well as possible on it, i.e., hold the data fixed and iteratively improve the code/model.
- Data-centric view: The quality of the data is paramount. Use tools to improve that data quality; this will allow multiple models to do well, i.e., hold the code fixed and iteratively improve the data.
### Data augmentation
---
- **Goal:** Create realistic examples that (i) the algorithm does poorly on, but (ii) humans (or other baseline) do well on.
- **Checklist:**
1. Does it sound realistic?
 2. Is the $x \rightarrow y$ mapping clear? (e.g., can humans recognize speech?)
3. Is the algorithm currently doing poorly on it?
### Data iteration loop
---
- Add/improve Data (holding model fixed)
- Training
- Error analysis
### Can adding data hurt performance?
---
For unstructured data problems, if:
- The model is large (low bias)
- The mapping $x \rightarrow y$ is clear (e.g., given only the input $x$, humans can make accurate predictions).
Then, **adding data rarely hurts accuracy**.
### Photo OCR counterexample
---
<img src = "https://i.gyazo.com/f2675d7ae2fbc6e0c67563a2442994ab.png" width = "500px">
### Structured data (adding features)
---
Vegetarians are frequently recommended restaurants with only meat options.
Possible features to add?
- Is the person vegetarian (based on past orders)?
- Does the restaurant have vegetarian options (based on its menu)? A small pandas sketch of deriving these features follows the figure below.
<img src = "https://i.gyazo.com/2abc15c57776f8534b05535d6225d57f.png" width = "500px">
### Experiment tracking
---
What to track?
- Algorithm/code versioning
- Dataset used
- Hyperparameters
- Results (a minimal file-based tracking sketch is shown at the end of this section)
Tracking tools
- Text files
- Spreadsheet
- Experiment tracking system
Desirable features?
- Information needed to replicate results
- Experiment results, ideally with summary metrics/analysis
- Perhaps also: Resource monitoring, visualization, model error analysis
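Putting the lists above together, here is a minimal sketch of text-file experiment tracking as append-only JSON lines. All field names are hypothetical, and the git call assumes the project lives in a git repository.
```
import json
import subprocess
import time

def log_experiment(path, dataset, hyperparams, results):
    # Append one experiment record as a single JSON line.
    record = {
        "timestamp": time.strftime("%Y-%m-%dT%H:%M:%S"),
        # Code version: current git commit hash (empty if not inside a git repo).
        "git_commit": subprocess.run(["git", "rev-parse", "HEAD"],
                                     capture_output=True, text=True).stdout.strip(),
        "dataset": dataset,
        "hyperparams": hyperparams,
        "results": results,
    }
    with open(path, "a") as f:
        f.write(json.dumps(record) + "\n")

# Hypothetical usage:
log_experiment("experiments.jsonl",
               dataset="speech_v3",
               hyperparams={"lr": 1e-3, "batch_size": 32},
               results={"val_accuracy": 0.91})
```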
### From Big Data to Good Data
---
<img src = "https://i.gyazo.com/452392683a71ea3fdc28e619e2a96e8b.png" width = "500px">
### Additional Resources
---
Week 2: Select and Train Model
If you wish to dive more deeply into the topics covered this week, feel free to check out these optional references. You won't have to read these to complete this week's practice quizzes.
Establishing a baseline
Error analysis
Experiment tracking
Papers
Brundage, M., Avin, S., Wang, J., Belfield, H., Krueger, G., Hadfield, G., … Anderljung, M. (n.d.). Toward trustworthy AI development: Mechanisms for supporting verifiable claims. Retrieved May 7, 2021, from http://arxiv.org/abs/2004.07213v2
Nakkiran, P., Kaplun, G., Bansal, Y., Yang, T., Barak, B., & Sutskever, I. (2019). Deep double descent: Where bigger models and more data hurt. Retrieved from http://arxiv.org/abs/1912.02292
### Proximity
```
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from scipy.optimize import minimize
from scipy.spatial.distance import cdist, pdist
from scipy import stats
from sklearn.neighbors import DistanceMetric
from tslearn.datasets import UCR_UEA_datasets
from tslearn.neighbors import NearestNeighbors, KNeighborsTimeSeries
from sklearn.metrics import accuracy_score
from scipy.interpolate import interp1d
import tensorflow as tf
from sklearn import preprocessing
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation, Conv1D, GlobalAveragePooling1D, BatchNormalization, Conv2D
from tensorflow.keras.layers import GlobalAveragePooling1D
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.backend import function
from sklearn.neighbors import LocalOutlierFactor
from tslearn.utils import to_sklearn_dataset
from tensorflow import keras
print(tf.__version__)
import seaborn as sns
from scipy.spatial import distance
def ucr_data_loader(dataset):
X_train, y_train, X_test, y_test = UCR_UEA_datasets().load_dataset(dataset)
return X_train, y_train, X_test, y_test
def label_encoder(training_labels, testing_labels):
le = preprocessing.LabelEncoder()
le.fit(np.concatenate((training_labels, testing_labels), axis=0))
y_train = le.transform(training_labels)
y_test = le.transform(testing_labels)
return y_train, y_test
def native_guide_retrieval(query, predicted_label, distance, n_neighbors):
    # Index the training labels so we can select instances from classes other
    # than the one predicted for the query (its "unlike" neighbours).
    df = pd.DataFrame(y_train, columns=['label'])
    df.index.name = 'index'
    ts_length = X_train.shape[1]
    # Fit a time-series KNN on the training instances whose label differs from
    # the predicted label, then retrieve the nearest unlike neighbours.
    knn = KNeighborsTimeSeries(n_neighbors=n_neighbors, metric=distance)
    knn.fit(X_train[list(df[df['label'] != predicted_label].index.values)])
    dist, ind = knn.kneighbors(query.reshape(1, ts_length), return_distance=True)
    # Map the neighbour positions back to indices in the full training set.
    return dist[0], df[df['label'] != predicted_label].index[ind[0][:]]
for dataset in ['CBF', 'chinatown', 'coffee', 'ecg200', 'gunpoint']:
X_train, y_train, X_test, y_test = ucr_data_loader(str(dataset))
y_train, y_test = label_encoder(y_train, y_test)
min_edit_cf = np.load('../W-CF/' + str(dataset) + '_wachter_cf.npy')
cam_swap_cf = np.load('../Native-Guide/' + str(dataset)+'_native_guide_isw.npy')
model = keras.models.load_model('../fcn_weights/'+str(dataset)+'_best_model.hdf5')
y_pred = np.argmax(model.predict(X_test), axis=1)
nuns = []
for instance in range(len(X_test)):
nuns.append(native_guide_retrieval(X_test[instance], y_pred[instance], 'euclidean', 1)[1][0])
nuns = np.array(nuns)
l1_nun = []
l1_min_edit = []
l1_cam_swap = []
l2_nun = []
l2_min_edit = []
l2_cam_swap = []
l_inf_nun = []
l_inf_min_edit = []
l_inf_cam_swap = []
for instance in range(len(X_test)):
l1_nun.append(distance.cityblock(X_train[nuns[instance]],X_test[instance]))
l1_min_edit.append(distance.cityblock(min_edit_cf[instance],X_test[instance]))
l1_cam_swap.append(distance.cityblock(cam_swap_cf[instance],X_test[instance]))
l2_nun.append(np.linalg.norm(X_train[nuns[instance]]-X_test[instance]))
l2_min_edit.append(np.linalg.norm(min_edit_cf[instance]-X_test[instance]))
l2_cam_swap.append(np.linalg.norm(cam_swap_cf[instance]-X_test[instance]))
l_inf_nun.append(distance.chebyshev(X_train[nuns[instance]],X_test[instance]))
l_inf_min_edit.append(distance.chebyshev(min_edit_cf[instance],X_test[instance]))
l_inf_cam_swap.append(distance.chebyshev(cam_swap_cf[instance],X_test[instance]))
print({dataset + '_l1' : (np.mean(np.array(l1_min_edit)/np.array(l1_nun)).round(2), np.mean(np.array(l1_cam_swap)/np.array(l1_nun)).round(2))})
print({dataset + '_l2' : (np.mean(np.array(l2_min_edit)/np.array(l2_nun)).round(2), np.mean(np.array(l2_cam_swap)/np.array(l2_nun)).round(2))})
print({dataset + '_l_inf' : (np.mean(np.array(l_inf_min_edit)/np.array(l_inf_nun)).round(2), np.mean(np.array(l_inf_cam_swap)/np.array(l_inf_nun)).round(2))
})
```
### Proximity Analysis and Plotting
Plots for different distances
```
#plt.title(r'W1 disk and central $\pm2^\circ$ subtracted', fontsize='small')
labels = ['CBF', 'Chinatown', 'Coffee', 'Ecg200', 'Gunpoint']
l_inf_w = [2.07,1.52, 1.93, 1.26, 0.91]
l_inf_cam = [0.85,0.82, 1, 0.84, 0.82]
x = np.arange(len(labels)) # the label locations
width = 0.3 # the width of the bars
fig, ax = plt.subplots()
rects1 = ax.bar(x - width/2, l_inf_w, width, label='w-CF')
rects2 = ax.bar(x + width/2, l_inf_cam, width, label='Native-Guide CF')
ax.hlines(1,xmin=-0.5, xmax=4.5, colors='red', linestyles='--', label='NUN-CF')
ax.set_ylabel('RCF - $L_{\infty}$', size = 'xx-large', fontweight='bold')
ax.set_title('Proximity - $L_{\infty}$ Norm', size='xx-large', fontweight='bold')
ax.set_xticks(x)
ax.set_xticklabels(labels, size='large')
ax.legend()
fig.tight_layout()
#plt.savefig("../Images/L_inf.pdf")
plt.show()
labels = ['CBF', 'Chinatown', 'Coffee', 'Ecg200', 'Gunpoint']
l1_w = [0.15,0.61, 0.13, 0.1, 0.13]
l1_cam = [0.34,0.31, 0.26, 0.45, 0.29]
x = np.arange(len(labels)) # the label locations
width = 0.3 # the width of the bars
fig, ax = plt.subplots()
rects1 = ax.bar(x - width/2, l1_w, width, label='w-CF')
rects2 = ax.bar(x + width/2, l1_cam, width, label='Native-Guide CF')
ax.hlines(1,xmin=-0.5, xmax=4.5, colors='red', linestyles='--', label='NUN-CF')
ax.set_ylabel('RCF - $L_{1}$', size = 'xx-large', fontweight='bold')
ax.set_title('Proximity - $L_{1}$ Norm', size='xx-large', fontweight='bold')
ax.set_xticks(x)
ax.set_xticklabels(labels, size='large')
ax.legend()
ax.set_ylim([0,1.4])
fig.tight_layout()
#plt.savefig("../Images/L_1.pdf")
plt.show()
```
# Lecture B: Notebook Basics
## The Notebook dashboard
When you first start the notebook server, your browser will open to the notebook dashboard. The dashboard serves as a home page for the notebook. Its main purpose is to display the notebooks and files in the current directory. For example, here is a screenshot of the dashboard page for the `examples` directory in the Jupyter repository:
<img src="images/dashboard_files_tab.png" width="791px"/>
The top of the notebook list displays clickable breadcrumbs of the current directory. By clicking on these breadcrumbs or on sub-directories in the notebook list, you can navigate your file system.
To create a new notebook, click on the "New" button at the top of the list and select a kernel from the dropdown (as seen below). Which kernels are listed depends on what's installed on the server. Some of the kernels in the screenshot below may not be available to you.
<img src="images/dashboard_files_tab_new.png" width="202px" />
Notebooks and files can be uploaded to the current directory by dragging a notebook file onto the notebook list or by the "click here" text above the list.
The notebook list shows green "Running" text and a green notebook icon next to running notebooks (as seen below). Notebooks remain running until you explicitly shut them down; closing the notebook's page is not sufficient.
<img src="images/dashboard_files_tab_run.png" width="777px"/>
To shut down, delete, duplicate, or rename a notebook, check the checkbox next to it and an array of controls will appear at the top of the notebook list (as seen below). You can also use the same operations on directories and files when applicable.
<img src="images/dashboard_files_tab_btns.png" width="301px" />
To see all of your running notebooks along with their directories, click on the "Running" tab:
<img src="images/dashboard_running_tab.png" width="786px" />
This view provides a convenient way to track notebooks that you start as you navigate the file system in a long running notebook server.
## Overview of the Notebook UI
If you create a new notebook or open an existing one, you will be taken to the notebook user interface (UI). This UI allows you to run code and author notebook documents interactively. The notebook UI has the following main areas:
* Menu
* Toolbar
* Notebook area and cells
The notebook has an interactive tour of these elements that can be started in the "Help:User Interface Tour" menu item.
## Modal editor
Starting with IPython 2.0, the Jupyter Notebook has a modal user interface. This means that the keyboard does different things depending on which mode the Notebook is in. There are two modes: edit mode and command mode.
### Edit mode
Edit mode is indicated by a green cell border and a prompt showing in the editor area:
<img src="images/edit_mode.png">
When a cell is in edit mode, you can type into the cell, like a normal text editor.
<div class="alert alert-success">
Enter edit mode by pressing `Enter` or using the mouse to click on a cell's editor area.
</div>
### Command mode
Command mode is indicated by a grey cell border with a blue left margin:
<img src="images/command_mode.png">
When you are in command mode, you are able to edit the notebook as a whole, but not type into individual cells. Most importantly, in command mode, the keyboard is mapped to a set of shortcuts that let you perform notebook and cell actions efficiently. For example, if you are in command mode and you press `c`, you will copy the current cell - no modifier is needed.
<div class="alert alert-error">
Don't try to type into a cell in command mode; unexpected things will happen!
</div>
<div class="alert alert-success">
Enter command mode by pressing `Esc` or using the mouse to click *outside* a cell's editor area.
</div>
## Mouse navigation
All navigation and actions in the Notebook are available using the mouse through the menubar and toolbar, which are both above the main Notebook area:
<img src="images/menubar_toolbar.png" width="786px" />
The first idea of mouse based navigation is that **cells can be selected by clicking on them.** The currently selected cell gets a grey or green border depending on whether the notebook is in edit or command mode. If you click inside a cell's editor area, you will enter edit mode. If you click on the prompt or output area of a cell you will enter command mode.
If you are running this notebook in a live session (not on http://nbviewer.jupyter.org) try selecting different cells and going between edit and command mode. Try typing into a cell.
The second idea of mouse based navigation is that **cell actions usually apply to the currently selected cell**. Thus if you want to run the code in a cell, you would select it and click the <button class='btn btn-default btn-xs'><i class="fa fa-step-forward icon-step-forward"></i></button> button in the toolbar or the "Cell:Run" menu item. Similarly, to copy a cell you would select it and click the <button class='btn btn-default btn-xs'><i class="fa fa-copy icon-copy"></i></button> button in the toolbar or the "Edit:Copy" menu item. With this simple pattern, you should be able to do most everything you need with the mouse.
Markdown and heading cells have one other state that can be modified with the mouse. These cells can either be rendered or unrendered. When they are rendered, you will see a nice formatted representation of the cell's contents. When they are unrendered, you will see the raw text source of the cell. To render the selected cell with the mouse, click the <button class='btn btn-default btn-xs'><i class="fa fa-step-forward icon-step-forward"></i></button> button in the toolbar or the "Cell:Run" menu item. To unrender the selected cell, double click on the cell.
## Keyboard Navigation
The modal user interface of the Jupyter Notebook has been optimized for efficient keyboard usage. This is made possible by having two different sets of keyboard shortcuts: one set that is active in edit mode and another in command mode.
The most important keyboard shortcuts are `Enter`, which enters edit mode, and `Esc`, which enters command mode.
In edit mode, most of the keyboard is dedicated to typing into the cell's editor. Thus, in edit mode there are relatively few shortcuts. In command mode, the entire keyboard is available for shortcuts, so there are many more. The `Help`->`Keyboard Shortcuts` dialog lists the available shortcuts.
We recommend learning the command mode shortcuts in the following rough order:
1. Basic navigation: `enter`, `shift-enter`, `up/k`, `down/j`
2. Saving the notebook: `s`
3. Change Cell types: `y`, `m`, `1-6`, `t`
4. Cell creation: `a`, `b`
5. Cell editing: `x`, `c`, `v`, `d`, `z`
6. Kernel operations: `i`, `0` (press twice)
https://towardsdatascience.com/why-using-a-mean-for-missing-data-is-a-bad-idea-alternative-imputation-algorithms-837c731c1008
### Why using a mean for missing data is a bad idea. Alternative imputation algorithms.
- We all know the pain when the dataset we want to use for Machine Learning contains missing data.
- The quick and easy workaround is to substitute a **mean for numerical features** and a **mode for categorical ones**.
- Even better, someone might just insert 0's or discard the data and proceed to the training of the model.
### Mean and mode ignore feature correlations
- Let's have a look at a very simple example to visualize the problem. The following table has 3 variables: Age, Gender and Fitness Score. It shows Fitness Score results (0-10) for people of different age and gender.

- Now let's assume that some of the data in Fitness Score is actually missing, so that after using mean imputation we can compare the results from both tables.

- Imputed values don't really make sense; in fact, they can have a negative effect on accuracy when training our ML model.
- For example, a 78-year-old woman now has a Fitness Score of 5.1, which is typical for people aged between 42 and 60 years old.
- Mean imputation doesn't take into account the fact that Fitness Score is correlated with the Age and Gender features. It only inserts 5.1, the mean of the Fitness Score, while ignoring potential feature correlations.
### Mean reduces a variance of the data
- Based on the previous example, the variance of the real Fitness Score and that of its mean-imputed equivalent will differ. The figure below presents the variance for those two cases:

- As we can see, the variance was reduced after using mean imputation (the change is large because the dataset is very small). Going deeper into the mathematics, a smaller variance leads to a narrower confidence interval in the probability distribution [3]. This amounts to introducing a bias into our model.
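To make the variance-shrinkage effect concrete, here is a small self-contained NumPy check. The numbers are invented for illustration and are not the Fitness Score data from the table above.
```
import numpy as np

scores = np.array([9.0, 2.0, 7.5, 1.0, 8.0, 3.0, 6.5, 4.0])  # hypothetical complete data
observed = scores.copy()
observed[[1, 4, 6]] = np.nan                                  # pretend three values are missing

# Replace every missing value with the mean of the observed values.
imputed = np.where(np.isnan(observed), np.nanmean(observed), observed)

print(f"variance of the complete data:  {scores.var():.2f}")
print(f"variance after mean imputation: {imputed.var():.2f}")  # noticeably smaller
```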
### Alternative Imputation Algorithms
- Fortunately, there are many brilliant alternatives to mean and mode imputation. Many of them are based on algorithms already used for Machine Learning. The following list briefly describes the most popular methods, as well as a few less well-known imputation techniques; a short scikit-learn sketch follows the list.
#### MICE
- According to [4], it is the second most popular imputation method, right after the mean. Initially, a simple imputation (e.g. the mean) is performed to replace the missing data for each variable, and we also note their positions in the dataset. Then we take each feature and predict its missing data with a regression model, using the remaining features as predictors. The process is iterated multiple times, which updates the imputation values. The common number of iterations is usually 10, but it depends on the dataset. A more detailed explanation of the algorithm can be found here [5].
#### KNN
- This popular imputation technique is based on the K-Nearest Neighbours algorithm. For a given instance with missing data, KNN imputation finds the n most similar neighbours and replaces the missing element with the mean or mode of those neighbours. The choice between mode and mean depends on whether the feature is continuous or categorical. A great paper for a more in-depth understanding is here [6].
#### MissForest
- It is a non-standard but fairly flexible imputation algorithm. It uses Random Forest at its core to predict the missing data. It can be applied to both continuous and categorical variables, which makes it advantageous over many other imputation algorithms. Have a look at what the authors of MissForest wrote about its implementation [7].
#### Fuzzy K-means Clustering
- It is a less well-known imputation technique, but it proves to be more accurate and faster than basic clustering algorithms according to [8]. It computes clusters of instances and fills in the missing values depending on which cluster the instance with missing data belongs to.
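As a rough illustration of how the first two alternatives can be used in practice, here is a minimal scikit-learn sketch. The toy matrix is invented, and `IterativeImputer` (scikit-learn's MICE-style imputer) still requires the experimental import shown below.
```
import numpy as np
from sklearn.experimental import enable_iterative_imputer  # noqa: F401, enables IterativeImputer
from sklearn.impute import SimpleImputer, KNNImputer, IterativeImputer

# Toy data with missing entries (np.nan); the columns could be Age and Fitness Score.
X = np.array([[25.0, 9.0],
              [42.0, np.nan],
              [60.0, 4.5],
              [78.0, np.nan],
              [33.0, 7.0]])

mean_filled = SimpleImputer(strategy="mean").fit_transform(X)
knn_filled = KNNImputer(n_neighbors=2).fit_transform(X)
mice_like = IterativeImputer(max_iter=10, random_state=0).fit_transform(X)

print(mean_filled[:, 1])  # every gap gets the same value
print(knn_filled[:, 1])   # gaps depend on the most similar rows
print(mice_like[:, 1])    # gaps predicted from the other feature(s)
```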
# Supervised Learning with GCN
Graph neural networks (GNNs) combine the strengths of both graph analytics and machine learning.
GraphScope provides the capability to process learning tasks. In this tutorial, we demonstrate
how GraphScope trains a model with GCN.
The learning task is node classification on a citation network. In this task, the algorithm has
to determine the label of the nodes in [Cora](https://linqs.soe.ucsc.edu/data) dataset.
The dataset consists of academic publications as the nodes and the citations between them as the links: if publication A cites publication B, then the graph has an edge from A to B. The nodes are classified into one of seven subjects, and our model will learn to predict this subject.
In this task, we use a Graph Convolution Network (GCN) to train the model. The core of the GCN model is a "graph convolution" layer, which is similar to a conventional dense layer but augmented by the graph adjacency matrix so that it can use information about a node's connections.
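To build intuition for what a single graph convolution does before running the full pipeline, here is a minimal NumPy sketch of the propagation rule $H' = \sigma(\hat{A} H W)$ with a self-loop-augmented, symmetrically normalized adjacency matrix. The toy adjacency matrix, features, and weights below are invented for illustration and are not part of the GraphScope API.
```
import numpy as np

def gcn_layer(adjacency, features, weights, activation=np.tanh):
    # One graph convolution: propagate features over the normalized adjacency
    # matrix (with self-loops), then apply a dense transform and a nonlinearity.
    a_hat = adjacency + np.eye(adjacency.shape[0])           # add self-loops
    d_inv_sqrt = np.diag(1.0 / np.sqrt(a_hat.sum(axis=1)))   # D^{-1/2}
    a_norm = d_inv_sqrt @ a_hat @ d_inv_sqrt                 # normalized adjacency
    return activation(a_norm @ features @ weights)

# Toy graph: 4 nodes, 3 input features, 2 hidden units (random weights).
rng = np.random.default_rng(0)
A = np.array([[0, 1, 0, 0],
              [1, 0, 1, 1],
              [0, 1, 0, 0],
              [0, 1, 0, 0]], dtype=float)
H = rng.normal(size=(4, 3))
W = rng.normal(size=(3, 2))
print(gcn_layer(A, H, W).shape)  # (4, 2): two hidden features per node
```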
This tutorial has the following steps:
- Creating a session and loading graph
- Launching learning engine and attaching the loaded graph.
- Defining train process with builtin GCN model and config hyperparameters
- Training and evaluating
First, let's create a session and load the dataset as a graph.
```
import os
import graphscope
k8s_volumes = {
"data": {
"type": "hostPath",
"field": {
"path": "/testingdata",
"type": "Directory"
},
"mounts": {
"mountPath": "/home/jovyan/datasets",
"readOnly": True
}
}
}
# create session
graphscope.set_option(show_log=True)
sess = graphscope.session(k8s_volumes=k8s_volumes)
# loading cora graph
graph = graphscope.Graph(sess)
graph = graph.add_vertices("/home/jovyan/datasets/cora/node.csv", "paper")
graph = graph.add_edges("/home/jovyan/datasets/cora/edge.csv", "cites")
```
Then, we need to define a feature list for training. The training features should be selected from the vertex properties. In this case, we choose all the properties prefixed with "feat_" as the training features.
With the feature list, we next launch a learning engine with the `learning` method of the session.
(You may find the details of the method in [Session](https://graphscope.io/docs/reference/session.html).)
In this case, we specify the GCN training over `paper` nodes and `cites` edges.
With `gen_labels`, we split the `paper` nodes into three parts: 75% are used as the training set, 10% for validation, and 15% for testing.
```
# define the features for learning
paper_features = []
for i in range(1433):
paper_features.append("feat_" + str(i))
# launch a learning engine.
lg = sess.learning(graph, nodes=[("paper", paper_features)],
edges=[("paper", "cites", "paper")],
gen_labels=[
("train", "paper", 100, (0, 75)),
("val", "paper", 100, (75, 85)),
("test", "paper", 100, (85, 100))
])
```
We use the builtin GCN model to define the training process. You can find more details about all the builtin learning models in [Graph Learning Model](https://graphscope.io/docs/learning_engine.html#data-model).
In this example, we use TensorFlow as the NN backend trainer.
```
from graphscope.learning.examples import GCN
from graphscope.learning.graphlearn.python.model.tf.trainer import LocalTFTrainer
from graphscope.learning.graphlearn.python.model.tf.optimizer import get_tf_optimizer
# supervised GCN.
def train(config, graph):
def model_fn():
return GCN(
graph,
config["class_num"],
config["features_num"],
config["batch_size"],
val_batch_size=config["val_batch_size"],
test_batch_size=config["test_batch_size"],
categorical_attrs_desc=config["categorical_attrs_desc"],
hidden_dim=config["hidden_dim"],
in_drop_rate=config["in_drop_rate"],
neighs_num=config["neighs_num"],
hops_num=config["hops_num"],
node_type=config["node_type"],
edge_type=config["edge_type"],
full_graph_mode=config["full_graph_mode"],
)
trainer = LocalTFTrainer(
model_fn,
epoch=config["epoch"],
optimizer=get_tf_optimizer(
config["learning_algo"], config["learning_rate"], config["weight_decay"]
),
)
trainer.train_and_evaluate()
# define hyperparameters
config = {
"class_num": 7, # output dimension
"features_num": 1433,
"batch_size": 140,
"val_batch_size": 300,
"test_batch_size": 1000,
"categorical_attrs_desc": "",
"hidden_dim": 128,
"in_drop_rate": 0.5,
"hops_num": 2,
"neighs_num": [5, 5],
"full_graph_mode": False,
"agg_type": "gcn", # mean, sum
"learning_algo": "adam",
"learning_rate": 0.01,
"weight_decay": 0.0005,
"epoch": 5,
"node_type": "paper",
"edge_type": "cites",
}
```
Having defined the training process and hyperparameters,
we can now start the training with the learning engine `lg` and the hyperparameter configuration.
```
train(config, lg)
```
Finally, don't forget to close the session.
```
sess.close()
```
# Box World Navigation Project
---
This notebook describes the implementation of the Deep Reinforcement Learning agent used to solve the first project of the [Deep Reinforcement Learning Nanodegree](https://www.udacity.com/course/deep-reinforcement-learning-nanodegree--nd893).
This notebook also serves as live and executable documentation. Therefore, there will be a little bit of code and modules imported.
Nevertheless, all the *important* stuff is implemented in the [deeprl](deeprl/) package. So, you are encouraged to take a look at that package as well. A fully trained agent should perform as in the video below (if the video doesn't show automatically, execute the cell manually).
```
%%HTML
<iframe width="560" height="315"
src="https://www.youtube.com/embed/B4JKTivr4qA"
frameborder="0"
allow="autoplay; encrypted-media" allowfullscreen>
</iframe>
```
## 1. Loading necessary packages
We begin by importing some necessary packages. If the code cell below returns an error, please revisit the [README](README.md) instructions to double-check that you have installed [Unity ML-Agents](https://github.com/Unity-Technologies/ml-agents/blob/master/docs/Installation.md), [NumPy](http://www.numpy.org/), and [PyTorch](http://pytorch.org).
If you are running a Windows system, please pay special attention to the README, as the code **will not** be able to automatically execute the environment.
```
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import numpy as np
import matplotlib.pyplot as plt
from deeprl.train import dqn
from deeprl.agent import Agent
from deeprl.model import QNetwork
from deeprl.util import print_source
from deeprl.util import load_environment
```
## 2. The algorithm
As aforementioned, this code uses Deep Reinforcement Learning to solve the environment. In particular, we use the Deep Q-Networks (DQN) algorithm.
DQN uses *experience replay* and *fixed Q-targets* to stabilize the learning process. Overall, the algorithm is as follows (a minimal sketch of the loss computation is shown after the list):
* Take action $a_t$ according to an $\epsilon$-greedy policy
* Store transition $(s_t, a_t, r_{t+1}, s_{t+1})$ in replay memory $\mathcal{D}$
* Sample random mini-batch of transitions $(s, a, r, s')$ from $\mathcal{D}$
* Compute Q-learning targets with regards to old, fixed parameters $w^-$
* Optimize Mean Squared Error between Q-network and Q-learning targets
$$
\mathcal{L}_i(w_i) = \mathbb{E}_{s, a, r, s' \sim \mathcal{D}_i} \left[\left(r + \gamma\max_{a'}Q(s', a'; w_i^-)-Q(s, a; w_i)\right)^2\right]
$$
* Using a variant of stochastic gradient descent
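The sketch below shows, in plain PyTorch, how the Q-learning targets and the mean squared error from the formula above can be computed for one mini-batch. It is an illustration only, not the repository's implementation (that one is printed later via `print_source`); the stand-in linear networks and random transitions exist purely to make the snippet runnable.
```
import torch
import torch.nn as nn
import torch.nn.functional as F

def dqn_loss(q_local, q_target, states, actions, rewards, next_states, dones, gamma=0.99):
    # Q-learning targets computed with the fixed-parameter network (weights w^-).
    with torch.no_grad():
        q_next = q_target(next_states).max(dim=1, keepdim=True)[0]
        targets = rewards + gamma * q_next * (1.0 - dones)
    # Q values from the local network (weights w) for the actions actually taken.
    expected = q_local(states).gather(1, actions)
    return F.mse_loss(expected, targets)

# Tiny smoke test with stand-in linear "networks" and random transitions.
state_size, action_size, batch = 37, 4, 8
q_local = nn.Linear(state_size, action_size)
q_target = nn.Linear(state_size, action_size)
states = torch.randn(batch, state_size)
actions = torch.randint(0, action_size, (batch, 1))
rewards = torch.randn(batch, 1)
next_states = torch.randn(batch, state_size)
dones = torch.zeros(batch, 1)
print(dqn_loss(q_local, q_target, states, actions, rewards, next_states, dones))
```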
### 2.1 The optimizer
The variant of stochastic gradient descent we use is the [Adam optimization algorithm](https://arxiv.org/abs/1412.6980).
### 2.2 The network architecture
For this problem, we're using a four-layer fully-connected neural network. Layer sizes are:
1. `state_size` inputs, 16 outputs
2. 16 inputs, 32 outputs
3. 32 inputs, 64 outputs
4. 64 inputs, `action_size` outputs
Where `state_size` is the size of the state in the current environment (37 for the banana environment), and `action_size` is the size of the actions in the current environment (4 for the banana environment). The PyTorch implementation can be seen in the cell below.
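As a rough, hypothetical sketch (the actual source is printed by the next cell), such a network could be written as follows, assuming ReLU activations between layers:
```
import torch.nn as nn
import torch.nn.functional as F

class SimpleQNetwork(nn.Module):
    """Illustrative 4-layer fully-connected Q-network, not the project's exact class."""
    def __init__(self, state_size, action_size):
        super().__init__()
        self.fc1 = nn.Linear(state_size, 16)
        self.fc2 = nn.Linear(16, 32)
        self.fc3 = nn.Linear(32, 64)
        self.fc4 = nn.Linear(64, action_size)

    def forward(self, state):
        x = F.relu(self.fc1(state))
        x = F.relu(self.fc2(x))
        x = F.relu(self.fc3(x))
        return self.fc4(x)  # one Q-value per action
```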
```
print_source(QNetwork.__init__)
print_source(QNetwork.forward)
```
### 2.3 The actual learning algorithm
With the textual description given above and the network and optimizer defined, one can devise a learning algorithm in PyTorch. Assuming `self.qnetwork_target` implements the network that uses weights $w^-$ and `self.qnetwork_local` implements the network that uses weights $w$, the algorithm can be implemented as below (again, the reader is encouraged to read the [full source](deeprl/agent.py)).
Notice that we *do not* fully transition the local weights $w$ to the $w^-$ weights. Rather, we perform a *soft* update, controlled by the hyperparameter $\tau$. This lets us transition slowly from one set of weights to the other, giving the algorithm smoother operation.
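Before looking at the actual source, here is a minimal sketch of such a soft update, assuming both networks share the same architecture:
```
def soft_update(local_model, target_model, tau=1e-3):
    """Blend the local weights w into the target weights: w- = tau*w + (1 - tau)*w-.
    Illustrative sketch, not the exact method used in deeprl."""
    for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
        target_param.data.copy_(tau * local_param.data + (1.0 - tau) * target_param.data)
```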
```
print_source(Agent.learn)
```
### 2.4 Hyperparameters
As can be seen above, the algorithm has many hyperparameters. The set of hyperparameters used are:
* $\gamma = 0.99$ (Discount factor)
* $\tau = 1\times10^{-3}$ (Network soft-update parameter)
* $\alpha = 5\times10^{-5}$ (Learning rate used in the Adam optimizer)
* Batch size = 64
* $|\mathcal{D}| = 100000$ (Size of the replay buffer)
* Period between target updates = 5 (every 5 episodes we perform a soft update so that $w^- = \tau w + (1 - \tau)w^-$).
* Since we're using an $\epsilon$-greedy policy, we decay the $\epsilon$ parameter after each episode. For this particular agent, we start with $\epsilon=1$ and multiply it by $0.995$ each episode until it reaches $0.001$. Updates to epsilon are, therefore, $\epsilon \leftarrow \epsilon \cdot \mathrm{decay}$.
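In code, this schedule amounts to something like the following sketch (the episode budget is illustrative, not the value used in `dqn`):
```
n_episodes = 2000                        # illustrative episode budget
eps_start, eps_end, eps_decay = 1.0, 0.001, 0.995

eps = eps_start
for episode in range(n_episodes):
    # ... run one episode with an epsilon-greedy policy using `eps` ...
    eps = max(eps_end, eps * eps_decay)  # multiplicative decay, floored at eps_end
```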
## 3 Training and evaluation
### 3.1 Training
With all of the above defined, we can train our agent. Training is performed by the `dqn` function, shown below.
What it does, essentially, is to load the environment, configure the agent with the environment parameters, and execute the learning process by decaying $\epsilon$ and checking whether the solution criterion is met. If it is, then it finishes training and persists the neural network model.
The solution criterion we're using is an average reward greater than 13 over the last 100 episodes.
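The stopping check itself is simple; a sketch of it (illustrative only, the real `dqn` source is printed below) could be:
```
from collections import deque
import numpy as np

scores_window = deque(maxlen=100)   # rolling window of the last 100 episode scores

def solved(scores_window, threshold=13.0):
    """Environment counts as solved once the 100-episode average reaches the threshold."""
    return len(scores_window) == scores_window.maxlen and np.mean(scores_window) >= threshold
```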
```
print_source(dqn)
```
### 3.2 Evaluation
```
env = load_environment()
agent, scores, episodes = dqn(env, checkpointfn='checkpoint.pth')
env.close()
fig = plt.figure()
plt.plot(np.arange(len(scores)), scores, linewidth=1.0)
plt.ylabel('Score')
plt.xlabel('Episode #')
plt.grid()
plt.show()
```
## 4 Future ideas
Although the agent learns the environment successfully and exhibits decent performance, it can still be greatly improved. Some possible improvements are outlined below.
* The very first thing to try would be hyperparameter optimization, either by grid search or random search, or, more interestingly and more aligned with the techniques studied here, Bayesian Optimization! Properly tuned hyperparameters would probably improve the agent's performance.
* Orthogonal to hyperparameter optimization, other relatively easy improvements would be:
* Implementing [Double Q-Learning](https://arxiv.org/abs/1509.06461)
* Implementing [Prioritized experience replay](https://arxiv.org/abs/1511.05952)
* Implementing a [Dueling network](https://arxiv.org/abs/1511.06581) architecture
* Apart from the aforementioned improvements, the agent currently doesn't really need a deep network, since it is learning from a compact 37-dimensional state rather than raw sensory input. A more challenging (and interesting) task would be to *learn from raw pixels*, which should definitely be tried
An interesting area of investigation would be to build self-normalizing neural networks and check whether they perform better than their non-self-normalizing counterparts. Intuitively, that should be the case but it would probably be necessary to scale features to the range $[0, 1]$. This might be easier to perform when using raw pixels, since we know for sure the maximum values each pixel can have.
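As a starting point for that experiment, a self-normalizing variant could simply swap the ReLU activations for SELU units. The snippet below is a hypothetical sketch, not part of the current code; the 37/4 layer sizes come from the banana environment described above.
```
import torch.nn as nn

# Hypothetical self-normalizing stack: linear layers followed by SELU activations,
# which keep activations roughly zero-mean / unit-variance when inputs are scaled.
selu_q_network = nn.Sequential(
    nn.Linear(37, 16), nn.SELU(),
    nn.Linear(16, 32), nn.SELU(),
    nn.Linear(32, 64), nn.SELU(),
    nn.Linear(64, 4),
)
```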
## Missing values with Adult data
This notebook demonstrates the effect of MAR and MNAR missing values on fairness using Adult data. <br>
We first import the packages needed in this notebook.
```
import sys
sys.path.append("models")
import numpy as np
from adult_model import get_distortion_adult, AdultDataset, reweight_df, get_evaluation
from aif360.algorithms.preprocessing.optim_preproc import OptimPreproc
from aif360.algorithms.preprocessing.optim_preproc_helpers.opt_tools import OptTools
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
```
The function below processes the data and creates missing values in the dataset. <br>
In the Adult dataset, sex is the sensitive attribute, and we use age (binned into decades) and education years as features to predict whether income is above or below \$50K per year. <br>
We create missing values in the feature "Education Years" under both MNAR and MAR mechanisms. In the function below, the mechanism is MNAR: the probability of a value being missing depends on the value of the feature itself.
```
def load_preproc_data_adult(protected_attributes=None):
def custom_preprocessing(df):
"""The custom pre-processing function is adapted from
https://github.com/fair-preprocessing/nips2017/blob/master/Adult/code/Generate_Adult_Data.ipynb
"""
np.random.seed(1)
# Group age by decade
df['Age (decade)'] = df['age'].apply(lambda x: x // 10 * 10)
def group_edu(x):
if x == -1:
return 'missing_edu'
elif x <= 5:
return '<6'
elif x >= 13:
return '>12'
else:
return x
def age_cut(x):
if x >= 70:
return '>=70'
else:
return x
def group_race(x):
if x == "White":
return 1.0
else:
return 0.0
# Cluster education and age attributes.
# Limit education range
df['Education Years'] = df['education-num'].apply(
lambda x: group_edu(x))
df['Education Years'] = df['Education Years'].astype('category')
# Limit age range
df['Age (decade)'] = df['Age (decade)'].apply(lambda x: age_cut(x))
# Rename income variable
df['Income Binary'] = df['income-per-year']
# Recode sex and race
df['sex'] = df['sex'].replace({'Female': 0.0, 'Male': 1.0})
df['race'] = df['race'].apply(lambda x: group_race(x))
# Here we define a column called mis_prob to assign the probability of each observation
# being missed
df['mis_prob'] = 0
for index, row in df.iterrows():
# Here, the probability of missing values in Education Years depends on sex and
# Education Years, so in this case the missing values are under MNAR
# To change the distribution of missing values, we can change the probability here
if row['sex']==0 and row['Education Years'] =='>12':
df.loc[index,'mis_prob'] = 0.65
elif row['sex']==1 and row['Education Years'] =='=8':
df.loc[index,'mis_prob'] = 0.15
else:
df.loc[index,'mis_prob'] = 0.1
new_label = []
for index, row in df.iterrows():
if np.random.binomial(1, float(row['mis_prob']), 1)[0] == 1:
new_label.append('missing_edu')
else:
new_label.append(row['Education Years'])
df['Education Years'] = new_label
print('Number of missing values')
print(len(df.loc[df['Education Years'] == 'missing_edu', :]))
print('Total number of observations')
print(len(df))
return df
XD_features = ['Age (decade)', 'Education Years', 'sex']
D_features = [
'sex'] if protected_attributes is None else protected_attributes
Y_features = ['Income Binary']
X_features = list(set(XD_features) - set(D_features))
categorical_features = ['Age (decade)', 'Education Years']
all_privileged_classes = {"sex": [1.0]}
all_protected_attribute_maps = {"sex": {1.0: 'Male', 0.0: 'Female'}}
return AdultDataset(
label_name=Y_features[0],
favorable_classes=['>50K', '>50K.'],
protected_attribute_names=D_features,
privileged_classes=[all_privileged_classes[x] for x in D_features],
instance_weights_name=None,
categorical_features=categorical_features,
features_to_keep=X_features + Y_features + D_features,
na_values=['?'],
metadata={'label_maps': [{1.0: '>50K', 0.0: '<=50K'}],
'protected_attribute_maps': [all_protected_attribute_maps[x]
for x in D_features]},
custom_preprocessing=custom_preprocessing)
```
The code below loads the data and runs the fairness-fixing algorithm proposed by Calmon et al. \[1\]. Missing values are treated as a new category in the features that contain them. <br>
Note that we modified the distortion function ```get_distortion_adult```. This function defines the penalty the fairness-fixing algorithm pays for changing a value in each feature. We set the penalty to 0 when an observation moves from the missing category to a non-missing category, and a large penalty when it moves from a non-missing category to the missing category or stays in the missing category. <br>
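The modified `get_distortion_adult` itself lives in the `models` package and is not shown here; a simplified sketch of the penalty rule just described (the numeric values are illustrative, not the ones actually used) would be:
```
def education_distortion(old_value, new_value):
    """Illustrative penalty for changing the 'Education Years' value of one observation."""
    if old_value == 'missing_edu' and new_value != 'missing_edu':
        return 0.0      # imputing a missing value is free
    if new_value == 'missing_edu':
        return 1e4      # moving into (or staying in) the missing category is heavily penalized
    return 1.0          # ordinary category changes keep a moderate cost
```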
```
privileged_groups = [{'sex': 1}]
unprivileged_groups = [{'sex': 0}]
dataset_orig = load_preproc_data_adult(['sex'])
optim_options = {
"distortion_fun": get_distortion_adult,
"epsilon": 0.02,
"clist": [0.99, 1.99, 2.99],
"dlist": [.1, 0.05, 0]
}
dataset_orig_train, dataset_orig_vt = dataset_orig.split(
[0.7], shuffle=True)
OP = OptimPreproc(OptTools, optim_options,
unprivileged_groups=unprivileged_groups,
privileged_groups=privileged_groups)
OP = OP.fit(dataset_orig_train)
dataset_transf_cat_test = OP.transform(dataset_orig_vt, transform_Y=True)
dataset_transf_cat_test = dataset_orig_vt.align_datasets(
dataset_transf_cat_test)
dataset_transf_cat_train = OP.transform(
dataset_orig_train, transform_Y=True)
dataset_transf_cat_train = dataset_orig_train.align_datasets(
dataset_transf_cat_train)
```
In this part we use the training data obtained from the fairness fixing algorithm by Calmon et al. \[1\] to train a logistic regression classifier and validate the classifier on the test set.
```
scale_transf = StandardScaler()
X_train = scale_transf.fit_transform(dataset_transf_cat_train.features)
y_train = dataset_transf_cat_train.labels.ravel()
X_test = scale_transf.fit_transform(dataset_transf_cat_test.features)
lmod = LogisticRegression()
lmod.fit(X_train, y_train)
y_pred = lmod.predict(X_test)
print('Without reweight')
get_evaluation(dataset_orig_vt,y_pred,privileged_groups,unprivileged_groups,0,1,1)
```
After getting the accuracy and fairness results, we apply our reweighting algorithm to train a new logistic regression classifier and validate the classifier on the same test set.
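`reweight_df` is implemented in the `models` package and is not reproduced here. As a rough illustration of the general idea behind this kind of reweighting (a Kamiran–Calders-style scheme, which is not necessarily what `reweight_df` does), each (group, label) cell can be weighted so that group membership and the label look statistically independent:
```
import pandas as pd

def independence_weights(df, group_col='sex', label_col='Income Binary'):
    """Illustrative reweighting: w(g, y) = P(g) * P(y) / P(g, y), estimated from the data."""
    p_group = df[group_col].value_counts(normalize=True)
    p_label = df[label_col].value_counts(normalize=True)
    p_joint = df.groupby([group_col, label_col]).size() / len(df)
    return df.apply(
        lambda row: p_group[row[group_col]] * p_label[row[label_col]]
                    / p_joint[(row[group_col], row[label_col])],
        axis=1,
    )
```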
```
dataset_orig_train.instance_weights = reweight_df(dataset_orig_train)
scale_transf = StandardScaler()
X_train = scale_transf.fit_transform(dataset_transf_cat_train.features)
y_train = dataset_transf_cat_train.labels.ravel()
X_test = scale_transf.fit_transform(dataset_transf_cat_test.features)
lmod = LogisticRegression()
lmod.fit(X_train, y_train, sample_weight=dataset_orig_train.instance_weights)
y_pred = lmod.predict(X_test)
print('With reweight')
get_evaluation(dataset_orig_vt,y_pred,privileged_groups,unprivileged_groups,0,1,1)
```
Comparing the two results, the fairness scores increase with only a small tradeoff in accuracy (about a 1\% decrease in accuracy). <br>
The code below processes the data and creates missing values of the MAR type. <br>
In the function below, the missing-value mechanism is MAR: the probability of a value being missing does not depend on the feature itself (here it depends on sex and the income label).<br>
```
def load_preproc_data_adult(protected_attributes=None):
def custom_preprocessing(df):
"""The custom pre-processing function is adapted from
https://github.com/fair-preprocessing/nips2017/blob/master/Adult/code/Generate_Adult_Data.ipynb
"""
np.random.seed(1)
# Group age by decade
df['Age (decade)'] = df['age'].apply(lambda x: x // 10 * 10)
def group_edu(x):
if x == -1:
return 'missing_edu'
elif x <= 5:
return '<6'
elif x >= 13:
return '>12'
else:
return x
def age_cut(x):
if x >= 70:
return '>=70'
else:
return x
def group_race(x):
if x == "White":
return 1.0
else:
return 0.0
# Cluster education and age attributes.
# Limit education range
df['Education Years'] = df['education-num'].apply(
lambda x: group_edu(x))
df['Education Years'] = df['Education Years'].astype('category')
# Limit age range
df['Age (decade)'] = df['Age (decade)'].apply(lambda x: age_cut(x))
# Rename income variable
df['Income Binary'] = df['income-per-year']
# Recode sex and race
df['sex'] = df['sex'].replace({'Female': 0.0, 'Male': 1.0})
df['race'] = df['race'].apply(lambda x: group_race(x))
# Here we define a column called mis_prob to assign the probability of each observation
# being missed
df['mis_prob'] = 0
for index, row in df.iterrows():
# Here, the probability of missing values in Education Years depends on sex and
# Income Binary, so in this case the missing values are under MAR because the missingness
# does not depend on the feature Education Years
# To change the distribution of missing values, we can change the probability here
if row['sex']==0 and row['Income Binary'] =='>50K':
df.loc[index,'mis_prob'] = 0.4
elif row['sex']==0:
df.loc[index,'mis_prob'] = 0.1
else:
df.loc[index,'mis_prob'] = 0.05
new_label = []
for index, row in df.iterrows():
if np.random.binomial(1, float(row['mis_prob']), 1)[0] == 1:
new_label.append('missing_edu')
else:
new_label.append(row['Education Years'])
df['Education Years'] = new_label
print('Total number of missing values')
print(len(df.loc[df['Education Years'] == 'missing_edu', :].index))
print('Total number of observations')
print(len(df.index))
return df
XD_features = ['Age (decade)', 'Education Years', 'sex']
D_features = [
'sex'] if protected_attributes is None else protected_attributes
Y_features = ['Income Binary']
X_features = list(set(XD_features) - set(D_features))
categorical_features = ['Age (decade)', 'Education Years']
# privileged classes
all_privileged_classes = {"sex": [1.0]}
# protected attribute maps
all_protected_attribute_maps = {"sex": {1.0: 'Male', 0.0: 'Female'}}
return AdultDataset(
label_name=Y_features[0],
favorable_classes=['>50K', '>50K.'],
protected_attribute_names=D_features,
privileged_classes=[all_privileged_classes[x] for x in D_features],
instance_weights_name=None,
categorical_features=categorical_features,
features_to_keep=X_features + Y_features + D_features,
na_values=['?'],
metadata={'label_maps': [{1.0: '>50K', 0.0: '<=50K'}],
'protected_attribute_maps': [all_protected_attribute_maps[x]
for x in D_features]},
custom_preprocessing=custom_preprocessing)
```
As above, we load the data and run the fairness-fixing algorithm proposed by Calmon et al. \[1\].
```
privileged_groups = [{'sex': 1}]
unprivileged_groups = [{'sex': 0}]
dataset_orig = load_preproc_data_adult(['sex'])
optim_options = {
"distortion_fun": get_distortion_adult,
"epsilon": 0.03,
"clist": [0.99, 1.99, 2.99],
"dlist": [.1, 0.05, 0]
}
dataset_orig_train, dataset_orig_vt = dataset_orig.split(
[0.7], shuffle=True)
OP = OptimPreproc(OptTools, optim_options,
unprivileged_groups=unprivileged_groups,
privileged_groups=privileged_groups)
OP = OP.fit(dataset_orig_train)
dataset_transf_cat_test = OP.transform(dataset_orig_vt, transform_Y=True)
dataset_transf_cat_test = dataset_orig_vt.align_datasets(
dataset_transf_cat_test)
dataset_transf_cat_train = OP.transform(
dataset_orig_train, transform_Y=True)
dataset_transf_cat_train = dataset_orig_train.align_datasets(
dataset_transf_cat_train)
```
As in the MNAR case, we first train a logistic regression classifier without reweighting, then train another logistic regression classifier with reweighting, and validate both on the same test set.
```
scale_transf = StandardScaler()
X_train = scale_transf.fit_transform(dataset_transf_cat_train.features)
y_train = dataset_transf_cat_train.labels.ravel()
X_test = scale_transf.fit_transform(dataset_transf_cat_test.features)
lmod = LogisticRegression()
lmod.fit(X_train, y_train)
y_pred = lmod.predict(X_test)
print('Without reweight')
get_evaluation(dataset_orig_vt,y_pred,privileged_groups,unprivileged_groups,0,1,1)
dataset_orig_train.instance_weights = reweight_df(dataset_orig_train)
scale_transf = StandardScaler()
X_train = scale_transf.fit_transform(dataset_transf_cat_train.features)
y_train = dataset_transf_cat_train.labels.ravel()
X_test = scale_transf.fit_transform(dataset_transf_cat_test.features)
lmod = LogisticRegression()
lmod.fit(X_train, y_train, sample_weight=dataset_orig_train.instance_weights)
y_pred = lmod.predict(X_test)
print('With reweight')
get_evaluation(dataset_orig_vt,y_pred,privileged_groups,unprivileged_groups,0,1,1)
```
As with the MNAR results, our reweighting algorithm improves the fairness scores with a small tradeoff in accuracy. <br>
# Reference
[1] Optimized Pre-Processing for Discrimination Prevention <br>
Flavio Calmon, Dennis Wei, Bhanukiran Vinzamuri, Karthikeyan Natesan Ramamurthy and Kush R. Varshney.
31st Conference on Neural Information Processing Systems (NIPS 2017), Long Beach, CA, December 2017.
# Homework 6-1: "Fundamentals" based election prediction
In this homework you will explore an alternate election prediction model, using various economic and political indicators instead of polling data -- and also deal with the challenges of model building when there is very little training data. Political scientists have long analyzed these types of "fundamentals" models, and they can be reasonably accurate. For example, fundamentals [slightly favored](https://fivethirtyeight.com/features/it-wasnt-clintons-election-to-lose/) the Republicans in 2016.
Data sources which I used to generate `election-fundamentals.csv`:
- Historical presidential approval ratings (highest and lowest for each president) from [Wikipedia](https://en.wikipedia.org/wiki/United_States_presidential_approval_rating)
- GDP growth in election year from [World Bank](https://data.worldbank.org/indicator/NY.GDP.MKTP.KD.ZG?locations=US)
Note that there are some timing issues here which more careful forecasts would avoid. The presidential approval rating is for the entire presidential term. The GDP growth is for the entire election year. These variables might have higher predictive power if they were (for example) sampled in the last quarters before the election.
For a comprehensive view of election prediction from non-poll data, and how well it might or might not be able to do, try [this](https://fivethirtyeight.com/features/models-based-on-fundamentals-have-failed-at-predicting-presidential-elections/) from Fivethirtyeight.
```
import pandas as pd
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score
# First, import data/election-fundamentals.csv and take a look at what we have
# How many elections do we have data for?
# Rather than predicting the winning party, we're going to predict whether the same party stays in power or flips
# This is going to be the target variable
fund['flips'] = fund.winner != fund.incumbent_party
# Pull out all other numeric columns as features. Create features and target numpy arrays
fields =
features =
target =
# Use 3-fold cross validation to see how well we can do with a RandomForestClassifier.
# Print out the scores
```
How predictable are election results just from these variables, as compared to a coin flip?
(your answer here)
```
# Now create a logistic regression using all the data
# Normally we'd split into test and training years, but here we're only interested in the coefficients
# What is the influence of each feature?
# Remember to use np.exp to turn the lr coefficients into odds ratios
```
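For reference, turning scikit-learn logistic-regression coefficients into odds ratios generally looks like the sketch below; the data and feature names are synthetic, purely for illustration.
```
from sklearn.linear_model import LogisticRegression
import numpy as np

# Tiny synthetic example: 2 features, binary target
rng = np.random.RandomState(0)
X_demo = rng.normal(size=(50, 2))
y_demo = (X_demo[:, 0] + rng.normal(scale=0.5, size=50) > 0).astype(int)

lr = LogisticRegression().fit(X_demo, y_demo)
odds_ratios = np.exp(lr.coef_[0])   # one odds ratio per feature
print(dict(zip(['feature_1', 'feature_2'], odds_ratios)))
```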
Describe the effect of each one of our features on whether or not the party in power flips. What feature has the biggest effect? How does economic growth relate? Are there any factors that operate backwards from what you would expect, and if so what do you think is happening?
(your answer here)
# Project 7 - Income prediction
```
%reset
```
## Module imports
```
import pandas as pd
import numpy as np
import seaborn as sns
from statsmodels.compat import lzip
```
## Seaborn settings
```
sns.set_context('talk')
sns.set_style('darkgrid')
```
## Reading the DF
```
df = pd.read_csv('data-projet7.csv')
df.head()
# GDP = Gross Domestic Product
# PPP = Purchase power parity
df.info()
df.year_survey.unique()
df['income'] = df['income'].str.replace(',','.')
df['income'] = df['income'].astype('float')
df['gdpppp'] = df['gdpppp'].str.replace(',','.')
df['gdpppp'] = df['gdpppp'].astype('float')
df.describe()
```
## Mission 1
```
# annรฉes des donnรฉes
df.year_survey.unique()
# nombre de pays prรฉsents
len(df.country.unique())
# type de quantile = centile
df.nb_quantiles.unique()
```
In your opinion, is sampling a population using quantiles a good method? Why?
Yes, for several reasons:
- it partitions individuals by income, so a specific part of the population can be isolated or removed
- it makes a given quantile comparable across countries
Limitations:
- A split into centiles is not precise enough, because the 100th class mixes the rich and the ultra-rich
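For intuition, assigning individuals to income centiles can be sketched with `pandas.qcut` (toy simulated data, not the project's dataset):
```
import numpy as np
import pandas as pd

# Toy example: 1,000 simulated incomes split into 100 quantile classes (centiles)
incomes = pd.Series(np.random.lognormal(mean=8, sigma=1, size=1000))
centile = pd.qcut(incomes, q=100, labels=range(1, 101))
print(centile.value_counts().head())   # each class holds ~1% of the population
```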
```
import csv
```
### Reading a DF with the populations
```
pop = pd.read_csv('pop_databank/pop.csv', engine= 'python', quoting=3)
pop.info()
pop.head()
```
#### Cleaning pop
```
pop = pop.reset_index()
pop = pop [['Country Name', 'Country Code', '2006 [YR2006]', '2007 [YR2007]', '2008 [YR2008]']]
pop.columns = ['country_name', 'country', 'pop_2006', 'pop_2007', 'pop_2008']
pop = pop.replace('"',"")
pop['pop_2008'] = pop['pop_2008'].str.replace('"', '')
pop['country_name'] = pop['country_name'].str.replace('"', '')
pop[pop['country'] == 'COD']
# Ajout d'un quantile manquant
ltu41 = (df[(df['country'] == 'LTU')&(df['quantile'] == 40)]['income'].values + df[(df['country'] == 'LTU')&(df['quantile'] == 42)]['income'].values)/2
ltu41 = ltu41[0]
ltu41serie = pd.Series({'country' : 'LTU', 'year_survey' : 2008, 'quantile': 41, 'nb_quantiles' : 100, 'income' : ltu41, 'gdpppp': 17571.0})
df = df.append(ltu41serie, ignore_index=True)
```
### Merging pop and df: df1
```
df = df.sort_values(['country', 'quantile'])
df1 = pd.merge(df, pop, on='country', how='left')
len(df1.country.unique())
df1 = df1.set_index('country')
```
#### Cleaning df1
```
df1[(df1.index == 'LTU')&(df1['quantile'] == 40)]['income']
df1[(df1.index == 'LTU')&(df1['quantile'] == 41)]
# Ajout d'informations manquantes
df1.loc['TWN', 'pop_2006'] = 22823848
df1.loc['TWN', 'pop_2007'] = 22927215
df1.loc['TWN', 'pop_2008'] = 23019045
# source : https://www.worldometers.info/world-population/taiwan-population/
df1[df1['gdpppp'].isna()]
# Ajout d'informations manquantes
df1.loc['XKX', 'gdpppp'] = 7249.5 #source : https://databank.worldbank.org/ indicateur : GDP, PPP (constant 2017 international $)
df1.loc['PSE', 'gdpppp'] = 3155 #source : https://knoema.com/atlas/Palestine/GDP-per-capita-based-on-PPP
```
The populations of Taiwan and the gdpppp of XKX and PSE were missing; this information has been added.
```
# pivot du df
df2 = pd.pivot_table(df1, index=['country','pop_2006','pop_2007', 'pop_2008', 'gdpppp'], columns=['quantile'], values=['income'])
df2 = df2.reset_index()
df2[df2['country'] == 'FJI']
# Modification du type de donnรฉes
pop['pop_2008'] = pop['pop_2008'].astype('int64')
df2['pop_2008'] = df2['pop_2008'].astype('int64')
```
## Population calculation
```
df2['pop_2008'].sum() / pop['pop_2008'].sum()
pop['pop_2008'].sum()
# Pays les plus peuplรฉs
df2.sort_values('pop_2008', ascending= False)
df2.sort_values('gdpppp', ascending= False).head(5)
#Erreur dans le gdp ppp de FJI
# Correction du gdp ppp de FJI
df2.loc[(df2.country == 'FJI'),'gdpppp'] = 7777 #https://databank.worldbank.org/ indicateur : GDP, PPP (constant 2017 international $)
df2.columns = ["_".join(str(v)) for v in df2.columns.values]
# Nettoyage
list1 = list(range(1,101))
list2 = ['country', 'pop_2006', 'pop_2007', 'pop_2008', 'gdpppp']
list1 = list2 + list1
df2.columns = list1
```
# Mission 2
```
import matplotlib.pyplot as plt
plt.style.use('ggplot')
# Prรฉparation des donnรฉes
df3 = df2
df3 = df3.drop('pop_2006', axis=1)
df3 = df3.drop('pop_2007', axis=1)
df3 = df3.drop('pop_2008', axis=1)
df3 = df3.drop('gdpppp', axis=1)
df3.head()
```
## Computing the Gini coefficient for all countries
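The loop below implements the index-based form of the Gini coefficient on incomes sorted in ascending order:
$$
G = \frac{\sum_{i=1}^{n} (2i - n - 1)\, x_i}{n \sum_{i=1}^{n} x_i}
$$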
```
df4 = df3.set_index('country')
X = df4
X = X.to_numpy()
X = np.sort(X) # tri des valeurs
list_gini= []
for c in range(0, len(X)):
n = 100
i = np.arange(1, n + 1) # Index commenรงant ร 1
gini = (np.sum((2 * i - n - 1) * X[c])) / (n * np.sum(X[c]))
list_gini.append(gini)
df4['gini'] = list_gini
# Top 5 coef gini
df4.sort_values('gini').head(5)
# Bottom 5 coef gini
df4.sort_values('gini').tail(5)
# Coef gini de la France
df4.sort_values('gini').reset_index()[df4.sort_values('gini').reset_index()['country'] == 'FRA']
# Sรฉlection des pays
list_code_country = ['SVN', 'HUN', 'FRA', 'USA', 'MEX', 'ZAF']
df4[df4.index.isin(list_code_country)]['gini']
selected_countries = df4[df4.index.isin(list_code_country)]
selected_countries = selected_countries.sort_values('gini')
# modification du style de seaborn
sns.set('talk')
def lorenz(X):
# On divise la somme cumulรฉe par la somme
# afin avoir un rรฉsultat entre 0 et 1
scaled_prefix_sum = X.cumsum() / X.sum()
# On met en place la premiรจre valeur ร 0
return np.insert(scaled_prefix_sum, 0, 0)
fig, ax = plt.subplots()
fig.set_size_inches(10,10)
ax.plot([0,1], [0,1], c='black')
ax.axvline(0.5, linestyle='--', alpha=0.5)
for c in range(0, len(list_code_country)):
X = selected_countries.iloc[c,:]
X = X.to_numpy()
X = np.sort(X) # tri des valeurs
lorenz_curve = lorenz(X)
ax.plot(np.linspace(0.0, 1.0, lorenz_curve.size), lorenz_curve, linewidth=2, label='{}'.format(list_code_country[c]))
# on affiche une ligne de 0,0 ร 1,1
ax.set_xlabel('''population''')
ax.set_ylabel('''Revenus''')
ax.set_title('courbe de Lorenz de {}'.format(list_code_country[c]))
ax.legend()
fig.savefig('Lorenz')
plt.show()
fig, ax = plt.subplots()
fig.set_size_inches(15,10)
for c in range(0, len(list_code_country)):
ax.plot(selected_countries.iloc[:,:-1].columns, selected_countries.iloc[c,:-1],linewidth=3, label='{}'.format(list_code_country[c]))
ax.set_yscale('log')
ax.set_label('test')
ax.legend()
ax.set_title('Rรฉpartition des revenus')
fig.savefig('repart_revenu')
plt.show()
```
## Evolution of the Gini coefficient over time
```
ev_gini = pd.read_csv('ev_gini.csv')
ev_gini = ev_gini.loc[0:5]
ev_gini= ev_gini.replace(to_replace='..', value=np.nan)
ev_gini.iloc[:,-6:] = ev_gini.iloc[:,-6:].astype("float")
ev_gini.columns = ['Series Name', 'Series Code', 'Country Name', 'Country Code',
'2006', '2007', '2008', '2009',
'2010', '2011']
ev_gini = ev_gini.melt(id_vars='Country Name', value_vars=['2006', '2007', '2008', '2009', '2010', '2011'], var_name='annรฉe', value_name='gini')
ev_gini.info()
sns.set('talk')
g = sns.relplot(y='gini', x='annรฉe', data=ev_gini, kind='line', hue='Country Name', marker='o')
g.fig.suptitle('''Evolution de l'indice de Gini''', y=1.03)
plt.savefig('ev_gini')
plt.show()
```
## Mission 3
```
# ajout du revenu moyen
df4['revenu_moyen'] = df4.mean(axis=1)
df4.head()
```
### Importing the elasticity coefficient
```
elas = pd.read_csv('GDIMMay2018.csv')
elas.head()
elas.region.unique()
elas[elas.region == 'High income']
len(elas.countryname.unique())
elas.info()
elas = elas[['countryname', 'wbcode', 'region', 'year', 'IGEincome']]
elas = elas.drop_duplicates().dropna()
elas = elas.set_index('wbcode')
elas = elas[['IGEincome']]
df5 = pd.merge(df4, elas, how='left', left_index=True, right_index=True)
df5.head()
```
### Importing the countries' regions
```
metadata = pd.read_csv('metadata_country.csv')
metadata = metadata.set_index('Country Code')
metadata.head()
```
### Adding the missing elasticity coefficients
#### Creating region lists
```
list_europe = list(metadata[metadata['Region'] == 'Europe & Central Asia'].index)
list_asia = list(metadata[(metadata['Region'] == 'East Asia & Pacific')|(metadata['Region'] == 'South Asia')].index)
list_latam_afr = list(metadata[(metadata['Region'] == 'Latin America & Caribbean')|(metadata['Region'] == 'Middle East & North Africa')|(metadata['Region'] == 'Sub-Saharan Africa')].index)
metadata[metadata.index.isin(list_europe)]
central_asia = ['AZE', 'KAZ', 'KGZ', 'TJK', 'TKM', 'UZB']
```
#### Merging df5 and metadata (adding the regions)
```
df6 = pd.merge(df5, metadata[['Region']] , right_index=True, left_index=True, how='left')
df6[df6.Region.isna()]
# rรฉgion manquante pour TWN
df6.loc['TWN', 'Region'] = 'East Asia & Pacific'
```
#### Splitting the regions (as in the elasticity.txt file)
```
nordic_european_countries_and_canada = ['SWE', 'FIN', 'ISL', 'NOR', 'DNK', 'CAN']
europe = [item for item in list_europe if item not in nordic_european_countries_and_canada]
europe = [item for item in europe if item not in central_asia]
aus_nz_usa = ['AUS', 'USA', 'NZL']
asia = list_asia + central_asia
latin_america_africa = list_latam_afr
```
#### Assigning elasticity coefficients by region (as in the elasticity.txt file)
```
df6['IGE2'] = 0
df6.loc[df6.index.isin(nordic_european_countries_and_canada),'IGE2'] = 0.2
df6.loc[df6.index.isin(europe),'IGE2'] = 0.4
df6.loc[df6.index.isin(aus_nz_usa),'IGE2'] = 0.4
df6.loc[df6.index.isin(asia),'IGE2'] = 0.5
df6.loc[df6.index.isin(latin_america_africa),'IGE2'] = 0.66
# Quand il n'y pas de coef. d'elasticitรฉ, dรฉfinir celui de sa rรฉgion
df6.loc[df6['IGEincome'].isnull(),'IGEincome'] = df6.loc[df6['IGEincome'].isnull(),'IGE2']
# Vรฉrification
df6[df6['IGEincome'].isnull()]
# Arrondi des coefs (estimation)
df6['IGEincome'] = np.round(df6['IGEincome'],1)
```
### Defining the functions
```
import scipy.stats as st
import numpy as np
from collections import Counter
def generate_incomes(n, pj):
# On gรฉnรจre les revenus des parents (exprimรฉs en logs) selon une loi normale.
# La moyenne et variance n'ont aucune incidence sur le rรฉsultat final (ie. sur le caclul de la classe de revenu)
ln_y_parent = st.norm(0,1).rvs(size=n)
# Gรฉnรฉration d'une rรฉalisation du terme d'erreur epsilon
residues = st.norm(0,1).rvs(size=n)
return np.exp(pj*ln_y_parent + residues), np.exp(ln_y_parent)
def quantiles(l, nb_quantiles):
size = len(l)
l_sorted = l.copy()
l_sorted = l_sorted.sort_values()
quantiles = np.round(np.arange(1, nb_quantiles+1, nb_quantiles/size) -0.5 +1./size)
q_dict = {a:int(b) for a,b in zip(l_sorted,quantiles)}
return pd.Series([q_dict[e] for e in l])
def compute_quantiles(y_child, y_parents, nb_quantiles):
y_child = pd.Series(y_child)
y_parents = pd.Series(y_parents)
c_i_child = quantiles(y_child, nb_quantiles)
c_i_parent = quantiles(y_parents, nb_quantiles)
sample = pd.concat([y_child, y_parents, c_i_child, c_i_parent], axis=1)
sample.columns = ["y_child", "y_parents", "c_i_child","c_i_parent"]
return sample
def distribution(counts, nb_quantiles):
distrib = []
total = counts["counts"].sum()
if total == 0 :
return [0] * nb_quantiles
for q_p in range(1, nb_quantiles+1):
subset = counts[counts.c_i_parent == q_p]
if len(subset):
nb = subset["counts"].values[0]
distrib += [nb / total]
else:
distrib += [0]
return distrib
def conditional_distributions(sample, nb_quantiles):
counts = sample.groupby(["c_i_child","c_i_parent"]).agg({"y_child":"count"}).unstack(fill_value=0).stack()
counts = counts.reset_index()
counts.columns = ["c_i_child","c_i_parent","counts"]
list_proba = [] # crรฉation d'une liste
for(_, c_i_child, c_i_parent, counts) in counts.itertuples() :
temp = [c_i_parent] * counts # le quantile de la classe parent est transformรฉ en liste
# je multiplie cette liste pour le nombre "counts"
# j'obtiens donc une liste de 10000 entrรฉes avec les quantiles parents triรฉs
list_proba.extend(temp)
return list_proba
def plot_conditional_distributions(p, cd, nb_quantiles):
plt.figure()
# La ligne suivante sert ร afficher un graphique en "stack bars", sur ce modรจle : https://matplotlib.org/gallery/lines_bars_and_markers/bar_stacked.html
cumul = np.array([0] * nb_quantiles)
for i, child_quantile in enumerate(cd):
plt.bar(np.arange(nb_quantiles)+1, child_quantile, bottom=cumul, width=0.95, label = str(i+1) +"e")
cumul = cumul + np.array(child_quantile)
plt.axis([.5, nb_quantiles*1.3 ,0 ,1])
plt.title("p=" + str(p))
plt.legend()
plt.xlabel("quantile parents")
plt.ylabel("probabilitรฉ du quantile enfant")
plt.show()
def proba_cond(c_i_parent, c_i_child, mat):
return mat[c_i_child, c_i_parent]
def conditional_distributions_0(sample, nb_quantiles):
counts = sample.groupby(["c_i_child","c_i_parent"]).apply(len)
counts = counts.reset_index()
counts.columns = ["c_i_child","c_i_parent","counts"]
mat = []
for child_quantile in np.arange(nb_quantiles)+1:
subset = counts[counts.c_i_child == child_quantile]
mat += [distribution(subset, nb_quantiles)]
return np.array(mat)
```
## Test: maximum pj
```
df6.sort_values('IGEincome', ascending=True)
pj = df6['IGEincome'].max() # coefficient d'รฉlasticitรฉ du pays j
nb_quantiles = 10 # nombre de quantiles (nombre de classes de revenu)
n = 1000*nb_quantiles # taille de l'รฉchantillon
y_child, y_parents = generate_incomes(n, pj)
y_child, y_parents
sample = compute_quantiles(y_child, y_parents, nb_quantiles)
sample
cd = conditional_distributions_0(sample, nb_quantiles)
sns.set('notebook')
plot_conditional_distributions(pj, cd, nb_quantiles) # Cette instruction prendra du temps si nb_quantiles > 10
```
## Test: minimum pj
```
pj = df6['IGEincome'].min() # coefficient d'รฉlasticitรฉ du pays j
nb_quantiles = 10 # nombre de quantiles (nombre de classes de revenu)
n = 1000*nb_quantiles # taille de l'รฉchantillon
y_child, y_parents = generate_incomes(n, pj)
y_child, y_parents
sample = compute_quantiles(y_child, y_parents, nb_quantiles)
cd = conditional_distributions_0(sample, nb_quantiles)
plot_conditional_distributions(pj, cd, nb_quantiles) # Cette instruction prendra du temps si nb_quantiles > 10
df6 = pd.merge(df6, df2[['country', 'gdpppp']], on='country', how='left')
df6.head()
# Multiplication de chaque ligne du df par 1000
df7 = pd.concat([df6]*1000)
list_columns = list(df7.columns[1:101])
df7 = df7.reset_index()
# Ajout des variables comme le DF original
df8 = pd.melt(df7, id_vars=[ 'country', 'gini', 'IGEincome', 'revenu_moyen', 'gdpppp'], value_vars=list_columns)
df8.head()
df9 = df8[['country', 'IGEincome']].drop_duplicates()
df9.head()
```
## Question 9: Applying the conditional probabilities
```
list_pj = round(df9['IGEincome'],3) #liste des pj (triรฉ par ordre alphabรฉtique)
df8.columns = ['country', 'gini', 'elasticity', 'revenu_moyen', 'gdpppp', 'quantiles_enf', 'revenu_classe'] # changement de noms des colonnes
df8 = df8.sort_values(['country', 'quantiles_enf']) # Triage
df8.head()
# Vรฉrification si de l'ordre des pays entre le df8 et le df9
df9['country'].unique() == df8['country'].unique()
# Paramรจtre
nb_quantiles = 100
n = nb_quantiles * 1000
pj_list = np.round(df9['IGEincome'],3)
```
## For loop building the conditional probabilities of the parent classes for each country
```
%%time
final_list = []
for pj in pj_list:
y_child, y_parents = generate_incomes(n, pj)
sample = compute_quantiles(y_child, y_parents, nb_quantiles)
cd = conditional_distributions(sample, nb_quantiles)
final_list.extend(cd)
# Opรฉration assez longue (environ 35 secondes sur mon ordinateur)
# Ajout des classes parents
df8['classe_parents'] = final_list
df8.head()
# Exemple
df8[(df8['quantiles_enf'] == 70)&(df8['classe_parents'] == 5)].sort_values('elasticity').head()
```
# ANOVA
```
import statsmodels.api as sm
from scipy import stats as sp
df11 = df8[['country', 'gini', 'elasticity', 'revenu_moyen', 'gdpppp']].drop_duplicates()
df11.head()
df11.sort_values('gini', ascending=False)
import statsmodels.formula.api as smf
df12 = pd.melt(df6, id_vars=['country', 'gini', 'IGEincome', 'revenu_moyen', 'gdpppp'], value_vars=list_columns )
df12.head()
reg_anova = smf.ols('value ~ C(country)', data= df12).fit()
reg_anova.summary()
# Rยฒ= 0.496
# F-stat -> 98.43
# Prob(F-stat) -> 0
# --> regression significative ร un niveau de 5%
sum(reg_anova.pvalues < 0.05) / sum(reg_anova.pvalues < 1)
```
64.66% of the countries have a p-value below our alpha threshold of 5%. Since our goal is a model that works for most countries, this result is not very satisfactory.
# Checking the assumptions
## 1) The residuals of the studied populations follow a normal distribution
```
sns.set('talk')
sm.qqplot(reg_anova.resid, dist='norm',line='s')
plt.savefig('qqplot')
plt.show()
name = ['Jarque-Bera', 'Chi^2 two-tail prob.', 'Skew', 'Kurtosis']
test = sm.stats.stattools.jarque_bera(reg_anova.resid)
lzip(name, test)
plt.hist(reg_anova.resid, bins=20)
plt.title('Histogramme de la variable : revenu_moyen' )
plt.savefig('hist_anova')
plt.show()
```
The distribution shown in this histogram is close to a normal distribution.
```
import scipy.stats as stats
```
## 2) The population variances are all equal (homoscedasticity)
```
list = []
list.append(df.country)
df12['resid'] = reg_anova.resid
list_homo = []
for country in df12.country.unique():
x = df12.resid[df12.country == country ]
list_homo.append(x)
stats.bartlett(*list_homo)
stats.levene(*list_homo)
```
For the Bartlett and Levene tests, the null hypothesis is that the variances are equal. Here we can reject the null hypothesis, so the assumption of equal variances is not satisfied.
## 3) The samples are drawn randomly and independently from the populations.
Yes, the data are representative of the population.
```
fra_inc = df12[(df12['country'] == 'FRA')]
fra_inc
plt.hist(fra_inc['value'])
plt.xlabel('revenus')
plt.title('Histogramme des revenus en France')
plt.savefig('hist_rev_1')
```
## ANOVA, log version
```
df12['value_log'] = np.log(df12['value'])
fra_inc = df12[(df12['country'] == 'FRA')]
plt.hist(fra_inc['value_log'])
plt.xlabel('revenus')
plt.title('Histogramme des revenus en log. en France')
plt.savefig('hist_rev_2')
reg_anova_log = smf.ols('value_log ~ C(country)', data= df12).fit()
reg_anova_log.summary()
sum(reg_anova_log.pvalues < 0.05) / sum(reg_anova_log.pvalues < 1)
reg_anova_log.pvalues > 0.05
p_value_anova_log = pd.DataFrame(reg_anova_log.pvalues)
p_value_anova_log_sup_alpha = p_value_anova_log[p_value_anova_log[0] > 0.05]
p_value_anova_log_sup_alpha = p_value_anova_log_sup_alpha.reset_index()
p_value_anova_log_sup_alpha = p_value_anova_log_sup_alpha['index'].str.slice(start=-4, stop=-1)
p_value_anova_log_sup_alpha = p_value_anova_log_sup_alpha.values
df11[df11['country'].isin(p_value_anova_log_sup_alpha)]
```
## Checking the assumptions
### 1) Normality of the residuals
```
sm.qqplot(reg_anova_log.resid, dist='norm',line='s')
plt.savefig('qqplot')
plt.title('qqplot des rรฉsidus')
plt.show()
name = ['Jarque-Bera', 'Chi^2 two-tail prob.', 'Skew', 'Kurtosis']
test = sm.stats.stattools.jarque_bera(reg_anova_log.resid)
lzip(name, test)
plt.hist(reg_anova_log.resid, bins=20)
plt.title('Histogramme de la distribution des rรฉsidus' )
plt.xlabel('rรฉsidus')
plt.savefig('hist_anova')
plt.show()
df12['resid_log'] = reg_anova_log.resid
list_homo_log = []
for country in df12.country.unique():
x = df12.resid_log[df12.country == country ]
list_homo_log.append(x)
```
### 2) Homoscedasticity
```
stats.bartlett(*list_homo_log)
stats.levene(*list_homo_log)
```
### 3) The samples are drawn randomly and independently from the populations.
We keep the same assumption as for the first regression.
# Regressions
```
import scipy.stats as stats
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
```
## Regression: class income vs. (gini, mean income)
```
df8.head()
X = df8[['gini', 'revenu_moyen']].values
Y = df8['revenu_classe'].values
X_train1, X_test1, Y_train1, Y_test1 = train_test_split( X, Y, test_size=0.2, random_state=0)
temp1 = pd.DataFrame({'x1' : X_train1[:,0], 'x2' : X_train1[:,1], 'y' : Y_train1 })
reg1 = smf.ols(formula='y ~ x1 + x2', data= temp1).fit()
reg1.summary()
```
The p-value for x1 (the Gini coefficient) is not significant.
## Regression: mean income (log)
```
df8['revenu_moyen_log'] = np.log(df8['revenu_moyen'])
# Quel puissance en base e donne ce nombre
df8['revenu_classe_log'] = np.log(df8['revenu_classe'])
df8
df8b = df8.reset_index()
# revenu_classe en fonction gini, revenu_moyen_log
X = df8b[['gini', 'revenu_moyen_log', 'index']].values
Y = df8b['revenu_classe'].values
X_train2, X_test2, Y_train2, Y_test2 = train_test_split( X, Y, test_size=0.2, random_state=0)
temp2 = pd.DataFrame({'index':X_train2[:,2], 'x1' : X_train2[:,0], 'x2' : X_train2[:,1], 'y' : Y_train2 })
reg2 = smf.ols(formula='y ~ x1 + x2', data= temp2).fit()
reg2.summary()
# revenu_classe_log en fonction gini, revenu_moyen_log
X = df8b[['gini', 'revenu_moyen_log', 'index']].values
Y = df8b['revenu_classe_log'].values
X_train2, X_test2, Y_train2, Y_test2 = train_test_split( X, Y, test_size=0.2, random_state=0)
temp2 = pd.DataFrame({'index':X_train2[:,2],'x1' : X_train2[:,0], 'x2' : X_train2[:,1], 'y' : Y_train2 })
reg2 = smf.ols(formula='y ~ x1 + x2', data= temp2).fit()
reg2.summary()
```
## Regression: mean income + parent income class
```
# revenu_classe en fonction gini, revenu_moyen, classe_parents
X = df8[['classe_parents','revenu_moyen', 'gini']].values
Y = df8['revenu_classe'].values
X_train3, X_test3, Y_train3, Y_test3 = train_test_split( X, Y, test_size=0.2, random_state=0)
temp3 = pd.DataFrame({'x1' : X_train3[:,0], 'x2' : X_train3[:,1], 'x3' : X_train3[:,2], 'y' : Y_train3 })
reg3 = smf.ols(formula='y ~ x1 + x2 + x3', data= temp3).fit()
reg3.summary()
```
The p-value for x3 (the Gini coefficient) is not significant.
## Regression: mean income + parent income class (log)
```
# revenu_classe_log en fonction gini, revenu_moyen, classe_parents
X = df8[['classe_parents','revenu_moyen', 'gini']].values
Y = df8['revenu_classe_log'].values
X_train4, X_test4, Y_train4, Y_test4 = train_test_split( X, Y, test_size=0.2, random_state=0)
temp4 = pd.DataFrame({'x1' : X_train4[:,0], 'x2' : X_train4[:,1], 'x3' : X_train4[:,2], 'y' : Y_train4 })
reg4 = smf.ols(formula='y ~ x1 + x2 + x3', data= temp4).fit()
reg4.summary()
# revenu_classe_log en fonction gini, revenu_moyen_log, classe_parents
X = df8b[['classe_parents','revenu_moyen_log', 'gini','index']].values
Y = df8b['revenu_classe_log'].values
X_train4, X_test4, Y_train4, Y_test4 = train_test_split( X, Y, test_size=0.2, random_state=0)
temp4 = pd.DataFrame({'index':X_train2[:,2],'x1' : X_train4[:,0], 'x2' : X_train4[:,1], 'x3' : X_train4[:,2], 'y' : Y_train4 })
reg4 = smf.ols(formula='y ~ x1 + x2 + x3', data= temp4).fit()
reg4.summary()
```
# Analysis of the relevant models
I select the two models that involve the log transformation of incomes:
- reg2
- reg4
```
alpha = 0.05
n1 = temp2.shape[0]
n2 = temp4.shape[0]
p1 = 3
p2 = 4
analyses1 = pd.DataFrame({'obs':np.arange(0, n1)})
#analyses['obs'].astype('float', inplace=True)
analyses2 = pd.DataFrame({'obs':np.arange(0, n2)})
#analyses['obs'].astype('float', inplace=True)
```
## Analysis of regression 2: revenu_classe_log ~ gini + revenu_moyen_log
### Computing the leverages
```
analyses1['levier'] = reg2.get_influence().hat_matrix_diag
seuil_levier1 = 2*p1/n1
from scipy.stats import t, shapiro
from statsmodels.stats.outliers_influence import variance_inflation_factor
```
### Computing the studentized residuals
```
analyses1['rstudent'] = reg2.get_influence().resid_studentized_internal
seuil_rstudent1 = t.ppf(1-alpha/2,n1-p1-1)
```
### Computing Cook's distance
```
influence = reg2.get_influence().cooks_distance[0]
analyses1['dcooks'] = influence
seuil_dcook1 = 4/(n1-p1)
```
### Checking the collinearity of the variables
```
variables = reg2.model.exog
[variance_inflation_factor(variables, i) for i in np.arange(1,variables.shape[1])]
```
All coefficients are below 10, so the variables are not collinear.
### Homoscedasticity test
```
_, pval, __, f_pval = sm.stats.diagnostic.het_breuschpagan(reg2.resid, variables)
print('p value test Breusch Pagan:', pval)
```
We therefore reject the null hypothesis that the variances are equal; the homoscedasticity test is not conclusive.
### Residual normality test
```
name = ['Jarque-Bera', 'Chi^2 two-tail prob.', 'Skew', 'Kurtosis']
test = sm.stats.stattools.jarque_bera(reg2.resid)
lzip(name,test)
```
## Regression 2 (revenu_classe_log ~ gini + revenu_moyen_log) without the influential and atypical observations
```
obs_a_retirer1 = analyses1[(analyses1.levier > seuil_levier1) & (analyses1.rstudent > seuil_rstudent1) & (analyses1.dcooks > seuil_dcook1)]
len(obs_a_retirer1)
len(obs_a_retirer1)/ len(temp2)
temp2_v2 = temp2[~temp2.index.isin(obs_a_retirer1.obs)]
temp2_v3 = temp2[temp2.index.isin(obs_a_retirer1.obs)]
reg2_v2 = smf.ols(formula='y ~ x1 + x2', data= temp2_v2).fit()
reg2_v2.summary()
obs_ret1 = df8b[df8b['index'].isin(temp2_v3['index'])]
obs_ret1.sort_values('quantiles_enf')
sns.set('talk')
plt.hist(obs_ret1.quantiles_enf)
plt.title('Quantile des classes des individus des observations retirées')
plt.savefig('hist__obs_ret_classe_1')
plt.show()
sns.set('talk')
plt.hist(obs_ret1.classe_parents)
plt.title('Quantile des classes parents des observations retirées')
plt.show()
obs_ret1.country.value_counts()
labels = obs_ret1.country.value_counts().index
fig, ax = plt.subplots()
ax.pie(obs_ret1.country.value_counts(), labels=labels, autopct='%.2f')
ax.set_title('''Pays d'origine des observations retirées''')
fig.set_size_inches(8,8)
plt.savefig('pie_obs_ret_pays_1')
plt.show()
variables = reg2_v2.model.exog
[variance_inflation_factor(variables, i) for i in np.arange(1,variables.shape[1])]
_, pval, __, f_pval = sm.stats.diagnostic.het_breuschpagan(reg2_v2.resid, variables)
print('p value test Breusch Pagan:', pval)
name = ['Jarque-Bera', 'Chi^2 two-tail prob.', 'Skew', 'Kurtosis']
test = sm.stats.stattools.jarque_bera(reg2_v2.resid)
lzip(name,test)
```
## Analysis of regression no. 4: revenu_classe_log ~ gini + revenu_moyen_log + classe_parents
```
analyses2['levier'] = reg4.get_influence().hat_matrix_diag
seuil_levier2 = 2*p2/n2
analyses2['rstudent'] = reg4.get_influence().resid_studentized_internal
seuil_rstudent2 = t.ppf(1-alpha/2,n2-p2-1)
influence = reg4.get_influence().cooks_distance[0]
analyses2['dcooks'] = influence
seuil_dcook2 = 4/(n2-p2)
```
### Checking the variables for collinearity
```
variables = reg4.model.exog
[variance_inflation_factor(variables, i) for i in np.arange(1,variables.shape[1])]
```
All the VIF coefficients are below 10, so the variables are not collinear.
### Testing for homoscedasticity
```
_, pval, __, f_pval = sm.stats.diagnostic.het_breuschpagan(reg4.resid, variables)
print('p value test Breusch Pagan:', pval)
```
### Testing the normality of the residuals
```
name = ['Jarque-Bera', 'Chi^2 two-tail prob.', 'Skew', 'Kurtosis']
test = sm.stats.stattools.jarque_bera(reg4.resid)
lzip(name,test)
```
## Regression no. 4 revenu_classe_log ~ gini + revenu_moyen_log + classe_parents without the influential and atypical observations
```
obs_a_retirer2 = analyses2[(analyses2.levier > seuil_levier2) & (analyses2.rstudent > seuil_rstudent2) & (analyses2.dcooks > seuil_dcook2)]
temp4_v2 = temp4[~temp4.index.isin(obs_a_retirer2.obs)]
temp4_v3 = temp4[temp4.index.isin(obs_a_retirer2.obs)]
reg4 = smf.ols(formula='y ~ x1 + x2 + x3', data= temp4_v2).fit()
reg4.summary()
len(obs_a_retirer2)/ len(temp4)
obs_ret2 = df8b[df8b['index'].isin(temp4_v3['index'])]
plt.hist(obs_ret2.quantiles_enf)
plt.title('Quantile des classes des individus des observations retirées')
plt.savefig('hist__obs_ret_classe_2')
plt.show()
plt.hist(obs_ret2.classe_parents)
plt.title('Quantile des classes parents des observations retirées')
plt.show()
labels = obs_ret2.country.value_counts().index
fig, ax = plt.subplots()
ax.pie(obs_ret2.country.value_counts(), labels=labels, autopct='%.2f')
ax.set_title('''Pays d'origine des observations retirées''')
fig.set_size_inches(8,8)
plt.savefig('pie_obs_ret_pays_2')
plt.show()
```
# LABXX: What-if Tool: Model Interpretability Using Mortgage Data
**Learning Objectives**
1. Create a What-if Tool visualization
2. What-if Tool exploration using the XGBoost Model
## Introduction
This notebook shows how to use the [What-if Tool (WIT)](https://pair-code.github.io/what-if-tool/) on a deployed [Cloud AI Platform](https://cloud.google.com/ai-platform/) model. The What-If Tool provides an easy-to-use interface for expanding understanding of black-box classification and regression ML models. With the plugin, you can perform inference on a large set of examples and immediately visualize the results in a variety of ways. Additionally, examples can be edited manually or programmatically and re-run through the model in order to see the results of the changes. It contains tooling for investigating model performance and fairness over subsets of a dataset. The purpose of the tool is to give people a simple, intuitive, and powerful way to explore and investigate trained ML models through a visual interface with absolutely no code required.
[Extreme Gradient Boosting (XGBoost)](https://xgboost.ai/) is a decision-tree-based ensemble Machine Learning algorithm that uses a gradient boosting framework. In prediction problems involving unstructured data (images, text, etc.) artificial neural networks tend to outperform all other algorithms or frameworks. However, when it comes to small-to-medium structured/tabular data, decision tree based algorithms are considered best-in-class right now. Please see the chart below for the evolution of tree-based algorithms over the years.
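As a purely illustrative aside, this is roughly what training a small XGBoost classifier on made-up tabular data looks like (a toy sketch for illustration only; it assumes the `xgboost` package is available and is unrelated to the mortgage model already deployed for this lab, which you do not train yourself):
```
import numpy as np
import xgboost as xgb  # assumption: xgboost installed locally

# Toy binary-classification data: 100 rows, 4 numeric features
rng = np.random.default_rng(0)
X = rng.random((100, 4))
y = (X[:, 0] + X[:, 1] > 1.0).astype(int)

clf = xgb.XGBClassifier(n_estimators=50, max_depth=3)
clf.fit(X, y)
print(clf.predict_proba(X[:3]))  # one score per class, per row
```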
*You don't need your own cloud project* to run this notebook.
** UPDATE LINK BEFORE PRODUCTION **: Each learning objective will correspond to a __#TODO__ in the [student lab notebook](https://github.com/GoogleCloudPlatform/training-data-analyst/blob/gwendolyn-dev/courses/machine_learning/deepdive2/ml_on_gc/what_if_mortgage.ipynb) -- try to complete that notebook first before reviewing this solution notebook.
## Set up environment variables and load necessary libraries
We will start by importing the necessary libraries for this lab.
```
import sys
python_version = sys.version_info[0]
print("Python Version: ", python_version)
!pip3 install witwidget
import numpy as np
import pandas as pd
import witwidget
from witwidget.notebook.visualization import WitConfigBuilder, WitWidget
```
## Loading the mortgage test dataset
The model we'll be exploring here is a binary classification model built with XGBoost and trained on a [mortgage dataset](https://www.ffiec.gov/hmda/hmdaflat.htm). It predicts whether or not a mortgage application will be approved. In this section we'll:
* Download some test data from Cloud Storage and load it into a numpy array + Pandas DataFrame
* Preview the features for our model in Pandas
```
# Download our Pandas dataframe and our test features and labels
!gsutil cp gs://mortgage_dataset_files/data.pkl .
!gsutil cp gs://mortgage_dataset_files/x_test.npy .
!gsutil cp gs://mortgage_dataset_files/y_test.npy .
```
## Preview the Features
Preview the features from our model as a pandas DataFrame
```
features = pd.read_pickle("data.pkl")
features.head()
features.info()
```
## Load the test features and labels into numpy arrays
Developing machine learning models in Python often requires the use of NumPy arrays. Recall that NumPy, which stands for Numerical Python, is a library consisting of multidimensional array objects and a collection of routines for processing those arrays. NumPy arrays are efficient data structures for working with data in Python, and machine learning models like those in the scikit-learn library, as well as deep learning models like those in the Keras library, expect input data as NumPy arrays and return predictions as NumPy arrays. As such, it is common to need to save NumPy arrays to file. Note that the `features.info()` output above reveals the following dtypes: float64(8), int16(1), int8(1), uint8(34) -- and no strings or "objects". So, let's now load the features and labels into numpy arrays.
```
x_test = np.load("x_test.npy")
y_test = np.load("y_test.npy")
```
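As a small aside on that save/load workflow, here is a self-contained round trip with a made-up array (the file name and values are hypothetical, only for illustration):
```
import numpy as np

demo = np.arange(6, dtype=np.float64).reshape(3, 2)  # stand-in feature matrix
np.save("demo_features.npy", demo)                   # write NumPy's native .npy format
restored = np.load("demo_features.npy")              # read it back
assert (demo == restored).all()
print(restored.shape, restored.dtype)
```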
Let's take a look at the contents of the 'x_test.npy' file. You can see the "array" structure.
```
print(x_test)
```
## Combine the features and labels into one array for the What-if Tool
Note that the numpy.hstack() function is used to stack the sequence of input arrays horizontally (i.e. column-wise) to make a single array. In the following cell, the label array is reshaped with .reshape((-1, 1)) into a single-column matrix so that it can be stacked next to the feature columns.
```
test_examples = np.hstack((x_test, y_test.reshape(-1, 1)))
```
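To see what this does to the array shapes, here is a tiny stand-alone illustration with made-up data (the sizes are arbitrary):
```
import numpy as np

features_demo = np.zeros((4, 3))           # 4 examples, 3 features
labels_demo = np.array([0, 1, 1, 0])       # 4 ground-truth labels, shape (4,)
combined = np.hstack((features_demo, labels_demo.reshape(-1, 1)))
print(combined.shape)                      # (4, 4): the features plus one label column
```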
## Using the What-if Tool to interpret our model
With our test examples ready, we can now connect our model to the What-if Tool using the `WitWidget`. To use the What-if Tool with Cloud AI Platform, we need to send it:
* A Python list of our test features + ground truth labels
* Optionally, the names of our columns
* Our Cloud project, model, and version name (we've created a public one for you to play around with)
See the next cell for some exploration ideas in the What-if Tool.
## Create a What-if Tool visualization
This prediction adjustment function is needed as this xgboost model's prediction returns just a score for the positive class of the binary classification, whereas the What-If Tool expects a list of scores for each class (in this case, both the negative class and the positive class).
**NOTE:** The WIT may take a minute to load. While it is loading, review the parameters that are defined in the next cell, but do NOT run it -- it is simply for reference.
```
# ******** DO NOT RUN THIS CELL ********
# TODO 1
PROJECT_ID = "YOUR_PROJECT_ID"
MODEL_NAME = "YOUR_MODEL_NAME"
VERSION_NAME = "YOUR_VERSION_NAME"
TARGET_FEATURE = "mortgage_status"
LABEL_VOCAB = ["denied", "approved"]
# TODO 1a
config_builder = (
WitConfigBuilder(
test_examples.tolist(), features.columns.tolist() + ["mortgage_status"]
)
.set_ai_platform_model(
PROJECT_ID,
MODEL_NAME,
VERSION_NAME,
adjust_prediction=adjust_prediction,
)
.set_target_feature(TARGET_FEATURE)
.set_label_vocab(LABEL_VOCAB)
)
```
Run this cell to load the WIT config builder. **NOTE:** The WIT may take a minute to load.
```
# TODO 1b
def adjust_prediction(pred):
return [1 - pred, pred]
config_builder = (
WitConfigBuilder(
test_examples.tolist(), features.columns.tolist() + ["mortgage_status"]
)
.set_ai_platform_model(
"wit-caip-demos",
"xgb_mortgage",
"v1",
adjust_prediction=adjust_prediction,
)
.set_target_feature("mortgage_status")
.set_label_vocab(["denied", "approved"])
)
WitWidget(config_builder, height=800)
```
## What-if Tool exploration using the XGBoost Model
#### TODO 2
* **Individual data points**: The default graph shows all data points from the test set, colored by their ground truth label (approved or denied)
* Try selecting data points close to the middle and tweaking some of their feature values. Then run inference again to see if the model prediction changes
* Select a data point and then move the "Show nearest counterfactual datapoint" slider to the right. This will highlight a data point with feature values closest to your original one, but with a different prediction
#### TODO 2a
* **Binning data**: Create separate graphs for individual features
* From the "Binning - X axis" dropdown, try selecting one of the agency codes, for example "Department of Housing and Urban Development (HUD)". This will create 2 separate graphs, one for loan applications from the HUD (graph labeled 1), and one for all other agencies (graph labeled 0). This shows us that loans from this agency are more likely to be denied
#### TODO 2b
* **Exploring overall performance**: Click on the "Performance & Fairness" tab to view overall performance statistics on the model's results on the provided dataset, including confusion matrices, PR curves, and ROC curves.
* Experiment with the threshold slider, raising and lowering the positive classification score the model needs to return before it decides to predict "approved" for the loan, and see how it changes accuracy, false positives, and false negatives.
* On the left side "Slice by" menu, select "loan_purpose_Home purchase". You'll now see performance on the two subsets of your data: the "0" slice shows when the loan is not for a home purchase, and the "1" slice is for when the loan is for a home purchase. Notice that the model's false positive rate is much higher on loans for home purchases. If you expand the rows to look at the confusion matrices, you can see that the model predicts "approved" more often for home purchase loans.
* You can use the optimization buttons on the left side to have the tool auto-select different positive classification thresholds for each slice in order to achieve different goals. If you select the "Demographic parity" button, then the two thresholds will be adjusted so that the model predicts "approved" for a similar percentage of applicants in both slices. What does this do to the accuracy, false positives and false negatives for each slice?
Copyright 2020 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
# Project 8: Deploy a model in the cloud
Develop, in a Big Data environment (AWS), a first data processing chain:
preprocessing followed by a dimensionality reduction step (PCA).
```
from pyspark.sql import SparkSession
from pyspark.ml.image import ImageSchema
import sagemaker_pyspark
from pyspark import SparkConf
from pyspark.sql import SparkSession
import numpy as np
import pandas as pd
!pip install tensorflow
```
# Image processing
```
spark
print (pd.__version__)
from pyspark.sql.types import ArrayType,IntegerType,FloatType
from pyspark.sql.functions import regexp_replace
from pyspark.sql.functions import udf
from pyspark.sql.functions import split
import numpy as np
#https://stackoverflow.com/questions/60568744/pyspark-spark-dataframe-convert-imageschema-column-to-a-ndarray-as-a-new-colu
data_location = "s3a://ocprojet08/**"
imagesdf = spark.read.format("image").load(data_location, inferschema=True)
imagesdf = imagesdf.withColumn("FileName", regexp_replace('image.origin', 'dbfs:/mnt/images/', ''))
split_col =split(imagesdf['FileName'], '/')
imagesdf = imagesdf.withColumn('label', split_col.getItem(3))
imagesdf.show(20)
df_im=imagesdf.select('image.origin',"image.height","image.width","image.nChannels", "image.mode", "image.data",'label')
df_im.show(5)
import tensorflow as tf
from tensorflow.keras.applications.resnet50 import ResNet50, preprocess_input
from tensorflow.keras.preprocessing.image import img_to_array
model = ResNet50(
include_top=False,
weights=None,
pooling='max',
input_shape=(100, 100, 3))
model.summary()
```
## Feature extraction
```
from keras.preprocessing import image
from PIL import Image
#https://stackoverflow.com/questions/36205481/read-file-content-from-s3-bucket-with-boto3
#projet6 openclassrooms
import boto3
# Get resources stored in AWS S3 service
s3 = boto3.resource('s3')
bucket = s3.Bucket('ocprojet08')
resnet_features=[]
for bucket_key in bucket.objects.limit(1000):
path=bucket_key.key
obj = bucket.Object(path)
image_x = obj.get()['Body']
img = Image.open(image_x).resize ((100, 100))
# convert image to array
x = image.img_to_array(img).reshape((-1,100,100,3))
x=np.array(x)
# preprocess input
x = preprocess_input(x)
resnet_feature = model.predict(x).ravel().tolist()
# add path, labels and features
resnet_features.append(resnet_feature)
#features_np = np.array(features)
#https://stackoverflow.com/questions/48164206/pyspark-adding-a-column-from-a-list-of-values-using-a-udf
from pyspark.sql.functions import monotonically_increasing_id, row_number
from pyspark.sql import Window
b = spark.createDataFrame([(l,) for l in resnet_features], ['features'])
df_im = df_im.withColumn("row_idx", row_number().over(Window.orderBy(monotonically_increasing_id())))
b = b.withColumn("row_idx", row_number().over(Window.orderBy(monotonically_increasing_id())))
images_df = df_im.join(b, df_im.row_idx == b.row_idx).\
drop("row_idx")
images_df.show()
images_df.groupby('label').count().show()
```
# Dimensionality reduction: PCA (Spark MLlib API)
```
#https://mail-archives.apache.org/mod_mbox/spark-user/201609.mbox/%3CCALD+6GMFE2NuSXHsNxySMSJ1j4Za=DwzY9fGR5GPHdk-3O6Kbw@mail.gmail.com%3E
from pyspark.sql.functions import udf
from pyspark.ml.linalg import Vectors, VectorUDT
# convert array to dense vector
to_vector = udf(lambda x: Vectors.dense(x), VectorUDT())
sparkDF = images_df.select('origin', 'label','features', to_vector("features").alias("features_vec"))
sparkDF.show(2)
from pyspark.ml.feature import PCA
#https://calvinfeng.gitbook.io/machine-learning-notebook/sagemaker/population_segmentation
pcaSparkEstimator = PCA(inputCol="features_vec", outputCol="pca_Features", k=100)
pca = pcaSparkEstimator.fit(sparkDF)
pca_matrix=pca.transform(sparkDF)
pca_matrix.show(5)
pca_matrix.select( 'label','pca_Features').show(5)
import seaborn as sns
import matplotlib.pyplot as plt
var = pca.explainedVariance.cumsum()
sns.lineplot(x=[i for i in range(100+1)],
y=np.insert(var, 0, 0)*100)
plt.title('Somme cumulée de variance des composantes du PCA')
plt.xlabel('K composantes')
plt.ylabel('Variance cumulée (%)')
plt.ylim(0, 100)
plt.xlim(left=0)
plt.axhline(85, lw=1, c='red')
plt.text(1, 85, '85%', c='red')
plt.show()
```
# Saving the results to an S3 bucket
```
# Save Spark DataFrame to S3
#https://stackoverflow.com/questions/38154040/save-dataframe-to-csv-directly-to-s3-python
#https://sagemaker-examples.readthedocs.io/en/latest/introduction_to_amazon_algorithms/pca_mnist/pca_mnist.html
from io import StringIO # python3; python2: BytesIO
bucket = 'ocprojet08r' # already created on S3
csv_buffer = StringIO()
pca_matrix.toPandas().to_csv(csv_buffer)
s3_resource = boto3.resource('s3')
s3_resource.Object(bucket, 'pca_matrix.csv').put(Body=csv_buffer.getvalue())
!pip install pandas
```
```
# default_exp review
```
# Review
> API details.
```
#hide
from nbdev.showdoc import *
%load_ext autoreload
%autoreload 2
#export
from literature_review.article import make_articles
from IPython.display import display, HTML
import ipywidgets as widgets
from ipywidgets import interact, interactive, fixed, interact_manual, Layout
import re
from nltk.tokenize.punkt import PunktSentenceTokenizer, PunktParameters
import json
import time
class Article_Displayer():
def __init__(self, articles, checks = [], current = 0, search = None, only_unreviewed = True):
self.current = 0 # current article to display
def is_reviewed(a):
if 'reviewed' in a.annotations.keys():
if a.annotations['reviewed']:
return True
return False
if only_unreviewed:
articles = [a for a in articles if not is_reviewed(a)]
self.articles = articles
self.article = self.articles[current]
self.checks = ['reviewed'] + checks
self.search = search
self.create_ui()
self.update_ui()
def next_article(self, b):
self.change_article(1)
def last_article(self, b):
self.change_article(-1)
def change_article(self, increment):
self.abstract.value = ""
self.search_results.value = ""
with self.output:
self.persist_data()
self.current += increment
self.article = self.articles[self.current]
self.update_ui()
def store_input(self):
for check in self.check_boxes:
self.article.annotations[check.description] = check.value
self.article.annotations['note'] = self.note.value
def persist_data(self):
self.store_input()
with open(self.article.path, 'w') as fp:
json.dump(dict(self.article), fp)
def update_ui(self):
with self.output:
self.title.value = "<h3>%s</h3><i>%s<i>."%(self.articles[self.current].title,self.articles[self.current].path)
self.abstract.value = "<h3>Abstract:</h3>"+self.articles[self.current].abstract
self.count_display.value = "%d of %d articles reviewed in this session."%(self.current, len(self.articles))
for checkbox in self.check_boxes:
if checkbox.description in self.article.annotations.keys():
checkbox.value = self.article.annotations[checkbox.description]
else:
checkbox.value = False
if checkbox.description == 'reviewed':
checkbox.value = True
if "note" in self.article.annotations.keys():
self.note.value = self.article.annotations["note"]
else:
self.note.value = ""
if self.search:
self.search_results.value = "<h3>Search results:</h3>"+self.extract_sentences(self.search)
def extract_sentences(self, search_term):
punkt_param = PunktParameters()
punkt_param.abbrev_types = set(['al'])
sentence_splitter = PunktSentenceTokenizer(punkt_param)
result = ""
for section in self.article.sections:
matches = re.findall(search_term, section['text'])
if len(matches):
result += "<h4>%s</h4>"%section['heading']
matches = list(set(matches))
sentences = sentence_splitter.tokenize(section['text'])
matched_sentences = []
for i, s in enumerate(sentences):
for match in matches:
if match in s:
s = s.replace(match, "<b>%s</b>"%match)
matched_sentences.append(sentences[i-1]+' '+s)
for ms in matched_sentences:
result += "<p>%s</p>"%ms
return result
def create_ui(self):
a = self.article
self.output = widgets.Output()
self.next_button = widgets.Button(description="Next")
self.last_button = widgets.Button(description="Last")
self.next_button.on_click(self.next_article)
self.last_button.on_click(self.last_article)
self.button_ui = widgets.HBox([self.last_button, self.next_button])
self.check_boxes = []
for check in self.checks:
self.check_boxes.append(widgets.Checkbox(description=check, value= False))
self.note = widgets.Textarea(
value='',
placeholder='Add notes here',
description='String:',
disabled=False
)
self.count_display = widgets.HTML(value="count")
# Making the text UI
self.title = widgets.HTML(value="Title")
self.abstract = widgets.HTML(value="Abstract")
self.search_results = widgets.HTML(value="")
self.text_ui = widgets.VBox([self.title, self.abstract, self.search_results], layout=Layout(width='60%'))
# Making the action ui
self.action_ui = widgets.VBox([self.button_ui, self.count_display] + self.check_boxes + [self.note, self.output], layout=Layout(width='40%'))
self.main_ui = widgets.HBox([self.text_ui,self.action_ui])
display(self.main_ui)
articles = make_articles('../data/interim/article_dicts/')
import pandas as pd
selection = []
for a in articles:
for section in a.sections:
if "manikin" in section['text']:
article = a.path.split('/')[-1]
is_intro = 'introduction' in section['heading'].lower()
is_dis = 'discussion' in section['heading'].lower()
if not (is_intro or is_dis):
selection.append(a)
selection = list(set(selection))
#articles = [a for a in articles if (a.annotations['split-half']==True)]
ad = Article_Displayer(selection,
checks = ['AAT','AAT_uncertain','split-half','retest','reliability_uncertain','animal','drugs','food','general','manikin'],
#search = r'\brelia\w+',
search = 'manikin',
only_unreviewed = False)
# Todo: add manikin, joystick etc. to categorize AATs
```
|
github_jupyter
|
# default_exp review
#hide
from nbdev.showdoc import *
%load_ext autoreload
%autoreload 2
#export
from literature_review.article import make_articles
from IPython.display import display, HTML
import ipywidgets as widgets
from ipywidgets import interact, interactive, fixed, interact_manual, Layout
import re
from nltk.tokenize.punkt import PunktSentenceTokenizer, PunktParameters
import json
import time
class Article_Displayer():
def __init__(self, articles, checks = [], current = 0, search = None, only_unreviewed = True):
self.current = 0 # current article to display
def is_reviewed(a):
if 'reviewed' in a.annotations.keys():
if a.annotations['reviewed']:
return True
return False
if only_unreviewed:
articles = [a for a in articles if not is_reviewed(a)]
self.articles = articles
self.article = self.articles[current]
self.checks = ['reviewed'] + checks
self.search = search
self.create_ui()
self.update_ui()
def next_article(self, b):
self.change_article(1)
def last_article(self, b):
self.change_article(-1)
def change_article(self, increment):
self.abstract.value = ""
self.search_results.value = ""
with self.output:
self.persist_data()
self.current += increment
self.article = self.articles[self.current]
self.update_ui()
def store_input(self):
for check in self.check_boxes:
self.article.annotations[check.description] = check.value
self.article.annotations['note'] = self.note.value
def persist_data(self):
self.store_input()
with open(self.article.path, 'w') as fp:
json.dump(dict(self.article), fp)
def update_ui(self):
with self.output:
self.title.value = "<h3>%s</h3><i>%s<i>."%(self.articles[self.current].title,self.articles[self.current].path)
self.abstract.value = "<h3>Abstract:</h3>"+self.articles[self.current].abstract
self.count_display.value = "%d of %d articles reviewed in this session."%(self.current, len(self.articles))
for checkbox in self.check_boxes:
if checkbox.description in self.article.annotations.keys():
checkbox.value = self.article.annotations[checkbox.description]
else:
checkbox.value = False
if checkbox.description == 'reviewed':
checkbox.value = True
if "note" in self.article.annotations.keys():
self.note.value = self.article.annotations["note"]
else:
self.note.value = ""
if self.search:
self.search_results.value = "<h3>Search results:</h3>"+self.extract_sentences(self.search)
def extract_sentences(self, search_term):
punkt_param = PunktParameters()
punkt_param.abbrev_types = set(['al'])
sentence_splitter = PunktSentenceTokenizer(punkt_param)
result = ""
for section in self.article.sections:
matches = re.findall(search_term, section['text'])
if len(matches):
result += "<h4>%s</h4>"%section['heading']
matches = list(set(matches))
sentences = sentence_splitter.tokenize(section['text'])
matched_sentences = []
for i, s in enumerate(sentences):
for match in matches:
if match in s:
s = s.replace(match, "<b>%s</b>"%match)
matched_sentences.append(sentences[i-1]+' '+s)
for ms in matched_sentences:
result += "<p>%s</p>"%ms
return result
def create_ui(self):
a = self.article
self.output = widgets.Output()
self.next_button = widgets.Button(description="Next")
self.last_button = widgets.Button(description="Last")
self.next_button.on_click(self.next_article)
self.last_button.on_click(self.last_article)
self.button_ui = widgets.HBox([self.last_button, self.next_button])
self.check_boxes = []
for check in self.checks:
self.check_boxes.append(widgets.Checkbox(description=check, value= False))
self.note = widgets.Textarea(
value='',
placeholder='Add notes here',
description='String:',
disabled=False
)
self.count_display = widgets.HTML(value="count")
# Making the text UI
self.title = widgets.HTML(value="Title")
self.abstract = widgets.HTML(value="Abstract")
self.search_results = widgets.HTML(value="")
self.text_ui = widgets.VBox([self.title, self.abstract, self.search_results], layout=Layout(width='60%'))
# Making the action ui
self.action_ui = widgets.VBox([self.button_ui, self.count_display] + self.check_boxes + [self.note, self.output], layout=Layout(width='40%'))
self.main_ui = widgets.HBox([self.text_ui,self.action_ui])
display(self.main_ui)
articles = make_articles('../data/interim/article_dicts/')
import pandas as pd
selection = []
for a in articles:
for section in a.sections:
if "manikin" in section['text']:
article = a.path.split('/')[-1]
is_intro = 'introduction' in section['heading'].lower()
is_dis = 'discussion' in section['heading'].lower()
if not (is_intro or is_dis):
selection.append(a)
selection = list(set(selection))
#articles = [a for a in articles if (a.annotations['split-half']==True)]
ad = Article_Displayer(selection,
checks = ['AAT','AAT_uncertain','split-half','retest','reliability_uncertain','animal','drugs','food','general','manikin'],
#search = r'\brelia\w+',
search = 'manikin',
only_unreviewed = False)
# Todo: add manikin, joystick etc. to categorize AATs
# KEN3140: Lab 4 (Part 1)
### Writing and executing basic SPARQL queries on remote SPARQL endpoints (RDF graphs on the Web)
##### Authors:
+ [Vincent Emonet](https://www.maastrichtuniversity.nl/vincent.emonet): [[email protected]](mailto:[email protected])
+ [Kody Moodley](https://www.maastrichtuniversity.nl/kody.moodley): [[email protected]](mailto:[email protected])
##### Affiliation:
[Institute of Data Science](https://www.maastrichtuniversity.nl/research/institute-data-science)
##### License:
[CC-BY 4.0](https://creativecommons.org/licenses/by/4.0)
##### Date:
2021-09-06
#### In this lab you will learn:
How to compose basic [SPARQL](https://www.w3.org/TR/2013/REC-sparql11-query-20130321/) [SELECT](https://www.w3.org/TR/2013/REC-sparql11-query-20130321/#select) queries to retrieve specific information from an [RDF](https://www.w3.org/TR/rdf11-concepts/) graph, and to answer questions about its content
#### Specific learning goals:
+ How to select the appropriate SPARQL feature(s) or function(s) required to answer the given question or retrieve the result asked for
+ How to represent the retrieval of information from a triplestore using triple patterns and basic graph patterns in SELECT queries
+ How to query existing public SPARQL endpoints using tools such as [YASGUI](https://yasgui.triply.cc)
#### Prerequisite knowledge:
+ [Lecture 4: Introduction to SPARQL](https://canvas.maastrichtuniversity.nl/courses/4700/files/559320?module_item_id=115828)
+ [SPARQL 1.1 language specification](https://www.w3.org/TR/sparql11-query/)
+ Chapters 1 - 3 of [Learning SPARQL](https://maastrichtuniversity.on.worldcat.org/external-search?queryString=SPARQL#/oclc/853679890)
#### Task information:
+ In this lab, we will ask you to query the [DBpedia](https://dbpedia.org/) knowledge graph!
+ [DBpedia](https://dbpedia.org/) is a crowd-sourced community effort to extract structured content in RDF from the information created in various [Wikimedia](https://www.wikimedia.org/) projects (e.g. [Wikipedia](https://www.wikipedia.org/)). DBpedia is similar in information content to [Wikidata](https://www.wikidata.org/wiki/Wikidata:Main_Page).
+ **A word on data quality:** remember that DBpedia is crowd-sourced. This means that volunteers and members of the general public are permitted to add and maintain its content. As a result, you may encounter inaccuracies / omissions in the content and inconsistencies in how the information is represented. Don't be alarmed by this; it is not critical that the content is accurate for the learning objectives of this lab.
+ **Your task** is to formulate and execute SPARQL queries for Tasks 1 - 3, either in this Jupyter notebook (if you have the SPARQL kernel installed in Jupyter) or in [YASGUI](https://yasgui.triply.cc/)
#### Task information (contd):
+ The DBpedia SPARQL endpoint URL is: [https://dbpedia.org/sparql](https://dbpedia.org/sparql)
+ DBpedia has its own SPARQL query interface at [https://dbpedia.org/sparql](https://dbpedia.org/sparql), which is built on OpenLink's [Virtuoso](https://virtuoso.openlinksw.com/) [RDF](https://www.w3.org/TR/rdf11-concepts/) triplestore management system.
+ In this lab, we will use an alternative SPARQL query interface to query DBPedia. It is called **[YASGUI](https://yasgui.triply.cc)**. The reason is that YASGUI has additional user-friendly features e.g. management of multiple SPARQL queries in separate tabs. It also allows one to query any publicly available SPARQL endpoint from the same interface.
+ To install the SPARQL kernel for Jupyter, close Jupyter and execute the following commands in sequence in your terminal before you start Jupyter again:
    + ``pip install sparqlkernel``
    + ``jupyter sparqlkernel install`` **OR** ``jupyter sparqlkernel install --user`` (if the first command gives an error)
#### Tips
+ How do I find vocabulary to use in my SPARQL query from DBpedia?
> Search on Google, e.g. if you want to know the term for "capital city" in DBpedia, search for "**[dbpedia capital](https://www.google.com/search?&q=dbpedia+capital)**". In general, search for "dbpedia [approximate name of the predicate or class you are looking for]".
> Your search query does not have to exactly match the spelling of the DBpedia resource name you are looking for.
> Alternatively, you can formulate SPARQL queries to list properties and types in DBpedia. Do you know what these queries might look like?
+ Use [prefix.cc](http://prefix.cc/) to discover the full IRIs for unknown prefixes you may encounter
# YASGUI interface
<img src="yasgui-interface.png">
<!-- # Install the SPARQL kernel
This notebook uses the SPARQL Kernel to define and **execute SPARQL queries in the notebook** codeblocks.
To **install the SPARQL Kernel** in your JupyterLab installation:
```shell
pip install sparqlkernel --user
jupyter sparqlkernel install --user
```
To start running SPARQL query in this notebook, we need to define the **SPARQL kernel parameters**:
* URL of the SPARQL endpoint to query
* Language of preferred labels
* Log level -->
```
# specify which endpoint we are querying
%endpoint http://dbpedia.org/sparql
# This is optional, it would increase the log level (messages from the jupyter sparql kernel)
%log debug
# Uncomment the next line to return labels in english and avoid duplicates
# %lang en
```
# Anatomy of a SPARQL query
As we saw in Lecture 4, these are the main components of a SPARQL query:
<img src="sparql_query_breakdown.png">
### Task 1 [15min]: Simpler queries
Write SPARQL queries to execute the following tasks.
a) List 10 triples from DBpedia
b) List all the books in DBpedia
c) List the authors of all books in DBpedia
d) Truncate the results of Task 1c) to only 10 results
e) Display the number of authors for books in DBpedia
f) Display the number of UNIQUE authors for books in DBpedia
### Task 2 [15-20min]: Moderately challenging queries
Write SPARQL queries to execute the following tasks.
a) List 10 authors who wrote a book with more than 500 pages
b) List 20 books in DBpedia that have the term grand in their name
* **Hint:** use the [contains(string_to_look_in,string_to_look_for)](https://www.w3.org/TR/sparql11-query/#func-contains) function
c) List 20 book names from DBpedia together with the language of their names
* **Hint:** use the [lang](https://www.w3.org/TR/sparql11-query/#func-lang) function.
d) List the top 5 longest books in DBpedia (with the most pages) in descending order
### Task 3 [20min]: Challenging queries
Write SPARQL queries to execute the following tasks.
a) List 10 book authors from DBpedia and the capital cities of the countries in which they were born
b) Display the number of authors for the book that has the English title "1066 and All That"
c) List all DBpedia books whose English name starts with "she" (case-insensitive)
* **Hint:** use [langMatches](https://www.w3.org/TR/rdf-sparql-query/#func-langMatches), [STRSTARTS](https://www.w3.org/TR/sparql11-query/#func-strstarts) and [lcase](https://www.w3.org/TR/sparql11-query/#func-lcase) functions.
d) List all the unique book categories for all short books (less than 300 pages) written by authors who were born in Amsterdam
* **Hint:** use the [dct:subject](http://udfr.org/docs/onto/dct_subject.html) property of a [dbo:Book](https://dbpedia.org/ontology/Book) to define "category" in this task.
e) Sort the results in Task 3d) by the number of pages - longest to shortest
# Examples of other public SPARQL endpoints
* Wikidata, facts powering Wikipedia infobox: https://query.wikidata.org/sparql
* Bio2RDF, linked data for the life sciences: https://bio2rdf.org/sparql
* Disgenet, gene-disease association: http://rdf.disgenet.org/sparql
* PathwayCommons, resource for biological pathways analysis: http://rdf.pathwaycommons.org/sparql
* EU publications office, court decisions and legislative documents from the EU: http://publications.europa.eu/webapi/rdf/sparql
* Finland legal open data, cases and legislation: https://data.finlex.fi/en/sparql
* EU Knowledge Graph, open knowledge graph containing general information about the European Union: [SPARQL endpoint](https://query.linkedopendata.eu/#SELECT%20DISTINCT%20%3Fo1%20WHERE%20%7B%0A%20%20%3Chttps%3A%2F%2Flinkedopendata.eu%2Fentity%2FQ1%3E%20%3Chttps%3A%2F%2Flinkedopendata.eu%2Fprop%2Fdirect%2FP62%3E%20%3Fo1%20.%20%0A%7D%20%0ALIMIT%201000)
# SPARQL applied to the COVID pandemic:
* Wikidata SPARQL queries around the SARS-CoV-2 virus and pandemic: https://egonw.github.io/SARS-CoV-2-Queries
## KNN
The nearest neighbors algorithm has variations defined by the number of neighbors considered. The simplest of these variations is the 1-nearest neighbor algorithm (1-NN).
In this algorithm, each object represents a point in a space defined by its attributes, called the input space. By defining a metric on this space, it is possible to compute the distance between any two points. The most common metric for this is the Euclidean distance, given by the equation:

It is a very simple algorithm: in the training phase it just memorizes the labeled examples of the training set. To classify an unlabeled example, i.e. one whose class is unknown, the distance between its attribute vector and each labeled example in memory is computed. The class label of the training example closest to the test example is used to classify the new example.
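As a quick illustration of this procedure (before we use scikit-learn below), here is a minimal hand-made 1-NN sketch with NumPy; the tiny training set is made up just for the example.
```
import numpy as np

# Made-up training data: 4 examples with 2 attributes each, and their class labels
X_train = np.array([[1.0, 1.0], [1.5, 2.0], [8.0, 8.0], [9.0, 8.5]])
y_train = np.array([0, 0, 1, 1])

def predict_1nn(x_new):
    # Euclidean distance from the new example to every stored training example
    distances = np.linalg.norm(X_train - x_new, axis=1)
    # The label of the closest training example classifies the new example
    return y_train[np.argmin(distances)]

print(predict_1nn(np.array([2.0, 1.5])))  # -> 0
print(predict_1nn(np.array([8.5, 9.0])))  # -> 1
```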
Despite its simplicity, the decision surfaces drawn by the 1-NN algorithm are quite complex. They are convex polyhedra centered on each object of the training set; every point inside a polyhedron belongs to the class of the training object that defines the center of that polyhedron. The set of these polyhedra is called a <b>Voronoi diagram</b>.

The KNN algorithm is a straightforward extension of 1-NN.

#### Advantages and Disadvantages
KNN is simple to implement and has no explicit training step, but classifying new examples can be slow for large datasets (all distances must be computed), and the method is sensitive to the scale of the attributes and to irrelevant features.
```
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_iris
from sklearn.metrics import accuracy_score
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
dataset_iris = load_iris()
df = pd.DataFrame(data=dataset_iris.data, columns=dataset_iris.feature_names)
df.head(5)
print(dataset_iris.data.shape, dataset_iris.target.shape)
X_train, X_test, Y_train, Y_test = train_test_split(dataset_iris.data,
dataset_iris.target,
test_size= 0.33)
print(X_train.shape, X_test.shape, Y_train.shape, Y_test.shape)
model = KNeighborsClassifier(n_neighbors=7)
model.fit(X_train, Y_train)
predicts = model.predict(X_test)
print('Acuracia com dados de treino: ', model.score(X_train, Y_train))
print('Acuracia com dados de teste: ', accuracy_score(Y_test, predicts))
print(predicts)
print(Y_test)
```
# PyDMD
## Tutorial 1: Dynamic Mode Decomposition on a toy dataset
In this tutorial we will show the typical use case, applying the dynamic mode decomposition on the snapshots collected during the evolution of a generic system. We present a very simple system since the main purpose of this tutorial is to show the capabilities of the algorithm and the package interface.
First of all we import the DMD class from the pydmd package, we set matplotlib for the notebook and we import numpy.
```
%matplotlib inline
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
import numpy as np
from pydmd import DMD
```
We create the input data by summing two different functions:<br>
$f_1(x,t) = \text{sech}(x+3)\exp(i2.3t)$<br>
$f_2(x,t) = 2\text{sech}(x)\tanh(x)\exp(i2.8t)$.<br>
```
def f1(x,t):
return 1./np.cosh(x+3)*np.exp(2.3j*t)
def f2(x,t):
return 2./np.cosh(x)*np.tanh(x)*np.exp(2.8j*t)
x = np.linspace(-5, 5, 65)
t = np.linspace(0, 4*np.pi, 129)
xgrid, tgrid = np.meshgrid(x, t)
X1 = f1(xgrid, tgrid)
X2 = f2(xgrid, tgrid)
X = X1 + X2
```
The plots below represent these functions and the dataset.
```
titles = ['$f_1(x,t)$', '$f_2(x,t)$', '$f$']
data = [X1, X2, X]
fig = plt.figure(figsize=(17, 6))
for n, title, d in zip(range(131, 134), titles, data):
plt.subplot(n)
plt.pcolor(xgrid, tgrid, d.real)
plt.title(title)
plt.colorbar()
plt.show()
```
Now we have the temporal snapshots in the input matrix rows: we can easily create a new DMD instance and exploit it in order to compute the decomposition on the data. Since the snapshots must be arranged by columns, in this case we need to transpose the matrix.
```
dmd = DMD(svd_rank=2)
dmd.fit(X.T)
```
The `dmd` object contains the principal information about the decomposition:
- the attribute `modes` is a 2D numpy array where the columns are the low-rank structures individuated;
- the attribute `dynamics` is a 2D numpy array where the rows refer to the time evolution of each mode;
- the attribute `eigs` refers to the eigenvalues of the low dimensional operator;
- the attribute `reconstructed_data` refers to the approximated system evolution.
Moreover, some helpful methods for the graphical representation are provided.
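Before plotting, it can be useful to inspect these attributes directly. A quick check using only the attributes listed above could look like this (the exact shapes depend on `svd_rank` and the input data):
```
print(dmd.modes.shape)               # one column per retained mode
print(dmd.dynamics.shape)            # one row per mode, one column per snapshot
print(dmd.eigs)                      # eigenvalues of the low-rank operator
print(dmd.reconstructed_data.shape)  # approximated system evolution
```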
Thanks to the eigenvalues, we can check if the modes are stable or not: if an eigenvalue is on the unit circle, the corresponding mode will be stable; while if an eigenvalue is inside or outside the unit circle, the mode will converge or diverge, respectively. From the following plot, we can note that the two modes are stable.
```
for eig in dmd.eigs:
print('Eigenvalue {}: distance from unit circle {}'.format(eig, np.abs(np.sqrt(eig.imag**2+eig.real**2) - 1)))
dmd.plot_eigs(show_axes=True, show_unit_circle=True)
```
We can plot the modes and the dynamics:
```
for mode in dmd.modes.T:
plt.plot(x, mode.real)
plt.title('Modes')
plt.show()
for dynamic in dmd.dynamics:
plt.plot(t, dynamic.real)
plt.title('Dynamics')
plt.show()
```
Finally, we can reconstruct the original dataset as the product of modes and dynamics. We plot the evolution of each mode to emphasize their similarity with the input functions and we plot the reconstructed data.
```
fig = plt.figure(figsize=(17,6))
for n, mode, dynamic in zip(range(131, 133), dmd.modes.T, dmd.dynamics):
plt.subplot(n)
plt.pcolor(xgrid, tgrid, (mode.reshape(-1, 1).dot(dynamic.reshape(1, -1))).real.T)
plt.subplot(133)
plt.pcolor(xgrid, tgrid, dmd.reconstructed_data.T.real)
plt.colorbar()
plt.show()
```
We can also plot the absolute error between the approximated data and the original one.
```
plt.pcolor(xgrid, tgrid, (X-dmd.reconstructed_data.T).real)
fig = plt.colorbar()
```
The reconstructed system looks almost equal to the original one: the dynamic mode decomposition made possible the identification of the meaningful structures and the complete reconstruction of the system using only the collected snapshots.
# Processing data with Pandas
During the first part of this lesson you learned the basics of pandas data structures (*Series* and *DataFrame*) and got familiar with basic methods loading and exploring data.
Here, we will continue with basic data manipulation and analysis methods such as calculations and selections.
We are now working in a new notebook file and we need to import pandas again.
```
import pandas as pd
```
Let's work with the same input data `'Kumpula-June-2016-w-metadata.txt'` and load it using the `pd.read_csv()` method. Remember, that the first 8 lines contain metadata so we can skip those. This time, let's store the filepath into a separate variable in order to make the code more readable and easier to change afterwards:
```
# Define file path:
fp = 'Kumpula-June-2016-w-metadata.txt'
# Read in the data from the file (starting at row 9):
data = pd.read_csv(fp, skiprows=8)
```
Remember to always check the data after reading it in:
```
data.head()
```
````{admonition} Filepaths
Note that our input file `'Kumpula-June-2016-w-metadata.txt'` is located **in the same folder** as the notebook we are running. Furthermore, the same folder is the working directory for our Python session (you can check this by running the `pwd()` command).
For these two reasons, we are able to pass only the filename to `.read_csv()` function and pandas is able to find the file and read it in. In fact, we are using a **relative filepath** when reading in the file.
The **absolute filepath** to the input data file in the CSC cloud computing environment is `/home/jovyan/work/notebooks/L5/Kumpula-June-2016-w-metadata.txt`, and we could also use this as input when reading in the file. When working with absolute filepaths, it's good practice to pass the file paths as a [raw string](https://docs.python.org/3/reference/lexical_analysis.html#literals) using the prefix `r` in order to avoid problems with escape characters such as `"\n"`.
```
# Define file path as a raw string:
fp = r'/home/jovyan/work/notebooks/L5/Kumpula-June-2016-w-metadata.txt'
# Read in the data from the file (starting at row 9):
data = pd.read_csv(fp, skiprows=8)
```
````
## Basic calculations
One of the most common things to do in pandas is to create new columns based on calculations between different variables (columns).
We can create a new column `DIFF` in our DataFrame by specifying the name of the column and giving it some default value (in this case the decimal number `0.0`).
```
# Define a new column "DIFF"
data['DIFF'] = 0.0
# Check how the dataframe looks like:
data
```
Let's check the datatype of our new column:
```
data['DIFF'].dtypes
```
Okay, so we see that pandas created a new column and automatically recognized that the data type is float, as we passed the value 0.0 to it.
Let's update the column `DIFF` by calculating the difference between `MAX` and `MIN` columns to get an idea how much the temperatures have
been varying during different days:
```
#Calculate max min difference
data['DIFF'] = data['MAX'] - data['MIN']
# Check the result
data.head()
```
The calculations were stored into the ``DIFF`` column as planned.
You can also create new columns on-the-fly at the same time when doing the calculation (the column does not have to exist before). Furthermore, it is possible to use any kind of math
algebra (e.g. subtraction, addition, multiplication, division, exponentiation, etc.) when creating new columns.
We can for example convert the Fahrenheit temperatures in the `TEMP` column into Celsius using the formula that we have seen already many times. Let's do that and store it in a new column called `TEMP_CELSIUS`.
```
# Create a new column and convert temp fahrenheit to celsius:
data['TEMP_CELSIUS'] = (data['TEMP'] - 32) / (9/5)
#Check output
data.head()
```
#### Check your understanding
Calculate the temperatures in Kelvins using the Celsius values **and store the result in a new column** called `TEMP_KELVIN` in our dataframe.
0 Kelvin is -273.15 degrees Celsius, as we learned during [Lesson 4](https://geo-python-site.readthedocs.io/en/latest/notebooks/L4/functions.html#let-s-make-another-function).
```
# Solution
data['TEMP_KELVIN'] = data['TEMP_CELSIUS'] + 273.15
data.head()
```
## Selecting rows and columns
We often want to select only specific rows from a DataFrame for further analysis. There are multiple ways of selecting subsets of a pandas DataFrame. In this section we will go through most useful tricks for selecting specific rows, columns and individual values.
### Selecting several rows
One common way of selecting only specific rows from your DataFrame is done via **index slicing** to extract part of the DataFrame. Slicing in pandas can be done in a similar manner as with normal Python lists, i.e. you specify index range you want to select inside the square brackets: ``dataframe[start_index:stop_index]``.
Let's select the first five rows and assign them to a variable called `selection`:
```
# Select first five rows of dataframe using row index values
selection = data[0:5]
selection
```
```{note}
Here we have selected the first five rows (index 0-4) using the integer index.
```
### Selecting several rows and columns
It is also possible to control which columns are chosen when selecting a subset of rows. In this case we will use [pandas.DataFrame.loc](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.loc.html) which selects data based on axis labels (row labels and column labels).
Let's select temperature values (column `TEMP`) from rows 0-5:
```
# Select temp column values on rows 0-5
selection = data.loc[0:5, 'TEMP']
selection
```
```{note}
In this case, we get six rows of data (index 0-5)! We are now doing the selection based on axis labels instead of the integer index.
```
It is also possible to select multiple columns when using `loc`. Here, we select the `TEMP` and `TEMP_CELSIUS` columns from a set of rows by passing them inside a list (`.loc[start_index:stop_index, list_of_columns]`):
```
# Select columns temp and temp_celsius on rows 0-5
selection = data.loc[0:5, ['TEMP', 'TEMP_CELSIUS']]
selection
```
#### Check your understanding
Find the mean temperatures (in Celsius) for the last seven days of June. Do the selection using the row index values.
```
# Here is the solution
data.loc[23:29, 'TEMP_CELSIUS'].mean()
```
### Selecting a single row
You can also select an individual row from a specific position using `.loc[]` indexing. Here we select all the data values at index 4 (the 5th row):
```
# Select one row using index
row = data.loc[4]
row
```
``.loc[]`` indexing returns the values from that position as a ``pd.Series`` where the indices are actually the column names of those variables. Hence, you can access the value of an individual column by referring to its index using the following formats (both should work):
```
#Print one attribute from the selected row
row['TEMP']
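row.TEMP  # attribute-style access; equivalent to row['TEMP'] when the column name is a valid Python identifier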
```
### Selecting a single value based on row and column
Sometimes it is enough to access a single value in a DataFrame. In this case, we can use [DataFrame.at](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.at.html#pandas-dataframe-at) instead of `DataFrame.loc`.
Let's select the temperature (column `TEMP`) on the first row (index `0`) of our DataFrame.
```
selection.at[0, "TEMP"]
```
### EXTRA: Selections by integer position
```{admonition} .iloc
`.loc` and `.at` are based on the *axis labels* - the names of columns and rows. Axis labels can be also something else than "traditional" index values. For example, datetime is commonly used as the row index.
`.iloc` is another indexing operator which is based on *integer value* indices. Using `.iloc`, it is possible to refer also to the columns based on their index value. For example, `data.iloc[0,0]` would return `20160601` in our example data frame.
See the pandas documentation for more information about [indexing and selecting data](https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#indexing-and-selecting-data).
```
For example, we could select the `TEMP` and `TEMP_CELSIUS` columns from a set of rows based on their integer positions.
```
data.iloc[0:5:,0:2]
```
To access the value on the first row and second column (`TEMP`), the syntax for `iloc` would be:
```
data.iloc[0,1]
```
We can also access individual rows using `iloc`. Let's check out the last row of data:
```
data.iloc[-1]
```
## Filtering and updating data
One really useful feature in pandas is the ability to easily filter and select rows based on a conditional statement.
The following example shows how to select rows when the Celsius temperature has been higher than 15 degrees into variable `warm_temps` (warm temperatures). Pandas checks if the condition is `True` or `False` for each row, and returns those rows where the condition is `True`:
```
# Check the condition
data['TEMP_CELSIUS'] > 15
# Select rows with temp celsius higher than 15 degrees
warm_temps = data.loc[data['TEMP_CELSIUS'] > 15]
warm_temps
```
It is also possible to combine multiple criteria at the same time. Here, we select temperatures above 15 degrees that were recorded on the second half of June in 2016 (i.e. `YEARMODA >= 20160615`).
Combining multiple criteria can be done with the `&` operator (AND) or the `|` operator (OR). Notice, that it is often useful to separate the different clauses inside the parentheses `()`.
```
# Select rows with temp celsius higher than 15 degrees from late June 2016
warm_temps = data.loc[(data['TEMP_CELSIUS'] > 15) & (data['YEARMODA'] >= 20160615)]
warm_temps
```
Now we have a subset of our DataFrame with only the rows where `TEMP_CELSIUS` is above 15 and the dates in the `YEARMODA` column start from the 15th of June.
Notice that the index values (numbers on the left) still show the positions from the original DataFrame. It is possible to **reset** the index using the `reset_index()` function,
which can be useful if you want to slice the data in a similar manner as above. By default, `reset_index()` would create a new column called `index` to keep track of the previous
index. That can be useful in some cases, but not here, so we can omit it by passing the parameter `drop=True`.
```
# Reset index
warm_temps = warm_temps.reset_index(drop=True)
warm_temps
```
As can be seen, the index values now go from 0 to 12.
#### Check your understanding
Find the mean temperatures (in Celsius) for the last seven days of June again. This time you should select the rows based on a condition for the `YEARMODA` column!
```
# Here's the solution
data['TEMP_CELSIUS'].loc[data['YEARMODA'] >= 20160624].mean()
```
```{admonition} Deep copy
In this lesson, we have stored subsets of a DataFrame as a new variable. In some cases, we are still referring to the original data and any modifications made to the new variable might influence the original DataFrame.
If you want to be extra careful to not modify the original DataFrame, then you should take a proper copy of the data before proceeding using the `.copy()` method. You can read more about indexing, selecting data and deep and shallow copies in [pandas documentation](https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html) and in [this excellent blog post](https://medium.com/dunder-data/selecting-subsets-of-data-in-pandas-part-4-c4216f84d388).
```
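As a sketch of the safer pattern described above (purely illustrative; the variable name is made up and not used elsewhere in this lesson), you can chain `.copy()` onto the selection before modifying it:
```
# Take an explicit deep copy of the subset so that later modifications
# cannot propagate back to the original DataFrame
warm_temps_copy = data.loc[data['TEMP_CELSIUS'] > 15].copy()
warm_temps_copy['TEMP_CELSIUS'] = warm_temps_copy['TEMP_CELSIUS'].round(1)
```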
## Dealing with missing data
As you may have noticed by now, we have several missing values for the temperature minimum, maximum, and difference columns (`MIN`, `MAX`, and `DIFF`). These missing values are indicated as `NaN` (not-a-number). Having missing data in your datafile is a really common situation and typically you want to deal with it somehow. Common procedures to deal with `NaN` values are to either **remove** them from
the DataFrame or **fill** them with some value. In pandas both of these options are really easy to do.
Let's first see how we can remove the NoData values (i.e. clean the data) using the [.dropna()](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.dropna.html) function. Inside the function you can pass a list of column(s) in which the `NaN` values should be found using the `subset` parameter.
```
# Drop no data values based on the MIN column
warm_temps_clean = warm_temps.dropna(subset=['MIN'])
warm_temps_clean
```
As you can see by looking at the table above (and the change in index values), we now have a DataFrame without the NoData values.
````{note}
Note that in the example above we stored the cleaned data in a new variable (`warm_temps_clean`) instead of overwriting `warm_temps`. The `.dropna()` function, among other pandas functions, can also be applied "inplace", which means that the function updates the DataFrame object and returns `None`:
```python
warm_temps.dropna(subset=['MIN'], inplace=True)
```
````
Another option is to fill the NoData values with some value using the `fillna()` function. Here we fill the missing values with the value -9999. Note that we are not giving the `subset` parameter this time.
```
# Fill na values
warm_temps.fillna(-9999)
```
As a result we now have a DataFrame where NoData values are filled with the value -9999.
```{warning}
In many cases filling the data with a specific value is dangerous because you end up modifying the actual data, which might affect the results of your analysis. For example, in the case above we would have dramatically changed the temperature difference column because -9999 is not an actual temperature difference! Hence, use caution when filling missing values.
You might have to fill in no data values for the purposes of saving the data to file in a specific format. For example, some GIS software doesn't accept missing values. Always pay attention to potential no data values when reading in data files and doing further analysis!
```
## Data type conversions
There are occasions where you'll need to convert data stored within a Series to another data type, for example, from floating point to integer.
Remember, that we already did data type conversions using the [built-in Python functions](https://docs.python.org/3/library/functions.html#built-in-functions) such as `int()` or `str()`.
For values in pandas DataFrames and Series, we can use the `astype()` method.
```{admonition} Truncating versus rounding up
**Be careful with type conversions from floating point values to integers.** The conversion simply drops the stuff to the right of the decimal point, so all values are rounded down to the nearest whole number. For example, 99.99 will be truncated to 99 as an integer, when it should be rounded up to 100.
Chaining the round and type conversion functions solves this issue as the `.round(0).astype(int)` command first rounds the values with zero decimals and then converts those values into integers.
```
```
print("Original values:")
data['TEMP'].head()
print("Truncated integer values:")
data['TEMP'].astype(int).head()
print("Rounded integer values:")
data['TEMP'].round(0).astype(int).head()
```
Looks correct now.
## Unique values
Sometimes it is useful to extract the unique values that you have in your column.
We can do that by using `unique()` method:
```
# Get unique celsius values
unique = data['TEMP'].unique()
unique
```
As a result we get an array of unique values in that column.
```{note}
Sometimes if you have a long list of unique values, you don't necessarily see all of them directly, as IPython/Jupyter may hide them with an ellipsis `...`. It is, however, possible to see all those values by printing them as a list:
```
```
# unique values as list
list(unique)
```
How many days with unique mean temperature did we have in June 2016? We can check that!
```
# Number of unique values
unique_temps = len(unique)
print("There were", unique_temps, "days with unique mean temperatures in June 2016.")
```
## Sorting data
Quite often it is useful to be able to sort your data (in descending/ascending order) based on the values in some column.
This can be easily done with pandas using the `sort_values(by='YourColumnName')` function.
Let's first sort the values on ascending order based on the `TEMP` column:
```
# Sort dataframe, ascending
data.sort_values(by='TEMP')
```
Of course, it is also possible to sort them in descending order with ``ascending=False`` parameter:
```
# Sort dataframe, descending
data.sort_values(by='TEMP', ascending=False)
```
## Writing data to a file
Lastly, it is of course important to be able to write the data that you have analyzed into your computer. This is really handy in Pandas as it [supports many different data formats
by default](https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html).
**The most typical output format by far is CSV file.** Function `to_csv()` can be used to easily save your data in CSV format.
Let's first save the data from our `data` DataFrame into a file called `Kumpula_temps_June_2016.csv`.
```
# define output filename
output_fp = "Kumpula_temps_June_2016.csv"
# Save dataframe to csv
data.to_csv(output_fp, sep=',')
```
Now we have the data from our DataFrame saved to a file:

As you can see, the first value in the datafile now contains the index value of the rows. There are also quite many decimals present in the new columns
that we created. Let's deal with these and save the temperature values from the `warm_temps` DataFrame without the index and with only 1 decimal for the floating point numbers.
```
# define output filename
output_fp2 = "Kumpula_temps_above15_June_2016.csv"
# Save dataframe to csv
warm_temps.to_csv(output_fp2, sep=',', index=False, float_format="%.1f")
```
Omitting the index can be done with the `index=False` parameter. Specifying how many decimals should be written can be done with the `float_format` parameter, where the text `%.1f` tells pandas to use 1 decimal
in all columns when writing the data to a file (changing the value 1 to 2 would write 2 decimals, etc.).

As a results you have a "cleaner" output file without the index column, and with only 1 decimal for floating point numbers.
That's it for this week. We will dive deeper into data analysis with Pandas in the following Lesson.
```
import torch
from torch import optim, nn
import torch.nn.functional as F
from torch.utils.data import DataLoader, Dataset
import zipfile
import sys
import os
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
from tqdm.notebook import tqdm
# Installation via binaries
print(torch.__version__)
TORCH="'1.7.0'"
CUDA="'cu101'"
!pip install torch-scatter==latest+${CUDA} -f https://pytorch-geometric.com/whl/torch-${TORCH}.html
!pip install torch-sparse==latest+${CUDA} -f https://pytorch-geometric.com/whl/torch-${TORCH}.html
!pip install torch-cluster==latest+${CUDA} -f https://pytorch-geometric.com/whl/torch-${TORCH}.html
!pip install torch-spline-conv==latest+${CUDA} -f https://pytorch-geometric.com/whl/torch-${TORCH}.html
!pip install torch-geometric
# Installation from source
#!pip install torch-scatter
#!pip install torch-sparse
#!pip install torch-cluster
#!pip install torch-spline-conv
#!pip install torch-geometric
import torch_geometric.transforms as T
from torch_geometric.nn import MessagePassing
from torch_geometric.utils import add_self_loops
!python --version
# Download dataset - Takes 5-10 minutes to download and untar
!wget https://s3-eu-west-1.amazonaws.com/pstorage-npg-968563215/9057631/ANI1_release.tar.gz
!tar -xvf ANI1_release.tar.gz
sys.path.insert(1,'/content/ANI-1_release/readers/lib')
import pyanitools as pya
# Copied from example_data_sampler.py
hdf5file = '/content/ANI-1_release/ani_gdb_s01.h5'
adl = pya.anidataloader(hdf5file)
for data in adl:
P = data['path'] # Won't need
X = data['coordinates'] # For input feature generation
E = data['energies'] # For labels
S = data['species'] # For embedding
sm = data['smiles'] # Won't need
# Print the data
print("Path: ", P)
print(" Smiles: ","".join(sm))
print(" Symbols: ", S)
print(" Coordinates: ", X)
print(" Energies: ", E, "\n")
adl.cleanup()
from torch_geometric.data import Data
species_dict = {'H': 0, 'C': 1, 'N': 2, 'O': 3}
class AniDataset(Dataset):
def __init__(self, dir='ANI-1_release'):
super(AniDataset, self).__init__()
self.parse(dir)
def parse(self, dir):
self.species = []
self.pos = []
self.energies = []
for i in range(1,9):
hdf5file = os.path.join(dir,'ani_gdb_s0{}.h5'.format(i))
adl = pya.anidataloader(hdf5file)
for molecule in adl:
species = molecule['species']
for pos, energy in zip(molecule['coordinates'], molecule['energies']):
self.species.append(species)
self.pos.append(pos)
self.energies.append(energy)
@staticmethod
def get_edge_index(pos):
dist_mat = np.linalg.norm(pos[None,:,:]-pos[:,None,:],axis=2)
edge_index = [[],[]]
N = len(pos)
for i in range(N):
for j in range(N):
if i!=j and dist_mat[i,j]<10:
edge_index[0].append(i)
edge_index[1].append(j)
edge_index = torch.tensor(edge_index, dtype=torch.long)
return edge_index
def __getitem__(self, i):
pos = self.pos[i]
species = self.species[i]
energy = self.energies[i]
pos = torch.tensor(pos)
species = torch.tensor([species_dict[atom] for atom in species], dtype=torch.long)
edge_index = self.get_edge_index(pos)
energy = torch.tensor(energy)
graph = Data(x=species, edge_index=edge_index, y=energy, pos=pos)
return graph
def __len__(self):
return len(self.energies)
ani_dataset = AniDataset()
from torch_geometric.nn import MessagePassing
from torch_geometric.utils import add_self_loops
class GraphConv(MessagePassing):
def __init__(self, coors, out_channels_1, out_features, label_dim=1, dropout=0):
"""
        label_dim - dimension of the node representation
        coors - dimension of the position vector (2 for MNIST)
        out_channels_1 - dimension of the convolution on each representation channel
            * output will have dimension label_dim * out_channels_1
out_features - dimension of node representation after graphConv
"""
super(GraphConv, self).__init__(aggr='add')
self.lin_in = torch.nn.Linear(coors, label_dim * out_channels_1)
self.lin_out = torch.nn.Linear(label_dim * out_channels_1, out_features)
self.dropout = dropout
def forward(self, x, pos, edge_index):
"""
x - feature matrix of the whole graph [num_nodes, label_dim]
pos - node position matrix [num_nodes, coors]
edge_index - graph connectivity [2, num_edges]
"""
edge_index, _ = add_self_loops(edge_index, num_nodes=x.size(0)) # num_edges = num_edges + num_nodes
return self.propagate(edge_index=edge_index, x=x, pos=pos, aggr='add') # [N, out_channels, label_dim]
def message(self, pos_i, pos_j, x_j):
"""
pos_i [num_edges, coors]
pos_j [num_edges, coors]
x_j [num_edges, label_dim]
"""
tmp = pos_j - pos_i
L = self.lin_in(tmp) # [num_edges, out_channels]
num_nodes, label_dim = list(x_j.size())
label_dim_out_channels_1 = list(L.size())[1]
X = F.relu(L)
Y = x_j
X = torch.t(X)
X = F.dropout(X, p=self.dropout, training=self.training)
result = torch.t(
(X.view(label_dim, -1, num_nodes) * torch.t(Y).unsqueeze(1)).reshape(label_dim_out_channels_1, num_nodes))
return result
def update(self, aggr_out):
"""
aggr_out [num_nodes, label_dim, out_channels]
"""
aggr_out = self.lin_out(aggr_out) # [num_nodes, label_dim, out_features]
aggr_out = F.relu(aggr_out)
aggr_out = F.dropout(aggr_out, p=self.dropout, training=self.training)
return aggr_out
from torch_geometric.utils import normalized_cut
def normalized_cut_2d(edge_index, pos):
row, col = edge_index
edge_attr = torch.norm(pos[row] - pos[col], p=2, dim=1)
return normalized_cut(edge_index, edge_attr, num_nodes=pos.size(0))
from torch_geometric.nn import graclus, max_pool, global_mean_pool
class GeoGCN(torch.nn.Module):
def __init__(self, dim_coor, out_dim, input_features,
layers_num, model_dim, out_channels_1, dropout,
use_cluster_pooling):
super(GeoGCN, self).__init__()
self.layers_num = layers_num
self.use_cluster_pooling = use_cluster_pooling
self.conv_layers = [GraphConv(coors=dim_coor,
out_channels_1=out_channels_1,
out_features=model_dim,
label_dim=input_features,
dropout=dropout)] + \
[GraphConv(coors=dim_coor,
out_channels_1=out_channels_1,
out_features=model_dim,
label_dim=model_dim,
dropout=dropout) for _ in range(layers_num - 1)]
self.conv_layers = torch.nn.ModuleList(self.conv_layers)
self.fc1 = torch.nn.Linear(model_dim, out_dim)
self.embedding = nn.Embedding(4,input_features)
def forward(self, data):
data.x = self.embedding(data.x)
data.x = data.x.float()
for i in range(self.layers_num):
data.x = self.conv_layers[i](data.x, data.pos, data.edge_index)
if self.use_cluster_pooling:
weight = normalized_cut_2d(data.edge_index, data.pos)
cluster = graclus(data.edge_index, weight, data.x.size(0))
data = max_pool(cluster, data, transform=T.Cartesian(cat=False))
#data.x = global_mean_pool(data.x, data.batch)
x = self.fc1(data.x)
return torch.sum(x)
model = GeoGCN(3,1,5,10,16,64,False,False)
#model.cuda()
ani_dataloader = DataLoader(ani_dataset, batch_size=1, pin_memory=True)
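# Note: this DataLoader is only used below for its length (the progress bar total);
# samples are drawn manually from ani_dataset one graph at a time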
objective = nn.L1Loss()
optimizer = optim.Adam(model.parameters(), lr=1e-3)
losses = []
num_epoch = 10
def train():
for epoch in range(num_epoch):
loop = tqdm(total=len(ani_dataloader), position=0, leave=False)
indices = np.arange(len(ani_dataset))
np.random.shuffle(indices)
for idx in indices:
data = ani_dataset[idx]
#data = data.cuda()
data.x = data.x
data.y = data.y.float()
optimizer.zero_grad()
y_hat = model(data)
loss = objective(y_hat, data.y)
loss.backward()
losses.append(loss.item())
loop.update(1)
optimizer.step()
train()
n=[]
l = []
for i in range(0,len(losses),100):
l.append(sum(losses[i:i+100])/100)
n.append(i)
plt.plot(n[:-1],l[:-1])
plt.show()
```
|
github_jupyter
|
import torch
from torch import optim, nn
import torch.nn.functional as F
from torch.utils.data import DataLoader, Dataset
import zipfile
import sys
import os
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
from tqdm.notebook import tqdm
# Installation via binaries
print(torch.__version__)
TORCH="'1.7.0'"
CUDA="'cu101'"
!pip install torch-scatter==latest+${CUDA} -f https://pytorch-geometric.com/whl/torch-${TORCH}.html
!pip install torch-sparse==latest+${CUDA} -f https://pytorch-geometric.com/whl/torch-${TORCH}.html
!pip install torch-cluster==latest+${CUDA} -f https://pytorch-geometric.com/whl/torch-${TORCH}.html
!pip install torch-spline-conv==latest+${CUDA} -f https://pytorch-geometric.com/whl/torch-${TORCH}.html
!pip install torch-geometric
# Installation from source
#!pip install torch-scatter
#!pip install torch-sparse
#!pip install torch-cluster
#!pip install torch-spline-conv
#!pip install torch-geometric
import torch_geometric.transforms as T
from torch_geometric.nn import MessagePassing
from torch_geometric.utils import add_self_loops
!python --version
# Download dataset - Takes 5-10 minutes to download and untar
!wget https://s3-eu-west-1.amazonaws.com/pstorage-npg-968563215/9057631/ANI1_release.tar.gz
!tar -xvf ANI1_release.tar.gz
sys.path.insert(1,'/content/ANI-1_release/readers/lib')
import pyanitools as pya
# Copied from example_data_sampler.py
hdf5file = '/content/ANI-1_release/ani_gdb_s01.h5'
adl = pya.anidataloader(hdf5file)
for data in adl:
P = data['path'] # Won't need
X = data['coordinates'] # For input feature generation
E = data['energies'] # For labels
S = data['species'] # For embedding
sm = data['smiles'] # Won't need
# Print the data
print("Path: ", P)
print(" Smiles: ","".join(sm))
print(" Symbols: ", S)
print(" Coordinates: ", X)
print(" Energies: ", E, "\n")
adl.cleanup()
from torch_geometric.data import Data
species_dict = {'H': 0, 'C': 1, 'N': 2, 'O': 3}
class AniDataset(Dataset):
def __init__(self, dir='ANI-1_release'):
super(AniDataset, self).__init__()
self.parse(dir)
def parse(self, dir):
self.species = []
self.pos = []
self.energies = []
for i in range(1,9):
hdf5file = os.path.join(dir,'ani_gdb_s0{}.h5'.format(i))
adl = pya.anidataloader(hdf5file)
for molecule in adl:
species = molecule['species']
for pos, energy in zip(molecule['coordinates'], molecule['energies']):
self.species.append(species)
self.pos.append(pos)
self.energies.append(energy)
@staticmethod
def get_edge_index(pos):
dist_mat = np.linalg.norm(pos[None,:,:]-pos[:,None,:],axis=2)
edge_index = [[],[]]
N = len(pos)
for i in range(N):
for j in range(N):
if i!=j and dist_mat[i,j]<10:
edge_index[0].append(i)
edge_index[1].append(j)
edge_index = torch.tensor(edge_index, dtype=torch.long)
return edge_index
def __getitem__(self, i):
pos = self.pos[i]
species = self.species[i]
energy = self.energies[i]
pos = torch.tensor(pos)
species = torch.tensor([species_dict[atom] for atom in species], dtype=torch.long)
edge_index = self.get_edge_index(pos)
energy = torch.tensor(energy)
graph = Data(x=species, edge_index=edge_index, y=energy, pos=pos)
return graph
def __len__(self):
return len(self.energies)
ani_dataset = AniDataset()
from torch_geometric.nn import MessagePassing
from torch_geometric.utils import add_self_loops
class GraphConv(MessagePassing):
def __init__(self, coors, out_channels_1, out_features, label_dim=1, dropout=0):
"""
label_dim - dimention of node reprezentaion
coors - dimension of position (for MNIST 2)
out_channels_1 - dimension of convolution on each reprezentation chanal
* autput will have dimention label_dim * out_channels_1
out_features - dimension of node representation after graphConv
"""
super(GraphConv, self).__init__(aggr='add')
self.lin_in = torch.nn.Linear(coors, label_dim * out_channels_1)
self.lin_out = torch.nn.Linear(label_dim * out_channels_1, out_features)
self.dropout = dropout
def forward(self, x, pos, edge_index):
"""
x - feature matrix of the whole graph [num_nodes, label_dim]
pos - node position matrix [num_nodes, coors]
edge_index - graph connectivity [2, num_edges]
"""
edge_index, _ = add_self_loops(edge_index, num_nodes=x.size(0)) # num_edges = num_edges + num_nodes
return self.propagate(edge_index=edge_index, x=x, pos=pos, aggr='add') # [N, out_channels, label_dim]
def message(self, pos_i, pos_j, x_j):
"""
pos_i [num_edges, coors]
pos_j [num_edges, coors]
x_j [num_edges, label_dim]
"""
tmp = pos_j - pos_i
L = self.lin_in(tmp) # [num_edges, out_channels]
num_nodes, label_dim = list(x_j.size())
label_dim_out_channels_1 = list(L.size())[1]
X = F.relu(L)
Y = x_j
X = torch.t(X)
X = F.dropout(X, p=self.dropout, training=self.training)
result = torch.t(
(X.view(label_dim, -1, num_nodes) * torch.t(Y).unsqueeze(1)).reshape(label_dim_out_channels_1, num_nodes))
return result
def update(self, aggr_out):
"""
aggr_out [num_nodes, label_dim, out_channels]
"""
aggr_out = self.lin_out(aggr_out) # [num_nodes, label_dim, out_features]
aggr_out = F.relu(aggr_out)
aggr_out = F.dropout(aggr_out, p=self.dropout, training=self.training)
return aggr_out
from torch_geometric.utils import normalized_cut
def normalized_cut_2d(edge_index, pos):
row, col = edge_index
edge_attr = torch.norm(pos[row] - pos[col], p=2, dim=1)
return normalized_cut(edge_index, edge_attr, num_nodes=pos.size(0))
from torch_geometric.nn import graclus, max_pool, global_mean_pool
class GeoGCN(torch.nn.Module):
def __init__(self, dim_coor, out_dim, input_features,
layers_num, model_dim, out_channels_1, dropout,
use_cluster_pooling):
super(GeoGCN, self).__init__()
self.layers_num = layers_num
self.use_cluster_pooling = use_cluster_pooling
self.conv_layers = [GraphConv(coors=dim_coor,
out_channels_1=out_channels_1,
out_features=model_dim,
label_dim=input_features,
dropout=dropout)] + \
[GraphConv(coors=dim_coor,
out_channels_1=out_channels_1,
out_features=model_dim,
label_dim=model_dim,
dropout=dropout) for _ in range(layers_num - 1)]
self.conv_layers = torch.nn.ModuleList(self.conv_layers)
self.fc1 = torch.nn.Linear(model_dim, out_dim)
self.embedding = nn.Embedding(4,input_features)
def forward(self, data):
data.x = self.embedding(data.x)
data.x = data.x.float()
for i in range(self.layers_num):
data.x = self.conv_layers[i](data.x, data.pos, data.edge_index)
if self.use_cluster_pooling:
weight = normalized_cut_2d(data.edge_index, data.pos)
cluster = graclus(data.edge_index, weight, data.x.size(0))
data = max_pool(cluster, data, transform=T.Cartesian(cat=False))
#data.x = global_mean_pool(data.x, data.batch)
x = self.fc1(data.x)
return torch.sum(x)
model = GeoGCN(dim_coor=3, out_dim=1, input_features=5, layers_num=10, model_dim=16,
               out_channels_1=64, dropout=0, use_cluster_pooling=False)
#model.cuda()
ani_dataloader = DataLoader(ani_dataset, batch_size=1, pin_memory=True)
objective = nn.L1Loss()
optimizer = optim.Adam(model.parameters(), lr=1e-3)
losses = []
num_epoch = 10
def train():
for epoch in range(num_epoch):
loop = tqdm(total=len(ani_dataloader), position=0, leave=False)
indices = np.arange(len(ani_dataset))
np.random.shuffle(indices)
for idx in indices:
data = ani_dataset[idx]
#data = data.cuda()
data.x = data.x
data.y = data.y.float()
optimizer.zero_grad()
y_hat = model(data)
loss = objective(y_hat, data.y)
loss.backward()
losses.append(loss.item())
loop.update(1)
optimizer.step()
train()
n=[]
l = []
for i in range(0,len(losses),100):
l.append(sum(losses[i:i+100])/100)
n.append(i)
plt.plot(n[:-1],l[:-1])
plt.show()
| 0.58166 | 0.439988 |
# Splitting GWO Daily or Hourly multi-station CSV files into per-station CSV files
**Author: Jun Sasaki** **Coded on Sep. 16, 2018, revised on January 10, 2022**<br>
On the Ground Weather Observation (GWO) meteorological database DVD, the hourly values from 1961 through 1990 are recorded at 3-hour intervals; the data exported from the GWO DVD as a single multi-year file per station are split by year. From 1991 onward the data are at 1-hour intervals. This code can be applied to either case, or to data in which both intervals are mixed.<br>
Daily values are handled in the same way.<br>
[Overview] The GWO DVD can export the data of all years and all observation stations as a single csv file; the purpose of this notebook is to split such a file into per-station files.<br>
#### Notes
* The input csv files are encoded in SHIFT-JIS with CRLF line endings; the per-station files produced by the split are also SHIFT-JIS with CRLF.
* To export from the database, launch SQLViewer7, uncheck "valid data" and "threshold sort", check "all database items", select all observation stations, specify the whole period, and run with CSV output.
* For hourly values the last record of a day is at hour 24, so the hour-24 record at the end of a year corresponds to hour 0 of New Year's Day of the following year (a small conversion sketch follows these notes).
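The following is a small sketch (not part of the notebook's own processing) of how hour-24 records can be mapped to hour 0 of the following day when building a datetime index with pandas; the tiny DataFrame is just illustrative.
```
import pandas as pd

df = pd.DataFrame({"YYYY": ["1990", "1990"], "MM": ["12", "12"], "DD": ["31", "31"], "HH": ["21", "24"]})
base = pd.to_datetime(df["YYYY"] + "-" + df["MM"] + "-" + df["DD"])
timestamp = base + pd.to_timedelta(df["HH"].astype(int), unit="h")
print(timestamp)  # the HH=24 row becomes 1991-01-01 00:00:00
```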
```
import pandas as pd
import os
import sys
```
## Create stn_dict in advance
Station names (in kanji) are recognized automatically, and the directory name corresponding to each station name is obtained from stn_dict.
```
stn_dict={"็จๅ
":"Wakkanai", "ๅ่ฆๆๅนธ":"Kitamiesashi", "็พฝๅน":"Haboro", "้ๆญฆ":"Oumu", "็่":"Rumoi", "ๆญๅท":"Asahikawa", \
"็ถฒ่ตฐ":"Abashiri", "ๅฐๆจฝ":"Otaru", "ๆญๅน":"Sapporo", "ๅฒฉ่ฆๆฒข":"Iwamizawa", "ๅธฏๅบ":"Obihiro", "้ง่ทฏ":"Kushiro", \
"ๆ นๅฎค":"Nemuro", "ๅฏฟ้ฝ":"Suttu", "ๅฎค่ญ":"Muroran", "่ซๅฐ็ง":"Tomakomai", \
"ๆตฆๆฒณ":"Urakawa", "ๆฑๅทฎ":"Esashi", "ๅฝ้คจ":"Hakodate", "ๅถ็ฅๅฎ":"Kutchan", "็ดๅฅ":"Monbetsu", "ๅบๅฐพ":"Hiroo", \
"ๅคง่นๆธก":"Ofunato", "ๆฐๅบ":"Shinjo", "่ฅๆพ":"Wakamatsu", "ๆทฑๆตฆ":"Fukaura", \
"้ๆฃฎ":"Aomori", "ใใค":"Mutsu", "ๅ
ซๆธ":"Hachinohe", "็ง็ฐ":"Akita", "็ๅฒก":"Morioka", "ๅฎฎๅค":"Miyako", \
"้
็ฐ":"Sakata", "ๅฑฑๅฝข":"Yamagata", "ไปๅฐ":"Sendai", "็ณๅทป":"Ishinomaki", \
"็ฆๅณถ":"Fukushima", "็ฝๆฒณ":"Shirakawa", "ๅฐๅๆต":"Onahama", "่ผชๅณถ":"Wajima", "็ธๅท":"Aikawa", "ๆฐๆฝ":"Niigata", \
"้ๆฒข":"Kanazawa", "ไผๆจ":"Fushiki", "ๅฏๅฑฑ":"Toyama", "้ท้":"Nagano", \
"้ซ็ฐ":"Takada", "ๅฎ้ฝๅฎฎ":"Utsunomiya", "็ฆไบ":"Fukui", "้ซๅฑฑ":"Takayama", "ๆพๆฌ":"Matsumoto", \
"่ซ่จช":"Suwa", "่ปฝไบๆฒข":"Karuizawa", "ๅๆฉ":"Maebashi", "็่ฐท":"Kumagaya", "ๆฐดๆธ":"Mito", \
"ๆฆ่ณ":"Tsuruga", "ๅฒ้":"Gifu", "ๅๅคๅฑ":"Nagoya", "้ฃฏ็ฐ":"Iida", "็ฒๅบ":"Kofu", \
"ๆฒณๅฃๆน":"Kawaguchiko", "็งฉ็ถ":"Chichibu", "้คจ้":"Tateno", "้ๅญ":"Choshi", "ไธ้":"Ueno", \
"ๆดฅ":"Tsu", "ไผ่ฏๆน":"Irago", "ๆตๆพ":"Hamamatsu", "ๅพกๅๅด":"Omaezaki", "้ๅฒก":"Shizuoka", \
"ไธๅณถ":"Mishima", "ๆฑไบฌ":"Tokyo", "ๅฐพ้ทฒ":"Owase", "็ณๅปๅด":"Irozaki", "็ถฒไปฃ":"Ajiro", \
"ๆจชๆต":"Yokohama", "้คจๅฑฑ":"Tateyama", "ๅๆตฆ":"Katsuura", "ๅคงๅณถ":"Oshima", "ไธๅฎ
ๅณถ":"Miyakejima", \
"ๅ
ซไธๅณถ":"Hachijojima", "ๅ่":"Chiba", "ๅๆฅๅธ":"Yokkaichi", "ๆฅๅ
":"Nikko", "่ฅฟ้ท":"Saigo", \
"ๆพๆฑ":"Matsue", "ๅข":"Sakai", "็ฑณๅญ":"Yonago", "้ณฅๅ":"Tottori", "่ฑๅฒก":"Toyooka", "่้ถด":"Maiduru", \
"ไผๅนๅฑฑ":"Ibukiyama", "่ฉ":"Hagi", "ๆต็ฐ":"Hamada", "ๆดฅๅฑฑ":"Tsuyama", \
"ไบฌ้ฝ":"Kyoto", "ๅฝฆๆ น":"Hikone", "ไธ้ข":"Shimonoseki", "ๅบๅณถ":"Hiroshima", "ๅ":"Kure", \
"็ฆๅฑฑ":"Fukuyama", "ๅฒกๅฑฑ":"Okayama", "ๅงซ่ทฏ":"Himeji", "็ฅๆธ":"Kobe", "ๅคง้ช":"Osaka", \
"ๆดฒๆฌ":"Sumoto", "ๅๆญๅฑฑ":"Wakayama", "ๆฝฎๅฒฌ":"Shionomisaki", "ๅฅ่ฏ":"Nara", "ๅฑฑๅฃ":"Yamaguchi", \
"ๅณๅ":"Izuhara", "ๅนณๆธ":"Hirado", "็ฆๅฒก":"Fukuoka", "้ฃฏๅก":"Iiduka", "ไฝไธไฟ":"Sasebo", \
"ไฝ่ณ":"Saga", "ๆฅ็ฐ":"Hita", "ๅคงๅ":"Oita", "้ทๅด":"Nagasaki", "็ๆฌ":"Kumamoto", \
"้ฟ่ๅฑฑ":"Asosan", "ๅปถๅฒก":"Nobeoka", "้ฟไน
ๆ น":"Akune", "ไบบๅ":"Hitoyoshi", "้นฟๅ
ๅณถ":"Kagoshima", \
"้ฝๅ":"Miyakonojo", "ๅฎฎๅด":"Miyazaki", "ๆๅด":"Makurazaki", "ๆฒนๆดฅ":"Aburatsu", "ๅฑไน
ๅณถ":"Yakushima", \
"็จฎๅญๅณถ":"Tanegashima", "็ๆทฑ":"Ushibuka", "็ฆๆฑ":"Fukue", "ๆพๅฑฑ":"Matsuyama", "ๅคๅบฆๆดฅ":"Tadotsu", \
"้ซๆพ":"Takamatsu", "ๅฎๅๅณถ":"Uwajima", "้ซ็ฅ":"Kochi", "ๅฃๅฑฑ":"Tsurugisan", "ๅพณๅณถ":"Tokushima", \
"ๅฎฟๆฏ":"Sukumo", "ๆธ
ๆฐด":"Shimizu", "ๅฎคๆธๅฒฌ":"Murotomisaki", "ๅ็ฌ":"Nase", "ไธ้ฃๅฝๅณถ":"Yonakunijima", \
"็ณๅฃๅณถ":"Ishigakijima", "ๅฎฎๅคๅณถ":"Miyakojima", "ไน
็ฑณๅณถ":"Kumejima", "้ฃ่ฆ":"Naha", "ๅ่ญท":"Nago", \
"ๆฒๆฐธ่ฏ้จ":"Okinoerabu", "ๅๅคงๆฑๅณถ":"Minamidaitojima", "็ถๅณถ":"Chichijima", "ๅ้ณฅๅณถ":"Minamitorishima"}
def GWO_stns2stn(fname="Matsue-Sakai-Yonago-Tottori", dir="../GWO/Daily/", hourly=False, overwrite=False):
'''Divide a file containing multiple stations csv to each station csv'''
if hourly:
names = ["KanID","Kname","KanID_1","YYYY","MM","DD","HH","lhpa","lhpaRMK","shpa","shpaRMK","kion","kionRMK","stem",\
"stemRMK","rhum","rhumRMK","muki","mukiRMK","sped","spedRMK","clod","clodRMK","tnki","tnkiRMK","humd","humdRMK",\
"lght","lghtRMK","slht","slhtRMK","kous","kousRMK"]
else:
names = ["KanID","Kname","KanID_1","YYYY","MM","DD","avrLhpa","avrLhpaRMK","avrShpa","avrShpaRMK","minShpa","minShpaRMK",\
"avrKion","avrKionRMK","maxKion","maxKionRMK","minKion","minKionRMK","avrStem","avrStemRMK","avrRhum","avrRhumRMK",\
"minRhum","minRhumRMK","avrSped","avrSpedRMK","maxSped","maxSpedRMK","maxMuki","maxMukiRMK","maxSSpd","maxSSpdRMK",\
"maxSMuk","maxSMukRMK","avrClod","avrClodRMK","daylght","daylghtRMK","sunlght","sunlghtRMK","amtEva","amtEvaRMK",\
"dayPrec","dayPrecRMK","maxHPrc","maxHPrcRMK","maxMPrc","maxMPrcRMK","talSnow","talSnowRMK","daySnow","daySnowRMK",\
"tenki1","tenki1RMK","tenki2","tenki2RMK","apCode1","apCode2","apCode3","apCode4","apCode5","strgTim","strgTimRMK"]
fpath=dir + fname + ".csv"
print("Reading ", fpath)
df = pd.read_csv(fpath, header=None, names=names, dtype="str", encoding="SHIFT-JIS")
    for stn in set(df['Kname']):
        print(stn)
        df_stn = df[df['Kname']==stn]
        dirpath_stn = dir + stn_dict[stn]
        if hourly:
            fpath_stn = dirpath_stn + "/" + stn_dict[stn] + fname[-9:] + ".csv"
        else:
            fpath_stn = dirpath_stn + "/" + stn_dict[stn] + "1961-2017" + ".csv" ### output CSV file path for each station
        if not os.path.isdir(dirpath_stn): ### create the directory if it does not exist
            print("Creating directory of " + dirpath_stn)
            os.mkdir(dirpath_stn)
        if not os.path.isfile(fpath_stn):
            print("Creating " + fpath_stn)
            df_stn.to_csv(fpath_stn, header=None, index=False, encoding="SHIFT-JIS") ### write with SHIFT-JIS encoding
        elif overwrite:
            print("Overwriting " + fpath_stn)
            df_stn.to_csv(fpath_stn, header=None, index=False, encoding="SHIFT-JIS") ### write with SHIFT-JIS encoding
        else:
            print("Already existing " + fpath_stn)
dirpath = "d:\dat\GWO\"
end_year = "2020"
```
### GWO Daily data
Give the archive data file name as the argument and split the file into per-station files. The station name is set automatically from the kanji name inside the file via stn_dict.
```
GWO_stns2stn(fname="GWODaily1961-{}".format(end_year), dir="{}Daily/".format(dirpath), hourly=False, overwrite=False)
```
### GWO Hourly data
Split each of GWOHourly1961-1990.csv (for 1961-1990) and GWOHourly1991-2017.csv (for 1991-2017) into per-station files.
```
GWO_stns2stn(fname="GWOHourly1961-1990", dir="{}Hourly/".format(dirpath), hourly=True, overwrite=False)
GWO_stns2stn(fname="GWOHourly1991-{}".format(end_year), dir="{}Hourly/".format(dirpath), hourly=True, overwrite=False)
```
|
github_jupyter
|
import pandas as pd
import os
import sys
stn_dict={"็จๅ
":"Wakkanai", "ๅ่ฆๆๅนธ":"Kitamiesashi", "็พฝๅน":"Haboro", "้ๆญฆ":"Oumu", "็่":"Rumoi", "ๆญๅท":"Asahikawa", \
"็ถฒ่ตฐ":"Abashiri", "ๅฐๆจฝ":"Otaru", "ๆญๅน":"Sapporo", "ๅฒฉ่ฆๆฒข":"Iwamizawa", "ๅธฏๅบ":"Obihiro", "้ง่ทฏ":"Kushiro", \
"ๆ นๅฎค":"Nemuro", "ๅฏฟ้ฝ":"Suttu", "ๅฎค่ญ":"Muroran", "่ซๅฐ็ง":"Tomakomai", \
"ๆตฆๆฒณ":"Urakawa", "ๆฑๅทฎ":"Esashi", "ๅฝ้คจ":"Hakodate", "ๅถ็ฅๅฎ":"Kutchan", "็ดๅฅ":"Monbetsu", "ๅบๅฐพ":"Hiroo", \
"ๅคง่นๆธก":"Ofunato", "ๆฐๅบ":"Shinjo", "่ฅๆพ":"Wakamatsu", "ๆทฑๆตฆ":"Fukaura", \
"้ๆฃฎ":"Aomori", "ใใค":"Mutsu", "ๅ
ซๆธ":"Hachinohe", "็ง็ฐ":"Akita", "็ๅฒก":"Morioka", "ๅฎฎๅค":"Miyako", \
"้
็ฐ":"Sakata", "ๅฑฑๅฝข":"Yamagata", "ไปๅฐ":"Sendai", "็ณๅทป":"Ishinomaki", \
"็ฆๅณถ":"Fukushima", "็ฝๆฒณ":"Shirakawa", "ๅฐๅๆต":"Onahama", "่ผชๅณถ":"Wajima", "็ธๅท":"Aikawa", "ๆฐๆฝ":"Niigata", \
"้ๆฒข":"Kanazawa", "ไผๆจ":"Fushiki", "ๅฏๅฑฑ":"Toyama", "้ท้":"Nagano", \
"้ซ็ฐ":"Takada", "ๅฎ้ฝๅฎฎ":"Utsunomiya", "็ฆไบ":"Fukui", "้ซๅฑฑ":"Takayama", "ๆพๆฌ":"Matsumoto", \
"่ซ่จช":"Suwa", "่ปฝไบๆฒข":"Karuizawa", "ๅๆฉ":"Maebashi", "็่ฐท":"Kumagaya", "ๆฐดๆธ":"Mito", \
"ๆฆ่ณ":"Tsuruga", "ๅฒ้":"Gifu", "ๅๅคๅฑ":"Nagoya", "้ฃฏ็ฐ":"Iida", "็ฒๅบ":"Kofu", \
"ๆฒณๅฃๆน":"Kawaguchiko", "็งฉ็ถ":"Chichibu", "้คจ้":"Tateno", "้ๅญ":"Choshi", "ไธ้":"Ueno", \
"ๆดฅ":"Tsu", "ไผ่ฏๆน":"Irago", "ๆตๆพ":"Hamamatsu", "ๅพกๅๅด":"Omaezaki", "้ๅฒก":"Shizuoka", \
"ไธๅณถ":"Mishima", "ๆฑไบฌ":"Tokyo", "ๅฐพ้ทฒ":"Owase", "็ณๅปๅด":"Irozaki", "็ถฒไปฃ":"Ajiro", \
"ๆจชๆต":"Yokohama", "้คจๅฑฑ":"Tateyama", "ๅๆตฆ":"Katsuura", "ๅคงๅณถ":"Oshima", "ไธๅฎ
ๅณถ":"Miyakejima", \
"ๅ
ซไธๅณถ":"Hachijojima", "ๅ่":"Chiba", "ๅๆฅๅธ":"Yokkaichi", "ๆฅๅ
":"Nikko", "่ฅฟ้ท":"Saigo", \
"ๆพๆฑ":"Matsue", "ๅข":"Sakai", "็ฑณๅญ":"Yonago", "้ณฅๅ":"Tottori", "่ฑๅฒก":"Toyooka", "่้ถด":"Maiduru", \
"ไผๅนๅฑฑ":"Ibukiyama", "่ฉ":"Hagi", "ๆต็ฐ":"Hamada", "ๆดฅๅฑฑ":"Tsuyama", \
"ไบฌ้ฝ":"Kyoto", "ๅฝฆๆ น":"Hikone", "ไธ้ข":"Shimonoseki", "ๅบๅณถ":"Hiroshima", "ๅ":"Kure", \
"็ฆๅฑฑ":"Fukuyama", "ๅฒกๅฑฑ":"Okayama", "ๅงซ่ทฏ":"Himeji", "็ฅๆธ":"Kobe", "ๅคง้ช":"Osaka", \
"ๆดฒๆฌ":"Sumoto", "ๅๆญๅฑฑ":"Wakayama", "ๆฝฎๅฒฌ":"Shionomisaki", "ๅฅ่ฏ":"Nara", "ๅฑฑๅฃ":"Yamaguchi", \
"ๅณๅ":"Izuhara", "ๅนณๆธ":"Hirado", "็ฆๅฒก":"Fukuoka", "้ฃฏๅก":"Iiduka", "ไฝไธไฟ":"Sasebo", \
"ไฝ่ณ":"Saga", "ๆฅ็ฐ":"Hita", "ๅคงๅ":"Oita", "้ทๅด":"Nagasaki", "็ๆฌ":"Kumamoto", \
"้ฟ่ๅฑฑ":"Asosan", "ๅปถๅฒก":"Nobeoka", "้ฟไน
ๆ น":"Akune", "ไบบๅ":"Hitoyoshi", "้นฟๅ
ๅณถ":"Kagoshima", \
"้ฝๅ":"Miyakonojo", "ๅฎฎๅด":"Miyazaki", "ๆๅด":"Makurazaki", "ๆฒนๆดฅ":"Aburatsu", "ๅฑไน
ๅณถ":"Yakushima", \
"็จฎๅญๅณถ":"Tanegashima", "็ๆทฑ":"Ushibuka", "็ฆๆฑ":"Fukue", "ๆพๅฑฑ":"Matsuyama", "ๅคๅบฆๆดฅ":"Tadotsu", \
"้ซๆพ":"Takamatsu", "ๅฎๅๅณถ":"Uwajima", "้ซ็ฅ":"Kochi", "ๅฃๅฑฑ":"Tsurugisan", "ๅพณๅณถ":"Tokushima", \
"ๅฎฟๆฏ":"Sukumo", "ๆธ
ๆฐด":"Shimizu", "ๅฎคๆธๅฒฌ":"Murotomisaki", "ๅ็ฌ":"Nase", "ไธ้ฃๅฝๅณถ":"Yonakunijima", \
"็ณๅฃๅณถ":"Ishigakijima", "ๅฎฎๅคๅณถ":"Miyakojima", "ไน
็ฑณๅณถ":"Kumejima", "้ฃ่ฆ":"Naha", "ๅ่ญท":"Nago", \
"ๆฒๆฐธ่ฏ้จ":"Okinoerabu", "ๅๅคงๆฑๅณถ":"Minamidaitojima", "็ถๅณถ":"Chichijima", "ๅ้ณฅๅณถ":"Minamitorishima"}
def GWO_stns2stn(fname="Matsue-Sakai-Yonago-Tottori", dir="../GWO/Daily/", hourly=False, overwrite=False):
'''Divide a file containing multiple stations csv to each station csv'''
if hourly:
names = ["KanID","Kname","KanID_1","YYYY","MM","DD","HH","lhpa","lhpaRMK","shpa","shpaRMK","kion","kionRMK","stem",\
"stemRMK","rhum","rhumRMK","muki","mukiRMK","sped","spedRMK","clod","clodRMK","tnki","tnkiRMK","humd","humdRMK",\
"lght","lghtRMK","slht","slhtRMK","kous","kousRMK"]
else:
names = ["KanID","Kname","KanID_1","YYYY","MM","DD","avrLhpa","avrLhpaRMK","avrShpa","avrShpaRMK","minShpa","minShpaRMK",\
"avrKion","avrKionRMK","maxKion","maxKionRMK","minKion","minKionRMK","avrStem","avrStemRMK","avrRhum","avrRhumRMK",\
"minRhum","minRhumRMK","avrSped","avrSpedRMK","maxSped","maxSpedRMK","maxMuki","maxMukiRMK","maxSSpd","maxSSpdRMK",\
"maxSMuk","maxSMukRMK","avrClod","avrClodRMK","daylght","daylghtRMK","sunlght","sunlghtRMK","amtEva","amtEvaRMK",\
"dayPrec","dayPrecRMK","maxHPrc","maxHPrcRMK","maxMPrc","maxMPrcRMK","talSnow","talSnowRMK","daySnow","daySnowRMK",\
"tenki1","tenki1RMK","tenki2","tenki2RMK","apCode1","apCode2","apCode3","apCode4","apCode5","strgTim","strgTimRMK"]
fpath=dir + fname + ".csv"
print("Reading ", fpath)
df = pd.read_csv(fpath, header=None, names=names, dtype="str", encoding="SHIFT-JIS")
    for stn in set(df['Kname']):
        print(stn)
        df_stn = df[df['Kname']==stn]
        dirpath_stn = dir + stn_dict[stn]
        if hourly:
            fpath_stn = dirpath_stn + "/" + stn_dict[stn] + fname[-9:] + ".csv"
        else:
            fpath_stn = dirpath_stn + "/" + stn_dict[stn] + "1961-2017" + ".csv" ### output CSV file path for each station
        if not os.path.isdir(dirpath_stn): ### create the directory if it does not exist
            print("Creating directory of " + dirpath_stn)
            os.mkdir(dirpath_stn)
        if not os.path.isfile(fpath_stn):
            print("Creating " + fpath_stn)
            df_stn.to_csv(fpath_stn, header=None, index=False, encoding="SHIFT-JIS") ### write with SHIFT-JIS encoding
        elif overwrite:
            print("Overwriting " + fpath_stn)
            df_stn.to_csv(fpath_stn, header=None, index=False, encoding="SHIFT-JIS") ### write with SHIFT-JIS encoding
        else:
            print("Already existing " + fpath_stn)
dirpath = "d:\dat\GWO\"
end_year = "2020"
GWO_stns2stn(fname="GWODaily1961-{}".format(end_year), dir="{}Daily/".format(dirpath), hourly=False, overwrite=False)
GWO_stns2stn(fname="GWOHourly1961-1990", dir="{}Hourly/".format(dirpath), hourly=True, overwrite=False)
GWO_stns2stn(fname="GWOHourly1991-{}".format(end_year), dir="{}Hourly/".format(dirpath), hourly=True, overwrite=False)
| 0.141875 | 0.82379 |
# Homework 3
For this homework, you will be working extensively in tensorflow. It is suggested that you spin up a Google Cloud VM with a GPU attached. Remember, instructions for doing so are found in Homework 0.
### Part 1: Homework 2, but on tensorflow
### Part 2: DNN on MNIST and CIFAR10
### Part 3: VGG on MNIST and CIFAR10
### (Optional) Part 4, getting state of the art (#SOTA)
# Part 1
You don't have to repeat everything in homework 2, but rather pick one set of two features that worked well for you last homework, and implement logistic regression using tensorflow without using keras (you will practice using keras in parts 2 and 3). In other words, using tensorflow operations, please create a scalar-value loss function and let tensorflow create the training operation for logistic regression, which automatically computes the gradients and updates the weight parameters. Note that the logistic loss is a special case of the softmax cross entropy loss that you've seen when classifying MNIST.
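For concreteness, here is a minimal sketch of what such a graph might look like, assuming a TensorFlow 1.x environment (consistent with the `tf.train.AdamOptimizer` calls used later in this notebook) and using random toy data as a stand-in for the two Homework 2 features you pick.
```
import numpy as np
import tensorflow as tf

tf.reset_default_graph()

# Placeholders for the two chosen features and the binary labels.
X = tf.placeholder(tf.float32, shape=[None, 2], name='features')
y = tf.placeholder(tf.float32, shape=[None, 1], name='labels')

# Weight parameters of the logistic regression model.
W = tf.Variable(tf.zeros([2, 1]), name='weights')
b = tf.Variable(tf.zeros([1]), name='bias')
logits = tf.matmul(X, W) + b

# Scalar logistic loss; this is the two-class special case of softmax cross entropy.
loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=logits))
train_op = tf.train.AdamOptimizer(0.01).minimize(loss)

# Toy stand-in data; replace with your Homework 2 features and labels.
X_np = np.random.randn(200, 2).astype(np.float32)
y_np = (X_np[:, :1] + X_np[:, 1:] > 0).astype(np.float32)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(500):
        _, loss_val = sess.run([train_op, loss], feed_dict={X: X_np, y: y_np})
    print('final training loss:', loss_val)
```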
# Part 2: DNN on MNIST and CIFAR10
In our lab, you guys saw how to work with the MNIST dataset to perform image classification. We can attempt the MNIST classification problem with just fully connected layers. This means we will be optimizing for non-banded matrices (no convolutions).
1. Calculate the number of weight parameters you are optimizing for 1, 2 and 3 different fully connected layers (the total size of each layer is up to you); a short parameter-counting sketch follows this list.
2. What is the maximum number of layers you can use before the training loss no longer converges? You can usually tell that something is not converging by examining the training loss vs. iteration curve.
3. How does the number of parameters relate to the training loss and validation/test loss? Try to get a few data points to speak to this question.
4. Keeping the maximum number of parameters possible while still maintaining convergence (i.e., a good training and validation/test loss), what happens when you swap the activation function to `tanh` instead of `relu`? How about `sigmoid`?
5. After exploring the above, train a DNN model with the combination of hyperparameters that you believe will work best on MNIST.
6. Using the same architecture, try training a DNN model on a more difficult dataset such as Fashion MNIST or CIFAR10/100. Example download instructions are shown in the next problem.
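For question 1, recall that a fully connected layer with `n` inputs and `m` units has `n*m + m` parameters (weights plus biases). The sketch below is one way to sanity-check your hand counts; the 100-unit layer size is just an example.
```
import tensorflow as tf

def dnn(num_hidden_layers, units=100):
    layers = [tf.keras.layers.Flatten(input_shape=(28, 28, 1))]
    layers += [tf.keras.layers.Dense(units, activation='relu') for _ in range(num_hidden_layers)]
    layers += [tf.keras.layers.Dense(10, activation='softmax')]
    return tf.keras.Sequential(layers)

for n in (1, 2, 3):
    # For each Dense layer: parameters = inputs * units + units (weights plus biases).
    print(n, 'hidden layer(s):', dnn(n).count_params(), 'parameters')
```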
### Must haves
1. Make a curve of the final validation/test loss of your DNN after the loss plateaus as a function of the number of weight parameters used (final loss versus # parameters used). Note that you might see something like the curve below for a low number of parameters, but as the number of parameters increases, it will not look like this plot.
2. On the same figure, make the same curve as above, but use different activation functions in your architecture.
3. Plot a point corresponding to your crafted DNN architecture for question 5.
4. Repeat 1-3 for CIFAR10
The curves when reasonable # params are used look like the below

```
# Download and visualize the data: see all here https://www.tensorflow.org/api_docs/python/tf/keras/datasets
import tensorflow as tf
(X_train, y_train), (X_val, y_val) = tf.keras.datasets.mnist.load_data()
y_train = tf.keras.utils.to_categorical(y_train, 10)
y_val = tf.keras.utils.to_categorical(y_val, 10)
X_train = X_train.reshape(X_train.shape[0], 28, 28, 1)
X_val = X_val.reshape(X_val.shape[0], 28, 28, 1)
from matplotlib import pyplot as plt
%matplotlib inline
print('Training data shape', X_train.shape)
_, (ax1, ax2) = plt.subplots(1, 2)
ax1.imshow(X_train[0].reshape(28, 28), cmap=plt.cm.Greys);
ax2.imshow(X_train[1].reshape(28, 28), cmap=plt.cm.Greys);
# Build your DNN, an example model is given for you.
model = tf.keras.Sequential([
tf.keras.layers.Flatten(input_shape=(28, 28, 1)),
# Try adding more layers and graph the final loss and accuracy
tf.keras.layers.Dense(100, activation='relu'),
tf.keras.layers.Dense(10, activation='softmax')
])
model.compile(optimizer=tf.train.AdamOptimizer(0.001),
loss='categorical_crossentropy',
metrics=['accuracy'])
model.summary()
model.fit(X_train, y_train,
batch_size=64,
epochs=1,
verbose=1,
validation_data=(X_val, y_val))
```
# Part 3. VGG on CIFAR100 and CIFAR10
VGG is a simple, but powerful CNN created in 2015. Read the VGG paper here: https://arxiv.org/pdf/1409.1556.pdf
Here, we're going to try to reproduce the model's findings on the cifar10 and cifar100 dataset. Note that the paper takes 224 x 224 images, but cifar10 and 100 are only 32 x 32 images.
1. Implement all of the layers for the VGG ConvNet Configuration A. Please use the shell code below as a guide (a sketch of configuration A's block structure also follows this list). Then, train this network on the Cifar10 and Cifar100 datasets.
2. For Cifar10 and 100, VGG is probably overkill. Try changing the number of layers and number of filters without sacrificing too much performance accuracy. How many filters can you get rid of before you see the accuracy drop by more than 2%? Where in the architecture is it better to remove filters - towards the input layers, or more towards the output layers?
3. For whatever you experiment with, report the parameter-count and validation-loss curves when changing i) the number of layers, ii) the filter sizes, iii) both.
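As a reference point for question 1, the sketch below is one reading of configuration A's conv/pool block structure (filter counts taken from Table 1 of the paper) adapted to 32 x 32 inputs; the fully connected head is deliberately shrunk from the paper's 4096-unit layers, so treat it as a starting point rather than a finished answer.
```
import tensorflow as tf

def conv(filters):
    # VGG uses only 3x3 'same' convolutions
    return tf.keras.layers.Conv2D(filters, (3, 3), padding='same', activation='relu')

def vgg_a(num_classes=10):
    return tf.keras.Sequential([
        tf.keras.layers.InputLayer(input_shape=(32, 32, 3)),
        conv(64), tf.keras.layers.MaxPool2D(),
        conv(128), tf.keras.layers.MaxPool2D(),
        conv(256), conv(256), tf.keras.layers.MaxPool2D(),
        conv(512), conv(512), tf.keras.layers.MaxPool2D(),
        conv(512), conv(512), tf.keras.layers.MaxPool2D(),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(512, activation='relu'),   # shrunk from the paper's 4096 units
        tf.keras.layers.Dense(num_classes, activation='softmax')
    ])

vgg_a(10).summary()
```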
```
# This is the same model in the other notebook, looks very simplified.
import tensorflow as tf
(X_train, y_train), (X_val, y_val) = tf.keras.datasets.cifar10.load_data()
y_train = tf.keras.utils.to_categorical(y_train, 10)
y_val = tf.keras.utils.to_categorical(y_val, 10)
X_train = X_train.reshape(X_train.shape[0], 32, 32, 3)
X_val = X_val.reshape(X_val.shape[0], 32, 32, 3)
from matplotlib import pyplot as plt
%matplotlib inline
print('Training data shape', X_train.shape)
_, (ax1, ax2) = plt.subplots(1, 2)
ax1.imshow(X_train[0].reshape(32, 32, 3));
ax2.imshow(X_train[1].reshape(32, 32, 3));
# Example CNN used in class
model = tf.keras.Sequential([
    tf.keras.layers.Conv2D(32, (5,5), padding='same', activation='relu', input_shape=(32, 32, 3)),  # CIFAR10 images have 3 colour channels
tf.keras.layers.MaxPool2D(padding='same'),
tf.keras.layers.Conv2D(64, (5,5), padding='same', activation='relu'),
tf.keras.layers.MaxPool2D(padding='same'),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(512, activation='relu'),
tf.keras.layers.Dense(10, activation='softmax')
])
model.compile(optimizer=tf.train.AdamOptimizer(0.0001),
loss='categorical_crossentropy',
metrics=['accuracy'])
model.summary()
```
# (Optional) Part 4, state of the art
Currently, state of the art implementations in the image classification problem are DenseNet: (https://arxiv.org/abs/1608.06993), ResNet (https://arxiv.org/abs/1512.03385), and ResNext (https://arxiv.org/pdf/1611.05431.pdf). Try implementing and training one of these on the cifar10 and cifar100 dataset. Feel free to experiment.
Jargon to learn about
1. What is "residual learning"?
2. What is a "bottleneck layer"?
3. What is a "dense block"?
|
github_jupyter
|
# Download and visualize the data: see all here https://www.tensorflow.org/api_docs/python/tf/keras/datasets
import tensorflow as tf
(X_train, y_train), (X_val, y_val) = tf.keras.datasets.mnist.load_data()
y_train = tf.keras.utils.to_categorical(y_train, 10)
y_val = tf.keras.utils.to_categorical(y_val, 10)
X_train = X_train.reshape(X_train.shape[0], 28, 28, 1)
X_val = X_val.reshape(X_val.shape[0], 28, 28, 1)
from matplotlib import pyplot as plt
%matplotlib inline
print('Training data shape', X_train.shape)
_, (ax1, ax2) = plt.subplots(1, 2)
ax1.imshow(X_train[0].reshape(28, 28), cmap=plt.cm.Greys);
ax2.imshow(X_train[1].reshape(28, 28), cmap=plt.cm.Greys);
# Build your DNN, an example model is given for you.
model = tf.keras.Sequential([
tf.keras.layers.Flatten(input_shape=(28, 28, 1)),
# Try adding more layers and graph the final loss and accuracy
tf.keras.layers.Dense(100, activation='relu'),
tf.keras.layers.Dense(10, activation='softmax')
])
model.compile(optimizer=tf.train.AdamOptimizer(0.001),
loss='categorical_crossentropy',
metrics=['accuracy'])
model.summary()
model.fit(X_train, y_train,
batch_size=64,
epochs=1,
verbose=1,
validation_data=(X_val, y_val))
# This is the same model in the other notebook, looks very simplified.
import tensorflow as tf
(X_train, y_train), (X_val, y_val) = tf.keras.datasets.cifar10.load_data()
y_train = tf.keras.utils.to_categorical(y_train, 10)
y_val = tf.keras.utils.to_categorical(y_val, 10)
X_train = X_train.reshape(X_train.shape[0], 32, 32, 3)
X_val = X_val.reshape(X_val.shape[0], 32, 32, 3)
from matplotlib import pyplot as plt
%matplotlib inline
print('Training data shape', X_train.shape)
_, (ax1, ax2) = plt.subplots(1, 2)
ax1.imshow(X_train[0].reshape(32, 32, 3));
ax2.imshow(X_train[1].reshape(32, 32, 3));
# Example CNN used in class
model = tf.keras.Sequential([
    tf.keras.layers.Conv2D(32, (5,5), padding='same', activation='relu', input_shape=(32, 32, 3)),  # CIFAR10 images have 3 colour channels
tf.keras.layers.MaxPool2D(padding='same'),
tf.keras.layers.Conv2D(64, (5,5), padding='same', activation='relu'),
tf.keras.layers.MaxPool2D(padding='same'),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(512, activation='relu'),
tf.keras.layers.Dense(10, activation='softmax')
])
model.compile(optimizer=tf.train.AdamOptimizer(0.0001),
loss='categorical_crossentropy',
metrics=['accuracy'])
model.summary()
| 0.9174 | 0.993686 |
# Amazon SageMaker Batch Transform: Associate prediction results with their corresponding input records
_**Use SageMaker's XGBoost to train a binary classification model and, for a list of tumors in a batch file, predict whether each is malignant**_
_**It also shows in detail how to use the input/output joining and filter features in Batch Transform**_
---
## Background
The purpose of this notebook is to train a model using SageMaker's XGBoost and UCI's breast cancer diagnostic data set, and to illustrate how to run batch inferences and how to use the Batch Transform I/O join feature. UCI's breast cancer diagnostic data set is available at https://archive.ics.uci.edu/ml/datasets/Breast+Cancer+Wisconsin+%28Diagnostic%29. The data set is also available on Kaggle at https://www.kaggle.com/uciml/breast-cancer-wisconsin-data. The purpose here is to use this data set to build a predictive model of whether a breast mass image indicates a benign or malignant tumor.
---
## Setup
Let's start by specifying:
* The SageMaker role arn used to give training and batch transform access to your data. The snippet below will use the same role used by your SageMaker notebook instance. Otherwise, specify the full ARN of a role with the SageMakerFullAccess policy attached.
* The S3 bucket that you want to use for training and storing model objects.
```
import os
import boto3
import sagemaker
role = sagemaker.get_execution_role()
sess = sagemaker.Session()
bucket=sess.default_bucket()
prefix = 'sagemaker/breast-cancer-prediction-xgboost' # place to upload training files within the bucket
```
---
## Data preparation
Data Source: https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/wdbc.data
https://www.kaggle.com/uciml/breast-cancer-wisconsin-data
Let's download the data and save it in the local folder with the name data.csv and take a look at it.
```
import pandas as pd
import numpy as np
data = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/wdbc.data', header = None)
# specify columns extracted from wbdc.names
data.columns = ["id","diagnosis","radius_mean","texture_mean","perimeter_mean","area_mean","smoothness_mean",
"compactness_mean","concavity_mean","concave points_mean","symmetry_mean","fractal_dimension_mean",
"radius_se","texture_se","perimeter_se","area_se","smoothness_se","compactness_se","concavity_se",
"concave points_se","symmetry_se","fractal_dimension_se","radius_worst","texture_worst",
"perimeter_worst","area_worst","smoothness_worst","compactness_worst","concavity_worst",
"concave points_worst","symmetry_worst","fractal_dimension_worst"]
# save the data
data.to_csv("data.csv", sep=',', index=False)
data.sample(8)
```
#### Key observations:
* The data has 569 observations and 32 columns.
* The first field is the 'id' attribute that we will want to drop before batch inference and add to the final inference output next to the probability of malignancy.
* Second field, 'diagnosis', is an indicator of the actual diagnosis ('M' = Malignant; 'B' = Benign).
* There are 30 other numeric features that we will use for training and inferencing.
Let's replace the M/B diagnosis with a 1/0 boolean value.
```
data['diagnosis']=data['diagnosis'].apply(lambda x: ((x =="M"))+0)
data.sample(8)
```
Let's split the data as follows: 80% for training, 10% for validation, and 10% set aside for our batch inference job. In addition, let's drop the 'id' field from the training and validation sets, as 'id' is not a training feature. For our batch set, however, we keep the 'id' feature. We'll want to filter it out prior to running our inferences so that the input features match those of the training set, and then ultimately join it with the inference results. We do, however, drop the diagnosis attribute for the batch set, since this is what we'll try to predict.
```
#data split in three sets, training, validation and batch inference
rand_split = np.random.rand(len(data))
train_list = rand_split < 0.8
val_list = (rand_split >= 0.8) & (rand_split < 0.9)
batch_list = rand_split >= 0.9
data_train = data[train_list].drop(['id'],axis=1)
data_val = data[val_list].drop(['id'],axis=1)
data_batch = data[batch_list].drop(['diagnosis'],axis=1)
data_batch_noID = data_batch.drop(['id'],axis=1)
```
Let's upload those data sets in S3
```
train_file = 'train_data.csv'
data_train.to_csv(train_file,index=False,header=False)
sess.upload_data(train_file, key_prefix='{}/train'.format(prefix))
validation_file = 'validation_data.csv'
data_val.to_csv(validation_file,index=False,header=False)
sess.upload_data(validation_file, key_prefix='{}/validation'.format(prefix))
batch_file = 'batch_data.csv'
data_batch.to_csv(batch_file,index=False,header=False)
sess.upload_data(batch_file, key_prefix='{}/batch'.format(prefix))
batch_file_noID = 'batch_data_noID.csv'
data_batch_noID.to_csv(batch_file_noID,index=False,header=False)
sess.upload_data(batch_file_noID, key_prefix='{}/batch'.format(prefix))
```
---
## Training job and model creation
The below cell uses the [SageMaker Python SDK](https://github.com/aws/sagemaker-python-sdk) to kick off the training job using both our training set and validation set. Note that the objective is set to 'binary:logistic', which trains a model to output a probability between 0 and 1 (here the probability of a tumor being malignant).
```
%%time
from time import gmtime, strftime
from sagemaker.amazon.amazon_estimator import get_image_uri
job_name = 'xgb-' + strftime("%Y-%m-%d-%H-%M-%S", gmtime())
output_location = 's3://{}/{}/output/{}'.format(bucket, prefix, job_name)
image = sagemaker.image_uris.retrieve('xgboost', boto3.Session().region_name, '1')
sm_estimator = sagemaker.estimator.Estimator(image,
role,
instance_count=1,
instance_type='ml.m5.4xlarge',
volume_size=50,
input_mode='File',
output_path=output_location,
sagemaker_session=sess)
sm_estimator.set_hyperparameters(objective="binary:logistic",
max_depth=5,
eta=0.2,
gamma=4,
min_child_weight=6,
subsample=0.8,
silent=0,
num_round=100)
train_data = sagemaker.inputs.TrainingInput('s3://{}/{}/train'.format(bucket, prefix), distribution='FullyReplicated',
content_type='text/csv', s3_data_type='S3Prefix')
validation_data = sagemaker.inputs.TrainingInput('s3://{}/{}/validation'.format(bucket, prefix), distribution='FullyReplicated',
content_type='text/csv', s3_data_type='S3Prefix')
data_channels = {'train': train_data, 'validation': validation_data}
# Start training by calling the fit method in the estimator
sm_estimator.fit(inputs=data_channels, logs=True)
```
---
## Batch Transform
In SageMaker Batch Transform, we introduced 3 new attributes - __input_filter__, __join_source__ and __output_filter__. In the below cell, we use the [SageMaker Python SDK](https://github.com/aws/sagemaker-python-sdk) to kick-off several Batch Transform jobs using different configurations of these 3 new attributes. Please refer to [this page](https://docs.aws.amazon.com/sagemaker/latest/dg/batch-transform-data-processing.html) to learn more about how to use them.
#### 1. Create a transform job with the default configurations
Let's first skip these 3 new attributes and inspect the inference results. We'll use it as a baseline to compare to the results with data processing.
```
%%time
sm_transformer = sm_estimator.transformer(1, 'ml.m4.xlarge')
# start a transform job
input_location = 's3://{}/{}/batch/{}'.format(bucket, prefix, batch_file_noID) # use input data without ID column
sm_transformer.transform(input_location, content_type='text/csv', split_type='Line')
sm_transformer.wait()
```
Let's inspect the output of the Batch Transform job in S3. It should show the list of probabilities of tumors being malignant.
```
import json
import io
from urllib.parse import urlparse
def get_csv_output_from_s3(s3uri, file_name):
parsed_url = urlparse(s3uri)
bucket_name = parsed_url.netloc
prefix = parsed_url.path[1:]
s3 = boto3.resource('s3')
obj = s3.Object(bucket_name, '{}/{}'.format(prefix, file_name))
return obj.get()["Body"].read().decode('utf-8')
output = get_csv_output_from_s3(sm_transformer.output_path, '{}.out'.format(batch_file_noID))
output_df = pd.read_csv(io.StringIO(output), sep=",", header=None)
output_df.head(8)
```
#### 2. Join the input and the prediction results
Now, let's associate the prediction results with their corresponding input records. We can also use the __input_filter__ to exclude the ID column easily and there's no need to have a separate file in S3.
* Set __input_filter__ to "$[1:]": indicates that we are excluding column 0 (the 'ID') before processing the inferences and keeping everything from column 1 to the last column (all the features or predictors)
* Set __join_source__ to "Input": indicates our desire to join the input data with the inference results
* Leave __output_filter__ at its default ('$'), indicating that the joined input and inference results will be saved as output.
```
# content_type / accept and split_type / assemble_with are required to use IO joining feature
sm_transformer.assemble_with = 'Line'
sm_transformer.accept = 'text/csv'
# start a transform job
input_location = 's3://{}/{}/batch/{}'.format(bucket, prefix, batch_file) # use input data with the ID column, since input_filter will filter it out
sm_transformer.transform(input_location, split_type='Line', content_type='text/csv', input_filter='$[1:]', join_source='Input')
sm_transformer.wait()
```
Let's inspect the output of the Batch Transform job in S3. It should show the list of tumors identified by their original feature columns and their corresponding probabilities of being malignant.
```
output = get_csv_output_from_s3(sm_transformer.output_path, '{}.out'.format(batch_file))
output_df = pd.read_csv(io.StringIO(output), sep=",", header=None)
output_df.head(8)
```
#### 3. Update the output filter to keep only ID and prediction results
Let's change __output_filter__ to "$[0,-1]", indicating that, when presenting the output, we only want to keep column 0 (the 'ID') and the last column (the inference result, i.e., the probability that a given tumor is malignant).
```
# start another transform job
sm_transformer.transform(input_location, split_type='Line', content_type='text/csv', input_filter='$[1:]', join_source='Input', output_filter='$[0,-1]')
sm_transformer.wait()
```
Now, let's inspect the output of the Batch Transform job in S3 again. It should show 2 columns: the ID and their corresponding probabilities of being malignant.
```
output = get_csv_output_from_s3(sm_transformer.output_path, '{}.out'.format(batch_file))
output_df = pd.read_csv(io.StringIO(output), sep=",", header=None)
output_df.head(8)
```
In summary, we can use the 3 newly introduced attributes - __input_filter__, __join_source__ and __output_filter__ - to
1. Filter / select useful features from the input dataset. e.g. exclude ID columns.
2. Associate the prediction results with their corresponding input records.
3. Filter the original or joined results before saving to S3. e.g. keep ID and probability columns only.
|
github_jupyter
|
import os
import boto3
import sagemaker
role = sagemaker.get_execution_role()
sess = sagemaker.Session()
bucket=sess.default_bucket()
prefix = 'sagemaker/breast-cancer-prediction-xgboost' # place to upload training files within the bucket
import pandas as pd
import numpy as np
data = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/wdbc.data', header = None)
# specify columns extracted from wbdc.names
data.columns = ["id","diagnosis","radius_mean","texture_mean","perimeter_mean","area_mean","smoothness_mean",
"compactness_mean","concavity_mean","concave points_mean","symmetry_mean","fractal_dimension_mean",
"radius_se","texture_se","perimeter_se","area_se","smoothness_se","compactness_se","concavity_se",
"concave points_se","symmetry_se","fractal_dimension_se","radius_worst","texture_worst",
"perimeter_worst","area_worst","smoothness_worst","compactness_worst","concavity_worst",
"concave points_worst","symmetry_worst","fractal_dimension_worst"]
# save the data
data.to_csv("data.csv", sep=',', index=False)
data.sample(8)
data['diagnosis']=data['diagnosis'].apply(lambda x: ((x =="M"))+0)
data.sample(8)
#data split in three sets, training, validation and batch inference
rand_split = np.random.rand(len(data))
train_list = rand_split < 0.8
val_list = (rand_split >= 0.8) & (rand_split < 0.9)
batch_list = rand_split >= 0.9
data_train = data[train_list].drop(['id'],axis=1)
data_val = data[val_list].drop(['id'],axis=1)
data_batch = data[batch_list].drop(['diagnosis'],axis=1)
data_batch_noID = data_batch.drop(['id'],axis=1)
train_file = 'train_data.csv'
data_train.to_csv(train_file,index=False,header=False)
sess.upload_data(train_file, key_prefix='{}/train'.format(prefix))
validation_file = 'validation_data.csv'
data_val.to_csv(validation_file,index=False,header=False)
sess.upload_data(validation_file, key_prefix='{}/validation'.format(prefix))
batch_file = 'batch_data.csv'
data_batch.to_csv(batch_file,index=False,header=False)
sess.upload_data(batch_file, key_prefix='{}/batch'.format(prefix))
batch_file_noID = 'batch_data_noID.csv'
data_batch_noID.to_csv(batch_file_noID,index=False,header=False)
sess.upload_data(batch_file_noID, key_prefix='{}/batch'.format(prefix))
%%time
from time import gmtime, strftime
from sagemaker.amazon.amazon_estimator import get_image_uri
job_name = 'xgb-' + strftime("%Y-%m-%d-%H-%M-%S", gmtime())
output_location = 's3://{}/{}/output/{}'.format(bucket, prefix, job_name)
image = sagemaker.image_uris.retrieve('xgboost', boto3.Session().region_name, '1')
sm_estimator = sagemaker.estimator.Estimator(image,
role,
instance_count=1,
instance_type='ml.m5.4xlarge',
volume_size=50,
input_mode='File',
output_path=output_location,
sagemaker_session=sess)
sm_estimator.set_hyperparameters(objective="binary:logistic",
max_depth=5,
eta=0.2,
gamma=4,
min_child_weight=6,
subsample=0.8,
silent=0,
num_round=100)
train_data = sagemaker.inputs.TrainingInput('s3://{}/{}/train'.format(bucket, prefix), distribution='FullyReplicated',
content_type='text/csv', s3_data_type='S3Prefix')
validation_data = sagemaker.inputs.TrainingInput('s3://{}/{}/validation'.format(bucket, prefix), distribution='FullyReplicated',
content_type='text/csv', s3_data_type='S3Prefix')
data_channels = {'train': train_data, 'validation': validation_data}
# Start training by calling the fit method in the estimator
sm_estimator.fit(inputs=data_channels, logs=True)
%%time
sm_transformer = sm_estimator.transformer(1, 'ml.m4.xlarge')
# start a transform job
input_location = 's3://{}/{}/batch/{}'.format(bucket, prefix, batch_file_noID) # use input data without ID column
sm_transformer.transform(input_location, content_type='text/csv', split_type='Line')
sm_transformer.wait()
import json
import io
from urllib.parse import urlparse
def get_csv_output_from_s3(s3uri, file_name):
parsed_url = urlparse(s3uri)
bucket_name = parsed_url.netloc
prefix = parsed_url.path[1:]
s3 = boto3.resource('s3')
obj = s3.Object(bucket_name, '{}/{}'.format(prefix, file_name))
return obj.get()["Body"].read().decode('utf-8')
output = get_csv_output_from_s3(sm_transformer.output_path, '{}.out'.format(batch_file_noID))
output_df = pd.read_csv(io.StringIO(output), sep=",", header=None)
output_df.head(8)
# content_type / accept and split_type / assemble_with are required to use IO joining feature
sm_transformer.assemble_with = 'Line'
sm_transformer.accept = 'text/csv'
# start a transform job
input_location = 's3://{}/{}/batch/{}'.format(bucket, prefix, batch_file) # use input data with the ID column, since input_filter will filter it out
sm_transformer.transform(input_location, split_type='Line', content_type='text/csv', input_filter='$[1:]', join_source='Input')
sm_transformer.wait()
output = get_csv_output_from_s3(sm_transformer.output_path, '{}.out'.format(batch_file))
output_df = pd.read_csv(io.StringIO(output), sep=",", header=None)
output_df.head(8)
# start another transform job
sm_transformer.transform(input_location, split_type='Line', content_type='text/csv', input_filter='$[1:]', join_source='Input', output_filter='$[0,-1]')
sm_transformer.wait()
output = get_csv_output_from_s3(sm_transformer.output_path, '{}.out'.format(batch_file))
output_df = pd.read_csv(io.StringIO(output), sep=",", header=None)
output_df.head(8)
| 0.435661 | 0.977862 |
# $\chi$ parameters
$\chi$ parameters, introduced by [Ackland and Jones](http://pyscal.com/en/latest/methods/angularmethods/chiparams.html), measure the angles generated by pairs of neighbor atoms around a host atom and assign them to a histogram to characterize the local structure. In this example, we will create different crystal structures and see how the $\chi$ parameters change with respect to the local coordination.
```
import pyscal as pc
import pyscal.crystal_structures as pcs
import matplotlib.pyplot as plt
import numpy as np
```
The :mod:`~pyscal.crystal_structures` module is used to create different perfect crystal structures. The created atoms and simulation box is then assigned to a :class:`~pyscal.core.System` object. For this example, fcc, bcc, hcp and diamond structures are created.
```
fcc_atoms, fcc_box = pcs.make_crystal('fcc', lattice_constant=4, repetitions=[4,4,4])
fcc = pc.System()
fcc.box = fcc_box
fcc.atoms = fcc_atoms
bcc_atoms, bcc_box = pcs.make_crystal('bcc', lattice_constant=4, repetitions=[4,4,4])
bcc = pc.System()
bcc.box = bcc_box
bcc.atoms = bcc_atoms
hcp_atoms, hcp_box = pcs.make_crystal('hcp', lattice_constant=4, repetitions=[4,4,4])
hcp = pc.System()
hcp.box = hcp_box
hcp.atoms = hcp_atoms
dia_atoms, dia_box = pcs.make_crystal('diamond', lattice_constant=4, repetitions=[4,4,4])
dia = pc.System()
dia.box = dia_box
dia.atoms = dia_atoms
```
Before calculating $\chi$ parameters, the [neighbors for each atom](http://pyscal.com/en/latest/methods/nearestneighbormethods/nearestneighbormethods.html) need to be found.
```
fcc.find_neighbors(method='cutoff', cutoff='adaptive')
bcc.find_neighbors(method='cutoff', cutoff='adaptive')
hcp.find_neighbors(method='cutoff', cutoff='adaptive')
dia.find_neighbors(method='cutoff', cutoff='adaptive')
```
Now, $\chi$ parameters can be calculated
```
fcc.calculate_chiparams()
bcc.calculate_chiparams()
hcp.calculate_chiparams()
dia.calculate_chiparams()
```
The calculated parameters for each atom can be accessed using the :attr:`~pyscal.catom.Atom.chiparams` attribute.
```
fcc_atoms = fcc.atoms
bcc_atoms = bcc.atoms
hcp_atoms = hcp.atoms
dia_atoms = dia.atoms
fcc_atoms[10].chiparams
```
The output is an array of length 9 which shows the number of neighbor angles found within specific bins as explained [here](http://pyscal.com/en/latest/methods/angularmethods/chiparams.html). The output for one atom from each structure is shown below.
```
plt.bar(np.array(range(9))-0.3, fcc_atoms[10].chiparams, width=0.2, label="fcc")
plt.bar(np.array(range(9))-0.1, bcc_atoms[10].chiparams, width=0.2, label="bcc")
plt.bar(np.array(range(9))+0.1, hcp_atoms[10].chiparams, width=0.2, label="hcp")
plt.bar(np.array(range(9))+0.3, dia_atoms[10].chiparams, width=0.2, label="diamond")
plt.xlabel("$\chi$")
plt.ylabel("Number of angles")
plt.legend()
```
The atoms exhibit a distinct fingerprint for each structure. Structural identification can be made by comparing the ratios of various $\chi$ parameters as described in the [original publication](https://journals.aps.org/prb/abstract/10.1103/PhysRevB.73.054104).
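As a simple illustration (this is not the ratio-based criteria from the publication, just a nearest-fingerprint check using the reference atoms computed above), one can classify an atom by the smallest distance between its $\chi$ histogram and the reference fingerprints.
```
import numpy as np

references = {
    'fcc': np.array(fcc_atoms[10].chiparams),
    'bcc': np.array(bcc_atoms[10].chiparams),
    'hcp': np.array(hcp_atoms[10].chiparams),
    'diamond': np.array(dia_atoms[10].chiparams),
}

def closest_structure(chi):
    chi = np.array(chi)
    # pick the reference fingerprint with the smallest Euclidean distance
    return min(references, key=lambda name: np.linalg.norm(references[name] - chi))

print(closest_structure(hcp_atoms[25].chiparams))  # expected: 'hcp' for a perfect crystal
```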
|
github_jupyter
|
import pyscal as pc
import pyscal.crystal_structures as pcs
import matplotlib.pyplot as plt
import numpy as np
fcc_atoms, fcc_box = pcs.make_crystal('fcc', lattice_constant=4, repetitions=[4,4,4])
fcc = pc.System()
fcc.box = fcc_box
fcc.atoms = fcc_atoms
bcc_atoms, bcc_box = pcs.make_crystal('bcc', lattice_constant=4, repetitions=[4,4,4])
bcc = pc.System()
bcc.box = bcc_box
bcc.atoms = bcc_atoms
hcp_atoms, hcp_box = pcs.make_crystal('hcp', lattice_constant=4, repetitions=[4,4,4])
hcp = pc.System()
hcp.box = hcp_box
hcp.atoms = hcp_atoms
dia_atoms, dia_box = pcs.make_crystal('diamond', lattice_constant=4, repetitions=[4,4,4])
dia = pc.System()
dia.box = dia_box
dia.atoms = dia_atoms
fcc.find_neighbors(method='cutoff', cutoff='adaptive')
bcc.find_neighbors(method='cutoff', cutoff='adaptive')
hcp.find_neighbors(method='cutoff', cutoff='adaptive')
dia.find_neighbors(method='cutoff', cutoff='adaptive')
fcc.calculate_chiparams()
bcc.calculate_chiparams()
hcp.calculate_chiparams()
dia.calculate_chiparams()
fcc_atoms = fcc.atoms
bcc_atoms = bcc.atoms
hcp_atoms = hcp.atoms
dia_atoms = dia.atoms
fcc_atoms[10].chiparams
plt.bar(np.array(range(9))-0.3, fcc_atoms[10].chiparams, width=0.2, label="fcc")
plt.bar(np.array(range(9))-0.1, bcc_atoms[10].chiparams, width=0.2, label="bcc")
plt.bar(np.array(range(9))+0.1, hcp_atoms[10].chiparams, width=0.2, label="hcp")
plt.bar(np.array(range(9))+0.3, dia_atoms[10].chiparams, width=0.2, label="diamond")
plt.xlabel("$\chi$")
plt.ylabel("Number of angles")
plt.legend()
| 0.498047 | 0.986942 |
# Python Algorithmic Trading Cookbook
## Chapter 6: Placing Regular Orders on the Exchange
This Jupyter Notebook is created using Python version 3.8.2
----
### Requirements
You can install the requirements for this Jupyter Notebook by executing the below cell
```
!pip install pyalgotrading
```
----
### Master Recipe
The following code will help you set up the broker connection with Zerodha, which will be used by all the recipes in this chapter. Please make sure you have followed these steps before trying out any recipe.
```
from pyalgotrading.broker.broker_connection_zerodha import BrokerConnectionZerodha
from pyalgotrading.constants import *
# Get the api_key and api_secret from broker. These are unique to you and will be used by the broker to identify your demat account.
api_key = "<your-api-key>"
api_secret = "<your-api-secret>"
broker_connection = BrokerConnectionZerodha(api_key, api_secret)
# Get the request token from the above URL
request_token = "<your-request-token>"
broker_connection.set_access_token(request_token)
```
----
### Recipe 1: Placing a Regular Market Order
```
instrument = broker_connection.get_instrument('NSE', 'HDFCBANK')
# Place a Buy, Regular, Intraday, Market order
order1_id = broker_connection.place_order(instrument=instrument,
order_transaction_type=BrokerOrderTransactionTypeConstants.BUY,
order_type=BrokerOrderTypeConstants.REGULAR,
order_code=BrokerOrderCodeConstants.INTRADAY,
order_variety=BrokerOrderVarietyConstants.MARKET,
quantity=1)
order1_id
broker_connection.get_order_status(order1_id)
# Place a Sell, Regular, Intraday, Market order
order2_id = broker_connection.place_order(instrument=instrument,
order_transaction_type=BrokerOrderTransactionTypeConstants.SELL,
order_type=BrokerOrderTypeConstants.REGULAR,
order_code=BrokerOrderCodeConstants.INTRADAY,
order_variety=BrokerOrderVarietyConstants.MARKET,
quantity=1)
order2_id
broker_connection.get_order_status(order2_id)
# Place a Buy, Regular, Delivery, Market order
order3_id = broker_connection.place_order(instrument=instrument,
order_transaction_type=BrokerOrderTransactionTypeConstants.BUY,
order_type=BrokerOrderTypeConstants.REGULAR,
order_code=BrokerOrderCodeConstants.DELIVERY,
order_variety=BrokerOrderVarietyConstants.MARKET,
quantity=1)
order3_id
broker_connection.get_order_status(order3_id)
# Place a Sell, Regular, Delivery, Market order
order4_id = broker_connection.place_order(instrument=instrument,
order_transaction_type=BrokerOrderTransactionTypeConstants.SELL,
order_type=BrokerOrderTypeConstants.REGULAR,
order_code=BrokerOrderCodeConstants.DELIVERY,
order_variety=BrokerOrderVarietyConstants.MARKET,
quantity=1)
order4_id
broker_connection.get_order_status(order4_id)
```
### Recipe 2: Placing a Regular Limit Order
```
instrument = broker_connection.get_instrument('NSE', 'ICICIBANK')
# Place a Buy, Regular, Intraday, Limit order
ltp = broker_connection.get_ltp(instrument)
order1_id = broker_connection.place_order(instrument=instrument,
order_transaction_type=BrokerOrderTransactionTypeConstants.BUY,
order_type=BrokerOrderTypeConstants.REGULAR,
order_code=BrokerOrderCodeConstants.INTRADAY,
order_variety=BrokerOrderVarietyConstants.LIMIT,
quantity=1,
price=ltp-1)
order1_id
broker_connection.get_order_status(order1_id)
broker_connection.get_order_status(order1_id)
# Place a Sell, Regular, Intraday, Limit order
ltp = broker_connection.get_ltp(instrument)
order2_id = broker_connection.place_order(instrument=instrument,
order_transaction_type=BrokerOrderTransactionTypeConstants.SELL,
order_type=BrokerOrderTypeConstants.REGULAR,
order_code=BrokerOrderCodeConstants.INTRADAY,
order_variety=BrokerOrderVarietyConstants.LIMIT,
quantity=1,
price=ltp+1)
order2_id
broker_connection.get_order_status(order2_id)
broker_connection.get_order_status(order2_id)
# Place a Buy, Regular, Delivery, Limit order
ltp = broker_connection.get_ltp(instrument)
order3_id = broker_connection.place_order(instrument=instrument,
order_transaction_type=BrokerOrderTransactionTypeConstants.BUY,
order_type=BrokerOrderTypeConstants.REGULAR,
order_code=BrokerOrderCodeConstants.DELIVERY,
order_variety=BrokerOrderVarietyConstants.LIMIT,
quantity=1,
price=ltp-1)
order3_id
broker_connection.get_order_status(order3_id)
broker_connection.get_order_status(order3_id)
# Place a Sell, Regular, Delivery, Limit order
ltp = broker_connection.get_ltp(instrument)
order4_id = broker_connection.place_order(instrument=instrument,
order_transaction_type=BrokerOrderTransactionTypeConstants.SELL,
order_type=BrokerOrderTypeConstants.REGULAR,
order_code=BrokerOrderCodeConstants.DELIVERY,
order_variety=BrokerOrderVarietyConstants.LIMIT,
quantity=1,
price=ltp+1)
order4_id
broker_connection.get_order_status(order4_id)
broker_connection.get_order_status(order4_id)
```
### Recipe 3: Placing a Regular Stoploss-Limit Order
```
instrument = broker_connection.get_instrument('NSE', 'AXISBANK')
# Place a Buy, Regular, Intraday, Stoploss Limit order
ltp = broker_connection.get_ltp(instrument)
order1_id = broker_connection.place_order(instrument=instrument,
order_transaction_type=BrokerOrderTransactionTypeConstants.BUY,
order_type=BrokerOrderTypeConstants.REGULAR,
order_code=BrokerOrderCodeConstants.INTRADAY,
order_variety=BrokerOrderVarietyConstants.STOPLOSS_LIMIT,
quantity=1,
price=ltp+1,
trigger_price=ltp+1)
order1_id
broker_connection.get_order_status(order1_id)
broker_connection.get_order_status(order1_id)
# Place a Sell, Regular, Intraday, Stoploss Limit order
ltp = broker_connection.get_ltp(instrument)
order2_id = broker_connection.place_order(instrument=instrument,
order_transaction_type=BrokerOrderTransactionTypeConstants.SELL,
order_type=BrokerOrderTypeConstants.REGULAR,
order_code=BrokerOrderCodeConstants.INTRADAY,
order_variety=BrokerOrderVarietyConstants.STOPLOSS_LIMIT,
quantity=1,
price=ltp-1,
trigger_price=ltp-1)
order2_id
broker_connection.get_order_status(order2_id)
broker_connection.get_order_status(order2_id)
# Place a Buy, Regular, Delivery, Stoploss Limit order
ltp = broker_connection.get_ltp(instrument)
order3_id = broker_connection.place_order(instrument=instrument,
order_transaction_type=BrokerOrderTransactionTypeConstants.BUY,
order_type=BrokerOrderTypeConstants.REGULAR,
order_code=BrokerOrderCodeConstants.DELIVERY,
order_variety=BrokerOrderVarietyConstants.STOPLOSS_LIMIT,
quantity=1,
price=ltp+1,
trigger_price=ltp+1)
order3_id
broker_connection.get_order_status(order3_id)
broker_connection.get_order_status(order3_id)
# Place a Sell, Regular, Delivery, Stoploss Limit order
ltp = broker_connection.get_ltp(instrument)
order4_id = broker_connection.place_order(instrument=instrument,
order_transaction_type=BrokerOrderTransactionTypeConstants.SELL,
order_type=BrokerOrderTypeConstants.REGULAR,
order_code=BrokerOrderCodeConstants.DELIVERY,
order_variety=BrokerOrderVarietyConstants.STOPLOSS_LIMIT,
quantity=1,
price=ltp-1,
trigger_price=ltp-1)
order4_id
broker_connection.get_order_status(order4_id)
broker_connection.get_order_status(order4_id)
```
### Recipe 4: Placing a Regular Stoploss-Market Order
```
instrument = broker_connection.get_instrument('NSE', 'KOTAKBANK')
# Place a Buy, Regular, Intraday, Stoploss Market order
ltp = broker_connection.get_ltp(instrument)
order1_id = broker_connection.place_order(instrument=instrument,
order_transaction_type=BrokerOrderTransactionTypeConstants.BUY,
order_type=BrokerOrderTypeConstants.REGULAR,
order_code=BrokerOrderCodeConstants.INTRADAY,
order_variety=BrokerOrderVarietyConstants.STOPLOSS_MARKET,
quantity=1,
trigger_price=ltp+1)
order1_id
broker_connection.get_order_status(order1_id)
broker_connection.get_order_status(order1_id)
# Place a Sell, Regular, Intraday, Stoploss Market order
ltp = broker_connection.get_ltp(instrument)
order2_id = broker_connection.place_order(instrument=instrument,
order_transaction_type=BrokerOrderTransactionTypeConstants.SELL,
order_type=BrokerOrderTypeConstants.REGULAR,
order_code=BrokerOrderCodeConstants.INTRADAY,
order_variety=BrokerOrderVarietyConstants.STOPLOSS_MARKET,
quantity=1,
trigger_price=ltp-1)
order2_id
broker_connection.get_order_status(order2_id)
broker_connection.get_order_status(order2_id)
# Place a Buy, Regular, Delivery, Stoploss Market order
ltp = broker_connection.get_ltp(instrument)
order3_id = broker_connection.place_order(instrument=instrument,
order_transaction_type=BrokerOrderTransactionTypeConstants.BUY,
order_type=BrokerOrderTypeConstants.REGULAR,
order_code=BrokerOrderCodeConstants.DELIVERY,
order_variety=BrokerOrderVarietyConstants.STOPLOSS_MARKET,
quantity=1,
trigger_price=ltp+1)
order3_id
broker_connection.get_order_status(order3_id)
broker_connection.get_order_status(order3_id)
# Place a Sell, Regular, Delivery, Stoploss Market order
ltp = broker_connection.get_ltp(instrument)
order4_id = broker_connection.place_order(instrument=instrument,
order_transaction_type=BrokerOrderTransactionTypeConstants.SELL,
order_type=BrokerOrderTypeConstants.REGULAR,
order_code=BrokerOrderCodeConstants.DELIVERY,
order_variety=BrokerOrderVarietyConstants.STOPLOSS_MARKET,
quantity=1,
trigger_price=ltp-1)
order4_id
broker_connection.get_order_status(order4_id)
broker_connection.get_order_status(order4_id)
```
|
github_jupyter
|
!pip install pyalgotrading
from pyalgotrading.broker.broker_connection_zerodha import BrokerConnectionZerodha
from pyalgotrading.constants import *
# Get the api_key and api_secret from broker. These are unique to you and will be used by the broker to identify your demat account.
api_key = "<your-api-key>"
api_secret = "<your-api-secret>"
broker_connection = BrokerConnectionZerodha(api_key, api_secret)
# Get the request token from the above URL
request_token = "<your-request-token>"
broker_connection.set_access_token(request_token)
instrument = broker_connection.get_instrument('NSE', 'HDFCBANK')
# Place a Buy, Regular, Intraday, Market order
order1_id = broker_connection.place_order(instrument=instrument,
order_transaction_type=BrokerOrderTransactionTypeConstants.BUY,
order_type=BrokerOrderTypeConstants.REGULAR,
order_code=BrokerOrderCodeConstants.INTRADAY,
order_variety=BrokerOrderVarietyConstants.MARKET,
quantity=1)
order1_id
broker_connection.get_order_status(order1_id)
# Place a Sell, Regular, Intraday, Market order
order2_id = broker_connection.place_order(instrument=instrument,
order_transaction_type=BrokerOrderTransactionTypeConstants.SELL,
order_type=BrokerOrderTypeConstants.REGULAR,
order_code=BrokerOrderCodeConstants.INTRADAY,
order_variety=BrokerOrderVarietyConstants.MARKET,
quantity=1)
order2_id
broker_connection.get_order_status(order2_id)
# Place a Buy, Regular, Delivery, Market order
order3_id = broker_connection.place_order(instrument=instrument,
order_transaction_type=BrokerOrderTransactionTypeConstants.BUY,
order_type=BrokerOrderTypeConstants.REGULAR,
order_code=BrokerOrderCodeConstants.DELIVERY,
order_variety=BrokerOrderVarietyConstants.MARKET,
quantity=1)
order3_id
broker_connection.get_order_status(order3_id)
# Place a Sell, Regular, Delivery, Market order
order4_id = broker_connection.place_order(instrument=instrument,
order_transaction_type=BrokerOrderTransactionTypeConstants.SELL,
order_type=BrokerOrderTypeConstants.REGULAR,
order_code=BrokerOrderCodeConstants.DELIVERY,
order_variety=BrokerOrderVarietyConstants.MARKET,
quantity=1)
order4_id
broker_connection.get_order_status(order4_id)
instrument = broker_connection.get_instrument('NSE', 'ICICIBANK')
# Place a Buy, Regular, Intraday, Limit order
ltp = broker_connection.get_ltp(instrument)
order1_id = broker_connection.place_order(instrument=instrument,
order_transaction_type=BrokerOrderTransactionTypeConstants.BUY,
order_type=BrokerOrderTypeConstants.REGULAR,
order_code=BrokerOrderCodeConstants.INTRADAY,
order_variety=BrokerOrderVarietyConstants.LIMIT,
quantity=1,
price=ltp-1)
order1_id
broker_connection.get_order_status(order1_id)
broker_connection.get_order_status(order1_id)
# Place a Sell, Regular, Intraday, Limit order
ltp = broker_connection.get_ltp(instrument)
order2_id = broker_connection.place_order(instrument=instrument,
order_transaction_type=BrokerOrderTransactionTypeConstants.SELL,
order_type=BrokerOrderTypeConstants.REGULAR,
order_code=BrokerOrderCodeConstants.INTRADAY,
order_variety=BrokerOrderVarietyConstants.LIMIT,
quantity=1,
price=ltp+1)
order2_id
broker_connection.get_order_status(order2_id)
broker_connection.get_order_status(order2_id)
# Place a Buy, Regular, Delivery, Limit order
ltp = broker_connection.get_ltp(instrument)
order3_id = broker_connection.place_order(instrument=instrument,
order_transaction_type=BrokerOrderTransactionTypeConstants.BUY,
order_type=BrokerOrderTypeConstants.REGULAR,
order_code=BrokerOrderCodeConstants.DELIVERY,
order_variety=BrokerOrderVarietyConstants.LIMIT,
quantity=1,
price=ltp-1)
order3_id
broker_connection.get_order_status(order3_id)
broker_connection.get_order_status(order3_id)
# Place a Sell, Regular, Delivery, Limit order
ltp = broker_connection.get_ltp(instrument)
order4_id = broker_connection.place_order(instrument=instrument,
order_transaction_type=BrokerOrderTransactionTypeConstants.SELL,
order_type=BrokerOrderTypeConstants.REGULAR,
order_code=BrokerOrderCodeConstants.DELIVERY,
order_variety=BrokerOrderVarietyConstants.LIMIT,
quantity=1,
price=ltp+1)
order4_id
broker_connection.get_order_status(order4_id)
broker_connection.get_order_status(order4_id)
instrument = broker_connection.get_instrument('NSE', 'AXISBANK')
# Place a Buy, Regular, Intraday, Stoploss Limit order
ltp = broker_connection.get_ltp(instrument)
order1_id = broker_connection.place_order(instrument=instrument,
order_transaction_type=BrokerOrderTransactionTypeConstants.BUY,
order_type=BrokerOrderTypeConstants.REGULAR,
order_code=BrokerOrderCodeConstants.INTRADAY,
order_variety=BrokerOrderVarietyConstants.STOPLOSS_LIMIT,
quantity=1,
price=ltp+1,
trigger_price=ltp+1)
order1_id
broker_connection.get_order_status(order1_id)
broker_connection.get_order_status(order1_id)
# Place a Sell, Regular, Intraday, Stoploss Limit order
ltp = broker_connection.get_ltp(instrument)
order2_id = broker_connection.place_order(instrument=instrument,
order_transaction_type=BrokerOrderTransactionTypeConstants.SELL,
order_type=BrokerOrderTypeConstants.REGULAR,
order_code=BrokerOrderCodeConstants.INTRADAY,
order_variety=BrokerOrderVarietyConstants.STOPLOSS_LIMIT,
quantity=1,
price=ltp-1,
trigger_price=ltp-1)
order2_id
broker_connection.get_order_status(order2_id)
broker_connection.get_order_status(order2_id)
# Place a Buy, Regular, Delivery, Stoploss Limit order
ltp = broker_connection.get_ltp(instrument)
order3_id = broker_connection.place_order(instrument=instrument,
order_transaction_type=BrokerOrderTransactionTypeConstants.BUY,
order_type=BrokerOrderTypeConstants.REGULAR,
order_code=BrokerOrderCodeConstants.DELIVERY,
order_variety=BrokerOrderVarietyConstants.STOPLOSS_LIMIT,
quantity=1,
price=ltp+1,
trigger_price=ltp+1)
order3_id
broker_connection.get_order_status(order3_id)
broker_connection.get_order_status(order3_id)
# Place a Sell, Regular, Delivery, Stoploss Limit order
ltp = broker_connection.get_ltp(instrument)
order4_id = broker_connection.place_order(instrument=instrument,
order_transaction_type=BrokerOrderTransactionTypeConstants.SELL,
order_type=BrokerOrderTypeConstants.REGULAR,
order_code=BrokerOrderCodeConstants.DELIVERY,
order_variety=BrokerOrderVarietyConstants.STOPLOSS_LIMIT,
quantity=1,
price=ltp-1,
trigger_price=ltp-1)
order4_id
broker_connection.get_order_status(order4_id)
broker_connection.get_order_status(order4_id)
instrument = broker_connection.get_instrument('NSE', 'KOTAKBANK')
# Place a Buy, Regular, Intraday, Stoploss Market order
ltp = broker_connection.get_ltp(instrument)
order1_id = broker_connection.place_order(instrument=instrument,
order_transaction_type=BrokerOrderTransactionTypeConstants.BUY,
order_type=BrokerOrderTypeConstants.REGULAR,
order_code=BrokerOrderCodeConstants.INTRADAY,
order_variety=BrokerOrderVarietyConstants.STOPLOSS_MARKET,
quantity=1,
trigger_price=ltp+1)
order1_id
broker_connection.get_order_status(order1_id)
broker_connection.get_order_status(order1_id)
# Place a Sell, Regular, Intraday, Stoploss Market order
ltp = broker_connection.get_ltp(instrument)
order2_id = broker_connection.place_order(instrument=instrument,
order_transaction_type=BrokerOrderTransactionTypeConstants.SELL,
order_type=BrokerOrderTypeConstants.REGULAR,
order_code=BrokerOrderCodeConstants.INTRADAY,
order_variety=BrokerOrderVarietyConstants.STOPLOSS_MARKET,
quantity=1,
trigger_price=ltp-1)
order2_id
broker_connection.get_order_status(order2_id)
broker_connection.get_order_status(order2_id)
# Place a Buy, Regular, Delivery, Stoploss Market order
ltp = broker_connection.get_ltp(instrument)
order3_id = broker_connection.place_order(instrument=instrument,
order_transaction_type=BrokerOrderTransactionTypeConstants.BUY,
order_type=BrokerOrderTypeConstants.REGULAR,
order_code=BrokerOrderCodeConstants.DELIVERY,
order_variety=BrokerOrderVarietyConstants.STOPLOSS_MARKET,
quantity=1,
trigger_price=ltp+1)
order3_id
broker_connection.get_order_status(order3_id)
broker_connection.get_order_status(order3_id)
# Place a Sell, Regular, Delivery, Stoploss Market order
ltp = broker_connection.get_ltp(instrument)
order4_id = broker_connection.place_order(instrument=instrument,
order_transaction_type=BrokerOrderTransactionTypeConstants.SELL,
order_type=BrokerOrderTypeConstants.REGULAR,
order_code=BrokerOrderCodeConstants.DELIVERY,
order_variety=BrokerOrderVarietyConstants.STOPLOSS_MARKET,
quantity=1,
trigger_price=ltp-1)
order4_id
broker_connection.get_order_status(order4_id)
broker_connection.get_order_status(order4_id)
| 0.536313 | 0.759292 |
# VacationPy
----
#### Note
* Keep an eye on your API usage. Use https://developers.google.com/maps/reporting/gmp-reporting as reference for how to monitor your usage and billing.
* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
```
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import requests
import gmaps
import os
# Import API key
from api_keys import g_key
```
### Store Part I results into DataFrame
* Load the csv exported in Part I to a DataFrame
```
csv_path = "../output_data/cities.csv"
cities_weather_df = pd.read_csv(csv_path)
cities_weather_df
```
### Humidity Heatmap
* Configure gmaps.
* Use the Lat and Lng as locations and Humidity as the weight.
* Add Heatmap layer to map.
```
gmaps.configure(api_key = g_key)
city_lat_lng = cities_weather_df[["Lat", "Lng"]]
humidity = cities_weather_df["Humidity"].astype(float)
figure_layout = {"width": "100%",
"height": "500px",
"padding": "1px"}
fig = gmaps.figure(layout = figure_layout, center = (30.0, 10.0), zoom_level = 2)
heat_layer = gmaps.heatmap_layer(city_lat_lng,
weights = humidity,
max_intensity = np.max(humidity),
dissipating = False,
point_radius = 3)
fig.add_layer(heat_layer)
fig
```
### Create new DataFrame fitting weather criteria
* Narrow down the cities to fit weather conditions.
* Drop any rows with null values.
```
ideal_locations_df = cities_weather_df.loc[(cities_weather_df["Max Temp"] >= 70) &
(cities_weather_df["Max Temp"] <= 80) &
(cities_weather_df["Wind Speed"] <= 10) &
(cities_weather_df["Cloudiness"] == 0)].dropna()
ideal_locations_df.head()
```
### Hotel Map
* Store the results into a variable named `hotel_df`.
* Add a "Hotel Name" column to the DataFrame.
* Set parameters to search for hotels within 5000 meters.
* Hit the Google Places API for each city's coordinates.
* Store the first Hotel result into the DataFrame.
* Plot markers on top of the heatmap.
```
hotel_df = ideal_locations_df.loc[:, ["City", "Lat", "Lng", "Country"]]
hotel_df["Hotel Name"] = ""
base_url = "https://maps.googleapis.com/maps/api/place/nearbysearch/json?"
params = {"keyword": "hotel",
"type": "lodging",
"key": g_key,
"radius": 5000,
"location": ""}
for index, row in hotel_df.iterrows():
params["location"] = str(row[1]) +","+ str(row[2])
response = requests.get(base_url, params).json()
hotel_results = response["results"]
try:
print(f"{row['City']}, {row['Country'].upper()} hotel: {hotel_results[0]['name']}")
hotel_df.loc[index, "Hotel Name"] = hotel_results[0]["name"]
# Print error statement if hotels cannot be found
except (KeyError, IndexError):
print(f"There is no hotel found for {row['City']} within 5000 meters.")
hotel_df
cities_count = hotel_df["City"].nunique()
print(f"There are %s cities that match the requirements" % cities_count)
# NOTE: Do not change any of the code in this cell
# Using the template add the hotel marks to the heatmap
info_box_template = """
<dl>
<dt>Name</dt><dd>{Hotel Name}</dd>
<dt>City</dt><dd>{City}</dd>
<dt>Country</dt><dd>{Country}</dd>
</dl>
"""
# Store the DataFrame Row
# NOTE: be sure to update with your DataFrame name
hotel_info = [info_box_template.format(**row) for index, row in hotel_df.iterrows()]
locations = hotel_df[["Lat", "Lng"]]
# Add marker layer ontop of heat map
markers = gmaps.marker_layer(locations, info_box_content = hotel_info)
fig.add_layer(markers)
# Display figure
fig
```
|
github_jupyter
|
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import requests
import gmaps
import os
# Import API key
from api_keys import g_key
csv_path = "../output_data/cities.csv"
cities_weather_df = pd.read_csv(csv_path)
cities_weather_df
gmaps.configure(api_key = g_key)
city_lat_lng = cities_weather_df[["Lat", "Lng"]]
humidity = cities_weather_df["Humidity"].astype(float)
figure_layout = {"width": "100%",
"height": "500px",
"padding": "1px"}
fig = gmaps.figure(layout = figure_layout, center = (30.0, 10.0), zoom_level = 2)
heat_layer = gmaps.heatmap_layer(city_lat_lng,
weights = humidity,
max_intensity = np.max(humidity),
dissipating = False,
point_radius = 3)
fig.add_layer(heat_layer)
fig
ideal_locations_df = cities_weather_df.loc[(cities_weather_df["Max Temp"] >= 70) &
(cities_weather_df["Max Temp"] <= 80) &
(cities_weather_df["Wind Speed"] <= 10) &
(cities_weather_df["Cloudiness"] == 0)].dropna()
ideal_locations_df.head()
hotel_df = ideal_locations_df.loc[:, ["City", "Lat", "Lng", "Country"]]
hotel_df["Hotel Name"] = ""
base_url = "https://maps.googleapis.com/maps/api/place/nearbysearch/json?"
params = {"keyword": "hotel",
"type": "lodging",
"key": g_key,
"radius": 5000,
"location": ""}
for index, row in hotel_df.iterrows():
params["location"] = str(row[1]) +","+ str(row[2])
response = requests.get(base_url, params).json()
hotel_results = response["results"]
try:
print(f"{row['City']}, {row['Country'].upper()} hotel: {hotel_results[0]['name']}")
hotel_df.loc[index, "Hotel Name"] = hotel_results[0]["name"]
# Print error statement if hotels cannot be found
except (KeyError, IndexError):
print(f"There is no hotel found for {row['City']} within 5000 meters.")
hotel_df
cities_count = hotel_df["City"].nunique()
print(f"There are %s cities that match the requirements" % cities_count)
# NOTE: Do not change any of the code in this cell
# Using the template add the hotel marks to the heatmap
info_box_template = """
<dl>
<dt>Name</dt><dd>{Hotel Name}</dd>
<dt>City</dt><dd>{City}</dd>
<dt>Country</dt><dd>{Country}</dd>
</dl>
"""
# Store the DataFrame Row
# NOTE: be sure to update with your DataFrame name
hotel_info = [info_box_template.format(**row) for index, row in hotel_df.iterrows()]
locations = hotel_df[["Lat", "Lng"]]
# Add marker layer ontop of heat map
markers = gmaps.marker_layer(locations, info_box_content = hotel_info)
fig.add_layer(markers)
# Display figure
fig
| 0.383526 | 0.846387 |
<a href="https://colab.research.google.com/github/felix0097/CVAE_mnist/blob/master/cvae_mnist.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
!pip install tensorflow==2.1.0
!pip install tensorflow-probability==0.9.0
import tensorflow as tf
import tensorflow_probability as tfp
import tensorflow_datasets as tfds
import numpy as np
import matplotlib.pyplot as plt
from typing import Tuple
from math import ceil
tf.config.list_physical_devices('GPU')
```
**Define Convolutional CVAE model**
```
class ConvCVAE(tf.keras.Model):
def __init__(self,
input_shape_img: Tuple[int, int, int],
input_shape_cond: int,
latent_dim: int):
super(ConvCVAE, self).__init__()
self.input_shape_img = input_shape_img
self.latent_dim = latent_dim
self.conv_enc = tf.keras.Sequential(
[tf.keras.layers.InputLayer(input_shape=input_shape_img),
tf.keras.layers.Conv2D(filters=64,
kernel_size=3,
activation='relu',
padding='same'),
tf.keras.layers.MaxPooling2D(pool_size=(2, 2),
padding='same'),
tf.keras.layers.Conv2D(filters=128,
kernel_size=3,
activation='relu',
padding='same'),
tf.keras.layers.MaxPooling2D(pool_size=(2, 2),
padding='same'),
tf.keras.layers.Flatten()
],
name='encoder')
self.enc = tf.keras.Sequential(
[tf.keras.layers.InputLayer(
input_shape=self.conv_enc.output_shape[1] + input_shape_cond
),
tf.keras.layers.Dense(self.conv_enc.output_shape[1] // 4,
activation='relu',
kernel_regularizer=tf.keras.regularizers.l2(
l=1e-4)
),
tf.keras.layers.Dense(2*latent_dim,
kernel_regularizer=tf.keras.regularizers.l2(
l=1e-4)
)
]
)
self.dec = tf.keras.Sequential(
[tf.keras.layers.InputLayer(
input_shape=(latent_dim + input_shape_cond)
),
tf.keras.layers.Dense(units=self.conv_enc.output_shape[1],
activation=tf.nn.relu,
kernel_regularizer=tf.keras.regularizers.l2(
l=1e-4)
),
tf.keras.layers.Reshape(
target_shape=self.conv_enc.layers[-2].output_shape[1:]
),
tf.keras.layers.Conv2DTranspose(filters=128,
kernel_size=3,
activation='relu',
padding='same'
),
tf.keras.layers.UpSampling2D(size=(2, 2)),
tf.keras.layers.Conv2DTranspose(filters=64,
kernel_size=3,
activation='relu',
padding='same'
),
tf.keras.layers.UpSampling2D(size=(2, 2)),
tf.keras.layers.Conv2DTranspose(filters=input_shape_img[2],
kernel_size=3,
strides=(1, 1),
padding='same'
),
tf.keras.layers.Activation('sigmoid')
],
name='decoder')
def call(self, inputs, training=False):
img_input = inputs[0]
cond_input = inputs[1]
encoded_img = self.conv_enc(img_input)
enc_output = self.enc(tf.concat([encoded_img, cond_input], axis=1))
mean, log_scale = tf.split(enc_output, num_or_size_splits=2, axis=1)
scale= tf.math.exp(log_scale)
latent_dist = tfp.distributions.MultivariateNormalDiag(loc=mean,
scale_diag=scale)
ref_dist = tfp.distributions.MultivariateNormalDiag(
loc=tf.zeros([self.latent_dim])
)
kl_divergence = tfp.distributions.kl_divergence(latent_dist, ref_dist)
self.add_loss(tf.math.reduce_mean(kl_divergence,
name='KL_divergence_loss'),
inputs=True
)
input_dec = tf.concat([latent_dist.sample(), cond_input], axis=1)
dec_img = self.dec(input_dec)
dec_img = tf.image.resize_with_crop_or_pad(dec_img,
self.input_shape_img[0],
self.input_shape_img[1])
return dec_img
```
**Prepare data set for fitting**
```
BATCH_SIZE = 1024
def preprocess_data(elem):
img = elem['image']
label = elem['label']
# convert input image to [0, 1]
img = tf.cast(img, dtype=tf.float32) / tf.cast(255., dtype=tf.float32)
# binarize image
img = tf.cast(img >= 0.25, dtype=tf.float32)
img_input = img - 0.5
# one hot encode label
label = tf.one_hot(tf.cast(label, dtype=tf.uint8), depth=10)
return ((img_input, label), img)
ds_train = tfds.load(name="mnist", split="train")
ds_val = tfds.load(name="mnist", split="test")
ds_train = ds_train.map(preprocess_data,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
ds_val = ds_val.map(preprocess_data,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
ds_train = ds_train.shuffle(5000, reshuffle_each_iteration=True)\
.repeat()\
.batch(BATCH_SIZE)\
.prefetch(10)
ds_val = ds_val.shuffle(5000, reshuffle_each_iteration=True)\
.repeat()\
.batch(BATCH_SIZE)\
.prefetch(10)
```
**Fit model**
```
# remove logs data
# !kill 7030
# !rm -r logs
%load_ext tensorboard
%tensorboard --logdir=logs
conv_cvae = ConvCVAE(input_shape_img=(28, 28, 1),
input_shape_cond=10,
latent_dim=50)
conv_cvae.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=1e-3),
loss=tf.keras.losses.BinaryCrossentropy())
hist = conv_cvae.fit(ds_train,
validation_data=ds_val,
epochs=50,
steps_per_epoch=ceil(60000 / BATCH_SIZE),
validation_steps=ceil(10000 / BATCH_SIZE),
callbacks=[
tf.keras.callbacks.EarlyStopping(
patience=5,
restore_best_weights=True
),
tf.keras.callbacks.ReduceLROnPlateau(),
tf.keras.callbacks.TensorBoard()
]
)
```
**See how generated images look like**
```
NUM_IMGS = 30
LATENT_DIM = 50
noise = np.random.normal(size=(NUM_IMGS, LATENT_DIM))
cond_input = np.zeros(shape=(NUM_IMGS, 10))
x_idx = np.arange(0, NUM_IMGS)
y_idx = np.random.randint(low=0, high=10, size=(NUM_IMGS))
cond_input[x_idx, y_idx] = 1.
dec_input = np.concatenate([noise, cond_input], axis=1).astype(np.float32)
imgs = conv_cvae.dec(dec_input).numpy()
fig, axs = plt.subplots(nrows=NUM_IMGS//5, ncols=5, figsize=(9, 9))
for img_idx in range(imgs.shape[0]):
img = np.sum(imgs[img_idx, :, :, :], axis=-1)
axs[np.unravel_index(img_idx, (NUM_IMGS//5, 5))].imshow(img)
axs[np.unravel_index(img_idx, (NUM_IMGS//5, 5))].axis('off')
axs[np.unravel_index(img_idx, (NUM_IMGS//5, 5))].set_title(
f'Should be: {int(np.argwhere(cond_input[img_idx, :] == 1))}'
)
plt.show()
noise = np.random.normal(size=(NUM_IMGS, LATENT_DIM), scale=1)
cond_input = np.zeros(shape=(NUM_IMGS, 10))
cond_input[:, 3] = 1.
dec_input = np.concatenate([noise, cond_input], axis=1).astype(np.float32)
imgs = conv_cvae.dec(dec_input).numpy()
fig, axs = plt.subplots(nrows=NUM_IMGS//5, ncols=5, figsize=(9, 9))
for img_idx in range(imgs.shape[0]):
img = np.sum(imgs[img_idx, :, :, :], axis=-1) > 0.5
axs[np.unravel_index(img_idx, (NUM_IMGS//5, 5))].imshow(img)
axs[np.unravel_index(img_idx, (NUM_IMGS//5, 5))].axis('off')
plt.show()
```
|
github_jupyter
|
!pip install tensorflow==2.1.0
!pip install tensorflow-probability==0.9.0
import tensorflow as tf
import tensorflow_probability as tfp
import tensorflow_datasets as tfds
import numpy as np
import matplotlib.pyplot as plt
from typing import Tuple
from math import ceil
tf.config.list_physical_devices('GPU')
class ConvCVAE(tf.keras.Model):
def __init__(self,
input_shape_img: Tuple[int, int, int],
input_shape_cond: int,
latent_dim: int):
super(ConvCVAE, self).__init__()
self.input_shape_img = input_shape_img
self.latent_dim = latent_dim
self.conv_enc = tf.keras.Sequential(
[tf.keras.layers.InputLayer(input_shape=input_shape_img),
tf.keras.layers.Conv2D(filters=64,
kernel_size=3,
activation='relu',
padding='same'),
tf.keras.layers.MaxPooling2D(pool_size=(2, 2),
padding='same'),
tf.keras.layers.Conv2D(filters=128,
kernel_size=3,
activation='relu',
padding='same'),
tf.keras.layers.MaxPooling2D(pool_size=(2, 2),
padding='same'),
tf.keras.layers.Flatten()
],
name='encoder')
self.enc = tf.keras.Sequential(
[tf.keras.layers.InputLayer(
input_shape=self.conv_enc.output_shape[1] + input_shape_cond
),
tf.keras.layers.Dense(self.conv_enc.output_shape[1] // 4,
activation='relu',
kernel_regularizer=tf.keras.regularizers.l2(
l=1e-4)
),
tf.keras.layers.Dense(2*latent_dim,
kernel_regularizer=tf.keras.regularizers.l2(
l=1e-4)
)
]
)
self.dec = tf.keras.Sequential(
[tf.keras.layers.InputLayer(
input_shape=(latent_dim + input_shape_cond)
),
tf.keras.layers.Dense(units=self.conv_enc.output_shape[1],
activation=tf.nn.relu,
kernel_regularizer=tf.keras.regularizers.l2(
l=1e-4)
),
tf.keras.layers.Reshape(
target_shape=self.conv_enc.layers[-2].output_shape[1:]
),
tf.keras.layers.Conv2DTranspose(filters=128,
kernel_size=3,
activation='relu',
padding='same'
),
tf.keras.layers.UpSampling2D(size=(2, 2)),
tf.keras.layers.Conv2DTranspose(filters=64,
kernel_size=3,
activation='relu',
padding='same'
),
tf.keras.layers.UpSampling2D(size=(2, 2)),
tf.keras.layers.Conv2DTranspose(filters=input_shape_img[2],
kernel_size=3,
strides=(1, 1),
padding='same'
),
tf.keras.layers.Activation('sigmoid')
],
name='decoder')
def call(self, inputs, training=False):
img_input = inputs[0]
cond_input = inputs[1]
encoded_img = self.conv_enc(img_input)
enc_output = self.enc(tf.concat([encoded_img, cond_input], axis=1))
mean, log_scale = tf.split(enc_output, num_or_size_splits=2, axis=1)
scale= tf.math.exp(log_scale)
latent_dist = tfp.distributions.MultivariateNormalDiag(loc=mean,
scale_diag=scale)
ref_dist = tfp.distributions.MultivariateNormalDiag(
loc=tf.zeros([self.latent_dim])
)
kl_divergence = tfp.distributions.kl_divergence(latent_dist, ref_dist)
self.add_loss(tf.math.reduce_mean(kl_divergence,
name='KL_divergence_loss'),
inputs=True
)
input_dec = tf.concat([latent_dist.sample(), cond_input], axis=1)
dec_img = self.dec(input_dec)
dec_img = tf.image.resize_with_crop_or_pad(dec_img,
self.input_shape_img[0],
self.input_shape_img[1])
return dec_img
BATCH_SIZE = 1024
def preprocess_data(elem):
img = elem['image']
label = elem['label']
# convert input image to [0, 1]
img = tf.cast(img, dtype=tf.float32) / tf.cast(255., dtype=tf.float32)
# binarize image
img = tf.cast(img >= 0.25, dtype=tf.float32)
img_input = img - 0.5
# one hot encode label
label = tf.one_hot(tf.cast(label, dtype=tf.uint8), depth=10)
return ((img_input, label), img)
ds_train = tfds.load(name="mnist", split="train")
ds_val = tfds.load(name="mnist", split="test")
ds_train = ds_train.map(preprocess_data,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
ds_val = ds_val.map(preprocess_data,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
ds_train = ds_train.shuffle(5000, reshuffle_each_iteration=True)\
.repeat()\
.batch(BATCH_SIZE)\
.prefetch(10)
ds_val = ds_val.shuffle(5000, reshuffle_each_iteration=True)\
.repeat()\
.batch(BATCH_SIZE)\
.prefetch(10)
# remove logs data
# !kill 7030
# !rm -r logs
%load_ext tensorboard
%tensorboard --logdir=logs
conv_cvae = ConvCVAE(input_shape_img=(28, 28, 1),
input_shape_cond=10,
latent_dim=50)
conv_cvae.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=1e-3),
loss=tf.keras.losses.BinaryCrossentropy())
hist = conv_cvae.fit(ds_train,
validation_data=ds_val,
epochs=50,
steps_per_epoch=ceil(60000 / BATCH_SIZE),
validation_steps=ceil(10000 / BATCH_SIZE),
callbacks=[
tf.keras.callbacks.EarlyStopping(
patience=5,
restore_best_weights=True
),
tf.keras.callbacks.ReduceLROnPlateau(),
tf.keras.callbacks.TensorBoard()
]
)
NUM_IMGS = 30
LATENT_DIM = 50
noise = np.random.normal(size=(NUM_IMGS, LATENT_DIM))
cond_input = np.zeros(shape=(NUM_IMGS, 10))
x_idx = np.arange(0, NUM_IMGS)
y_idx = np.random.randint(low=0, high=10, size=(NUM_IMGS))
cond_input[x_idx, y_idx] = 1.
dec_input = np.concatenate([noise, cond_input], axis=1).astype(np.float32)
imgs = conv_cvae.dec(dec_input).numpy()
fig, axs = plt.subplots(nrows=NUM_IMGS//5, ncols=5, figsize=(9, 9))
for img_idx in range(imgs.shape[0]):
img = np.sum(imgs[img_idx, :, :, :], axis=-1)
axs[np.unravel_index(img_idx, (NUM_IMGS//5, 5))].imshow(img)
axs[np.unravel_index(img_idx, (NUM_IMGS//5, 5))].axis('off')
axs[np.unravel_index(img_idx, (NUM_IMGS//5, 5))].set_title(
f'Should be: {int(np.argwhere(cond_input[img_idx, :] == 1))}'
)
plt.show()
noise = np.random.normal(size=(NUM_IMGS, LATENT_DIM), scale=1)
cond_input = np.zeros(shape=(NUM_IMGS, 10))
cond_input[:, 3] = 1.
dec_input = np.concatenate([noise, cond_input], axis=1).astype(np.float32)
imgs = conv_cvae.dec(dec_input).numpy()
fig, axs = plt.subplots(nrows=NUM_IMGS//5, ncols=5, figsize=(9, 9))
for img_idx in range(imgs.shape[0]):
img = np.sum(imgs[img_idx, :, :, :], axis=-1) > 0.5
axs[np.unravel_index(img_idx, (NUM_IMGS//5, 5))].imshow(img)
axs[np.unravel_index(img_idx, (NUM_IMGS//5, 5))].axis('off')
plt.show()
| 0.89522 | 0.952926 |
# Data Wrangler Feature Store Notebook
Use this notebook to create a feature group and add features to an offline or online
feature store using a Data Wrangler .flow file.
A single *feature* corresponds to a column in your dataset. A *feature group* is a predefined
schema for a collection of features - each feature in the feature group has a specified data
type and name. A single *record* in a feature group corresponds to a row in your dataframe.
A *feature store* is a collection of feature groups.
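As a quick illustration of these terms (hypothetical values, shown only to make the terminology concrete):
```
# A feature group is a schema: a list of named, typed features.
feature_definitions_example = [
    {"FeatureName": "review_id",   "FeatureType": "String"},    # also used as the record identifier
    {"FeatureName": "star_rating", "FeatureType": "Integral"},
]
# A record is one row of your dataframe, holding a value for each feature in the schema.
record_example = {"review_id": "R123", "star_rating": 5}
```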
This notebook uses Amazon SageMaker Feature Store (Feature Store) to create a feature group
and ingest data into feature store. To learn more about SageMaker Feature Store, see
[Amazon Feature Store Documentation](http://docs.aws.amazon.com/sagemaker/latest/dg/feature-store.html).
To create a feature group, you will create the following resources:
* A feature definition using a schema, record identifier, and event-time feature name.
* An online or offline store configuration.
You will use a processing job to process your data at scale and ingest the data into this feature group.
First, use the following cell to install dependencies.
```
# SageMaker Python SDK version 2.x is required
import sagemaker
import subprocess
import sys
original_version = sagemaker.__version__
if sagemaker.__version__ != "2.17.0":
subprocess.check_call(
[sys.executable, "-m", "pip", "install", "sagemaker==2.17.0"]
)
import importlib
importlib.reload(sagemaker)
import os
import uuid
import json
import time
import boto3
import sagemaker
```
## Parameters
The following lists configurable parameters that are used throughout this notebook.
```
# S3 bucket for saving processing job outputs
# Feel free to specify a different bucket here if you wish.
sess = sagemaker.Session()
bucket = sess.default_bucket()
prefix = "data_wrangler_flows"
flow_id = f"{time.strftime('%d-%H-%M-%S', time.gmtime())}-{str(uuid.uuid4())[:8]}"
flow_name = f"flow-{flow_id}"
flow_uri = f"s3://{bucket}/{prefix}/{flow_name}.flow"
flow_file_name = "workshop_antje.flow"
iam_role = sagemaker.get_execution_role()
container_uri = "663277389841.dkr.ecr.us-east-1.amazonaws.com/sagemaker-data-wrangler-container:1.0.2"
# Processing Job Resources Configurations
processing_job_name = f"data-wrangler-feature-store-processing-{flow_id}"
processing_dir = "/opt/ml/processing"
# URL to use for sagemaker client.
# If this is None, boto will automatically construct the appropriate URL to use
# when communicating with sagemaker.
sagemaker_endpoint_url = None
```
## Push Flow to S3
Use the following cell to upload the Data Wrangler .flow file to Amazon S3 so that
it can be used as an input to the processing job.
```
# Load .flow file
with open(flow_file_name) as f:
flow = json.load(f)
# Upload to S3
s3_client = boto3.client("s3")
s3_client.upload_file(flow_file_name, bucket, f"{prefix}/{flow_name}.flow")
print(f"Data Wrangler Flow notebook uploaded to {flow_uri}")
```
## Create Feature Group
```
feature_group_name = f'FG-{flow_name}'
print(f"Feature Group Name: {feature_group_name}")
```
The following cell maps types between Data Wrangler supported types and Feature Store
supported types (`String`, `Fractional`, and `Integral`). The default type is set to `String`.
This means that, if a column in your dataset is not a `float` or `long` type,
it will default to `String` in your Feature Store.
```
datawrangler_FG_type_mapping = {
'float': 'Fractional',
'long': 'Integral'
}
# Some schema types in Data Wrangler are not supported by Feature Store.
# Feature store supports String, Integral, and Fractional types.
# The following will create a default_FG_type set to String for these types.
default_FG_type = "String"
```
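As a quick sanity check (not part of the generated flow), the fallback behavior of the mapping defined above can be verified directly:
```
# 'float' and 'long' map to Feature Store numeric types; anything else falls back to String.
for dw_type in ["float", "long", "string", "date"]:
    print(dw_type, "->", datawrangler_FG_type_mapping.get(dw_type, default_FG_type))
# float -> Fractional, long -> Integral, string -> String, date -> String
```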
The following is a list of the column names and data types of the final dataset that will be produced
when your data flow is used to process your input dataset.
```
column_schema = [
{
"name": "marketplace",
"type": "string"
},
{
"name": "customer_id",
"type": "long"
},
{
"name": "review_id",
"type": "string"
},
{
"name": "product_id",
"type": "string"
},
{
"name": "product_parent",
"type": "long"
},
{
"name": "product_title",
"type": "string"
},
{
"name": "product_category",
"type": "string"
},
{
"name": "vine",
"type": "string"
},
{
"name": "verified_purchase",
"type": "string"
},
{
"name": "review_headline",
"type": "string"
},
{
"name": "review_body",
"type": "string"
},
{
"name": "review_date",
"type": "date"
},
{
"name": "star_rating",
"type": "long"
},
{
"name": "helpful_votes",
"type": "long"
},
{
"name": "total_votes",
"type": "long"
},
{
"name": "star_rating_scaled",
"type": "float"
},
{
"name": "star_rating_scaled_floored",
"type": "long"
},
{
"name": "review_date_iso",
"type": "string"
},
{
"name": "review_body_stripped",
"type": "string"
}
]
```
Select Record identifier and Event time feature name. These are required parameters for feature group
creation.
* **Record identifier name** is the name of the feature whose value uniquely identifies a Record
defined in the feature group's feature definitions.
* **Event time feature name** is the name of the EventTime of a Record in a FeatureGroup.
An EventTime is a point in time when a new event occurs that corresponds to the creation or update of a
Record in the FeatureGroup. All Records in the FeatureGroup must have a corresponding EventTime.
```
record_identifier_name = 'review_id'
if record_identifier_name is None:
raise RuntimeError("Select a column name as the feature group identifier.")
event_time_feature_name = 'review_date_iso'
if event_time_feature_name is None:
raise RuntimeError("Select a column name as the event time feature name.")
# Below you map the schema detected from Data Wrangler to Feature Group Types.
feature_definitions = [
{
"FeatureName": schema['name'],
"FeatureType": datawrangler_FG_type_mapping.get(
schema['type'],
default_FG_type
)
} for schema in column_schema
]
print(feature_definitions)
```
The following are your online and offline store configurations. You enable an online
store by setting `EnableOnlineStore` to `True`. The offline store is located in an
Amazon S3 bucket in your account. To update the bucket used, update the
parameter `bucket` in the second code cell in this notebook.
```
sagemaker_client = boto3.client("sagemaker", endpoint_url=sagemaker_endpoint_url)
# Online Store Configuration
online_store_config = {
"EnableOnlineStore": True
}
# Offline Store Configuration
s3_uri = 's3://' + bucket # this is the default bucket defined in previous cells
offline_store_config = {
"S3StorageConfig": {
"S3Uri": s3_uri
}
}
# Create Feature Group
create_fg_response = sagemaker_client.create_feature_group(
FeatureGroupName = feature_group_name,
EventTimeFeatureName = event_time_feature_name,
RecordIdentifierFeatureName = record_identifier_name,
FeatureDefinitions = feature_definitions,
OnlineStoreConfig = online_store_config,
OfflineStoreConfig = offline_store_config,
RoleArn = iam_role)
# Describe Feature Group
status = sagemaker_client.describe_feature_group(FeatureGroupName=feature_group_name)
while status['FeatureGroupStatus'] != 'Created':
if status['FeatureGroupStatus'] == 'CreateFailed':
raise RuntimeError(f"Feature Group Creation Failed: {status}")
status = sagemaker_client.describe_feature_group(FeatureGroupName=feature_group_name)
print("Feature Group Status: " + status['FeatureGroupStatus'])
time.sleep(3)
print(status)
```
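As an optional follow-up (a sketch only, using the standard Feature Store runtime client; `'R123'` is a placeholder identifier, not a value from this dataset), the online store can be queried by record identifier once the feature group is `Created` and records have been ingested:
```
# Query the online store for a single record by its record identifier value.
featurestore_runtime = boto3.client("sagemaker-featurestore-runtime")
record = featurestore_runtime.get_record(
    FeatureGroupName=feature_group_name,
    RecordIdentifierValueAsString="R123",  # placeholder review_id
)
print(record.get("Record", []))  # list of {FeatureName, ValueAsString} pairs
```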
Use the following code cell to define helper functions for creating inputs to
a processing job.
```
def create_flow_notebook_processing_input(base_dir, flow_s3_uri):
return {
"InputName": "flow",
"S3Input": {
"LocalPath": f"{base_dir}/flow",
"S3Uri": flow_s3_uri,
"S3DataType": "S3Prefix",
"S3InputMode": "File",
},
}
def create_s3_processing_input(base_dir, name, dataset_definition):
return {
"InputName": name,
"S3Input": {
"LocalPath": f"{base_dir}/{name}",
"S3Uri": dataset_definition["s3ExecutionContext"]["s3Uri"],
"S3DataType": "S3Prefix",
"S3InputMode": "File",
},
}
def create_redshift_processing_input(base_dir, name, dataset_definition):
return {
"InputName": name,
"DatasetDefinition": {
"RedshiftDatasetDefinition": {
"ClusterId": dataset_definition["clusterIdentifier"],
"Database": dataset_definition["database"],
"DbUser": dataset_definition["dbUser"],
"QueryString": dataset_definition["queryString"],
"ClusterRoleArn": dataset_definition["unloadIamRole"],
"OutputS3Uri": f'{dataset_definition["s3OutputLocation"]}{name}/',
"OutputFormat": dataset_definition["outputFormat"].upper(),
},
"LocalPath": f"{base_dir}/{name}",
},
}
def create_athena_processing_input(base_dir, name, dataset_definition):
return {
"InputName": name,
"DatasetDefinition": {
"AthenaDatasetDefinition": {
"Catalog": dataset_definition["catalogName"],
"Database": dataset_definition["databaseName"],
"QueryString": dataset_definition["queryString"],
"OutputS3Uri": f'{dataset_definition["s3OutputLocation"]}{name}/',
"OutputFormat": dataset_definition["outputFormat"].upper(),
},
"LocalPath": f"{base_dir}/{name}",
},
}
def create_processing_inputs(processing_dir, flow, flow_uri):
"""Helper function for creating processing inputs
:param flow: loaded data wrangler flow notebook
:param flow_uri: S3 URI of the data wrangler flow notebook
"""
processing_inputs = []
flow_processing_input = create_flow_notebook_processing_input(processing_dir, flow_uri)
processing_inputs.append(flow_processing_input)
for node in flow["nodes"]:
if "dataset_definition" in node["parameters"]:
data_def = node["parameters"]["dataset_definition"]
name = data_def["name"]
source_type = data_def["datasetSourceType"]
if source_type == "S3":
s3_processing_input = create_s3_processing_input(
processing_dir, name, data_def)
processing_inputs.append(s3_processing_input)
elif source_type == "Athena":
athena_processing_input = create_athena_processing_input(
processing_dir, name, data_def)
processing_inputs.append(athena_processing_input)
elif source_type == "Redshift":
redshift_processing_input = create_redshift_processing_input(
processing_dir, name, data_def)
processing_inputs.append(redshift_processing_input)
else:
raise ValueError(f"{source_type} is not supported for Data Wrangler Processing.")
return processing_inputs
```
## Start ProcessingJob
Now, the Processing Job is submitted to a boto client. The status of the processing job is
monitored with the boto client, and this notebook waits until the job is no longer 'InProgress'.
```
# Processing job name
print(f'Processing Job Name: {processing_job_name}')
processingResources = {
'ClusterConfig': {
'InstanceCount': 1,
'InstanceType': 'ml.m5.4xlarge',
'VolumeSizeInGB': 30
}
}
appSpecification = {'ImageUri': container_uri}
sagemaker_client.create_processing_job(
ProcessingInputs=create_processing_inputs(processing_dir, flow, flow_uri),
ProcessingOutputConfig={
'Outputs': [
{
'OutputName': '4d276eac-19de-4045-994d-180f18ba12d1.default',
'FeatureStoreOutput': {
'FeatureGroupName': feature_group_name
},
'AppManaged': True
}
],
},
ProcessingJobName=processing_job_name,
ProcessingResources=processingResources,
AppSpecification=appSpecification,
RoleArn=iam_role
)
status = sagemaker_client.describe_processing_job(ProcessingJobName=processing_job_name)
while status['ProcessingJobStatus'] in ('InProgress', 'Failed'):
if status['ProcessingJobStatus'] == 'Failed':
raise RuntimeError(f"Processing Job failed: {status}")
status = sagemaker_client.describe_processing_job(ProcessingJobName=processing_job_name)
print(status['ProcessingJobStatus'])
time.sleep(60)
print(status)
```
### Cleanup
Uncomment the following code cell to revert the SageMaker Python SDK to the original version used
before running this notebook. This notebook upgrades the SageMaker Python SDK to 2.x, which may
cause other example notebooks to break. To learn more about the changes introduced in the
SageMaker Python SDK 2.x update, see
[Use Version 2.x of the SageMaker Python SDK.](https://sagemaker.readthedocs.io/en/stable/v2.html).
```
# _ = subprocess.check_call(
# [sys.executable, "-m", "pip", "install", f"sagemaker=={original_version}"]
# )
```
|
github_jupyter
|
# SageMaker Python SDK version 2.x is required
import sagemaker
import subprocess
import sys
original_version = sagemaker.__version__
if sagemaker.__version__ != "2.17.0":
subprocess.check_call(
[sys.executable, "-m", "pip", "install", "sagemaker==2.17.0"]
)
import importlib
importlib.reload(sagemaker)
import os
import uuid
import json
import time
import boto3
import sagemaker
# S3 bucket for saving processing job outputs
# Feel free to specify a different bucket here if you wish.
sess = sagemaker.Session()
bucket = sess.default_bucket()
prefix = "data_wrangler_flows"
flow_id = f"{time.strftime('%d-%H-%M-%S', time.gmtime())}-{str(uuid.uuid4())[:8]}"
flow_name = f"flow-{flow_id}"
flow_uri = f"s3://{bucket}/{prefix}/{flow_name}.flow"
flow_file_name = "workshop_antje.flow"
iam_role = sagemaker.get_execution_role()
container_uri = "663277389841.dkr.ecr.us-east-1.amazonaws.com/sagemaker-data-wrangler-container:1.0.2"
# Processing Job Resources Configurations
processing_job_name = f"data-wrangler-feature-store-processing-{flow_id}"
processing_dir = "/opt/ml/processing"
# URL to use for sagemaker client.
# If this is None, boto will automatically construct the appropriate URL to use
# when communicating with sagemaker.
sagemaker_endpoint_url = None
# Load .flow file
with open(flow_file_name) as f:
flow = json.load(f)
# Upload to S3
s3_client = boto3.client("s3")
s3_client.upload_file(flow_file_name, bucket, f"{prefix}/{flow_name}.flow")
print(f"Data Wrangler Flow notebook uploaded to {flow_uri}")
feature_group_name = f'FG-{flow_name}'
print(f"Feature Group Name: {feature_group_name}")
datawrangler_FG_type_mapping = {
'float': 'Fractional',
'long': 'Integral'
}
# Some schema types in Data Wrangler are not supported by Feature Store.
# Feature store supports String, Integral, and Fractional types.
# The following will create a default_FG_type set to String for these types.
default_FG_type = "String"
column_schema = [
{
"name": "marketplace",
"type": "string"
},
{
"name": "customer_id",
"type": "long"
},
{
"name": "review_id",
"type": "string"
},
{
"name": "product_id",
"type": "string"
},
{
"name": "product_parent",
"type": "long"
},
{
"name": "product_title",
"type": "string"
},
{
"name": "product_category",
"type": "string"
},
{
"name": "vine",
"type": "string"
},
{
"name": "verified_purchase",
"type": "string"
},
{
"name": "review_headline",
"type": "string"
},
{
"name": "review_body",
"type": "string"
},
{
"name": "review_date",
"type": "date"
},
{
"name": "star_rating",
"type": "long"
},
{
"name": "helpful_votes",
"type": "long"
},
{
"name": "total_votes",
"type": "long"
},
{
"name": "star_rating_scaled",
"type": "float"
},
{
"name": "star_rating_scaled_floored",
"type": "long"
},
{
"name": "review_date_iso",
"type": "string"
},
{
"name": "review_body_stripped",
"type": "string"
}
]
record_identifier_name = 'review_id'
if record_identifier_name is None:
raise RuntimeError("Select a column name as the feature group identifier.")
event_time_feature_name = 'review_date_iso'
if event_time_feature_name is None:
raise RuntimeError("Select a column name as the event time feature name.")
# Below you map the schema detected from Data Wrangler to Feature Group Types.
feature_definitions = [
{
"FeatureName": schema['name'],
"FeatureType": datawrangler_FG_type_mapping.get(
schema['type'],
default_FG_type
)
} for schema in column_schema
]
print(feature_definitions)
sagemaker_client = boto3.client("sagemaker", endpoint_url=sagemaker_endpoint_url)
# Online Store Configuration
online_store_config = {
"EnableOnlineStore": True
}
# Offline Store Configuration
s3_uri = 's3://' + bucket # this is the default bucket defined in previous cells
offline_store_config = {
"S3StorageConfig": {
"S3Uri": s3_uri
}
}
# Create Feature Group
create_fg_response = sagemaker_client.create_feature_group(
FeatureGroupName = feature_group_name,
EventTimeFeatureName = event_time_feature_name,
RecordIdentifierFeatureName = record_identifier_name,
FeatureDefinitions = feature_definitions,
OnlineStoreConfig = online_store_config,
OfflineStoreConfig = offline_store_config,
RoleArn = iam_role)
# Describe Feature Group
status = sagemaker_client.describe_feature_group(FeatureGroupName=feature_group_name)
while status['FeatureGroupStatus'] != 'Created':
if status['FeatureGroupStatus'] == 'CreateFailed':
raise RuntimeError(f"Feature Group Creation Failed: {status}")
status = sagemaker_client.describe_feature_group(FeatureGroupName=feature_group_name)
print("Feature Group Status: " + status['FeatureGroupStatus'])
time.sleep(3)
print(status)
def create_flow_notebook_processing_input(base_dir, flow_s3_uri):
return {
"InputName": "flow",
"S3Input": {
"LocalPath": f"{base_dir}/flow",
"S3Uri": flow_s3_uri,
"S3DataType": "S3Prefix",
"S3InputMode": "File",
},
}
def create_s3_processing_input(base_dir, name, dataset_definition):
return {
"InputName": name,
"S3Input": {
"LocalPath": f"{base_dir}/{name}",
"S3Uri": dataset_definition["s3ExecutionContext"]["s3Uri"],
"S3DataType": "S3Prefix",
"S3InputMode": "File",
},
}
def create_redshift_processing_input(base_dir, name, dataset_definition):
return {
"InputName": name,
"DatasetDefinition": {
"RedshiftDatasetDefinition": {
"ClusterId": dataset_definition["clusterIdentifier"],
"Database": dataset_definition["database"],
"DbUser": dataset_definition["dbUser"],
"QueryString": dataset_definition["queryString"],
"ClusterRoleArn": dataset_definition["unloadIamRole"],
"OutputS3Uri": f'{dataset_definition["s3OutputLocation"]}{name}/',
"OutputFormat": dataset_definition["outputFormat"].upper(),
},
"LocalPath": f"{base_dir}/{name}",
},
}
def create_athena_processing_input(base_dir, name, dataset_definition):
return {
"InputName": name,
"DatasetDefinition": {
"AthenaDatasetDefinition": {
"Catalog": dataset_definition["catalogName"],
"Database": dataset_definition["databaseName"],
"QueryString": dataset_definition["queryString"],
"OutputS3Uri": f'{dataset_definition["s3OutputLocation"]}{name}/',
"OutputFormat": dataset_definition["outputFormat"].upper(),
},
"LocalPath": f"{base_dir}/{name}",
},
}
def create_processing_inputs(processing_dir, flow, flow_uri):
"""Helper function for creating processing inputs
:param flow: loaded data wrangler flow notebook
:param flow_uri: S3 URI of the data wrangler flow notebook
"""
processing_inputs = []
flow_processing_input = create_flow_notebook_processing_input(processing_dir, flow_uri)
processing_inputs.append(flow_processing_input)
for node in flow["nodes"]:
if "dataset_definition" in node["parameters"]:
data_def = node["parameters"]["dataset_definition"]
name = data_def["name"]
source_type = data_def["datasetSourceType"]
if source_type == "S3":
s3_processing_input = create_s3_processing_input(
processing_dir, name, data_def)
processing_inputs.append(s3_processing_input)
elif source_type == "Athena":
athena_processing_input = create_athena_processing_input(
processing_dir, name, data_def)
processing_inputs.append(athena_processing_input)
elif source_type == "Redshift":
redshift_processing_input = create_redshift_processing_input(
processing_dir, name, data_def)
processing_inputs.append(redshift_processing_input)
else:
raise ValueError(f"{source_type} is not supported for Data Wrangler Processing.")
return processing_inputs
# Processing job name
print(f'Processing Job Name: {processing_job_name}')
processingResources = {
'ClusterConfig': {
'InstanceCount': 1,
'InstanceType': 'ml.m5.4xlarge',
'VolumeSizeInGB': 30
}
}
appSpecification = {'ImageUri': container_uri}
sagemaker_client.create_processing_job(
ProcessingInputs=create_processing_inputs(processing_dir, flow, flow_uri),
ProcessingOutputConfig={
'Outputs': [
{
'OutputName': '4d276eac-19de-4045-994d-180f18ba12d1.default',
'FeatureStoreOutput': {
'FeatureGroupName': feature_group_name
},
'AppManaged': True
}
],
},
ProcessingJobName=processing_job_name,
ProcessingResources=processingResources,
AppSpecification=appSpecification,
RoleArn=iam_role
)
status = sagemaker_client.describe_processing_job(ProcessingJobName=processing_job_name)
while status['ProcessingJobStatus'] in ('InProgress', 'Failed'):
if status['ProcessingJobStatus'] == 'Failed':
raise RuntimeError(f"Processing Job failed: {status}")
status = sagemaker_client.describe_processing_job(ProcessingJobName=processing_job_name)
print(status['ProcessingJobStatus'])
time.sleep(60)
print(status)
# _ = subprocess.check_call(
# [sys.executable, "-m", "pip", "install", f"sagemaker=={original_version}"]
# )
| 0.363873 | 0.939415 |
```
%load_ext autoreload
%autoreload 2
import logging
logging.basicConfig(format="%(asctime)s [%(process)d] %(levelname)-8s "
"%(name)s,%(lineno)s\t%(message)s")
logging.getLogger().setLevel('INFO')
%matplotlib inline
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from tqdm import tqdm_notebook as tqdm
# Read information to connect to the database and put it in environment variables
import os
with open('../ENVVARS.txt') as f:
for line in f:
parts = line.split('=')
if len(parts) == 2:
os.environ[parts[0]] = parts[1].strip()
db_name = 'ticclat_wikipedia'
os.environ['dbname'] = db_name
from ticclat.ticclat_schema import Lexicon, Wordform, Anahash, Document, Corpus
from ticclat.dbutils import get_session, session_scope
Session = get_session(os.environ['user'], os.environ['password'], os.environ['dbname'])
with session_scope(Session) as session:
print('number of wordforms:', session.query(Wordform).count())
print('number of lexica:', session.query(Lexicon).count())
print('number of documents:', session.query(Document).count())
print('number of corpora:', session.query(Corpus).count())
# note: must install nltk for this! This used to be in ticclat.tokenize, but it was no longer used anywhere but in this notebook, so we took it out of the package dependencies. Note that it is also still used in some tests, but we have a separate utility function in the tests directory for that.
import nltk.data
from nltk import word_tokenize
from itertools import chain  # needed below for list(chain(*tokens))
def nltk_tokenize(texts_file, punkt='tokenizers/punkt/dutch.pickle'):
"""
Inputs:
texts_file (str): File name of a file that contains the texts. This
should contain one document per line.
punkt (str): Path to the nltk punctuation data to be used.
Yields:
Counter: term-frequency vector representing a document.
"""
nltk.download('punkt')
tokenizer = nltk.data.load(punkt)
with open(texts_file) as f:
for line in f:
tokens = [word_tokenize(sent)
for sent in tokenizer.tokenize(line.strip())]
yield list(chain(*tokens))
%%time
# Ingest wikipedia dump as corpus
import os
from tqdm import tqdm_notebook as tqdm
from ticclat.utils import get_temp_file, write_json_lines, read_json_lines
from ticclat.tokenize import terms_documents_matrix_word_lists
from ticclat.sacoreutils import add_corpus_core
wiki = '/home/jvdzwaan/data/tmp/nlwiki'
corpus_name = 'nlwiki-20190201-pages-articles-complete'
print('Tokenizing corpus')
tokenized_file = '/home/jvdzwaan/data/tmp/nlwiki-json_lines'
num_documents = write_json_lines(tokenized_file, tqdm(nltk_tokenize(wiki)))
%%time
from ticclat.tokenize import terms_documents_matrix_word_lists
print('Creating the terms/document matrix')
documents_iterator = read_json_lines(tokenized_file)
corpus_m, v = terms_documents_matrix_word_lists(documents_iterator)
os.remove(tokenized_file)
%%time
wfs = pd.DataFrame()
wfs['wordform'] = v.vocabulary_
document_metadata = pd.DataFrame()
document_metadata['language'] = ['nl' for i in range(num_documents)]
document_metadata['pub_year'] = 2019
# More metadata?
with session_scope(Session) as session:
add_corpus_core(session, corpus_m, v, corpus_name, document_metadata)
```
|
github_jupyter
|
%load_ext autoreload
%autoreload 2
import logging
logging.basicConfig(format="%(asctime)s [%(process)d] %(levelname)-8s "
"%(name)s,%(lineno)s\t%(message)s")
logging.getLogger().setLevel('INFO')
%matplotlib inline
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from tqdm import tqdm_notebook as tqdm
# Read information to connect to the database and put it in environment variables
import os
with open('../ENVVARS.txt') as f:
for line in f:
parts = line.split('=')
if len(parts) == 2:
os.environ[parts[0]] = parts[1].strip()
db_name = 'ticclat_wikipedia'
os.environ['dbname'] = db_name
from ticclat.ticclat_schema import Lexicon, Wordform, Anahash, Document, Corpus
from ticclat.dbutils import get_session, session_scope
Session = get_session(os.environ['user'], os.environ['password'], os.environ['dbname'])
with session_scope(Session) as session:
print('number of wordforms:', session.query(Wordform).count())
print('number of lexica:', session.query(Lexicon).count())
print('number of documents:', session.query(Document).count())
print('number of corpora:', session.query(Corpus).count())
# note: must install nltk for this! This used to be in ticclat.tokenize, but it was no longer used anywhere but in this notebook, so we took it out of the package dependencies. Note that it is also still used in some tests, but we have a separate utility function in the tests directory for that.
import nltk.data
from nltk import word_tokenize
from itertools import chain  # needed below for list(chain(*tokens))
def nltk_tokenize(texts_file, punkt='tokenizers/punkt/dutch.pickle'):
"""
Inputs:
texts_file (str): File name of a file that contains the texts. This
should contain one document per line.
punkt (str): Path to the nltk punctuation data to be used.
Yields:
Counter: term-frequency vector representing a document.
"""
nltk.download('punkt')
tokenizer = nltk.data.load(punkt)
with open(texts_file) as f:
for line in f:
tokens = [word_tokenize(sent)
for sent in tokenizer.tokenize(line.strip())]
yield list(chain(*tokens))
%%time
# Ingest wikipedia dump as corpus
import os
from tqdm import tqdm_notebook as tqdm
from ticclat.utils import get_temp_file, write_json_lines, read_json_lines
from ticclat.tokenize import terms_documents_matrix_word_lists
from ticclat.sacoreutils import add_corpus_core
wiki = '/home/jvdzwaan/data/tmp/nlwiki'
corpus_name = 'nlwiki-20190201-pages-articles-complete'
print('Tokenizing corpus')
tokenized_file = '/home/jvdzwaan/data/tmp/nlwiki-json_lines'
num_documents = write_json_lines(tokenized_file, tqdm(nltk_tokenize(wiki)))
%%time
from ticclat.tokenize import terms_documents_matrix_word_lists
print('Creating the terms/document matrix')
documents_iterator = read_json_lines(tokenized_file)
corpus_m, v = terms_documents_matrix_word_lists(documents_iterator)
os.remove(tokenized_file)
%%time
wfs = pd.DataFrame()
wfs['wordform'] = v.vocabulary_
document_metadata = pd.DataFrame()
document_metadata['language'] = ['nl' for i in range(num_documents)]
document_metadata['pub_year'] = 2019
# More metadata?
with session_scope(Session) as session:
add_corpus_core(session, corpus_m, v, corpus_name, document_metadata)
| 0.322099 | 0.1881 |
# Fixing problematic df with sub dataframe in text
Copyright (C) 2021 ServiceNow, Inc.
One particular dataframe has the same text in every row, and that text is itself a serialized dataframe containing the text from *every* row.
Here, we examine that data point, fix it, and overwrite the file. The original is saved in /replaced.
**NOTE:** This notebook is not reproducible, since it has already altered the files that caused the error.
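The idea of the fix, shown on a small hypothetical example (toy data only, not the real file), is to re-parse the embedded text and strip the leading row index from each line; the cells below do the same thing on the real file with `StringIO` and `pd.read_csv`:
```
import pandas as pd

# Toy stand-in for a "text" cell that actually contains a printed dataframe.
embedded = "Unnamed: 0\n0 first line of text\n1 second line of text"
lines = embedded.splitlines()[1:]                  # drop the printed header row
recovered = [ln.split(" ", 1)[1] for ln in lines]  # strip the leading row index
fixed = pd.DataFrame({"text": recovered})
print(fixed["text"].tolist())                      # ['first line of text', 'second line of text']
```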
```
vv = v.droplevel(level=0, axis=1).reset_index().groupby(['id1', 'file'])['sum'].sum().to_frame()
vv = df[df.nchars_stripped_nocid > 17500].iloc[0,:].text
len(re.sub('\s', '', vv))
vv.value_counts()
vv.value_counts()
df[df.file.str.contains('215068_GSC_B578.pdfminer_split.txt')]
dff = pd.read_csv('/nrcan_p2/data/02_intermediate/20201006/geoscan/pdf/v1_all/299332.pdfminer_split.csv', dtype={'text': object})
dff.dtypes
'texts' in dff.columns
df[df.file.str.contains('215068_GSC_B578.pdfminer_split.txt')]
vv = df[df.file.str.contains('timeout')]
vv[vv.text.str.contains('Unnamed: 0')].file.unique()
text = vv[vv.file == '/nrcan_p2/data/02_intermediate/20201117/geoscan/pdf/generic_pdfs_timeout_20201218/215068_GSC_B578.pdfminer_split.txt'].text.iloc[0]
from io import StringIO
stext = StringIO(text)
ss = pd.read_csv(stext, sep='\n')
ss['splitlen'] = ss['Unnamed: 0'].str.strip().str.split(' ', 1).str.len()
#s['Unnamed: 0'].str.split(' ', 1).str[1]
ss['Unnamed: 0'].iloc[0][-10:]
ss['text'] = ss['Unnamed: 0'].str.split(' ', 1).str[1]
ss
fixed_df = vv[vv.file == '/nrcan_p2/data/02_intermediate/20201117/geoscan/pdf/generic_pdfs_timeout_20201218/215068_GSC_B578.pdfminer_split.txt']
df.loc[df.file == '/nrcan_p2/data/02_intermediate/20201117/geoscan/pdf/generic_pdfs_timeout_20201218/215068_GSC_B578.pdfminer_split.txt','text'] = fixed_df.text_fixed.values
df.loc[df.file == '/nrcan_p2/data/02_intermediate/20201117/geoscan/pdf/generic_pdfs_timeout_20201218/215068_GSC_B578.pdfminer_split.txt','text']
fixed_df['text_fixed'] = ss.text.values
fixed_df
vvv = df[df.file == '/nrcan_p2/data/02_intermediate/20201117/geoscan/pdf/generic_pdfs_timeout_20201218/209578_bu_516.pdfminer_split.txt']
vvv  # inspect the rows for this file
```
## The Solution
```
#dff = df[df.file.str.contains('215068_GSC_B578.pdfminer_split.txt')][['obj_type', 'pg','pos_x0', 'pos_y0', 'pos_x1', 'pos_y1', 'text', 'file']]
#dff.to_csv('/nrcan_p2/data/02_intermediate/20201117/geoscan/pdf/generic_pdfs_all/215068_GSC_B578.pdfminer_split.csv')
dff = pd.read_csv('/nrcan_p2/data/02_intermediate/20201117/geoscan/pdf/generic_pdfs_all/215068_GSC_B578.pdfminer_split.csv', index_col=[0])
dff
```
|
github_jupyter
|
vv = v.droplevel(level=0, axis=1).reset_index().groupby(['id1', 'file'])['sum'].sum().to_frame()
vv = df[df.nchars_stripped_nocid > 17500].iloc[0,:].text
len(re.sub('\s', '', vv))
vv.value_counts()
vv.value_counts()
df[df.file.str.contains('215068_GSC_B578.pdfminer_split.txt')]
dff = pd.read_csv('/nrcan_p2/data/02_intermediate/20201006/geoscan/pdf/v1_all/299332.pdfminer_split.csv', dtype={'text': object})
dff.dtypes
'texts' in dff.columns
df[df.file.str.contains('215068_GSC_B578.pdfminer_split.txt')]
vv = df[df.file.str.contains('timeout')]
vv[vv.text.str.contains('Unnamed: 0')].file.unique()
text = vv[vv.file == '/nrcan_p2/data/02_intermediate/20201117/geoscan/pdf/generic_pdfs_timeout_20201218/215068_GSC_B578.pdfminer_split.txt'].text.iloc[0]
from io import StringIO
stext = StringIO(text)
ss = pd.read_csv(stext, sep='\n')
ss['splitlen'] = ss['Unnamed: 0'].str.strip().str.split(' ', 1).str.len()
#s['Unnamed: 0'].str.split(' ', 1).str[1]
ss['Unnamed: 0'].iloc[0][-10:]
ss['text'] = ss['Unnamed: 0'].str.split(' ', 1).str[1]
ss
fixed_df = vv[vv.file == '/nrcan_p2/data/02_intermediate/20201117/geoscan/pdf/generic_pdfs_timeout_20201218/215068_GSC_B578.pdfminer_split.txt']
df.loc[df.file == '/nrcan_p2/data/02_intermediate/20201117/geoscan/pdf/generic_pdfs_timeout_20201218/215068_GSC_B578.pdfminer_split.txt','text'] = fixed_df.text_fixed.values
df.loc[df.file == '/nrcan_p2/data/02_intermediate/20201117/geoscan/pdf/generic_pdfs_timeout_20201218/215068_GSC_B578.pdfminer_split.txt','text']
fixed_df['text_fixed'] = ss.text.values
fixed_df
vvv = df[df.file == '/nrcan_p2/data/02_intermediate/20201117/geoscan/pdf/generic_pdfs_timeout_20201218/209578_bu_516.pdfminer_split.txt']
vvv  # inspect the rows for this file
#dff = df[df.file.str.contains('215068_GSC_B578.pdfminer_split.txt')][['obj_type', 'pg','pos_x0', 'pos_y0', 'pos_x1', 'pos_y1', 'text', 'file']]
#dff.to_csv('/nrcan_p2/data/02_intermediate/20201117/geoscan/pdf/generic_pdfs_all/215068_GSC_B578.pdfminer_split.csv')
dff = pd.read_csv('/nrcan_p2/data/02_intermediate/20201117/geoscan/pdf/generic_pdfs_all/215068_GSC_B578.pdfminer_split.csv', index_col=[0])
dff
| 0.15876 | 0.708238 |
# This is the notebook to preprocess the data.
```
%load_ext autoreload
%autoreload 2
import sys
sys.path.append("..")
from acse_9_irp_wafflescore import dataPreprocessing as dp
from scipy.ndimage import gaussian_filter
import numpy as np
import logging
import sys
logging.basicConfig(format='%(asctime)s | %(levelname)s : %(message)s',
level=logging.INFO, stream=sys.stdout)
```
The user can specify which data files/model to use here; the if/else statements are used for easier management.
For future usage, additional model names and input files can be added.
The input files in this stage were generated by Dr. Michele Paulatto; the scripts used to generate them are located in the Synthetic Model folder of the GitHub repository.
```
# name of the model for easier reference
model = 'M1'
# the title of each column's variable
col_name = ['vp', 'vs', 'dn', 'vp/vs', 'qp', 'qs', 'x', 'z']
# load needed files
if(model == 'M1'):
# Original Earth Model
input_npz = np.load('../Synthetic Model/input_fields.npz')
output_smooth_npz = np.load('../Synthetic Model/output_fields_smooth.npz')
output_npz = np.load('../Synthetic Model/output_fields.npz')
elif(model == 'M5a'):
# Simplified Earth Model
input_npz = np.load('../Synthetic Model/Model5a/input_fields.npz')
output_smooth_npz = np.load('../Synthetic Model/Model5a/output_fields_smooth.npz')
output_npz = np.load('../Synthetic Model/Model5a/output_fields.npz')
elif(model == 'M5b'):
# Simplified Earth Model -- less temperature anomaly
input_npz = np.load('../Synthetic Model/Model5b/input_fields.npz')
output_smooth_npz = np.load('../Synthetic Model/Model5b/output_fields_smooth.npz')
output_npz = np.load('../Synthetic Model/Model5b/output_fields.npz')
else:
# invalid model
print('Invalid model', model)
# convert npz into 1d, 2d numpy
init_label = dp.convLabel(input_npz['classes'])
init_data = dp.convData(output_smooth_npz)
# remove water and perform data preprocessing
water_idx = np.where(init_label == 0)
label = np.delete(init_label, water_idx)
data = np.delete(init_data, water_idx, axis=0)
fdir = '../data/' + model + '_init_data.npy'
np.save(fdir, data)
logging.info('Initial Data as numpy saved at: %s' % fdir)
fdir = '../data/' + model + '_init_label.npy'
np.save(fdir, label)
logging.info('Initial label as numpy saved at: %s' % fdir)
data = dp.data_cleanup(data, col_name, re_inf=-9999)
logging.debug("Water removed shape: (%d, %d)" %
(data.shape[0], data.shape[1]))
if (model):
fdir = '../data/' + model + '_clean_data.npy'
np.save(fdir, data)
logging.info('Data saved at: %s' % fdir)
fdir = '../data/' + model + '_data_label.npy'
np.save(fdir, label)
logging.info('Data label saved at: %s' % fdir)
fdir = '../data/' + model + '_xz_pos.npy'
np.save(fdir, data[:, -2:])
logging.info('XZ positions saved at: %s' % fdir)
```
|
github_jupyter
|
%load_ext autoreload
%autoreload 2
import sys
sys.path.append("..")
from acse_9_irp_wafflescore import dataPreprocessing as dp
from scipy.ndimage import gaussian_filter
import numpy as np
import logging
import sys
logging.basicConfig(format='%(asctime)s | %(levelname)s : %(message)s',
level=logging.INFO, stream=sys.stdout)
# name of the model for easier reference
model = 'M1'
# the title of each column's variable
col_name = ['vp', 'vs', 'dn', 'vp/vs', 'qp', 'qs', 'x', 'z']
# load needed files
if(model == 'M1'):
# Original Earth Model
input_npz = np.load('../Synthetic Model/input_fields.npz')
output_smooth_npz = np.load('../Synthetic Model/output_fields_smooth.npz')
output_npz = np.load('../Synthetic Model/output_fields.npz')
elif(model == 'M5a'):
# Simplified Earth Model
input_npz = np.load('../Synthetic Model/Model5a/input_fields.npz')
output_smooth_npz = np.load('../Synthetic Model/Model5a/output_fields_smooth.npz')
output_npz = np.load('../Synthetic Model/Model5a/output_fields.npz')
elif(model == 'M5b'):
# Simplified Earth Model -- less temperature anomaly
input_npz = np.load('../Synthetic Model/Model5b/input_fields.npz')
output_smooth_npz = np.load('../Synthetic Model/Model5b/output_fields_smooth.npz')
output_npz = np.load('../Synthetic Model/Model5b/output_fields.npz')
else:
# invalid model
print('Invalid model', model)
# convert npz into 1d, 2d numpy
init_label = dp.convLabel(input_npz['classes'])
init_data = dp.convData(output_smooth_npz)
# remove water and perform data preprocessing
water_idx = np.where(init_label == 0)
label = np.delete(init_label, water_idx)
data = np.delete(init_data, water_idx, axis=0)
fdir = '../data/' + model + '_init_data.npy'
np.save(fdir, data)
logging.info('Initial Data as numpy saved at: %s' % fdir)
fdir = '../data/' + model + '_init_label.npy'
np.save(fdir, label)
logging.info('Initial label as numpy saved at: %s' % fdir)
data = dp.data_cleanup(data, col_name, re_inf=-9999)
logging.debug("Water removed shape: (%d, %d)" %
(data.shape[0], data.shape[1]))
if (model):
fdir = '../data/' + model + '_clean_data.npy'
np.save(fdir, data)
logging.info('Data saved at: %s' % fdir)
fdir = '../data/' + model + '_data_label.npy'
np.save(fdir, label)
logging.info('Data label saved at: %s' % fdir)
fdir = '../data/' + model + '_xz_pos.npy'
np.save(fdir, data[:, -2:])
logging.info('XZ positions saved at: %s' % fdir)
| 0.305594 | 0.68822 |
# Chapter 1- Preparing the data for analysis
## Before beginning your analysis, it is critical that you first examine and clean the dataset, to make working with it a more efficient process. In this chapter, you will practice fixing data types, handling missing values, and dropping columns and rows while learning about the Stanford Open Policing Project dataset.
### Examining the dataset
Throughout this course, you'll be analyzing a dataset of traffic stops in Rhode Island that was collected by the [Stanford Open Policing Project.](https://openpolicing.stanford.edu/)
Before beginning your analysis, it's important that you familiarize yourself with the dataset. In this exercise, you'll read the dataset into pandas, examine the first few rows, and then count the number of missing values.
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = 'all'
# Read 'police.csv' into a DataFrame named ri
ri = pd.read_csv('police.csv')
# Examine the head of the DataFrame
ri.head()
# Count the number of missing values in each column
ri.isnull().sum()
```
### Dropping columns
Often, a DataFrame will contain columns that are not useful to your analysis. Such columns should be dropped from the DataFrame, to make it easier for you to focus on the remaining columns.
In this exercise, you'll drop the county_name column because it only contains missing values, and you'll drop the state column because all of the traffic stops took place in one state (Rhode Island). Thus, these columns can be dropped because they contain no useful information.
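You can verify both claims before dropping (a quick optional check, assuming the `ri` DataFrame loaded above):
```
# 'county_name' should be entirely missing, and 'state' should contain a single value
print(ri.county_name.isnull().all())
print(ri.state.nunique())
```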
```
# Examine the shape of the DataFrame
ri.shape
# Drop the 'county_name' and 'state' columns
ri.drop(['county_name', 'state'], axis='columns', inplace=True)
# Examine the shape of the DataFrame (again)
ri.shape
```
### Dropping rows
When you know that a specific column will be critical to your analysis, and only a small fraction of rows are missing a value in that column, it often makes sense to remove those rows from the dataset.
During this course, the driver_gender column will be critical to many of your analyses. Because only a small fraction of rows are missing driver_gender, we'll drop those rows from the dataset.
```
# Drop all rows that are missing 'driver_gender'
ri.dropna(subset=['driver_gender'], inplace=True)
# Count the number of missing values in each column (again)
ri.isnull().sum()
# Examine the shape of the DataFrame
ri.shape
```
```
ri.dtypes
```
### Fixing a data type
We saw in the previous exercise that the is_arrested column currently has the object data type. In this exercise, we'll change the data type to bool, which is the most suitable type for a column containing True and False values.
Fixing the data type will enable us to use mathematical operations on the is_arrested column that would not be possible otherwise.
```
# Examine the head of the 'is_arrested' column
ri.is_arrested.head()
# Check the data type of 'is_arrested'
ri.is_arrested.dtype
# Change the data type of 'is_arrested' to 'bool'
ri['is_arrested'] = ri.is_arrested.astype('bool')
# Check the data type of 'is_arrested' (again)
ri.is_arrested.dtype
```
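Now that `is_arrested` is boolean, arithmetic works directly on the column (True counts as 1, False as 0); a minimal illustration:
```
# The sum is the number of arrests; the mean is the overall arrest rate
ri.is_arrested.sum()
ri.is_arrested.mean()
```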
### Combining object columns
Currently, the date and time of each traffic stop are stored in separate object columns: stop_date and stop_time.
In this exercise, you'll combine these two columns into a single column, and then convert it to datetime format. This will enable convenient date-based attributes that we'll use later in the course.
```
# Concatenate 'stop_date' and 'stop_time' (separated by a space)
combined = ri.stop_date.str.cat(ri.stop_time, sep = ' ')
# Convert 'combined' to datetime format
ri['stop_datetime'] = pd.to_datetime(combined)
# Examine the data types of the DataFrame
ri.dtypes
```
### Setting the index
The last step that you'll take in this chapter is to set the stop_datetime column as the DataFrame's index. By replacing the default index with a DatetimeIndex, you'll make it easier to analyze the dataset by date and time, which will come in handy later in the course!
```
# Set 'stop_datetime' as the index
ri.set_index('stop_datetime', inplace=True)
# Examine the index
ri.index
# Examine the columns
ri.columns
```
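With the DatetimeIndex in place, date-based access becomes straightforward; a small illustrative sketch (the `.hour` attribute is used again in Chapter 3):
```
# Convenient date-based attributes and partial-string slicing on a DatetimeIndex
ri.index.hour      # hour of each stop (0-23)
ri.loc['2012']     # all stops recorded in 2012 (partial-string indexing)
```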
#### Congratulations! Now that you have cleaned the dataset, you can begin analyzing it in the next chapter
---
# Chapter 2 - Exploring the relationship between gender and policing
## Does the gender of a driver have an impact on police behavior during a traffic stop? In this chapter, you will explore that question while practicing filtering, grouping, method chaining, Boolean math, string methods, and more!
### Examining traffic violations
Before comparing the violations being committed by each gender, you should examine the violations committed by all drivers to get a baseline understanding of the data.
In this exercise, you'll count the unique values in the violation column, and then separately express those counts as proportions
```
# Count the unique values in 'violation'
ri.violation.value_counts()
# Express the counts as proportions
ri.violation.value_counts(normalize = True)
```
### Comparing violations by gender
The question we're trying to answer is whether male and female drivers tend to commit different types of traffic violations.
In this exercise, you'll first create a DataFrame for each gender, and then analyze the violations in each DataFrame separately.
```
# Create a DataFrame of female drivers
female = ri[ri['driver_gender'] == 'F']
# Create a DataFrame of male drivers
male = ri[ri['driver_gender'] == 'M']
# Compute the violations by female drivers (as proportions)
female.violation.value_counts(normalize = True)
# Compute the violations by male drivers (as proportions)
male.violation.value_counts(normalize = True)
```
## Does gender affect who gets a ticket for speeding?
### Comparing speeding outcomes by gender
When a driver is pulled over for speeding, many people believe that gender has an impact on whether the driver will receive a ticket or a warning. Can you find evidence of this in the dataset?
First, you'll create two DataFrames of drivers who were stopped for speeding: one containing females and the other containing males.
Then, for each gender, you'll use the stop_outcome column to calculate what percentage of stops resulted in a "Citation" (meaning a ticket) versus a "Warning".
```
ri.head()
# Create a DataFrame of female drivers stopped for speeding
female_and_speeding = ri[(ri['driver_gender'] == 'F') & (ri['violation'] == 'Speeding')]
# Create a DataFrame of male drivers stopped for speeding
male_and_speeding = ri[(ri['driver_gender'] == 'M') & (ri['violation'] == 'Speeding')]
# Compute the stop outcomes for female drivers (as proportions)
female_and_speeding.stop_outcome.value_counts(normalize = True)
# Compute the stop outcomes for male drivers (as proportions)
male_and_speeding.stop_outcome.value_counts(normalize = True)
```
## Does gender affect whose vehicle is searched?
### Calculating the search rate
During a traffic stop, the police officer sometimes conducts a search of the vehicle. In this exercise, you'll calculate the percentage of all stops that result in a vehicle search, also known as the search rate.
```
# Check the data type of 'search_conducted'
ri.search_conducted.dtype
# Calculate the search rate by counting the values
ri.search_conducted.value_counts(normalize = True)
# Calculate the search rate by taking the mean of the Series. (It should match the proportion of True values calculated above.)
ri.search_conducted.mean()
```
### Comparing search rates by gender
In this exercise, you'll compare the rates at which female and male drivers are searched during a traffic stop. Remember that the vehicle search rate across all stops is about 3.8%.
First, you'll filter the DataFrame by gender and calculate the search rate for each group separately. Then, you'll perform the same calculation for both genders at once using a .groupby()
```
# Calculate the search rate for female drivers
ri[ri['driver_gender'] == 'F'].search_conducted.mean()
# Calculate the search rate for male drivers
ri[ri['driver_gender'] == 'M'].search_conducted.mean()
# Calculate the search rate for both groups simultaneously
ri.groupby('driver_gender').search_conducted.mean()
```
### Adding a second factor to the analysis
Even though the search rate for males is much higher than for females, it's possible that the difference is mostly due to a second factor.
For example, you might hypothesize that the search rate varies by violation type, and the difference in search rate between males and females is because they tend to commit different violations.
You can test this hypothesis by examining the search rate for each combination of gender and violation. If the hypothesis was true, you would find that males and females are searched at about the same rate for each violation. Find out below if that's the case!
```
# Calculate the search rate for each combination of gender and violation
print(ri.groupby(['driver_gender', 'violation']).search_conducted.mean())
# Reverse the ordering to group by violation before gender
print(ri.groupby(['violation', 'driver_gender']).search_conducted.mean())
```
## Does gender affect who is frisked during a search?
### Counting protective frisks
During a vehicle search, the police officer may pat down the driver to check if they have a weapon. This is known as a `"protective frisk."`
In this exercise, you'll first check to see how many times "Protective Frisk" was the only search type. Then, you'll use a string method to locate all instances in which the driver was frisked.
```
# Count the 'search_type' values
ri.search_type.value_counts()
# Check if 'search_type' contains the string 'Protective Frisk'
ri['frisk'] = ri.search_type.str.contains('Protective Frisk', na=False)
# Check the data type of 'frisk'
ri.frisk.dtypes
# Take the sum of 'frisk'
ri.frisk.sum()
```
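A note on `na=False` above: `search_type` is missing whenever no search was conducted, so without `na=False` the `.str.contains()` call would return NaN for those rows instead of False. A quick optional check (assuming the columns used above):
```
# Rows without a search should have a missing search_type
print(ri.search_type.isnull().sum())
print((ri.search_conducted == False).sum())
```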
### Comparing frisk rates by gender
In this exercise, you'll compare the rates at which female and male drivers are frisked during a search. Are males frisked more often than females, perhaps because police officers consider them to be higher risk?
Before doing any calculations, it's important to filter the DataFrame to only include the relevant subset of data, namely stops in which a search was conducted.
```
# Create a DataFrame of stops in which a search was conducted
searched = ri[ri['search_conducted'] == True]
# Calculate the overall frisk rate by taking the mean of 'frisk'
searched.frisk.mean()
# Calculate the frisk rate for each gender
searched.groupby('driver_gender').frisk.mean()
```
#### Interesting! The frisk rate is higher for males than for females, though we can't conclude that this difference is caused by the driver's gender.
---
# Chapter 3 - Visual exploratory data analysis
## Are you more likely to get arrested at a certain time of day? Are drug-related stops on the rise? In this chapter, you will answer these and other questions by analyzing the dataset visually, since plots can help you to understand trends in a way that examining the raw data cannot.
## Does time of day affect arrest rate ?
### Calculating the hourly arrest rate
When a police officer stops a driver, a small percentage of those stops ends in an arrest. This is known as the arrest rate. In this exercise, you'll find out whether the arrest rate varies by time of day.
First, you'll calculate the arrest rate across all stops. Then, you'll calculate the hourly arrest rate by using the hour attribute of the index. The hour ranges from 0 to 23, in which:
- 0 = midnight
- 12 = noon
- 23 = 11 PM
```
ri.index
ri.index.hour
# Calculate the overall arrest rate
ri.is_arrested.mean()
# Calculate the hourly arrest rate
ri.groupby(ri.index.hour).is_arrested.mean()
# Save the hourly arrest rate
hourly_arrest_rate = ri.groupby(ri.index.hour).is_arrested.mean()
```
### Plotting the hourly arrest rate
In this exercise, you'll create a line plot from the hourly_arrest_rate object. A line plot is appropriate in this case because you're showing how a quantity changes over time.
This plot should help you to spot some trends that may not have been obvious when examining the raw numbers!
```
# Create a line plot of 'hourly_arrest_rate'
hourly_arrest_rate.plot()
# Add the xlabel, ylabel, and title
plt.xlabel('Hour')
plt.ylabel('Arrest Rate')
plt.title('Arrest Rate by Time of Day')
```
## Are drug-related stops on the rise?
### Plotting drug-related stops
In a small portion of traffic stops, drugs are found in the vehicle during a search. In this exercise, you'll assess whether these drug-related stops are becoming more common over time.
The Boolean column drugs_related_stop indicates whether drugs were found during a given stop. You'll calculate the annual drug rate by resampling this column, and then you'll use a line plot to visualize how the rate has changed over time.
```
# Calculate the annual rate of drug-related stops
ri.drugs_related_stop.resample('A').mean()
# Save the annual rate of drug-related stops
annual_drug_rate = ri.drugs_related_stop.resample('A').mean()
# Create a line plot of 'annual_drug_rate'
annual_drug_rate.plot()
```
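Note that recent pandas releases deprecate the `'A'` (annual) resampling alias in favour of `'YE'` (year-end); if the cell above emits a FutureWarning, the equivalent call is:
```
# 'YE' (year-end) replaces the deprecated 'A' alias on newer pandas versions
annual_drug_rate = ri.drugs_related_stop.resample('YE').mean()
```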
## Comparing drug and search rates
As you saw in the last exercise, the rate of drug-related stops increased significantly between 2005 and 2015. You might hypothesize that the rate of vehicle searches was also increasing, which would have led to an increase in drug-related stops even if more drivers were not carrying drugs.
You can test this hypothesis by calculating the annual search rate, and then plotting it against the annual drug rate. If the hypothesis is true, then you'll see both rates increasing over time.
```
# Calculate and save the annual search rate
annual_search_rate = ri.search_conducted.resample('A').mean()
# Concatenate 'annual_drug_rate' and 'annual_search_rate'
annual = pd.concat([annual_drug_rate, annual_search_rate], axis='columns')
# Create subplots from 'annual'
annual.plot(subplots = True)
```
## What violations are caught in each district?
### Tallying violations by district
The state of Rhode Island is broken into six police districts, also known as zones. How do the zones compare in terms of what violations are caught by police?
In this exercise, you'll create a frequency table to determine how many violations of each type took place in each of the six zones. Then, you'll filter the table to focus on the "K" zones, which you'll examine further in the next exercise.
```
# Create a frequency table of districts and violations
pd.crosstab(ri['district'], ri['violation'])
# Save the frequency table as 'all_zones'
all_zones = pd.crosstab(ri['district'], ri['violation'])
# Select rows 'Zone K1' through 'Zone K3'
all_zones.loc['Zone K1':'Zone K3']
# Save the smaller table as 'k_zones'
k_zones = all_zones.loc['Zone K1':'Zone K3']
```
### Plotting violations by district
Now that you've created a frequency table focused on the "K" zones, you'll visualize the data to help you compare what violations are being caught in each zone.
First you'll create a bar plot, which is an appropriate plot type since you're comparing categorical data. Then you'll create a stacked bar plot in order to get a slightly different look at the data. Which plot do you find to be more insightful?
```
# Create a bar plot of 'k_zones'
k_zones.plot(kind = 'bar', figsize = (8, 6))
# Create a stacked bar plot of 'k_zones'
k_zones.plot(kind = 'bar', stacked = True, figsize = (8, 6))
```
## How long might you be stopped for a violation?
### Converting stop durations to numbers
In the traffic stops dataset, the stop_duration column tells you approximately how long the driver was detained by the officer. Unfortunately, the durations are stored as strings, such as '0-15 Min'. How can you make this data easier to analyze?
In this exercise, you'll convert the stop durations to integers. Because the precise durations are not available, you'll have to estimate the numbers using reasonable values:
- Convert '0-15 Min' to 8
- Convert '16-30 Min' to 23
- Convert '30+ Min' to 45
```
# Print the unique values in 'stop_duration'
ri.stop_duration.unique()
# Create a dictionary that maps strings to integers
mapping = {'0-15 Min':8, '16-30 Min':23, '30+ Min':45}
# Convert the 'stop_duration' strings to integers using the 'mapping'
ri['stop_minutes'] = ri.stop_duration.map(mapping)
# Print the unique values in 'stop_minutes'
ri['stop_minutes'].unique()
```
### Plotting stop length
If you were stopped for a particular violation, how long might you expect to be detained?
In this exercise, you'll visualize the average length of time drivers are stopped for each type of violation. Rather than using the violation column in this exercise, you'll use violation_raw since it contains more detailed descriptions of the violations.
```
# Calculate the mean 'stop_minutes' for each value in 'violation_raw'
ri.groupby('violation_raw')['stop_minutes'].mean()
# Save the resulting Series as 'stop_length'
stop_length = ri.groupby('violation_raw')['stop_minutes'].mean()
# Sort 'stop_length' by its values and create a horizontal bar plot
stop_length.sort_values().plot(kind = 'barh', figsize = (8, 6))
```
#### Congratulations! You've completed the chapter on visual exploratory data analysis!
---
# Chapter 4 - Analyzing the effect of weather on policing
## In this chapter, you will use a second dataset to explore the impact of weather conditions on police behavior during traffic stops. You will practice merging and reshaping datasets, assessing whether a data source is trustworthy, working with categorical data, and other advanced skills.
## Exploring the weather dataset
### Plotting the temperature
In this exercise, you'll examine the temperature columns from the weather dataset to assess whether the data seems trustworthy. First you'll print the summary statistics, and then you'll visualize the data using a box plot.
When deciding whether the values seem reasonable, keep in mind that the temperature is measured in degrees Fahrenheit, not Celsius!
```
weather = pd.read_csv('weather.csv')
weather.head()
# Select the temperature columns (TMIN, TAVG, TMAX) and print their summary statistics using the .describe() method
weather[['TMIN', 'TAVG', 'TMAX']].describe()
# Create a box plot of the temperature columns
weather[['TMIN', 'TAVG', 'TMAX']].plot(kind = 'box')
```
### Plotting the temperature difference
In this exercise, you'll continue to assess whether the dataset seems trustworthy by plotting the difference between the maximum and minimum temperatures.
What do you notice about the resulting histogram? Does it match your expectations, or do you see anything unusual?
```
# Create a 'TDIFF' column that represents temperature difference
weather['TDIFF'] = weather['TMAX'] - weather['TMIN']
# Describe the 'TDIFF' column
print(weather['TDIFF'].describe())
# Create a histogram with 20 bins to visualize 'TDIFF'
weather['TDIFF'].plot(kind = 'hist', bins = 20, ec = 'white')
```
## Categorizing the weather
### Counting bad weather conditions
The weather DataFrame contains 20 columns that start with 'WT', each of which represents a bad weather condition. For example:
- WT05 indicates "Hail"
- WT11 indicates "High or damaging winds"
- WT17 indicates "Freezing rain"
For every row in the dataset, each WT column contains either a 1 (meaning the condition was present that day) or NaN (meaning the condition was not present).
In this exercise, you'll quantify "how bad" the weather was each day by counting the number of 1 values in each row.
```
# Copy 'WT01' through 'WT22' to a new DataFrame
WT = weather.loc[:, 'WT01':'WT22']
# Calculate the sum of each row in 'WT'
weather['bad_conditions'] = WT.sum(axis = 1)
# Replace missing values in 'bad_conditions' with '0'
weather['bad_conditions'] = weather.bad_conditions.fillna(0).astype('int')
# Create a histogram to visualize 'bad_conditions'
weather.bad_conditions.plot(kind = 'hist')
```
### Rating the weather conditions
In the previous exercise, you counted the number of bad weather conditions each day. In this exercise, you'll use the counts to create a rating system for the weather.
The counts range from 0 to 9, and should be converted to ratings as follows:
- Convert 0 to 'good'
- Convert 1 through 4 to 'bad'
- Convert 5 through 9 to 'worse'
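As an aside, the same binning can be written with `pd.cut` instead of spelling out the dictionary; a sketch equivalent to the mapping used in the cell below:
```
# Bin counts into ratings: (-1, 0] -> 'good', (0, 4] -> 'bad', (4, 9] -> 'worse'
weather['rating'] = pd.cut(weather.bad_conditions,
                           bins=[-1, 0, 4, 9],
                           labels=['good', 'bad', 'worse'])
```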
```
# Count the unique values in 'bad_conditions' and sort the index
weather.bad_conditions.value_counts().sort_index()
# Create a dictionary that maps integers to strings
mapping = {0:'good', 1:'bad', 2:'bad', 3:'bad', 4:'bad', 5:'worse', 6:'worse', 7:'worse', 8:'worse', 9:'worse'}
# Convert the 'bad_conditions' integers to strings using the 'mapping'
weather['rating'] = weather.bad_conditions.map(mapping)
# Count the unique values in 'rating'
weather['rating'].value_counts()
```
### Changing the data type to category
Since the rating column only has a few possible values, you'll change its data type to category in order to store the data more efficiently. You'll also specify a logical order for the categories, which will be useful for future exercises.
```
import warnings
warnings.filterwarnings('ignore')
# Create a list of weather ratings in logical order
cats = ['good', 'bad', 'worse']
# Change the data type of 'rating' to category
weather['rating'] = weather.rating.astype('category', ordered = True, categories = cats)
# Examine the head of 'rating'
weather.rating.head()
```
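Note that the `astype('category', ordered=True, categories=cats)` signature was removed in later pandas releases; if the cell above raises a TypeError, an equivalent formulation (assuming pandas >= 0.25) is:
```
from pandas.api.types import CategoricalDtype
# Cast to an explicitly ordered categorical dtype
cat_dtype = CategoricalDtype(categories=['good', 'bad', 'worse'], ordered=True)
weather['rating'] = weather.rating.astype(cat_dtype)
weather.rating.head()
```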
## Merging datasets
### Preparing the DataFrames
In this exercise, you'll prepare the traffic stop and weather rating DataFrames so that they're ready to be merged:
With the ri DataFrame, you'll move the stop_datetime index to a column since the index will be lost during the merge.
With the weather DataFrame, you'll select the DATE and rating columns and put them in a new DataFrame.
```
# Reset the index of 'ri'
ri.reset_index(inplace = True)
# Examine the head of 'ri'
ri.head()
# Create a DataFrame from the 'DATE' and 'rating' columns
weather_rating = weather[['DATE', 'rating']]
# Examine the head of 'weather_rating'
weather_rating.head()
```
### Merging the DataFrames
In this exercise, you'll merge the ri and weather_rating DataFrames into a new DataFrame, ri_weather.
The DataFrames will be joined using the stop_date column from ri and the DATE column from weather_rating. Thankfully the date formatting matches exactly, which is not always the case!
Once the merge is complete, you'll set stop_datetime as the index, which is the column you saved in the previous exercise.
```
# Examine the shape of 'ri'
ri.shape
# Merge 'ri' and 'weather_rating' using an inner join on the date columns
ri_weather = pd.merge(left=ri, right=weather_rating, left_on='stop_date', right_on='DATE', how='inner')
# Examine the shape of 'ri_weather'
ri_weather.shape
# Set 'stop_datetime' as the index of 'ri_weather'
ri_weather.set_index('stop_datetime', inplace=True)
```
## Does weather affect the arrest rate?
### Comparing arrest rates by weather rating
Do police officers arrest drivers more often when the weather is bad? Find out below!
- First, you'll calculate the overall arrest rate.
- Then, you'll calculate the arrest rate for each of the weather ratings you previously assigned.
- Finally, you'll add violation type as a second factor in the analysis, to see if that accounts for any differences in the arrest rate.
Since you previously defined a logical order for the weather categories, good < bad < worse, they will be sorted that way in the results.
```
# Calculate the overall arrest rate
ri_weather.is_arrested.mean()
# Calculate the arrest rate for each 'rating'
ri_weather.groupby('rating').is_arrested.mean()
# Calculate the arrest rate for each 'violation' and 'rating'
ri_weather.groupby(['violation', 'rating']).is_arrested.mean()
```
#### Wow! The arrest rate increases as the weather gets worse, and that trend persists across many of the violation types. This doesn't prove a causal link, but it's quite an interesting result!
### Selecting from a multi-indexed Series
The output of a single .groupby() operation on multiple columns is a Series with a MultiIndex. Working with this type of object is similar to working with a DataFrame:
- The outer index level is like the DataFrame rows.
- The inner index level is like the DataFrame columns.
In this exercise, you'll practice accessing data from a multi-indexed Series using the .loc[] accessor.
```
# Save the output of the groupby operation from the last exercise
arrest_rate = ri_weather.groupby(['violation', 'rating']).is_arrested.mean()
# Print the 'arrest_rate' Series
arrest_rate
# Print the arrest rate for moving violations in bad weather
arrest_rate.loc['Moving violation', 'bad']
# Print the arrest rates for speeding violations in all three weather conditions
arrest_rate.loc['Speeding']
```
### Reshaping the arrest rate data
In this exercise, you'll start by reshaping the arrest_rate Series into a DataFrame. This is a useful step when working with any multi-indexed Series, since it enables you to access the full range of DataFrame methods.
Then, you'll create the exact same DataFrame using a pivot table. This is a great example of how pandas often gives you more than one way to reach the same result!
```
# Unstack the 'arrest_rate' Series into a DataFrame
arrest_rate.unstack()
# Create the same DataFrame using a pivot table
ri_weather.pivot_table(index='violation', columns='rating', values='is_arrested')
```
|
github_jupyter
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = 'all'
# Read 'police.csv' into a DataFrame named ri
ri = pd.read_csv('police.csv')
# Examine the head of the DataFrame
ri.head()
# Count the number of missing values in each column
ri.isnull().sum()
# Examine the shape of the DataFrame
ri.shape
# Drop the 'county_name' and 'state' columns
ri.drop(['county_name', 'state'], axis='columns', inplace=True)
# Examine the shape of the DataFrame (again)
ri.shape
# Drop all rows that are missing 'driver_gender'
ri.dropna(subset=['driver_gender'], inplace=True)
# Count the number of missing values in each column (again)
ri.isnull().sum()
# Examine the shape of the DataFrame
ri.shape
ri.dtypes
# Examine the head of the 'is_arrested' column
ri.is_arrested.head()
# Check the data type of 'is_arrested'
ri.is_arrested.dtype
# Change the data type of 'is_arrested' to 'bool'
ri['is_arrested'] = ri.is_arrested.astype('bool')
# Check the data type of 'is_arrested' (again)
ri.is_arrested.dtype
# Concatenate 'stop_date' and 'stop_time' (separated by a space)
combined = ri.stop_date.str.cat(ri.stop_time, sep = ' ')
# Convert 'combined' to datetime format
ri['stop_datetime'] = pd.to_datetime(combined)
# Examine the data types of the DataFrame
ri.dtypes
# Set 'stop_datetime' as the index
ri.set_index('stop_datetime', inplace=True)
# Examine the index
ri.index
# Examine the columns
ri.columns
# Count the unique values in 'violation'
ri.violation.value_counts()
# Express the counts as proportions
ri.violation.value_counts(normalize = True)
# Create a DataFrame of female drivers
female = ri[ri['driver_gender'] == 'F']
# Create a DataFrame of male drivers
male = ri[ri['driver_gender'] == 'M']
# Compute the violations by female drivers (as proportions)
female.violation.value_counts(normalize = True)
# Compute the violations by male drivers (as proportions)
male.violation.value_counts(normalize = True)
ri.head()
# Create a DataFrame of female drivers stopped for speeding
female_and_speeding = ri[(ri['driver_gender'] == 'F') & (ri['violation'] == 'Speeding')]
# Create a DataFrame of male drivers stopped for speeding
male_and_speeding = ri[(ri['driver_gender'] == 'M') & (ri['violation'] == 'Speeding')]
# Compute the stop outcomes for female drivers (as proportions)
female_and_speeding.stop_outcome.value_counts(normalize = True)
# Compute the stop outcomes for male drivers (as proportions)
male_and_speeding.stop_outcome.value_counts(normalize = True)
# Check the data type of 'search_conducted'
ri.search_conducted.dtype
# Calculate the search rate by counting the values
ri.search_conducted.value_counts(normalize = True)
# Calculate the search rate by taking the mean of the Series. (It should match the proportion of True values calculated above.)
ri.search_conducted.mean()
# Calculate the search rate for female drivers
ri[ri['driver_gender'] == 'F'].search_conducted.mean()
# Calculate the search rate for male drivers
ri[ri['driver_gender'] == 'M'].search_conducted.mean()
# Calculate the search rate for both groups simultaneously
ri.groupby('driver_gender').search_conducted.mean()
# Calculate the search rate for each combination of gender and violation
print(ri.groupby(['driver_gender', 'violation']).search_conducted.mean())
# Reverse the ordering to group by violation before gender
print(ri.groupby(['violation', 'driver_gender']).search_conducted.mean())
# Count the 'search_type' values
ri.search_type.value_counts()
# Check if 'search_type' contains the string 'Protective Frisk'
ri['frisk'] = ri.search_type.str.contains('Protective Frisk', na=False)
# Check the data type of 'frisk'
ri.frisk.dtypes
# Take the sum of 'frisk'
ri.frisk.sum()
# Create a DataFrame of stops in which a search was conducted
searched = ri[ri['search_conducted'] == True]
# Calculate the overall frisk rate by taking the mean of 'frisk'
searched.frisk.mean()
# Calculate the frisk rate for each gender
searched.groupby('driver_gender').frisk.mean()
ri.index
ri.index.hour
# Calculate the overall arrest rate
ri.is_arrested.mean()
# Calculate the hourly arrest rate
ri.groupby(ri.index.hour).is_arrested.mean()
# Save the hourly arrest rate
hourly_arrest_rate = ri.groupby(ri.index.hour).is_arrested.mean()
# Create a line plot of 'hourly_arrest_rate'
hourly_arrest_rate.plot()
# Add the xlabel, ylabel, and title
plt.xlabel('Hour')
plt.ylabel('Arrest Rate')
plt.title('Arrest Rate by Time of Day')
# Calculate the annual rate of drug-related stops
ri.drugs_related_stop.resample('A').mean()
# Save the annual rate of drug-related stops
annual_drug_rate = ri.drugs_related_stop.resample('A').mean()
# Create a line plot of 'annual_drug_rate'
annual_drug_rate.plot()
# Calculate and save the annual search rate
annual_search_rate = ri.search_conducted.resample('A').mean()
# Concatenate 'annual_drug_rate' and 'annual_search_rate'
annual = pd.concat([annual_drug_rate, annual_search_rate], axis='columns')
# Create subplots from 'annual'
annual.plot(subplots = True)
# Create a frequency table of districts and violations
pd.crosstab(ri['district'], ri['violation'])
# Save the frequency table as 'all_zones'
all_zones = pd.crosstab(ri['district'], ri['violation'])
# Select rows 'Zone K1' through 'Zone K3'
all_zones.loc['Zone K1':'Zone K3']
# Save the smaller table as 'k_zones'
k_zones = all_zones.loc['Zone K1':'Zone K3']
# Create a bar plot of 'k_zones'
k_zones.plot(kind = 'bar', figsize = (8, 6))
# Create a stacked bar plot of 'k_zones'
k_zones.plot(kind = 'bar', stacked = True, figsize = (8, 6))
# Print the unique values in 'stop_duration'
ri.stop_duration.unique()
# Create a dictionary that maps strings to integers
mapping = {'0-15 Min':8, '16-30 Min':23, '30+ Min':45}
# Convert the 'stop_duration' strings to integers using the 'mapping'
ri['stop_minutes'] = ri.stop_duration.map(mapping)
# Print the unique values in 'stop_minutes'
ri['stop_minutes'].unique()
# Calculate the mean 'stop_minutes' for each value in 'violation_raw'
ri.groupby('violation_raw')['stop_minutes'].mean()
# Save the resulting Series as 'stop_length'
stop_length = ri.groupby('violation_raw')['stop_minutes'].mean()
# Sort 'stop_length' by its values and create a horizontal bar plot
stop_length.sort_values().plot(kind = 'barh', figsize = (8, 6))
weather = pd.read_csv('weather.csv')
weather.head()
# Select the temperature columns (TMIN, TAVG, TMAX) and print their summary statistics using the .describe() method
weather[['TMIN', 'TAVG', 'TMAX']].describe()
# Create a box plot of the temperature columns
weather[['TMIN', 'TAVG', 'TMAX']].plot(kind = 'box')
# Create a 'TDIFF' column that represents temperature difference
weather['TDIFF'] = weather['TMAX'] - weather['TMIN']
# Describe the 'TDIFF' column
print(weather['TDIFF'].describe())
# Create a histogram with 20 bins to visualize 'TDIFF'
weather['TDIFF'].plot(kind = 'hist', bins = 20, ec = 'white')
# Copy 'WT01' through 'WT22' to a new DataFrame
WT = weather.loc[:, 'WT01':'WT22']
# Calculate the sum of each row in 'WT'
weather['bad_conditions'] = WT.sum(axis = 1)
# Replace missing values in 'bad_conditions' with '0'
weather['bad_conditions'] = weather.bad_conditions.fillna(0).astype('int')
# Create a histogram to visualize 'bad_conditions'
weather.bad_conditions.plot(kind = 'hist')
# Count the unique values in 'bad_conditions' and sort the index
weather.bad_conditions.value_counts().sort_index()
# Create a dictionary that maps integers to strings
mapping = {0:'good', 1:'bad', 2:'bad', 3:'bad', 4:'bad', 5:'worse', 6:'worse', 7:'worse', 8:'worse', 9:'worse'}
# Convert the 'bad_conditions' integers to strings using the 'mapping'
weather['rating'] = weather.bad_conditions.map(mapping)
# Count the unique values in 'rating'
weather['rating'].value_counts()
import warnings
warnings.filterwarnings('ignore')
# Create a list of weather ratings in logical order
cats = ['good', 'bad', 'worse']
# Change the data type of 'rating' to category
weather['rating'] = weather.rating.astype('category', ordered = True, categories = cats)
# Examine the head of 'rating'
weather.rating.head()
# Reset the index of 'ri'
ri.reset_index(inplace = True)
# Examine the head of 'ri'
ri.head()
# Create a DataFrame from the 'DATE' and 'rating' columns
weather_rating = weather[['DATE', 'rating']]
# Examine the head of 'weather_rating'
weather_rating.head()
# Examine the shape of 'ri'
ri.shape
# Merge 'ri' and 'weather_rating' using an inner join on the date columns
ri_weather = pd.merge(left=ri, right=weather_rating, left_on='stop_date', right_on='DATE', how='inner')
# Examine the shape of 'ri_weather'
ri_weather.shape
# Set 'stop_datetime' as the index of 'ri_weather'
ri_weather.set_index('stop_datetime', inplace=True)
# Calculate the overall arrest rate
ri_weather.is_arrested.mean()
# Calculate the arrest rate for each 'rating'
ri_weather.groupby('rating').is_arrested.mean()
# Calculate the arrest rate for each 'violation' and 'rating'
ri_weather.groupby(['violation', 'rating']).is_arrested.mean()
# Save the output of the groupby operation from the last exercise
arrest_rate = ri_weather.groupby(['violation', 'rating']).is_arrested.mean()
# Print the 'arrest_rate' Series
arrest_rate
# Print the arrest rate for moving violations in bad weather
arrest_rate.loc['Moving violation', 'bad']
# Print the arrest rates for speeding violations in all three weather conditions
arrest_rate.loc['Speeding']
# Unstack the 'arrest_rate' Series into a DataFrame
arrest_rate.unstack()
# Create the same DataFrame using a pivot table
ri_weather.pivot_table(index='violation', columns='rating', values='is_arrested')
| 0.724481 | 0.992885 |
```
import torch
import torch.distributions as d
import torch.nn.functional as F
from retail import retail
import numpy as np
```
In this experiment, we define our CVaR at the 5% level: knowing that we're in the 5% worst cases, how much do we expect to waste per item on average? To do so, we use a linear utility weighted so that it only cares about waste, not sales or availability.
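Concretely, writing $R$ for the average per-step reward of a trajectory and $\alpha = 0.05$, the quantity estimated below is

$$\mathrm{CVaR}_\alpha(R) = \mathbb{E}\left[R \mid R \le \mathrm{VaR}_\alpha(R)\right],$$

where $\mathrm{VaR}_\alpha(R)$ is the empirical $\alpha$-quantile of the simulated rewards; this mirrors the `np.quantile` and conditional-mean computation at the end of the notebook.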
```
n_customers = 2500
n_buckets = 4
monte_carlo_size = 100
store_args= {'assortment_size': 1000, 'bucket_cov': torch.eye(n_buckets)/100, 'seed' : 1066,
'max_stock': 1000, 'forecastVariance' :0., 'horizon': 100, 'lead_time': 1}
bucketDist = d.uniform.Uniform(0,1)
store_args = {
'assortment_size': 1000,
'max_stock': 1000,
'bucket_cov': torch.eye(n_buckets)/100,
'seed' : 1066,
'utility_function': 'linear',
# We give a null weight to availability and sales
'utility_weights': {
'alpha': 0.,
'beta': 1.,
'gamma':0. },
'forecastVariance' :0., 'horizon': 100, 'lead_time': 1
}
# We define our quantile for the CVAR
cvar_level = 0.05
```
We define a simulation loop that steps the environment and stores the waste-based reward at each step. We compute the CVaR over trajectories of length 100, across 100 stores.
```
#Create the list of the average daily reward for each customer distribution for the chosen policy
summed_rewards_policy = []
for i in range(monte_carlo_size):
sub_rewards = []
done = False
#Generate the store and its customer repartition throughout the day
torch.manual_seed(i)
sampled = bucketDist.sample((n_buckets,))
sample_bucket_customers = (n_customers*sampled/sampled.sum()).round()
store = retail.StoreEnv(**store_args, bucket_customers = sample_bucket_customers)
while not (done):
#Compute the order according to the policy
customers = sample_bucket_customers.max()
p = store.forecast.squeeze()
std = torch.sqrt(customers*p+(1-p))
order = F.relu(3*std+store.forecast.squeeze()*customers-store.get_full_inventory_position()).round()
# Step the environment and get its observation
obs = store.step(order.numpy())
# Store reward for the specific time step
sub_rewards.append(obs[1])
done = obs[2]
#Append average reward of this customer repartition to the list of rewards
summed_rewards_policy.append(torch.stack(sub_rewards).mean())
```
Having stored the results, we compute the estimate of the 5% quantile (the VaR) and the expectation of the rewards below it (our CVaR).
```
rewards = torch.stack(summed_rewards_policy)
# We first obtain the Value-at-risk
var = np.quantile(rewards, cvar_level)
# We retrieve elements below the var
bad_cases = rewards[rewards<var]
# Finally, we compute the CVAR:
bad_cases.mean()
```
Thus, in the 5% worst cases we can expect to waste about 48 cents of the monetary unit (€ here) per item in the assortment per day, over our defined item distribution for 100-day-long trajectories.
|
github_jupyter
|
import torch
import torch.distributions as d
import torch.nn.functional as F
from retail import retail
import numpy as np
n_customers = 2500
n_buckets = 4
monte_carlo_size = 100
store_args= {'assortment_size': 1000, 'bucket_cov': torch.eye(n_buckets)/100, 'seed' : 1066,
'max_stock': 1000, 'forecastVariance' :0., 'horizon': 100, 'lead_time': 1}
bucketDist = d.uniform.Uniform(0,1)
store_args = {
'assortment_size': 1000,
'max_stock': 1000,
'bucket_cov': torch.eye(n_buckets)/100,
'seed' : 1066,
'utility_function': 'linear',
# We give a null weight to availability and sales
'utility_weights': {
'alpha': 0.,
'beta': 1.,
'gamma':0. },
'forecastVariance' :0., 'horizon': 100, 'lead_time': 1
}
# We define our quantile for the CVAR
cvar_level = 0.05
#Create the list of the average daily reward for each customer distribution for the chosen policy
summed_rewards_policy = []
for i in range(monte_carlo_size):
sub_rewards = []
done = False
#Generate the store and its customer repartition throughout the day
torch.manual_seed(i)
sampled = bucketDist.sample((n_buckets,))
sample_bucket_customers = (n_customers*sampled/sampled.sum()).round()
store = retail.StoreEnv(**store_args, bucket_customers = sample_bucket_customers)
while not (done):
#Compute the order according to the policy
customers = sample_bucket_customers.max()
p = store.forecast.squeeze()
std = torch.sqrt(customers*p+(1-p))
order = F.relu(3*std+store.forecast.squeeze()*customers-store.get_full_inventory_position()).round()
# Step the environment and get its observation
obs = store.step(order.numpy())
# Store reward for the specific time step
sub_rewards.append(obs[1])
done = obs[2]
#Append average reward of this customer repartition to the list of rewards
summed_rewards_policy.append(torch.stack(sub_rewards).mean())
rewards = torch.stack(summed_rewards_policy)
# We first obtain the Value-at-risk
var = np.quantile(rewards, cvar_level)
# We retrieve elements below the var
bad_cases = rewards[rewards<var]
# Finally, we compute the CVAR:
bad_cases.mean()
| 0.658747 | 0.902952 |
```
import bz2
import datetime
import itertools
import lzma as xz
import logging
import json
import os
import re
import requests
from collections import Counter
from dateutil.relativedelta import relativedelta
from functools import partial
from multiprocessing import Pool
from multiprocessing.dummy import Pool as dPool
from tqdm import tqdm_notebook as tqdm
os.environ['REDDIT_DATA'] = "/media/brian/ColdStore/Datasets/nlp/reddit"
_REDDIT_COMMENT_BASE_URL = "https://files.pushshift.io/reddit/comments/"
_BZ2_FILENAME_TEMPLATE = "RC_%Y-%m.bz2"
_XZ_FILENAME_TEMPLATE = "RC_%Y-%m.xz"
_DATA_START_DATE = datetime.date(2005, 12, 1)
_XZ_START_DATE = datetime.date(2017, 12, 1)
DEFAULT_REDDIT_DATA = os.environ.get('REDDIT_DATA') or os.path.expanduser("~/reddit")
DEFAULT_REDDIT_COMMENTS_DATA = os.path.join(DEFAULT_REDDIT_DATA, "comments")
def populate_reddit_comments_json(dest=DEFAULT_REDDIT_COMMENTS_DATA):
curr_date = _DATA_START_DATE
end_date = datetime.date.today() + relativedelta(months=-1)
dates = []
while curr_date <= end_date:
dates.append(curr_date)
curr_date += relativedelta(months=1)
download_fn = partial(_download_reddit_comments_json, dest=dest)
# Using too many processes causes "ERROR 429: Too Many Requests."
list(multiproc_imap(download_fn,
dates,
processes=4,
thread_only=True,
total=len(dates)))
def download_reddit_comments_json(year, month, dest=DEFAULT_REDDIT_COMMENTS_DATA):
url = get_reddit_comments_url(year, month)
if not url:
logging.warning(datetime.date(year, month, 1).strftime("No data exists for %Y-%m."))
return False
return download(url, dest=dest)
def load_reddit_comments_json(year, month, root=DEFAULT_REDDIT_COMMENTS_DATA):
path = get_reddit_comments_local(year, month)
if not path:
logging.warning(datetime.date(year, month, 1).strftime("No data exists for %Y-%m."))
return None
assert path.endswith('.bz2') or path.endswith('.xz'), (
"Failed to load {}.Only bz2 and xz are supported.".format(path))
reader = bz2.BZ2File if path.endswith('.bz2') else xz.LZMAFile
with reader(path, 'r') as fh:
for line in fh:
yield json.loads(line.decode())
def _download_reddit_comments_json(date, dest=DEFAULT_REDDIT_COMMENTS_DATA):
return download_reddit_comments_json(date.year, date.month, dest=dest)
def get_reddit_comments_url(year, month):
target_date = datetime.date(year, month, 1)
url = _get_reddit_comments_path(target_date, _REDDIT_COMMENT_BASE_URL)
return url
def get_reddit_comments_local(year, month, root=DEFAULT_REDDIT_COMMENTS_DATA):
target_date = datetime.date(year, month, 1)
path = _get_reddit_comments_path(target_date, root=root)
return path
def download(url, dest='/tmp/'):
filename = os.path.basename(url)
if dest[-1] == '/' or os.path.isdir(dest):
if not os.path.isdir(dest):
os.makedirs(dest)
dest = os.path.join(dest, filename)
if os.path.isfile(dest):
logging.info("{} already exist in {}.".format(url, dest))
else:
logging.info("Downloading {} to {}...".format(url, dest))
resp = requests.get(url, stream=True)
if not resp.ok:
logging.warning("{}: {}".format(resp.reason, url))
return False
total_size = int(resp.headers.get('content-length', 0));
block_size = 2**20
with open(dest, 'wb') as fh:
for data in tqdm(resp.iter_content(block_size),
unit="MB",
total=total_size//block_size):
fh.write(data)
return True
def multiproc_imap(func,
iterable,
processes=None,
thread_only=False,
total=None,
chunksize=1):
pool_fn = dPool if thread_only else Pool
pool = pool_fn(processes=processes)
return tqdm(pool.imap(func, iterable, chunksize=chunksize), total=total)
def _get_reddit_comments_path(date, root):
if not _validate_reddit_comments_date(date):
return None
filename = _get_reddit_comments_filename(date)
path = os.path.join(root, filename)
return path
def _get_reddit_comments_filename(date):
if date < _XZ_START_DATE:
return date.strftime(_BZ2_FILENAME_TEMPLATE)
else:
return date.strftime(_XZ_FILENAME_TEMPLATE)
def _validate_reddit_comments_date(date):
start_date = _DATA_START_DATE
end_date = datetime.date.today() + relativedelta(months=-1)
if (date > end_date or date < start_date):
logging.warning("date must be between {} and {}: given {}".format(
start_date.strftime("%Y-%m"),
end_date.strftime("%Y-%m"),
date.strftime("%Y-%m")))
return False
return True
```
# Download Reddit Comments
There's about 450GB of data from 2005-12 to 2018-09, so make sure you have enough disk space.
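If you only want to experiment, you can also fetch a single month with the helper defined above instead of the whole archive; for example, the 2006-12 dump used in the benchmarks below is comparatively small:
```
# Download a single monthly dump instead of the full archive
download_reddit_comments_json(2006, 12)
```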
```
populate_reddit_comments_json()
```
# Generate N-grams
```
DEFAULT_TOKEN_MAX_CHARS = 25
def extract_reddit_comments_upto_ngram_strs(year, month, n):
"""Extract 1- to n-gram simultaneously because file load is the bottleneck."""
jsons = load_reddit_comments_json(year, month)
texts = map(lambda d: d['body'], jsons)
for text in texts:
upto_ngrams = []
for m in range(n):
mgrams = extract_filtered_ngram_strs(text, m)
upto_ngrams.append(mgrams)
yield upto_ngrams
def extract_reddit_comments_ngram_strs(year, month, n):
jsons = load_reddit_comments_json(year, month)
texts = map(lambda d: d['body'], jsons)
ngram_strs = map(lambda s: extract_filtered_ngram_strs(s, n), texts)
return ngram_strs
def extract_filtered_ngram_strs(text, n, tok_max_chars=DEFAULT_TOKEN_MAX_CHARS):
text_cleaned = re.sub('\s+', ' ', text)
token_match_str = "[\^ ][^ ]{1,%d}" % tok_max_chars
ngram_match_str = "(?=(%s))" % (token_match_str * n)
return re.findall(ngram_match_str, text_cleaned)
def extract_filtered_ngram_strs_slow(text, n, tok_max_chars=DEFAULT_TOKEN_MAX_CHARS):
ngrams = extract_ngrams(text, n)
filtered_ngrams = filter(
lambda ngram: not has_long_token(ngram, tok_max_chars=tok_max_chars), ngrams)
filtered_ngram_strs = map(
lambda ngram: ' '.join(ngram), filtered_ngrams)
return filtered_ngram_strs
def extract_ngrams(text, n):
tokens = text.split()
return zip(*[tokens[i:] for i in range(n)])
def has_long_token(tokens, tok_max_chars=DEFAULT_TOKEN_MAX_CHARS):
for tok in tokens:
if len(tok) > tok_max_chars:
return True
return False
```
**Benchmark n-gram extraction with regex vs tokenization**
```
test_string = "asdf " * 1000
%%timeit
list(extract_filtered_ngram_strs(test_string, 5))
%%timeit
list(extract_filtered_ngram_strs_slow(test_string, 5))
```
**Benchmark file loading vs file loading + n-gram extraction**
```
%%timeit -r 1 -n 1
_ = list(load_reddit_comments_json(2006, 12))
%%timeit -r 1 -n 1
_ = list(extract_reddit_comments_ngram_strs(2006, 12, 3))
```
|
github_jupyter
|
import bz2
import datetime
import itertools
import lzma as xz
import logging
import json
import os
import re
import requests
from collections import Counter
from dateutil.relativedelta import relativedelta
from functools import partial
from multiprocessing import Pool
from multiprocessing.dummy import Pool as dPool
from tqdm import tqdm_notebook as tqdm
os.environ['REDDIT_DATA'] = "/media/brian/ColdStore/Datasets/nlp/reddit"
_REDDIT_COMMENT_BASE_URL = "https://files.pushshift.io/reddit/comments/"
_BZ2_FILENAME_TEMPLATE = "RC_%Y-%m.bz2"
_XZ_FILENAME_TEMPLATE = "RC_%Y-%m.xz"
_DATA_START_DATE = datetime.date(2005, 12, 1)
_XZ_START_DATE = datetime.date(2017, 12, 1)
DEFAULT_REDDIT_DATA = os.environ.get('REDDIT_DATA') or os.path.expanduser("~/reddit")
DEFAULT_REDDIT_COMMENTS_DATA = os.path.join(DEFAULT_REDDIT_DATA, "comments")
def populate_reddit_comments_json(dest=DEFAULT_REDDIT_COMMENTS_DATA):
curr_date = _DATA_START_DATE
end_date = datetime.date.today() + relativedelta(months=-1)
dates = []
while curr_date <= end_date:
dates.append(curr_date)
curr_date += relativedelta(months=1)
download_fn = partial(_download_reddit_comments_json, dest=dest)
# Using too many processes causes "ERROR 429: Too Many Requests."
list(multiproc_imap(download_fn,
dates,
processes=4,
thread_only=True,
total=len(dates)))
def download_reddit_comments_json(year, month, dest=DEFAULT_REDDIT_COMMENTS_DATA):
url = get_reddit_comments_url(year, month)
if not url:
logging.warning(datetime.date(year, month, 1).strftime("No data exists for %Y-%m."))
return False
return download(url, dest=dest)
def load_reddit_comments_json(year, month, root=DEFAULT_REDDIT_COMMENTS_DATA):
path = get_reddit_comments_local(year, month)
if not path:
logging.warning(datetime.date(year, month, 1).strftime("No data exists for %Y-%m."))
return None
assert path.endswith('.bz2') or path.endswith('.xz'), (
"Failed to load {}.Only bz2 and xz are supported.".format(path))
reader = bz2.BZ2File if path.endswith('.bz2') else xz.LZMAFile
with reader(path, 'r') as fh:
for line in fh:
yield json.loads(line.decode())
def _download_reddit_comments_json(date, dest=DEFAULT_REDDIT_COMMENTS_DATA):
return download_reddit_comments_json(date.year, date.month, dest=dest)
def get_reddit_comments_url(year, month):
target_date = datetime.date(year, month, 1)
url = _get_reddit_comments_path(target_date, _REDDIT_COMMENT_BASE_URL)
return url
def get_reddit_comments_local(year, month, root=DEFAULT_REDDIT_COMMENTS_DATA):
target_date = datetime.date(year, month, 1)
path = _get_reddit_comments_path(target_date, root=root)
return path
def download(url, dest='/tmp/'):
filename = os.path.basename(url)
if dest[-1] == '/' or os.path.isdir(dest):
if not os.path.isdir(dest):
os.makedirs(dest)
dest = os.path.join(dest, filename)
if os.path.isfile(dest):
logging.info("{} already exist in {}.".format(url, dest))
else:
logging.info("Downloading {} to {}...".format(url, dest))
resp = requests.get(url, stream=True)
if not resp.ok:
logging.warning("{}: {}".format(resp.reason, url))
return False
total_size = int(resp.headers.get('content-length', 0));
block_size = 2**20
with open(dest, 'wb') as fh:
for data in tqdm(resp.iter_content(block_size),
unit="MB",
total=total_size//block_size):
fh.write(data)
return True
def multiproc_imap(func,
iterable,
processes=None,
thread_only=False,
total=None,
chunksize=1):
pool_fn = dPool if thread_only else Pool
pool = pool_fn(processes=processes)
return tqdm(pool.imap(func, iterable, chunksize=chunksize), total=total)
def _get_reddit_comments_path(date, root):
if not _validate_reddit_comments_date(date):
return None
filename = _get_reddit_comments_filename(date)
path = os.path.join(root, filename)
return path
def _get_reddit_comments_filename(date):
if date < _XZ_START_DATE:
return date.strftime(_BZ2_FILENAME_TEMPLATE)
else:
return date.strftime(_XZ_FILENAME_TEMPLATE)
def _validate_reddit_comments_date(date):
start_date = _DATA_START_DATE
end_date = datetime.date.today() + relativedelta(months=-1)
if (date > end_date or date < start_date):
logging.warning("date must be between {} and {}: given {}".format(
start_date.strftime("%Y-%m"),
end_date.strftime("%Y-%m"),
date.strftime("%Y-%m")))
return False
return True
populate_reddit_comments_json()
DEFAULT_TOKEN_MAX_CHARS = 25
def extract_reddit_comments_upto_ngram_strs(year, month, n):
"""Extract 1- to n-gram simultaneously because file load is the bottleneck."""
jsons = load_reddit_comments_json(year, month)
texts = map(lambda d: d['body'], jsons)
for text in texts:
upto_ngrams = []
for m in range(n):
mgrams = extract_filtered_ngram_strs(text, m)
upto_ngrams.append(mgrams)
yield upto_ngrams
def extract_reddit_comments_ngram_strs(year, month, n):
jsons = load_reddit_comments_json(year, month)
texts = map(lambda d: d['body'], jsons)
ngram_strs = map(lambda s: extract_filtered_ngram_strs(s, n), texts)
return ngram_strs
def extract_filtered_ngram_strs(text, n, tok_max_chars=DEFAULT_TOKEN_MAX_CHARS):
text_cleaned = re.sub('\s+', ' ', text)
token_match_str = "[\^ ][^ ]{1,%d}" % tok_max_chars
ngram_match_str = "(?=(%s))" % (token_match_str * n)
return re.findall(ngram_match_str, text_cleaned)
def extract_filtered_ngram_strs_slow(text, n, tok_max_chars=DEFAULT_TOKEN_MAX_CHARS):
ngrams = extract_ngrams(text, n)
filtered_ngrams = filter(
lambda ngram: not has_long_token(ngram, tok_max_chars=tok_max_chars), ngrams)
filtered_ngram_strs = map(
lambda ngram: ' '.join(ngram), filtered_ngrams)
return filtered_ngram_strs
def extract_ngrams(text, n):
tokens = text.split()
return zip(*[tokens[i:] for i in range(n)])
def has_long_token(tokens, tok_max_chars=DEFAULT_TOKEN_MAX_CHARS):
for tok in tokens:
if len(tok) > tok_max_chars:
return True
return False
test_string = "asdf " * 1000
%%timeit
list(extract_filtered_ngram_strs(test_string, 5))
%%timeit
list(extract_filtered_ngram_strs_slow(test_string, 5))
%%timeit -r 1 -n 1
_ = list(load_reddit_comments_json(2006, 12))
%%timeit -r 1 -n 1
_ = list(extract_reddit_comments_ngram_strs(2006, 12, 3))
# kaggle_quora: single model of yuhaitao
Competition baseline.
References:
https://www.kaggle.com/shujian/single-rnn-with-4-folds-clr
https://www.kaggle.com/gmhost/gru-capsule
https://github.com/dennybritz/cnn-text-classification-tf
```
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
import os
print(os.listdir("../input"))
# Any results you write to the current directory are saved as output.
```
# load package
```
import os
import time
import random
import re
from tqdm import tqdm
from IPython.display import display
import tensorflow as tf
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn.model_selection import GridSearchCV, StratifiedKFold
from sklearn.metrics import f1_score, roc_auc_score
from collections import Counter
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
```
# global parameters
```
data_dir = "../input/"
train_file = os.path.join(data_dir, "train.csv")
test_file = os.path.join(data_dir, "test.csv")
embedding_size = 300
max_len = 50
max_features = 120000
batch_size = 512
use_local_test = False
```
# Data preprocess
```
# Special characters to pad with spaces
puncts = [',', '.', '"', ':', ')', '(', '-', '!', '?', '|', ';', "'", '$', '&', '/', '[', ']', '>', '%', '=', '#', '*', '+', '\\', '•', '~', '@', '£',
'·', '_', '{', '}', '©', '^', '®', '`', '<', '→', '°', '€', '™', '›', '♥', '←', '×', '§', '″', '′', 'Â', '█', '½', 'à', '…',
'“', '★', '”', '–', '●', 'â', '►', '−', '¢', '²', '¬', '░', '¶', '↑', '±', '¿', '▾', '═', '¦', '║', '―', '¥', '▓', '—', '‹', '─',
'▒', ':', '¼', '⊕', '▼', '▪', '†', '■', '’', '▀', '¨', '▄', '♫', '☆', 'é', '¯', '♦', '¤', '▲', 'è', '¸', '¾', 'Ã', '⋅', '‘', '∞',
'∙', ')', '↓', '、', '│', '(', '»', ',', '♪', '╩', '╚', '³', '・', '╦', '╣', '╔', '╗', '▬', '❤', 'ï', 'Ø', '¹', '≤', '‡', '√', ]
def clean_text(x):
x = str(x)
for punct in puncts:
if punct in x:
# x = x.replace(punct, f' {punct} ')  # f-string version (Python 3.6+ syntax)
x = x.replace(punct, ' '+punct+' ')
return x
# Clean numbers: mask runs of digits with '#'
def clean_numbers(x):
if bool(re.search(r'\d', x)):
x = re.sub('[0-9]{5,}', '#####', x)
x = re.sub('[0-9]{4}', '####', x)
x = re.sub('[0-9]{3}', '###', x)
x = re.sub('[0-9]{2}', '##', x)
return x
# Clean common misspellings and contractions
mispell_dict = {"aren't" : "are not",
"can't" : "cannot",
"couldn't" : "could not",
"didn't" : "did not",
"doesn't" : "does not",
"don't" : "do not",
"hadn't" : "had not",
"hasn't" : "has not",
"haven't" : "have not",
"he'd" : "he would",
"he'll" : "he will",
"he's" : "he is",
"i'd" : "I would",
"i'd" : "I had",
"i'll" : "I will",
"i'm" : "I am",
"isn't" : "is not",
"it's" : "it is",
"it'll":"it will",
"i've" : "I have",
"let's" : "let us",
"mightn't" : "might not",
"mustn't" : "must not",
"shan't" : "shall not",
"she'd" : "she would",
"she'll" : "she will",
"she's" : "she is",
"shouldn't" : "should not",
"that's" : "that is",
"there's" : "there is",
"they'd" : "they would",
"they'll" : "they will",
"they're" : "they are",
"they've" : "they have",
"we'd" : "we would",
"we're" : "we are",
"weren't" : "were not",
"we've" : "we have",
"what'll" : "what will",
"what're" : "what are",
"what's" : "what is",
"what've" : "what have",
"where's" : "where is",
"who'd" : "who would",
"who'll" : "who will",
"who're" : "who are",
"who's" : "who is",
"who've" : "who have",
"won't" : "will not",
"wouldn't" : "would not",
"you'd" : "you would",
"you'll" : "you will",
"you're" : "you are",
"you've" : "you have",
"'re": " are",
"wasn't": "was not",
"we'll":" will",
"didn't": "did not",
"tryin'":"trying"}
def _get_mispell(mispell_dict):
mispell_re = re.compile('(%s)' % '|'.join(mispell_dict.keys()))
return mispell_dict, mispell_re
mispellings, mispellings_re = _get_mispell(mispell_dict)
def replace_typical_misspell(text):
def replace(match):
return mispellings[match.group(0)]
return mispellings_re.sub(replace, text)
def load_and_prec(use_local_test=True):
train_df = pd.read_csv(train_file)
test_df = pd.read_csv(test_file)
print("Train shape : ",train_df.shape)
print("Test shape : ",test_df.shape)
display(train_df.head())
display(test_df.head())
# Lowercase
train_df["question_text"] = train_df["question_text"].str.lower()
test_df["question_text"] = test_df["question_text"].str.lower()
# Clean numbers
train_df["question_text"] = train_df["question_text"].apply(lambda x: clean_numbers(x))
test_df["question_text"] = test_df["question_text"].apply(lambda x: clean_numbers(x))
# Fix misspellings
train_df["question_text"] = train_df["question_text"].apply(lambda x: replace_typical_misspell(x))
test_df["question_text"] = test_df["question_text"].apply(lambda x: replace_typical_misspell(x))
# Clean punctuation / special characters
train_df["question_text"] = train_df["question_text"].apply(lambda x: clean_text(x))
test_df["question_text"] = test_df["question_text"].apply(lambda x: clean_text(x))
## fill up the missing values
train_X = train_df["question_text"].fillna("_##_").values
test_X = test_df["question_text"].fillna("_##_").values
## Tokenize the sentences
# Note: this tokenizer lowercases all letters
tokenizer = Tokenizer(num_words=max_features)
tokenizer.fit_on_texts(list(train_X))
train_X = tokenizer.texts_to_sequences(train_X)
test_X = tokenizer.texts_to_sequences(test_X)
## Get the target values
train_Y = train_df['target'].values
print(np.sum(train_Y))
# # Drop the 30 most frequent words (index <= 30) before padding
# train_cut = []
# test_cut = []
# for x in train_X:
# train_cut.append([i for i in x if i>30])
# for x in test_X:
# test_cut.append([i for i in x if i>30])
# train_X = train_cut
# test_X = test_cut
## Pad the sentences
train_X = pad_sequences(train_X, maxlen=max_len, padding="post", truncating="post")
test_X = pad_sequences(test_X, maxlen=max_len, padding="post", truncating="post")
# # # Drop the 40 most frequent words, setting them to 0 (the pad index)
# # train_X = np.where(train_X>=40, train_X, 0)
# # test_X = np.where(test_X>=40, test_X, 0)
#shuffling the data
np.random.seed(20190101)
trn_idx = np.random.permutation(len(train_X))
train_X = train_X[trn_idx]
train_Y = train_Y[trn_idx]
# Optionally hold out a local test set
if use_local_test:
train_X, local_test_X = (train_X[:-4*len(test_X)], train_X[-4*len(test_X):])
train_Y, local_test_Y = (train_Y[:-4*len(test_X)], train_Y[-4*len(test_X):])
else:
local_test_X = np.zeros(shape=[1,max_len], dtype=np.int32)
local_test_Y = np.zeros(shape=[1], dtype=np.int32)
print(train_X.shape)
print(local_test_X.shape)
print(test_X.shape)
print(len(tokenizer.word_index))
return train_X, test_X, train_Y, local_test_X, local_test_Y, tokenizer.word_index
# load_and_prec()
```
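A quick, purely illustrative sanity check of the cleaning chain defined above (the sample sentence is invented; the call order mirrors `load_and_prec`):
```
sample = "Isn't the movie rated 2019?"   # made-up example sentence
s = sample.lower()                       # lower-case
s = clean_numbers(s)                     # the 4-digit run "2019" becomes "####"
s = replace_typical_misspell(s)          # "isn't" -> "is not"
s = clean_text(s)                        # pads every special character (including '#') with spaces
print(s)
```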
# load embeddings
```
def load_glove(word_index):
EMBEDDING_FILE = '../input/embeddings/glove.840B.300d/glove.840B.300d.txt'
def get_coefs(word,*arr): return word, np.asarray(arr, dtype='float32')
embeddings_index = dict(get_coefs(*o.split(" ")) for o in open(EMBEDDING_FILE))
all_embs = np.stack(embeddings_index.values())
emb_mean,emb_std = all_embs.mean(), all_embs.std()
embed_size = all_embs.shape[1]
# word_index = tokenizer.word_index
nb_words = min(max_features, len(word_index))
embedding_matrix = np.random.normal(emb_mean, emb_std, (nb_words, embed_size))
for word, i in word_index.items():
if i >= max_features: continue
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None: embedding_matrix[i] = embedding_vector
return embedding_matrix
def load_fasttext(word_index):
"""
่ฟไธชๅ ่ฝฝ่ฏๅ้่ฟๆฒกๆ็ป็
"""
EMBEDDING_FILE = '../input/embeddings/wiki-news-300d-1M/wiki-news-300d-1M.vec'
def get_coefs(word,*arr):
return word, np.asarray(arr, dtype='float32')
embeddings_index = dict(get_coefs(*o.split(" ")) for o in open(EMBEDDING_FILE) if len(o)>100)
all_embs = np.stack(embeddings_index.values())
emb_mean,emb_std = all_embs.mean(), all_embs.std()
embed_size = all_embs.shape[1]
# word_index = tokenizer.word_index
nb_words = min(max_features, len(word_index))
embedding_matrix = np.random.normal(emb_mean, emb_std, (nb_words, embed_size))
for word, i in word_index.items():
if i >= max_features: continue
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None: embedding_matrix[i] = embedding_vector
return embedding_matrix
def load_para(word_index):
EMBEDDING_FILE = '../input/embeddings/paragram_300_sl999/paragram_300_sl999.txt'
def get_coefs(word,*arr): return word, np.asarray(arr, dtype='float32')
embeddings_index = dict(get_coefs(*o.split(" ")) for o in open(EMBEDDING_FILE, encoding="utf8", errors='ignore') if len(o)>100 and o.split(" ")[0] in word_index)
all_embs = np.stack(embeddings_index.values())
emb_mean,emb_std = all_embs.mean(), all_embs.std()
embed_size = all_embs.shape[1]
embedding_matrix = np.random.normal(emb_mean, emb_std, (max_features, embed_size))
for word, i in word_index.items():
if i >= max_features: continue
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None: embedding_matrix[i] = embedding_vector
return embedding_matrix
```
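Each loader above parses its embedding file line by line with a nested `get_coefs` helper. A stand-alone copy of that helper, applied to an invented line, shows the resulting (word, vector) pair (real files carry 300 floats per word):
```
def get_coefs(word, *arr):
    return word, np.asarray(arr, dtype='float32')

line = "hello 0.1 -0.2 0.3"              # invented line; real rows have 300 values
word, vec = get_coefs(*line.split(" "))
print(word, vec.dtype, vec.shape)        # hello float32 (3,)
```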
# Models
text_rnn(Bi-GRU)
```
# dense layer
def dense(inputs, hidden, use_bias=True,
w_initializer=tf.contrib.layers.xavier_initializer(), b_initializer=tf.constant_initializer(0.1), scope="dense"):
"""
ๅ
จ่ฟๆฅๅฑ
"""
with tf.variable_scope(scope):
shape = tf.shape(inputs)
dim = inputs.get_shape().as_list()[-1]
out_shape = [shape[idx] for idx in range(
len(inputs.get_shape().as_list()) - 1)] + [hidden]
# If inputs is 3-D, reshape it to 2-D first
flat_inputs = tf.reshape(inputs, [-1, dim])
W = tf.get_variable("W", [dim, hidden], initializer=w_initializer)
res = tf.matmul(flat_inputs, W)
if use_bias:
b = tf.get_variable("b", [hidden], initializer=b_initializer)
res = tf.nn.bias_add(res, b)
# out_shape is the input shape with the last dimension replaced by hidden
res = tf.reshape(res, out_shape)
return res
# dot-product attention
def dot_attention(inputs, memory, mask, hidden, keep_prob, scope="dot_attention"):
"""
้จๆงattentionๅฑ
"""
def softmax_mask(val, mask):
return -1e30 * (1 - tf.cast(mask, tf.float32)) + val
with tf.variable_scope(scope):
JX = tf.shape(inputs)[1]  # dimension 1 of inputs; should be c_maxlen
with tf.variable_scope("attention"):
# inputs_ shape: [batch_size, c_maxlen, hidden]
inputs_ = tf.nn.relu(
dense(inputs, hidden, use_bias=False, scope="inputs"))
memory_ = tf.nn.relu(
dense(memory, hidden, use_bias=False, scope="memory"))
# Batched matrix multiplication; the result shape is [batch_size, c_maxlen, q_maxlen]
outputs = tf.matmul(inputs_, tf.transpose(
memory_, [0, 2, 1])) / (hidden ** 0.5)
# Tile the mask to the same shape as outputs; a possible improvement is to mask both inputs and memory
mask = tf.tile(tf.expand_dims(mask, axis=1), [1, JX, 1])
logits = tf.nn.softmax(softmax_mask(outputs, mask))
outputs = tf.matmul(logits, memory)
# res:[batch_size, c_maxlen, 12*hidden]
res = tf.concat([inputs, outputs], axis=2)
return res
# with tf.variable_scope("gate"):
# """
# attention * gate
# """
# dim = res.get_shape().as_list()[-1]
# d_res = dropout(res, keep_prob=keep_prob, is_train=is_train)
# gate = tf.nn.sigmoid(dense(d_res, dim, use_bias=False))
# return res * gate  # element-wise multiplication with the gate
# A multi-layer bidirectional GRU class, accelerated with cuDNN
class cudnn_gru:
def __init__(self, num_layers, num_units, input_size, scope=None):
self.num_layers = num_layers
self.grus = []
self.inits = []
self.dropout_mask = []
self.scope = scope
for layer in range(num_layers):
input_size_ = input_size if layer == 0 else 2 * num_units
gru_fw = tf.contrib.cudnn_rnn.CudnnGRU(
1, num_units, name="f_cudnn_gru")
gru_bw = tf.contrib.cudnn_rnn.CudnnGRU(
1, num_units, name="b_cudnn_gru")
self.grus.append((gru_fw, gru_bw, ))
def __call__(self, inputs, seq_len, keep_prob, concat_layers=True):
# cuDNN GRU requires swapping the tensor dimensions (time-major), presumably for compute efficiency
outputs = [tf.transpose(inputs, [1, 0, 2])]
out_states = []
with tf.variable_scope(self.scope):
for layer in range(self.num_layers):
gru_fw, gru_bw = self.grus[layer]
with tf.variable_scope("fw_{}".format(layer)):
out_fw, (fw_state,) = gru_fw(outputs[-1])
with tf.variable_scope("bw_{}".format(layer)):
inputs_bw = tf.reverse_sequence(outputs[-1], seq_lengths=seq_len, seq_dim=0, batch_dim=1)
out_bw, (bw_state,) = gru_bw(inputs_bw)
out_bw = tf.reverse_sequence(out_bw, seq_lengths=seq_len, seq_dim=0, batch_dim=1)
outputs.append(tf.concat([out_fw, out_bw], axis=2))
out_states.append(tf.concat([fw_state, bw_state], axis=-1))
if concat_layers:
res = tf.concat(outputs[1:], axis=2)
final_state = tf.squeeze(tf.transpose(tf.concat(out_states, axis=0), [1,0,2]), axis=1)
else:
res = outputs[-1]
final_state = tf.squeeze(out_states[-1], axis=0)
res = tf.transpose(res, [1, 0, 2])
return res, final_state
class model_fastText(object):
"""
ๅฐ่ฏๅ้ๅนณๅ๏ผ็ถๅ็ดๆฅๅ
จ่ฟๆฅๅฑๅ็ฑปใ
"""
def __init__(self, embedding_matrix, sequence_length=50, num_classes=1,
embedding_size=300, trainable=True):
# Placeholders for input, output and dropout
self.input_x = tf.placeholder(tf.int32, [None, sequence_length], name="input_x")
self.input_y = tf.placeholder(tf.int32, [None], name="input_y")
self.keep_prob = tf.placeholder(tf.float32, name="keep_prob")
# Some variables
self.embedding_matrix = tf.get_variable("embedding_matrix", initializer=tf.constant(
embedding_matrix, dtype=tf.float32), trainable=False)
self.global_step = tf.get_variable('global_step', shape=[], dtype=tf.int32,
initializer=tf.constant_initializer(0), trainable=False)
with tf.name_scope("process"):
self.seq_len = tf.reduce_sum(tf.cast(tf.cast(self.input_x, dtype=tf.bool), dtype=tf.int32), axis=1, name="seq_len")
self.mask = tf.cast(self.input_x, dtype=tf.bool)
# The structure of the model
self.layers(num_classes)
# optimizer
if trainable:
self.learning_rate = tf.train.exponential_decay(
learning_rate=0.001, global_step=self.global_step, decay_steps=2000, decay_rate=0.95)
self.opt = tf.train.AdamOptimizer(learning_rate=self.learning_rate, epsilon=1e-8)
self.train_op = self.opt.minimize(self.loss, global_step=self.global_step)
def layers(self, num_classes):
# Embedding layer
with tf.variable_scope("embedding"):
self.embedding_inputs = tf.nn.embedding_lookup(self.embedding_matrix, self.input_x)
# self.embedding_inputs = tf.nn.dropout(self.embedding_inputs, self.keep_prob)
with tf.variable_scope("pooling"):
"""
text็่กจ็คบ๏ผ็ดๆฅๅฐ่ฏๅ้average-pooling
"""
self.pool_out = tf.reduce_mean(self.embedding_inputs, axis=1)
with tf.variable_scope("fully_connected"):
"""
ๅ
จ่ฟๆฅๅฑ
"""
fc_W1 = tf.get_variable(
shape=[self.pool_out.get_shape().as_list()[1], 512],
initializer=tf.contrib.layers.xavier_initializer(),
name="fc_w1")
fc_b1 = tf.get_variable(shape=[512], initializer=tf.constant_initializer(0.1), name="fc_b1")
fc_1 = tf.nn.relu(tf.nn.bias_add(tf.matmul(self.pool_out, fc_W1), fc_b1))
fc_1_drop = tf.nn.dropout(fc_1, self.keep_prob)
fc_W3 = tf.get_variable(
shape=[fc_1_drop.get_shape().as_list()[1], 128],
initializer=tf.contrib.layers.xavier_initializer(),
name="fc_w3")
fc_b3 = tf.get_variable(shape=[128], initializer=tf.constant_initializer(0.1), name="fc_b3")
fc_3 = tf.nn.relu(tf.nn.bias_add(tf.matmul(fc_1_drop, fc_W3), fc_b3))
fc_3_drop = tf.nn.dropout(fc_3, self.keep_prob)
fc_W2 = tf.get_variable(
shape=[fc_3.get_shape().as_list()[1], num_classes],
initializer=tf.contrib.layers.variance_scaling_initializer(),
name="fc_w2")
fc_b2 = tf.get_variable(shape=[num_classes], initializer=tf.constant_initializer(0.1), name="fc_b2")
self.logits = tf.squeeze(tf.nn.bias_add(tf.matmul(fc_3_drop, fc_W2), fc_b2), name="logits")
with tf.variable_scope("sigmoid_and_loss"):
"""
็จsigmoidๅฝๆฐๅ ้ๅผไปฃๆฟsoftmax็ๅคๅ็ฑป
"""
self.sigmoid = tf.nn.sigmoid(self.logits)
self.loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
logits=self.logits, labels=tf.cast(self.input_y, dtype=tf.float32)))
```
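The `softmax_mask` helper inside `dot_attention` pushes padded positions to a very large negative value so they receive (effectively) zero attention weight after the softmax. A small NumPy sketch of the same idea, independent of the TensorFlow graph above:
```
scores = np.array([2.0, 1.0, 0.5, 0.1], dtype=np.float32)
mask = np.array([1, 1, 0, 0], dtype=np.float32)   # 1 = real token, 0 = padding
masked = scores + (-1e30) * (1 - mask)            # padded scores become ~ -1e30
weights = np.exp(masked - masked.max())
weights /= weights.sum()
print(weights)                                    # padded positions get ~0 weight
```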
# Training Tools
```
# Batch generator
def batch_generator(train_X, train_Y, batch_size, is_train=True):
"""
batch็ๆๅจ:
ๅจis_trainไธบtrue็ๆ
ๅตไธ๏ผ่กฅๅ
batch๏ผๅนถshuffle
"""
data_number = train_X.shape[0]
batch_count = 0
while True:
if batch_count * batch_size + batch_size > data_number:
# Handling of the final (incomplete) batch
if is_train:
# Discard the leftover samples and start a new shuffled pass
# shuffle
np.random.seed(2018)
trn_idx = np.random.permutation(data_number)
train_X = train_X[trn_idx]
train_Y = train_Y[trn_idx]
one_batch_X = train_X[0:batch_size]
one_batch_Y = train_Y[0:batch_size]
batch_count = 1
yield one_batch_X, one_batch_Y
else:
one_batch_X = train_X[batch_count * batch_size:data_number]
one_batch_Y = train_Y[batch_count * batch_size:data_number]
batch_count = 0
yield one_batch_X, one_batch_Y
else:
one_batch_X = train_X[batch_count * batch_size:batch_count * batch_size + batch_size]
one_batch_Y = train_Y[batch_count * batch_size:batch_count * batch_size + batch_size]
batch_count += 1
yield one_batch_X, one_batch_Y
# Undersample the positive class and augment the negative class; augmentation is currently random token shuffling.
def data_augmentation(X, Y, under_sample=100000, aug_num=3):
"""
under_sample: ๆฌ ้ๆ ทไธชๆฐ
aug: ๆฐๆฎๅขๅผบๅๆฐ
"""
pos_X = []
neg_X = []
for i in range(X.shape[0]):
if Y[i] == 1:
neg_X.append(list(X[i]))
else:
pos_X.append(list(X[i]))
# Undersample positive samples
random.shuffle(pos_X)
pos_X = pos_X[:-under_sample]
# Augment positive samples
pos_X_aug = []
for i in range(200000):
aug = []
for x in pos_X[i]:
if x != 0:
aug.append(x)
else:
break
random.shuffle(aug)
aug += [0] * (max_len-len(aug))
pos_X_aug.append(aug)
pos_X.extend(pos_X_aug)
print(len(pos_X))
# Augment negative samples
neg_X_aug = []
for i in range(aug_num):
for neg in neg_X:
aug = []
for x in neg:
if x != 0:
aug.append(x)
else:
break
random.shuffle(aug)
aug += [0] * (max_len-len(aug))
neg_X_aug.append(aug)
neg_X.extend(neg_X_aug)
print(len(neg_X))
pos_Y = np.zeros(shape=[len(pos_X)], dtype=np.int32)
neg_Y = np.ones(shape=[len(neg_X)], dtype=np.int32)
pos_X.extend(neg_X)
X_out = np.array(pos_X, dtype=np.int32)
Y_out = np.append(pos_Y, neg_Y)
print(X_out.shape)
#shuffling the data
np.random.seed(2018)
trn_idx = np.random.permutation(len(X_out))
X_out = X_out[trn_idx]
Y_out = Y_out[trn_idx]
print(X_out.shape)
print(Y_out.shape)
return X_out, Y_out
# Search for the best decision threshold
def bestThreshold(y,y_preds):
tmp = [0,0,0] # idx, cur, max
delta = 0
for tmp[0] in tqdm(np.arange(0.1, 0.501, 0.01)):
tmp[1] = metrics.f1_score(y, np.array(y_preds)>tmp[0])
if tmp[1] > tmp[2]:
delta = tmp[0]
tmp[2] = tmp[1]
print('best threshold is {:.4f} with F1 score: {:.4f}'.format(delta, tmp[2]))
return delta , tmp[2]
```
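A toy run of the helpers above, with made-up arrays: `batch_generator` yields fixed-size batches (reshuffling on wrap-around when `is_train` is True), and `bestThreshold` scans thresholds between 0.1 and 0.5 for the best F1.
```
toy_X = np.arange(10).reshape(5, 2)
toy_Y = np.array([0, 1, 0, 1, 1])
gen = batch_generator(toy_X, toy_Y, batch_size=2, is_train=False)
bx, by = next(gen)
print(bx.shape, by.shape)                  # (2, 2) (2,)

toy_preds = np.array([0.2, 0.8, 0.3, 0.7, 0.6])
thr, f1 = bestThreshold(toy_Y, toy_preds)  # prints the best threshold and its F1
```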
# Main part
```
# Load data and word embeddings
train_X, test_X, train_Y, local_test_X, local_test_Y, word_index = load_and_prec(use_local_test)
# embedding_matrix_1 = load_glove(word_index)
embedding_matrix = load_fasttext(word_index)
# embedding_matrix = load_para(word_index)
# embedding_matrix = np.mean([embedding_matrix_1, embedding_matrix_3], axis = 0)
np.shape(embedding_matrix)
# embedding_matrix = np.zeros(shape=[100,300],dtype=np.float32)
# Multi-fold training, cross-validation averaging, and testing
# Build the cross-validation splits
DATA_SPLIT_SEED = 20190101
splits = list(StratifiedKFold(n_splits=5, shuffle=True, random_state=DATA_SPLIT_SEED).split(train_X, train_Y))
# test batch
test_batch = batch_generator(test_X, np.zeros(shape=[test_X.shape[0]], dtype=np.int32), batch_size, False)
local_test_batch = batch_generator(local_test_X, local_test_Y, batch_size, False)
# Final prediction arrays
train_preds = np.zeros(len(train_X), dtype=np.float32)
test_preds = np.zeros((len(test_X), len(splits)), dtype=np.float32)
test_preds_local = np.zeros((len(local_test_X), len(splits)), dtype=np.float32)
best_threshold = 0.33
# Multi-fold training loop
for i, (train_idx, valid_idx) in enumerate(splits):
print("fold:{}".format(i+1))
X_train = train_X[train_idx]
Y_train = train_Y[train_idx]
X_val = train_X[valid_idx]
Y_val = train_Y[valid_idx]
# # Data augmentation (disabled)
# X_train, Y_train = data_augmentation(X_train, Y_train)
# print(Y_train[:100])
# print(Y_train[-100:])
# Batch generators for training and validation
train_batch = batch_generator(X_train, Y_train, batch_size, True)
val_batch = batch_generator(X_val, Y_val, batch_size, False)
# Track the best results per fold
best_val_f1 = 0.0
best_val_loss = 99999.99999
best_val_fold = []
best_test_fold = []
best_local_test_fold = []
# Train & validate & test
with tf.Graph().as_default():
sess_config = tf.ConfigProto(allow_soft_placement=True)
sess_config.gpu_options.allow_growth = True
with tf.Session(config=sess_config) as sess:
writer = tf.summary.FileWriter("./log/", sess.graph)
# Build the model
model = model_fastText(embedding_matrix=embedding_matrix, sequence_length=max_len)
sess.run(tf.global_variables_initializer())
train_loss_sum = 0.0
start_time = time.time()
for go in range(80000):
steps = sess.run(model.global_step) + 1
# Training step
train_batch_X, train_batch_Y = next(train_batch)
feed = {model.input_x:train_batch_X, model.input_y:train_batch_Y, model.keep_prob:0.9}
loss, train_op = sess.run([model.loss, model.train_op], feed_dict=feed)
train_loss_sum += loss
# Validation & test
if steps % 1000 == 0:
val_predictions = []
val_loss_sum = 0.0
for _ in range(X_val.shape[0] // batch_size + 1):
val_batch_X, val_batch_Y = next(val_batch)
feed_val = {model.input_x:val_batch_X, model.input_y:val_batch_Y, model.keep_prob:1.0}
val_loss, val_sigmoid = sess.run([model.loss, model.sigmoid], feed_dict=feed_val)
val_predictions.extend(val_sigmoid)
val_loss_sum += val_loss
# val_f1 = metrics.f1_score(Y_val, np.array(val_predictions))
# val_pre = metrics.precision_score(Y_val, np.array(val_predictions))
# val_recall = metrics.recall_score(Y_val, np.array(val_predictions))
val_loss_sum = val_loss_sum / (X_val.shape[0] // batch_size + 1)
# print("steps:{}, train_loss:{:.5f}, val_loss:{:.5f}, val_F1:{:.5f}, val_pre:{:.5f}, val_recall:{:.5f}".format(
# steps, float(train_loss_sum / 1000), float(val_loss_sum), float(val_f1), float(val_pre), float(val_recall)))
end_time = time.time()
print("steps:{}, train_loss:{:.5f}, val_loss:{:.5f}, time:{:.5f}".format(
steps, float(train_loss_sum / 1000), float(val_loss_sum), end_time-start_time))
start_time = time.time()
# Write summaries to TensorBoard
train_loss_write = tf.Summary(value=[tf.Summary.Value(tag="model/train_loss", \
simple_value=train_loss_sum / 1000), ])
writer.add_summary(train_loss_write, steps)
val_loss_write = tf.Summary(value=[tf.Summary.Value(tag="model/val_loss", simple_value=val_loss_sum), ])
writer.add_summary(val_loss_write, steps)
# val_f1_write = tf.Summary(value=[tf.Summary.Value(tag="index/val_f1", simple_value=val_f1), ])
# writer.add_summary(val_f1_write, steps)
# val_pre_write = tf.Summary(value=[tf.Summary.Value(tag="index/val_precision", simple_value=val_pre), ])
# writer.add_summary(val_pre_write, steps)
# val_recall_write = tf.Summary(value=[tf.Summary.Value(tag="index/val_recall", simple_value=val_recall), ])
# writer.add_summary(val_recall_write, steps)
writer.flush()
# train loss
train_loss_sum = 0.0
# # Alternative: keep the test predictions from the step with the best validation F1
# if val_f1 > best_val_f1:
# best_val_f1 = val_f1
# best_test = []
# for _ in range(test_X.shape[0] // batch_size + 1):
# test_batch_X, _ = next(test_batch)
# feed_test = {model.input_x:test_batch_X, model.keep_prob:1.0}
# test_classes = sess.run(model.classes, feed_dict=feed_test)
# best_test.extend(test_classes)
# print("test done!")
# Test, keeping the predictions from the step with the lowest validation loss (after 40k steps)
if val_loss_sum < best_val_loss and steps >= 40000:
best_val_loss = val_loss_sum
best_val_fold = val_predictions
best_test_fold = []
best_local_test_fold = []
# Kaggle (submission) test set
for _ in range(test_X.shape[0] // batch_size + 1):
test_batch_X, _ = next(test_batch)
feed_test = {model.input_x:test_batch_X, model.keep_prob:1.0}
test_sigmoid = sess.run(model.sigmoid, feed_dict=feed_test)
best_test_fold.extend(test_sigmoid)
# Local test set
if use_local_test:
for _ in range(local_test_X.shape[0] // batch_size + 1):
local_test_batch_X, _ = next(local_test_batch)
feed_local_test = {model.input_x:local_test_batch_X, model.keep_prob:1.0}
local_test_sigmoid = sess.run(model.sigmoid, feed_dict=feed_local_test)
best_local_test_fold.extend(local_test_sigmoid)
print("test done!")
# Update this fold's predictions
best_threshold, best_f1 = bestThreshold(Y_val, best_val_fold)
# train_preds[valid_idx] = np.array(best_val_fold)
test_preds[:, i] = np.array(best_test_fold)
if use_local_test:
test_preds_local[:, i] = np.array(best_local_test_fold)
# print("fold:{}, threshold:{}, F1_score:{:.5f}".format(i, best_threshold_fold, \
# metrics.f1_score(Y_val, (np.array(best_val_fold)>best_threshold_fold).astype(int)))))
# Single model: only run the first fold
break
# Post-process and write the submission
if use_local_test:
print("local_test_f1:{:.5f}".format(metrics.f1_score(local_test_Y, (test_preds_local.mean(axis=1) > best_threshold))))
sub = pd.read_csv('../input/sample_submission.csv')
sub["prediction"] = (test_preds.mean(axis=1)*5 > best_threshold).astype(int)
sub.to_csv("submission.csv", index=False)
pd.DataFrame(test_preds_local).corr()
```
## Pytorch pipeline
PyTorch pipeline to train and validate on the CIFAR10 dataset
The produced pipeline is composed of the following steps:
**1. data-processing**: Download and preprocess CIFAR10 dataset
**2. visualization**: Visualize and show statistics about the dataset
**3. train**: Train the deep learning model
**4. test-on-test**: Test the model on the test set
**5. test-on-whole**: Test the model performance on the whole dataset
The pipeline is sequential, so every step is dependent on the previous one.
```
import torch
import torchvision
import torchvision.transforms as transforms
classes = ('plane', 'car', 'bird', 'cat',
'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
TRAIN_STEPS = 2
```
#### Load and transform dataset
```
input_data_folder = "./data"
transform = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
trainset = torchvision.datasets.CIFAR10(root=input_data_folder, train=True,
download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=4,
shuffle=True, num_workers=2)
testset = torchvision.datasets.CIFAR10(root=input_data_folder, train=False,
download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=4,
shuffle=False, num_workers=2)
```
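The `Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))` step maps each channel from [0, 1] to [-1, 1]; the `imshow` helper below undoes it with `img / 2 + 0.5`. A quick check on a dummy single-channel tensor (values chosen only for illustration):
```
x = torch.tensor([[[0.0, 0.5, 1.0]]])          # shape (C=1, H=1, W=3)
norm = transforms.Normalize((0.5,), (0.5,))
print(norm(x))                                 # tensor([[[-1.,  0.,  1.]]])
```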
#### Visualize dataset
```
import matplotlib.pyplot as plt
import numpy as np
# functions to show an image
def imshow(img):
img = img / 2 + 0.5 # unnormalize
npimg = img.numpy()
plt.imshow(np.transpose(npimg, (1, 2, 0)))
plt.show()
# get some random training images
dataiter = iter(trainloader)
images, labels = next(dataiter)
# show images
imshow(torchvision.utils.make_grid(images))
# print labels
print(' '.join('%5s' % classes[labels[j]] for j in range(4)))
```
#### Define model
```
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
def __init__(self):
nn.Module.__init__(self)
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = x.view(-1, 16 * 5 * 5)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
```
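The `16 * 5 * 5` in `fc1` comes from the spatial sizes after the two conv/pool stages on 32×32 CIFAR10 images: 32 → 28 (conv, kernel 5) → 14 (pool) → 10 (conv) → 5 (pool), with 16 output channels. A throwaway forward pass confirms the shapes (purely illustrative, not part of the training pipeline):
```
check_net = Net()
dummy = torch.zeros(1, 3, 32, 32)                      # one fake CIFAR image
h = check_net.pool(F.relu(check_net.conv1(dummy)))     # -> (1, 6, 14, 14)
h = check_net.pool(F.relu(check_net.conv2(h)))         # -> (1, 16, 5, 5)
print(h.shape, check_net(dummy).shape)                 # [1, 16, 5, 5] and [1, 10]
```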
#### Define loss func and optimizer
```
import torch.optim as optim
net = Net()
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
```
#### Train
```
for epoch in range(int(TRAIN_STEPS)): # loop over the dataset multiple times
running_loss = 0.0
for i, data in enumerate(trainloader, 0):
# get the inputs
inputs, labels = data
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = net(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# print statistics
running_loss += loss.item()
if i % 2000 == 1999: # print every 2000 mini-batches
print('[%d, %5d] loss: %.3f' %
(epoch + 1, i + 1, running_loss / 2000))
running_loss = 0.0
print('Finished Training')
```
#### Test on test data
```
dataiter = iter(testloader)
images, labels = next(dataiter)
# print images
imshow(torchvision.utils.make_grid(images))
print('GroundTruth: ', ' '.join('%5s' % classes[labels[j]] for j in range(4)))
outputs = net(images)
_, predicted = torch.max(outputs, 1)
print('Predicted: ', ' '.join('%5s' % classes[predicted[j]]
for j in range(4)))
```
#### Performance on whole dataset
```
correct = 0
total = 0
with torch.no_grad():
for data in testloader:
images, labels = data
outputs = net(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Accuracy of the network on the 10000 test images: %d %%' % (
100 * correct / total))
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
with torch.no_grad():
for data in testloader:
images, labels = data
outputs = net(images)
_, predicted = torch.max(outputs, 1)
c = (predicted == labels).squeeze()
for i in range(4):
label = labels[i]
class_correct[label] += c[i].item()
class_total[label] += 1
for i in range(10):
print('Accuracy of %5s : %2d %%' % (
classes[i], 100 * class_correct[i] / class_total[i]))
```
```
%%time
import pandas as pd
import numpy as np
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from imblearn.over_sampling import SMOTE
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import Normalizer
from itertools import combinations
from sklearn import model_selection
import copy
from statistics import mean,mode
from itertools import combinations
from sklearn.ensemble import GradientBoostingClassifier
df=pd.read_csv('../60s_window_wrist_chest.csv',index_col=0)
features=df.columns.tolist()
features
removed = ['label']
for rem in removed:
features.remove(rem)
features_with_sub=[]
features_with_sub[:]=features
removed = ['subject']
for rem in removed:
features.remove(rem)
feature=features
print(len(feature))
len(features_with_sub)
sm = SMOTE(random_state=2)
X, y= sm.fit_sample(df[features_with_sub], df['label'])
df_new=pd.concat([pd.DataFrame(X,columns=features_with_sub),pd.DataFrame(y,columns=['label'])],axis=1)
df_new
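# note: SMOTE builds synthetic samples by interpolating between real ones, so synthetic
# rows can end up with non-integer 'subject' values; the loop below snaps each value
# back to the nearest real subject ID in the study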
for i in range (len(list(df_new['subject']))):
df_new['subject'][i] = min([2,3,4,5,6,7,8,9,10,11,13,14,15,16,17], key=lambda x:abs(x-df_new['subject'][i]))
df_new['subject']=df_new['subject'].astype(int)
p_d=pd.read_csv('../personal_detail.csv',index_col=0)
df_new_1=df_new.merge(p_d,on='subject')
df_new_1
sel_fea = ['EDA_tonic_mean','EDA_smna_mean','EDA_tonic_min','EDA_phasic_mean','TEMP_std','BVP_peak_freq','smoker_YES','ACC_y_min','ACC_x_mean','weight','gender_ female','c_Temp_max','ACC_x_max','TEMP_mean',
'c_ACC_y_std','net_acc_max','Resp_std']
user_list = [2,3,4,5,6,7,8,9,10,11,13,14,15,16,17]
len(user_list)
for cp in range (9,len(user_list)):
print ('*'*20)
print ("15C"+str(cp))
print ('*'*20)
    com = cp  # number of subjects used for training in this combination
combi = combinations(user_list, com)
tot = str(len(list(copy.deepcopy(combi))))
best_random_state_train = user_list[0:com]
best_random_state_test = user_list[com:]
train= df_new_1.loc[df_new_1.subject.isin(best_random_state_train)]
test= df_new_1.loc[df_new_1.subject.isin(best_random_state_test)]
scaler = Normalizer()
scaled_data_train = scaler.fit_transform(train[sel_fea])
scaled_data_test = scaler.transform(test[sel_fea])
index = 1
subjects_in_train = []
subjects_in_test = []
best_acc = []
mean_acc = []
min_acc = []
acc = []
for c in list(combi):
local_acc = []
# print (str(index)+" of "+ tot)
train_sub = list(c)
test_sub = list(set(user_list)-set(train_sub))
print (train_sub,test_sub)
train= df_new_1.loc[df_new_1.subject.isin(train_sub)]
test= df_new_1.loc[df_new_1.subject.isin(test_sub)]
scaler = Normalizer()
scaled_data_train = scaler.fit_transform(train[sel_fea])
scaled_data_test = scaler.transform(test[sel_fea])
clf = GradientBoostingClassifier()
clf.fit(scaled_data_train,train['label'])
y_pred=clf.predict(scaled_data_test)
#print (classification_report(test['label'],y_pred))
rpt = classification_report(test['label'],y_pred,output_dict=True)['accuracy']
acc.append(rpt)
subjects_in_train.append(str(train_sub))
subjects_in_test.append(str(test_sub))
index += 1
combi_dict = {'subjects_in_train':subjects_in_train,'subjects_in_test':subjects_in_test, 'acc':acc}
df_plot_combi = pd.DataFrame(combi_dict)
print("****** Writing to File ********")
    # Cross-check the file name before saving the results DataFrame to CSV
file_name = '4_class_combination_'+str(com)+'-'+str(15-com)+'.csv'
print (file_name)
df_plot_combi.to_csv(file_name)
temp = df_plot_combi[df_plot_combi['acc']>=max(df_plot_combi['acc'])]
print("Max:",max(df_plot_combi['acc']))
print("Min:",min(df_plot_combi['acc']))
print("Mean:",mean(df_plot_combi['acc']))
```
|
github_jupyter
|
| 0.172102 | 0.23131 |
```
import sys
sys.path.append('../python_packages_static/')
import pyemu
import os
import shutil
import glob
import matplotlib.pyplot as plt
```
# IMPORTANT NOTE BEFORE RUNNING THIS NOTEBOOK:
## During this stage of the workflow (`notebooks_workflow` notebooks 1.0 - 4.0) there are two paths for running the analysis of results in these notebooks:
### 1. Build an ensemble and run locally (note: this will result in a smaller ensemble size than what was used in the journal article, due to the computational limitations of personal computers)
### 2. Run all the analysis without performing PEST++ history matching and model runs, using results from the journal article documenting this work.
## The path selection is made using the `run_ensemble` variable below. If `run_ensemble` is set to `True`, _path 1_ is selected and local runs are performed. If `run_ensemble` set to `False`, _path 2_ is selected and results from the journal article are used.
# The `run_ensemble` variable should be set appropriately for the environment in which these notebooks are being run.
```
run_ensemble=False
if run_ensemble == True:
input_dir = '../noptmax0_testing/' # we need to read from this directory to get the residuals from the noptmax 0 run
else:
input_dir = '../output/noptmax0/' # we need to read from this directory to get the residuals from the noptmax 0 run
output_dir = '../run_data' # this is the folder we will actually use to set up PEST++ runs
pst_root = 'prior_mc'
```
### run local noptmax= 0 to generate residuals
```
if run_ensemble==True:
cwd = os.getcwd()
os.chdir('../noptmax0_testing/')
pyemu.utils.os_utils.run(f'pestpp-ies prior_mc.pst')
print(os.getcwd())
os.chdir(cwd)
```
### read in the PEST control file
```
pst = pyemu.Pst(os.path.join(input_dir,'{}.pst'.format(pst_root)))
```
### make a quick pie chart showing the current distribution of the observation groups in the objective function
```
pst.plot(kind='phi_pie')
```
### we can use `pyemu` functionality to assign new weights that adjust and honor whatever balance we seek
```
new_proportions = pst.phi_components.copy()
new_proportions
pst.observation_data
```
### here we assign proportions (that sum to 1.0) to the various groups. We want to retain the same total Phi so we multiply our proportions by the total Phi. The subjective decisions of these proportions reflect a desire to obviate flooding and to highlight water balance over the knowingly uncertain head measurements. This is a good thing to experiment with!
```
new_proportions['flux'] = 0.3*pst.phi
new_proportions['head'] = 0.2*pst.phi
new_proportions['land_surface'] = 0.5*pst.phi
new_proportions['budget'] = 0
new_proportions
```
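As a quick sanity check (a minimal sketch, not part of the original workflow), the new group targets can be compared against the current total Phi to confirm that the proportions above really sum to 1.0:
```
# the new group targets should add back up to the current total Phi
total_target = sum(new_proportions.values())
print('sum of new targets: {0:.2f}'.format(total_target))
print('current total phi:  {0:.2f}'.format(pst.phi))
```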
### using the `phi_components` dictionary, making a copy of it, and reassigning values, we can update the PST object using the `adjust_weights` function
```
pst.adjust_weights(obsgrp_dict=new_proportions)
pst.observation_data
```
### now we can see the updated pie chart
```
pst.plot(kind='phi_pie')
# set some values for pestpp-ies
if run_ensemble == True:
pst.pestpp_options["ies_num_reals"] = 20
else:
pst.pestpp_options["ies_num_reals"] = 500
pst.pestpp_options["ies_bad_phi_sigma"] = 2.5
pst.pestpp_options["overdue_giveup_fac"] = 4
pst.pestpp_options["ies_no_noise"] = True
pst.pestpp_options["ies_drop_conflicts"] = False
pst.pestpp_options["ies_pdc_sigma_distance"] = 3.0
pst.pestpp_options['ies_autoadaloc']=True
pst.pestpp_options['ies_num_threads']=4
pst.pestpp_options['ies_lambda_mults']=(0.1,1.0,10.0,100.0)
pst.pestpp_options['lambda_scale_fac'] = (0.75,0.9,1.0,1.1)
pst.pestpp_options['ies_subset_size']=10
# set SVD for some regularization
pst.svd_data.maxsing = 250
# check number of realizations
pst.pestpp_options["ies_num_reals"]
```
### using `pestpp-ies`, setting `noptmax=-1` and running in parallel will run a single pass through the initial ensemble, Monte Carlo style
```
# set up for prior Monte Carlo
pst.control_data.noptmax = -1
pst.write(os.path.join(output_dir, '{}.pst'.format(pst_root)))
```
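As an optional check (a sketch, not part of the published workflow), the control file that was just written can be re-read to confirm the `noptmax` setting before launching any runs:
```
# re-read the control file we just wrote and confirm noptmax is -1
check_pst = pyemu.Pst(os.path.join(output_dir, '{}.pst'.format(pst_root)))
print(check_pst.control_data.noptmax)
```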
## If `run_ensemble=True` the cells below will run a local `prior_mc` Monte Carlo
* using the number of realizations specified by `pst.pestpp_options["ies_num_reals"]`
* will run in parallel locally using the number of cores specified below by `num_workers`
* creates a new directory called `"../master_mc/` that will contain the PEST++ output from the parallel Monte Carlo
* while running generates worker directories that are removed when run is complete
* results moved to `"../run_data/"`
```
if run_ensemble==True:
# set some variables for starting a group of PEST++ workers on the local machine
# MAKE SURE THAT PESTPP-IES and MF6 executables are in your system path or are in '../run_data'
num_workers = 5 # number of local workers -- VERY IMPORTANT, DO NOT MAKE TOO BIG
if sys.platform == 'win32':
pst_exe = 'pestpp-ies.exe'
else:
pst_exe = 'pestpp-ies'
template_ws = '../run_data' # template_directory
m_d = '../master_mc'
pyemu.os_utils.start_workers(worker_dir=template_ws,
exe_rel_path=pst_exe,
pst_rel_path=f'{pst_root}.pst',
num_workers=num_workers,
master_dir=m_d
)
if run_ensemble==True:
# move results into run_data and clean up
move_result_files = glob.glob(os.path.join(m_d, 'prior_mc*'))
move_result_files = [f for f in move_result_files if 'pst' not in f]
[shutil.copy(os.path.join(m_d, file), output_dir) for file in move_result_files]
# can remove master dir at this point. It would also get removed by pyemu the next time we start workers.
shutil.rmtree(m_d)
```
|
github_jupyter
|
| 0.168309 | 0.786664 |
## Visualizing convnet filters
Another easy way to inspect the filters learned by convnets is to display the visual pattern that each filter is meant to respond to. This can be done with __gradient ascent in input space__: adjusting the pixel values of an input image of a convnet so as to maximize the response of a specific filter, starting from a blank input image. The resulting input image is one that the chosen filter is maximally responsive to.
The process is simple: we will build a loss function that maximizes the value of a given filter in a given convolution layer, then we will use gradient ascent to adjust the values of the input image so as to maximize this activation value. For instance, here's a loss for the activation of filter 0 in the layer "block3_conv1" of the VGG16 network, pre-trained on ImageNet:
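A minimal sketch of such a loss (assuming the `base_model` built in the next cell and the channels-last data format) might look like this:
```
from keras import backend as K
# mean activation of filter 0 in layer 'block3_conv1' -- the quantity gradient ascent will maximize
layer_output = base_model.get_layer('block3_conv1').output
loss = K.mean(layer_output[:, :, :, 0])
```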
### Let's start by defining the VGG16 model in Keras
```
from keras.applications import vgg16
import numpy as np
# build the VGG16 network with ImageNet weights
base_model = vgg16.VGG16(weights='imagenet', include_top=False)
np.save('base_vgg16.npy', base_model.get_weights())
base_model.save('base_vgg16.h5')
```
### Load the saved model
```
from keras.models import load_model
import numpy as np
base_model = load_model('base_vgg16.h5')
base_model.set_weights(np.load('base_vgg16.npy', allow_pickle=True))  # weights were saved as an object array, so allow_pickle is required on load
```
### Visualizing the model
```
from IPython.display import display, HTML
from keras.utils import plot_model
def plot_model_architecture(base_model, model_name):
plot_model(base_model, show_shapes=True, to_file=model_name)
display(HTML('<img src="{}" style="display:inline;margin:1px"/>'.format(model_name)))
plot_model_architecture(base_model, 'base_vgg16_model.svg')
```
Note that we only go up to the last convolutional layer; we don't include the fully-connected layers. The reason is that adding the fully-connected layers forces you to use a fixed input size for the model (224x224, the original ImageNet format). By only keeping the convolutional modules, our model can be adapted to arbitrary input sizes.
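A quick way to see this (a sketch, assuming the channels-last data format) is to push inputs of different spatial sizes through the convolutional base:
```
import numpy as np
# with include_top=False the convolutional base accepts arbitrary spatial sizes
print(base_model.predict(np.zeros((1, 64, 64, 3))).shape)    # (1, 2, 2, 512)
print(base_model.predict(np.zeros((1, 200, 200, 3))).shape)  # (1, 6, 6, 512)
```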
### Setting visualization variables
```
# dimensions of the generated pictures for each filter.
img_width = 128
img_height = 128
# this is the placeholder for the input images
input_img = base_model.input
# get the symbolic outputs of each "key" layer (we gave them unique names).
layer_dict = dict([(layer.name, layer) for layer in base_model.layers[1:]])
# the name of the layer we want to visualize
# (see model definition at keras/applications/vgg16.py)
# layer_name = 'block5_conv1'
```
### Maximize the activation of a specific filter
We first define some util functions
```
from keras import backend as K
# util function to convert a tensor into a valid image
def deprocess_image(x):
# normalize tensor: center on 0., ensure std is 0.1
x -= x.mean()
x /= (x.std() + K.epsilon())
x *= 0.1
# clip to [0, 1]
x += 0.5
x = np.clip(x, 0, 1)
# convert to RGB array
x *= 255
if K.image_data_format() == 'channels_first':
x = x.transpose((1, 2, 0))
x = np.clip(x, 0, 255).astype('uint8')
return x
def normalize(x):
# utility function to normalize a tensor by its L2 norm
return x / (K.sqrt(K.mean(K.square(x))) + K.epsilon())
```
Now we can use the Keras function we defined to do gradient ascent in the input space, with regard to our filter activation loss:
```
def gradient_ascent(iterate):
# step size for gradient ascent
step = 1.
# we start from a gray image with some random noise
if K.image_data_format() == 'channels_first':
input_img_data = np.random.random((1, 3, img_width, img_height))
else:
input_img_data = np.random.random((1, img_width, img_height, 3))
input_img_data = (input_img_data - 0.5) * 20 + 128
# we run gradient ascent for 20 steps
for i in range(20):
loss_value, grads_value = iterate([input_img_data])
input_img_data += grads_value * step
# print('------>Current loss value:', loss_value)
if loss_value <= 0.:
# some filters get stuck to 0, we can skip them
break
# decode the resulting input image
if loss_value > 0:
img = deprocess_image(input_img_data[0])
kept_filters.append((img, loss_value))
```
We define a loss function that will seek to maximize the activation of a specific filter (filter_index) in a specific layer (layer_name). We do this via a Keras `backend` function called `gradients`, which allows our code to run on top of TensorFlow
The only trick here is to normalize the gradient of the pixels of the input image, which avoids very small and very large gradients and ensures a smooth gradient ascent process.
```
def build_nth_filter_loss(filter_index, layer_name):
"""
We build a loss function that maximizes the activation
of the nth filter of the layer considered
"""
layer_output = layer_dict[layer_name].output
if K.image_data_format() == 'channels_first':
loss = K.mean(layer_output[:, filter_index, :, :])
else:
loss = K.mean(layer_output[:, :, :, filter_index])
# we compute the gradient of the input picture wrt this loss
grads = K.gradients(loss, input_img)[0]
# normalization trick: we normalize the gradient
grads = normalize(grads)
# this function returns the loss and grads given the input picture
iterate = K.function([input_img], [loss, grads])
return iterate
```
We need a way to compute the value of the loss tensor and the gradient tensor, given an input image. We define a Keras backend function `iterate` that takes a Numpy tensor (as a list of tensors of size 1) and returns a list of two Numpy tensors: the loss value and the gradient value.
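For example, a single evaluation might look like the following sketch (it reuses the helpers defined above and assumes the channels-last data format):
```
# build the loss/gradient function for filter 0 of block3_conv1 and evaluate it once
iterate = build_nth_filter_loss(0, 'block3_conv1')
test_img = np.random.random((1, img_width, img_height, 3)) * 20 + 128.
loss_value, grads_value = iterate([test_img])
print(loss_value, grads_value.shape)
```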
### Understanding how K.gradients() works
Refer: https://www.tensorflow.org/api_docs/python/tf/gradients
```
# Example of tf.gradients()
import tensorflow as tf
a = tf.constant(0.)
b = 2 * a
g = tf.gradients(a + b, [a, b])
with tf.Session() as sess:
print(sess.run(g))
```
### Iterating over some number of filters in a given layer_name
```
layers = ['block1_conv1', 'block1_conv2',
'block2_conv1', 'block2_conv2',
'block3_conv1', 'block3_conv2', 'block3_conv3',
'block4_conv1', 'block4_conv2', 'block4_conv3',
'block5_conv1', 'block5_conv2', 'block5_conv3']
import time
kept_filters = []
filters_dict = dict()
for layer_name in layers:
layer = base_model.get_layer(layer_name)
print('Processing filter for layer:', layer_name)
for filter_index in range(min(layer.output.shape[-1], 100)):
# print('Processing filter %d' % filter_index)
start_time = time.time()
gradient_ascent(build_nth_filter_loss(filter_index, layer_name))
end_time = time.time()
# print('--->Filter %d processed in %ds' % (filter_index, end_time - start_time))
filters_dict[layer.name] = kept_filters
kept_filters = []
for layer_name, kept_filters in filters_dict.items():
print(layer_name, len(kept_filters))
```
### Stitching the best filters onto a black picture
We can now visualise the filters in every layer. For simplicity, we will only keep the best 64 filters from each layer and arrange them on an 8x8 grid of 128x128 filter patterns, with a black margin between each pattern.
```
from keras.preprocessing.image import save_img
def stich_filters(kept_filters, layer_name):
# By default, we will stich the best 64 (n*n) filters on a 8 x 8 grid.
n = int(np.sqrt(len(kept_filters)))
# the filters that have the highest loss are assumed to be better-looking.
# we will only keep the top 64 filters.
kept_filters.sort(key=lambda x: x[1], reverse=True)
kept_filters = kept_filters[:n * n]
# build a black picture with enough space for
# our 8 x 8 filters of size 128 x 128, with a 5px margin in between
margin = 5
width = n * img_width + (n - 1) * margin
height = n * img_height + (n - 1) * margin
stitched_filters = np.zeros((width, height, 3))
# fill the picture with our saved filters
for i in range(n):
for j in range(n):
img, loss = kept_filters[i * n + j]
width_margin = (img_width + margin) * i
height_margin = (img_height + margin) * j
stitched_filters[
width_margin: width_margin + img_width,
height_margin: height_margin + img_height, :] = img
# save the result to disk
save_img('stitched_filters_{}.png'.format(layer_name), stitched_filters)
for layer_name, kept_filters in filters_dict.items():
print('Stiching filters for {}'.format(layer_name))
stich_filters(kept_filters, layer_name)
print('Completed.')
```
Visualizing the filters/kernels of each layer of the VGG16 network
```
from keras.preprocessing import image
import matplotlib.pyplot as plt
%matplotlib inline
filter_name = 'block5_conv3'
img = image.img_to_array(image.load_img('stitched_filters_{}.png'.format(filter_name))) /255.
plt.figure(figsize=(17,17))
plt.imshow(img)
plt.title(filter_name)
plt.grid(False)
```
These filter visualizations tell us a lot about how convnet layers see the world: each layer in a convnet simply learns a collection of
filters such that their inputs can be expressed as a combination of the filters. This is similar to how the Fourier transform decomposes
signals onto a bank of cosine functions. The filters in these convnet filter banks get increasingly complex and refined as we go higher-up
in the model:
* The filters from the first layer in the model (`block1_conv1`) encode simple directional edges and colors (or colored edges in some
cases).
* The filters from `block2_conv1` encode simple textures made from combinations of edges and colors.
* The filters in higher-up layers start resembling textures found in natural images: feathers, eyes, leaves, etc.
|
github_jupyter
|
| 0.820326 | 0.976129 |
# Create a Pipeline
You can perform the various steps required to ingest data, train a model, and register the model individually by using the Azure ML SDK to run script-based experiments. However, in an enterprise environment it is common to encapsulate the sequence of discrete steps required to build a machine learning solution into a *pipeline* that can be run on one or more compute targets, either on-demand by a user, from an automated build process, or on a schedule.
In this notebook, you'll bring together all of these elements to create a simple pipeline that pre-processes data and then trains and registers a model.
## Install the Azure Machine Learning SDK
The Azure Machine Learning SDK is updated frequently. Run the following cell to upgrade to the latest release, along with the additional package to support notebook widgets.
```
!pip install --upgrade azureml-sdk azureml-widgets
```
## Connect to your workspace
With the latest version of the SDK installed, now you're ready to connect to your workspace.
> **Note**: If you haven't already established an authenticated session with your Azure subscription, you'll be prompted to authenticate by clicking a link, entering an authentication code, and signing into Azure.
```
import azureml.core
from azureml.core import Workspace
# Load the workspace from the saved config file
ws = Workspace.from_config()
print('Ready to use Azure ML {} to work with {}'.format(azureml.core.VERSION, ws.name))
```
## Prepare data
In your pipeline, you'll use a dataset containing details of diabetes patients. Run the cell below to create this dataset (if you created it previously, the code will find the existing version).
```
from azureml.core import Dataset
default_ds = ws.get_default_datastore()
if 'diabetes dataset' not in ws.datasets:
default_ds.upload_files(files=['./data/diabetes.csv', './data/diabetes2.csv'], # Upload the diabetes csv files in /data
target_path='diabetes-data/', # Put it in a folder path in the datastore
overwrite=True, # Replace existing files of the same name
show_progress=True)
#Create a tabular dataset from the path on the datastore (this may take a short while)
tab_data_set = Dataset.Tabular.from_delimited_files(path=(default_ds, 'diabetes-data/*.csv'))
# Register the tabular dataset
try:
tab_data_set = tab_data_set.register(workspace=ws,
name='diabetes dataset',
description='diabetes data',
tags = {'format':'CSV'},
create_new_version=True)
print('Dataset registered.')
except Exception as ex:
print(ex)
else:
print('Dataset already registered.')
```
## Create scripts for pipeline steps
Pipelines consist of one or more *steps*, which can be Python scripts, or specialized steps like a data transfer step that copies data from one location to another. Each step can run in its own compute context. In this exercise, you'll build a simple pipeline that contains two Python script steps: one to pre-process some training data, and another to use the pre-processed data to train and register a model.
First, let's create a folder for the script files we'll use in the pipeline steps.
```
import os
# Create a folder for the pipeline step files
experiment_folder = 'diabetes_pipeline'
os.makedirs(experiment_folder, exist_ok=True)
print(experiment_folder)
```
Now let's create the first script, which will read data from the diabetes dataset and apply some simple pre-processing to remove any rows with missing data and normalize the numeric features so they're on a similar scale.
The script includes an argument named **--prepped-data**, which references the folder where the resulting data should be saved.
```
%%writefile $experiment_folder/prep_diabetes.py
# Import libraries
import os
import argparse
import pandas as pd
from azureml.core import Run
from sklearn.preprocessing import MinMaxScaler
# Get parameters
parser = argparse.ArgumentParser()
parser.add_argument("--input-data", type=str, dest='raw_dataset_id', help='raw dataset')
parser.add_argument('--prepped-data', type=str, dest='prepped_data', default='prepped_data', help='Folder for results')
args = parser.parse_args()
save_folder = args.prepped_data
# Get the experiment run context
run = Run.get_context()
# load the data (passed as an input dataset)
print("Loading Data...")
diabetes = run.input_datasets['raw_data'].to_pandas_dataframe()
# Log raw row count
row_count = (len(diabetes))
run.log('raw_rows', row_count)
# remove nulls
diabetes = diabetes.dropna()
# Normalize the numeric columns
scaler = MinMaxScaler()
num_cols = ['Pregnancies','PlasmaGlucose','DiastolicBloodPressure','TricepsThickness','SerumInsulin','BMI','DiabetesPedigree']
diabetes[num_cols] = scaler.fit_transform(diabetes[num_cols])
# Log processed rows
row_count = (len(diabetes))
run.log('processed_rows', row_count)
# Save the prepped data
print("Saving Data...")
os.makedirs(save_folder, exist_ok=True)
save_path = os.path.join(save_folder,'data.csv')
diabetes.to_csv(save_path, index=False, header=True)
# End the run
run.complete()
```
Now you can create the script for the second step, which will train a model. The script includes an argument named **--training-folder**, which references the folder where the prepared data was saved by the previous step.
```
%%writefile $experiment_folder/train_diabetes.py
# Import libraries
from azureml.core import Run, Model
import argparse
import pandas as pd
import numpy as np
import joblib
import os
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
import matplotlib.pyplot as plt
# Get parameters
parser = argparse.ArgumentParser()
parser.add_argument("--training-folder", type=str, dest='training_folder', help='training data folder')
args = parser.parse_args()
training_folder = args.training_folder
# Get the experiment run context
run = Run.get_context()
# load the prepared data file in the training folder
print("Loading Data...")
file_path = os.path.join(training_folder,'data.csv')
diabetes = pd.read_csv(file_path)
# Separate features and labels
X, y = diabetes[['Pregnancies','PlasmaGlucose','DiastolicBloodPressure','TricepsThickness','SerumInsulin','BMI','DiabetesPedigree','Age']].values, diabetes['Diabetic'].values
# Split data into training set and test set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=0)
# Train a decision tree model
print('Training a decision tree model...')
model = DecisionTreeClassifier().fit(X_train, y_train)
# calculate accuracy
y_hat = model.predict(X_test)
acc = np.average(y_hat == y_test)
print('Accuracy:', acc)
run.log('Accuracy', np.float(acc))
# calculate AUC
y_scores = model.predict_proba(X_test)
auc = roc_auc_score(y_test,y_scores[:,1])
print('AUC: ' + str(auc))
run.log('AUC', np.float(auc))
# plot ROC curve
fpr, tpr, thresholds = roc_curve(y_test, y_scores[:,1])
fig = plt.figure(figsize=(6, 4))
# Plot the diagonal 50% line
plt.plot([0, 1], [0, 1], 'k--')
# Plot the FPR and TPR achieved by our model
plt.plot(fpr, tpr)
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC Curve')
run.log_image(name = "ROC", plot = fig)
plt.show()
# Save the trained model in the outputs folder
print("Saving model...")
os.makedirs('outputs', exist_ok=True)
model_file = os.path.join('outputs', 'diabetes_model.pkl')
joblib.dump(value=model, filename=model_file)
# Register the model
print('Registering model...')
Model.register(workspace=run.experiment.workspace,
model_path = model_file,
model_name = 'diabetes_model',
tags={'Training context':'Pipeline'},
properties={'AUC': np.float(auc), 'Accuracy': np.float(acc)})
run.complete()
```
## Prepare a compute environment for the pipeline steps
In this exercise, you'll use the same compute for both steps, but it's important to realize that each step is run independently; so you could specify different compute contexts for each step if appropriate.
First, get the compute target you created in a previous lab (if it doesn't exist, it will be created).
> **Important**: Change *your-compute-cluster* to the name of your compute cluster in the code below before running it! Cluster names must be globally unique and between 2 and 16 characters in length. Valid characters are letters, digits, and the - character.
```
from azureml.core.compute import ComputeTarget, AmlCompute
from azureml.core.compute_target import ComputeTargetException
cluster_name = "your-compute-cluster"
try:
# Check for existing compute target
pipeline_cluster = ComputeTarget(workspace=ws, name=cluster_name)
print('Found existing cluster, use it.')
except ComputeTargetException:
# If it doesn't already exist, create it
try:
compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_DS11_V2', max_nodes=2)
pipeline_cluster = ComputeTarget.create(ws, cluster_name, compute_config)
pipeline_cluster.wait_for_completion(show_output=True)
except Exception as ex:
print(ex)
```
The compute will require a Python environment with the necessary package dependencies installed, so you'll need to create a run configuration.
```
from azureml.core import Environment
from azureml.core.conda_dependencies import CondaDependencies
from azureml.core.runconfig import RunConfiguration
# Create a Python environment for the experiment
diabetes_env = Environment("diabetes-pipeline-env")
diabetes_env.python.user_managed_dependencies = False # Let Azure ML manage dependencies
diabetes_env.docker.enabled = True # Use a docker container
# Create a set of package dependencies
diabetes_packages = CondaDependencies.create(conda_packages=['scikit-learn','ipykernel','matplotlib','pandas','pip'],
pip_packages=['azureml-defaults','azureml-dataprep[pandas]','pyarrow'])
# Add the dependencies to the environment
diabetes_env.python.conda_dependencies = diabetes_packages
# Register the environment
diabetes_env.register(workspace=ws)
registered_env = Environment.get(ws, 'diabetes-pipeline-env')
# Create a new runconfig object for the pipeline
pipeline_run_config = RunConfiguration()
# Use the compute you created above.
pipeline_run_config.target = pipeline_cluster
# Assign the environment to the run configuration
pipeline_run_config.environment = registered_env
print ("Run configuration created.")
```
## Create and run a pipeline
Now you're ready to create and run a pipeline.
First you need to define the steps for the pipeline, and any data references that need to passed between them. In this case, the first step must write the prepared data to a folder that can be read from by the second step. Since the steps will be run on remote compute (and in fact, could each be run on different compute), the folder path must be passed as a data reference to a location in a datastore within the workspace. The **PipelineData** object is a special kind of data reference that is used for interim storage locations that can be passed between pipeline steps, so you'll create one and use at as the output for the first step and the input for the second step. Note that you also need to pass it as a script argument so our code can access the datastore location referenced by the data reference.
```
from azureml.pipeline.core import PipelineData
from azureml.pipeline.steps import PythonScriptStep
# Get the training dataset
diabetes_ds = ws.datasets.get("diabetes dataset")
# Create a PipelineData (temporary Data Reference) for the model folder
prepped_data_folder = PipelineData("prepped_data_folder", datastore=ws.get_default_datastore())
# Step 1, Run the data prep script
train_step = PythonScriptStep(name = "Prepare Data",
source_directory = experiment_folder,
script_name = "prep_diabetes.py",
arguments = ['--input-data', diabetes_ds.as_named_input('raw_data'),
'--prepped-data', prepped_data_folder],
outputs=[prepped_data_folder],
compute_target = pipeline_cluster,
runconfig = pipeline_run_config,
allow_reuse = True)
# Step 2, run the training script
register_step = PythonScriptStep(name = "Train and Register Model",
source_directory = experiment_folder,
script_name = "train_diabetes.py",
arguments = ['--training-folder', prepped_data_folder],
inputs=[prepped_data_folder],
compute_target = pipeline_cluster,
runconfig = pipeline_run_config,
allow_reuse = True)
print("Pipeline steps defined")
```
OK, you're ready to build the pipeline from the steps you've defined and run it as an experiment.
```
from azureml.core import Experiment
from azureml.pipeline.core import Pipeline
from azureml.widgets import RunDetails
# Construct the pipeline
pipeline_steps = [train_step, register_step]
pipeline = Pipeline(workspace=ws, steps=pipeline_steps)
print("Pipeline is built.")
# Create an experiment and run the pipeline
experiment = Experiment(workspace=ws, name = 'mslearn-diabetes-pipeline')
pipeline_run = experiment.submit(pipeline, regenerate_outputs=True)
print("Pipeline submitted for execution.")
RunDetails(pipeline_run).show()
pipeline_run.wait_for_completion(show_output=True)
```
A graphical representation of the pipeline experiment will be displayed in the widget as it runs. Keep an eye on the kernel indicator at the top right of the page; when it turns from **⚫** to **◯**, the code has finished running. You can also monitor pipeline runs in the **Experiments** page in [Azure Machine Learning studio](https://ml.azure.com).
When the pipeline has finished, you can examine the metrics recorded by its child runs.
```
for run in pipeline_run.get_children():
print(run.name, ':')
metrics = run.get_metrics()
for metric_name in metrics:
print('\t',metric_name, ":", metrics[metric_name])
```
Assuming the pipeline was successful, a new model should be registered with a *Training context* tag indicating it was trained in a pipeline. Run the following code to verify this.
```
from azureml.core import Model
for model in Model.list(ws):
print(model.name, 'version:', model.version)
for tag_name in model.tags:
tag = model.tags[tag_name]
print ('\t',tag_name, ':', tag)
for prop_name in model.properties:
prop = model.properties[prop_name]
print ('\t',prop_name, ':', prop)
print('\n')
```
## Publish the pipeline
After you've created and tested a pipeline, you can publish it as a REST service.
```
# Publish the pipeline from the run
published_pipeline = pipeline_run.publish_pipeline(
name="diabetes-training-pipeline", description="Trains diabetes model", version="1.0")
published_pipeline
```
Note that the published pipeline has an endpoint, which you can see in the **Endpoints** page (on the **Pipeline Endpoints** tab) in [Azure Machine Learning studio](https://ml.azure.com). You can also find its URI as a property of the published pipeline object:
```
rest_endpoint = published_pipeline.endpoint
print(rest_endpoint)
```
## Call the pipeline endpoint
To use the endpoint, client applications need to make a REST call over HTTP. This request must be authenticated, so an authorization header is required. A real application would require a service principal with which to be authenticated, but to test this out, we'll use the authorization header from your current connection to your Azure workspace, which you can get using the following code:
```
from azureml.core.authentication import InteractiveLoginAuthentication
interactive_auth = InteractiveLoginAuthentication()
auth_header = interactive_auth.get_authentication_header()
print("Authentication header ready.")
```
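For an automated client, a service principal could be used instead. The snippet below is a sketch only; the tenant ID, application ID, and secret are placeholders you would supply from your own Azure AD app registration:
```
from azureml.core.authentication import ServicePrincipalAuthentication
# placeholder values -- replace with your own service principal details
svc_pr = ServicePrincipalAuthentication(tenant_id='<tenant-id>',
                                        service_principal_id='<application-id>',
                                        service_principal_password='<client-secret>')
auth_header = svc_pr.get_authentication_header()
```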
Now we're ready to call the REST interface. The pipeline runs asynchronously, so we'll get an identifier back, which we can use to track the pipeline experiment as it runs:
```
import requests
experiment_name = 'mslearn-diabetes-pipeline'
rest_endpoint = published_pipeline.endpoint
response = requests.post(rest_endpoint,
headers=auth_header,
json={"ExperimentName": experiment_name})
run_id = response.json()["Id"]
run_id
```
Since you have the run ID, you can use it to wait for the run to complete.
> **Note**: The pipeline should complete quickly, because each step was configured to allow output reuse. This was done primarily for convenience and to save time in this course. In reality, you'd likely want the first step to run every time in case the data has changed, and trigger the subsequent steps only if the output from step one changes.
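For instance, forcing the data-prep step to run every time is just a matter of turning off output reuse for that step; a sketch of the modified step definition (all other arguments unchanged from above) is:
```
# same data-prep step as before, but with allow_reuse=False so it always re-runs
prep_step_always = PythonScriptStep(name = "Prepare Data",
                                    source_directory = experiment_folder,
                                    script_name = "prep_diabetes.py",
                                    arguments = ['--input-data', diabetes_ds.as_named_input('raw_data'),
                                                 '--prepped-data', prepped_data_folder],
                                    outputs=[prepped_data_folder],
                                    compute_target = pipeline_cluster,
                                    runconfig = pipeline_run_config,
                                    allow_reuse = False)
```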
```
from azureml.pipeline.core.run import PipelineRun
published_pipeline_run = PipelineRun(ws.experiments[experiment_name], run_id)
published_pipeline_run.wait_for_completion(show_output=True)
```
## Schedule the Pipeline
Suppose the clinic for the diabetes patients collects new data each week, and adds it to the dataset. You could run the pipeline every week to retrain the model with the new data.
```
from azureml.pipeline.core import ScheduleRecurrence, Schedule
# Submit the Pipeline every Monday at 00:00 UTC
recurrence = ScheduleRecurrence(frequency="Week", interval=1, week_days=["Monday"], time_of_day="00:00")
weekly_schedule = Schedule.create(ws, name="weekly-diabetes-training",
description="Based on time",
pipeline_id=published_pipeline.id,
experiment_name='mslearn-diabetes-pipeline',
recurrence=recurrence)
print('Pipeline scheduled.')
```
You can retrieve the schedules that are defined in the workspace like this:
```
schedules = Schedule.list(ws)
schedules
```
You can check the latest run like this:
```
pipeline_experiment = ws.experiments.get('mslearn-diabetes-pipeline')
latest_run = list(pipeline_experiment.get_runs())[0]
latest_run.get_details()
```
This is a simple example, designed to demonstrate the principle. In reality, you could build more sophisticated logic into the pipeline steps - for example, evaluating the model against some test data to calculate a performance metric like AUC or accuracy, comparing the metric to that of any previously registered versions of the model, and only registering the new model if it performs better.
You can use the [Azure Machine Learning extension for Azure DevOps](https://marketplace.visualstudio.com/items?itemName=ms-air-aiagility.vss-services-azureml) to combine Azure ML pipelines with Azure DevOps pipelines (yes, it *is* confusing that they have the same name!) and integrate model retraining into a *continuous integration/continuous deployment (CI/CD)* process. For example you could use an Azure DevOps *build* pipeline to trigger an Azure ML pipeline that trains and registers a model, and when the model is registered it could trigger an Azure Devops *release* pipeline that deploys the model as a web service, along with the application or service that consumes the model.
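As a sketch of that idea (not part of this lab), the training step could compare its new AUC against the **AUC** property of the most recently registered version and only register when it improves; `register_if_better` below is a hypothetical helper:
```
from azureml.core import Model

def register_if_better(ws, model_file, auc, acc, model_name='diabetes_model'):
    """Register the model only if its AUC beats the latest registered version."""
    try:
        latest = Model(ws, name=model_name)               # latest registered version
        best_auc = float(latest.properties.get('AUC', 0))
    except Exception:
        best_auc = 0.0                                    # no version registered yet
    if auc > best_auc:
        return Model.register(workspace=ws, model_path=model_file, model_name=model_name,
                              tags={'Training context': 'Pipeline'},
                              properties={'AUC': float(auc), 'Accuracy': float(acc)})
    print('New AUC {:.3f} does not beat registered AUC {:.3f}; skipping registration.'.format(auc, best_auc))
```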
|
github_jupyter
|
!pip install --upgrade azureml-sdk azureml-widgets
import azureml.core
from azureml.core import Workspace
# Load the workspace from the saved config file
ws = Workspace.from_config()
print('Ready to use Azure ML {} to work with {}'.format(azureml.core.VERSION, ws.name))
from azureml.core import Dataset
default_ds = ws.get_default_datastore()
if 'diabetes dataset' not in ws.datasets:
default_ds.upload_files(files=['./data/diabetes.csv', './data/diabetes2.csv'], # Upload the diabetes csv files in /data
target_path='diabetes-data/', # Put it in a folder path in the datastore
overwrite=True, # Replace existing files of the same name
show_progress=True)
#Create a tabular dataset from the path on the datastore (this may take a short while)
tab_data_set = Dataset.Tabular.from_delimited_files(path=(default_ds, 'diabetes-data/*.csv'))
# Register the tabular dataset
try:
tab_data_set = tab_data_set.register(workspace=ws,
name='diabetes dataset',
description='diabetes data',
tags = {'format':'CSV'},
create_new_version=True)
print('Dataset registered.')
except Exception as ex:
print(ex)
else:
print('Dataset already registered.')
import os
# Create a folder for the pipeline step files
experiment_folder = 'diabetes_pipeline'
os.makedirs(experiment_folder, exist_ok=True)
print(experiment_folder)
%%writefile $experiment_folder/prep_diabetes.py
# Import libraries
import os
import argparse
import pandas as pd
from azureml.core import Run
from sklearn.preprocessing import MinMaxScaler
# Get parameters
parser = argparse.ArgumentParser()
parser.add_argument("--input-data", type=str, dest='raw_dataset_id', help='raw dataset')
parser.add_argument('--prepped-data', type=str, dest='prepped_data', default='prepped_data', help='Folder for results')
args = parser.parse_args()
save_folder = args.prepped_data
# Get the experiment run context
run = Run.get_context()
# load the data (passed as an input dataset)
print("Loading Data...")
diabetes = run.input_datasets['raw_data'].to_pandas_dataframe()
# Log raw row count
row_count = (len(diabetes))
run.log('raw_rows', row_count)
# remove nulls
diabetes = diabetes.dropna()
# Normalize the numeric columns
scaler = MinMaxScaler()
num_cols = ['Pregnancies','PlasmaGlucose','DiastolicBloodPressure','TricepsThickness','SerumInsulin','BMI','DiabetesPedigree']
diabetes[num_cols] = scaler.fit_transform(diabetes[num_cols])
# Log processed rows
row_count = (len(diabetes))
run.log('processed_rows', row_count)
# Save the prepped data
print("Saving Data...")
os.makedirs(save_folder, exist_ok=True)
save_path = os.path.join(save_folder,'data.csv')
diabetes.to_csv(save_path, index=False, header=True)
# End the run
run.complete()
%%writefile $experiment_folder/train_diabetes.py
# Import libraries
from azureml.core import Run, Model
import argparse
import pandas as pd
import numpy as np
import joblib
import os
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
import matplotlib.pyplot as plt
# Get parameters
parser = argparse.ArgumentParser()
parser.add_argument("--training-folder", type=str, dest='training_folder', help='training data folder')
args = parser.parse_args()
training_folder = args.training_folder
# Get the experiment run context
run = Run.get_context()
# load the prepared data file in the training folder
print("Loading Data...")
file_path = os.path.join(training_folder,'data.csv')
diabetes = pd.read_csv(file_path)
# Separate features and labels
X, y = diabetes[['Pregnancies','PlasmaGlucose','DiastolicBloodPressure','TricepsThickness','SerumInsulin','BMI','DiabetesPedigree','Age']].values, diabetes['Diabetic'].values
# Split data into training set and test set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=0)
# Train a decision tree model
print('Training a decision tree model...')
model = DecisionTreeClassifier().fit(X_train, y_train)
# calculate accuracy
y_hat = model.predict(X_test)
acc = np.average(y_hat == y_test)
print('Accuracy:', acc)
run.log('Accuracy', np.float(acc))
# calculate AUC
y_scores = model.predict_proba(X_test)
auc = roc_auc_score(y_test,y_scores[:,1])
print('AUC: ' + str(auc))
run.log('AUC', np.float(auc))
# plot ROC curve
fpr, tpr, thresholds = roc_curve(y_test, y_scores[:,1])
fig = plt.figure(figsize=(6, 4))
# Plot the diagonal 50% line
plt.plot([0, 1], [0, 1], 'k--')
# Plot the FPR and TPR achieved by our model
plt.plot(fpr, tpr)
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC Curve')
run.log_image(name = "ROC", plot = fig)
plt.show()
# Save the trained model in the outputs folder
print("Saving model...")
os.makedirs('outputs', exist_ok=True)
model_file = os.path.join('outputs', 'diabetes_model.pkl')
joblib.dump(value=model, filename=model_file)
# Register the model
print('Registering model...')
Model.register(workspace=run.experiment.workspace,
model_path = model_file,
model_name = 'diabetes_model',
tags={'Training context':'Pipeline'},
properties={'AUC': np.float(auc), 'Accuracy': np.float(acc)})
run.complete()
from azureml.core.compute import ComputeTarget, AmlCompute
from azureml.core.compute_target import ComputeTargetException
cluster_name = "your-compute-cluster"
try:
# Check for existing compute target
pipeline_cluster = ComputeTarget(workspace=ws, name=cluster_name)
print('Found existing cluster, use it.')
except ComputeTargetException:
# If it doesn't already exist, create it
try:
compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_DS11_V2', max_nodes=2)
pipeline_cluster = ComputeTarget.create(ws, cluster_name, compute_config)
pipeline_cluster.wait_for_completion(show_output=True)
except Exception as ex:
print(ex)
from azureml.core import Environment
from azureml.core.conda_dependencies import CondaDependencies
from azureml.core.runconfig import RunConfiguration
# Create a Python environment for the experiment
diabetes_env = Environment("diabetes-pipeline-env")
diabetes_env.python.user_managed_dependencies = False # Let Azure ML manage dependencies
diabetes_env.docker.enabled = True # Use a docker container
# Create a set of package dependencies
diabetes_packages = CondaDependencies.create(conda_packages=['scikit-learn','ipykernel','matplotlib','pandas','pip'],
pip_packages=['azureml-defaults','azureml-dataprep[pandas]','pyarrow'])
# Add the dependencies to the environment
diabetes_env.python.conda_dependencies = diabetes_packages
# Register the environment
diabetes_env.register(workspace=ws)
registered_env = Environment.get(ws, 'diabetes-pipeline-env')
# Create a new runconfig object for the pipeline
pipeline_run_config = RunConfiguration()
# Use the compute you created above.
pipeline_run_config.target = pipeline_cluster
# Assign the environment to the run configuration
pipeline_run_config.environment = registered_env
print ("Run configuration created.")
from azureml.pipeline.core import PipelineData
from azureml.pipeline.steps import PythonScriptStep
# Get the training dataset
diabetes_ds = ws.datasets.get("diabetes dataset")
# Create a PipelineData (temporary Data Reference) for the model folder
prepped_data_folder = PipelineData("prepped_data_folder", datastore=ws.get_default_datastore())
# Step 1, Run the data prep script
train_step = PythonScriptStep(name = "Prepare Data",
source_directory = experiment_folder,
script_name = "prep_diabetes.py",
arguments = ['--input-data', diabetes_ds.as_named_input('raw_data'),
'--prepped-data', prepped_data_folder],
outputs=[prepped_data_folder],
compute_target = pipeline_cluster,
runconfig = pipeline_run_config,
allow_reuse = True)
# Step 2, run the training script
register_step = PythonScriptStep(name = "Train and Register Model",
source_directory = experiment_folder,
script_name = "train_diabetes.py",
arguments = ['--training-folder', prepped_data_folder],
inputs=[prepped_data_folder],
compute_target = pipeline_cluster,
runconfig = pipeline_run_config,
allow_reuse = True)
print("Pipeline steps defined")
from azureml.core import Experiment
from azureml.pipeline.core import Pipeline
from azureml.widgets import RunDetails
# Construct the pipeline
pipeline_steps = [train_step, register_step]
pipeline = Pipeline(workspace=ws, steps=pipeline_steps)
print("Pipeline is built.")
# Create an experiment and run the pipeline
experiment = Experiment(workspace=ws, name = 'mslearn-diabetes-pipeline')
pipeline_run = experiment.submit(pipeline, regenerate_outputs=True)
print("Pipeline submitted for execution.")
RunDetails(pipeline_run).show()
pipeline_run.wait_for_completion(show_output=True)
for run in pipeline_run.get_children():
print(run.name, ':')
metrics = run.get_metrics()
for metric_name in metrics:
print('\t',metric_name, ":", metrics[metric_name])
from azureml.core import Model
for model in Model.list(ws):
print(model.name, 'version:', model.version)
for tag_name in model.tags:
tag = model.tags[tag_name]
print ('\t',tag_name, ':', tag)
for prop_name in model.properties:
prop = model.properties[prop_name]
print ('\t',prop_name, ':', prop)
print('\n')
# Publish the pipeline from the run
published_pipeline = pipeline_run.publish_pipeline(
name="diabetes-training-pipeline", description="Trains diabetes model", version="1.0")
published_pipeline
rest_endpoint = published_pipeline.endpoint
print(rest_endpoint)
from azureml.core.authentication import InteractiveLoginAuthentication
interactive_auth = InteractiveLoginAuthentication()
auth_header = interactive_auth.get_authentication_header()
print("Authentication header ready.")
import requests
experiment_name = 'mslearn-diabetes-pipeline'
rest_endpoint = published_pipeline.endpoint
response = requests.post(rest_endpoint,
headers=auth_header,
json={"ExperimentName": experiment_name})
run_id = response.json()["Id"]
run_id
from azureml.pipeline.core.run import PipelineRun
published_pipeline_run = PipelineRun(ws.experiments[experiment_name], run_id)
pipeline_run.wait_for_completion(show_output=True)
from azureml.pipeline.core import ScheduleRecurrence, Schedule
# Submit the Pipeline every Monday at 00:00 UTC
recurrence = ScheduleRecurrence(frequency="Week", interval=1, week_days=["Monday"], time_of_day="00:00")
weekly_schedule = Schedule.create(ws, name="weekly-diabetes-training",
description="Based on time",
pipeline_id=published_pipeline.id,
experiment_name='mslearn-diabetes-pipeline',
recurrence=recurrence)
print('Pipeline scheduled.')
schedules = Schedule.list(ws)
schedules
pipeline_experiment = ws.experiments.get('mslearn-diabetes-pipeline')
latest_run = list(pipeline_experiment.get_runs())[0]
latest_run.get_details()
<a href="https://colab.research.google.com/github/ryanleeallred/DS-Unit-1-Sprint-2-Data-Wrangling-and-Storytelling/blob/master/module1-join-and-reshape-data/LS_DS_121_Join_and_Reshape_Data.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
_Lambda School Data Science_
# Join and Reshape datasets
Objectives
- concatenate data with pandas
- merge data with pandas
- understand tidy data formatting
- melt and pivot data with pandas
Links
- [Pandas Cheat Sheet](https://github.com/pandas-dev/pandas/blob/master/doc/cheatsheet/Pandas_Cheat_Sheet.pdf)
- [Tidy Data](https://en.wikipedia.org/wiki/Tidy_data)
- Combine Data Sets: Standard Joins
- Tidy Data
- Reshaping Data
- Python Data Science Handbook
- [Chapter 3.6](https://jakevdp.github.io/PythonDataScienceHandbook/03.06-concat-and-append.html), Combining Datasets: Concat and Append
- [Chapter 3.7](https://jakevdp.github.io/PythonDataScienceHandbook/03.07-merge-and-join.html), Combining Datasets: Merge and Join
- [Chapter 3.8](https://jakevdp.github.io/PythonDataScienceHandbook/03.08-aggregation-and-grouping.html), Aggregation and Grouping
- [Chapter 3.9](https://jakevdp.github.io/PythonDataScienceHandbook/03.09-pivot-tables.html), Pivot Tables
Reference
- Pandas Documentation: [Reshaping and Pivot Tables](https://pandas.pydata.org/pandas-docs/stable/reshaping.html)
- Modern Pandas, Part 5: [Tidy Data](https://tomaugspurger.github.io/modern-5-tidy.html)
- [Hadley Wickham's famous paper](http://vita.had.co.nz/papers/tidy-data.html) on Tidy Data
## Download data
We'll work with a dataset of [3 Million Instacart Orders, Open Sourced](https://tech.instacart.com/3-million-instacart-orders-open-sourced-d40d29ead6f2)!
```
!wget https://s3.amazonaws.com/instacart-datasets/instacart_online_grocery_shopping_2017_05_01.tar.gz
!tar --gunzip --extract --verbose --file=instacart_online_grocery_shopping_2017_05_01.tar.gz
%cd instacart_2017_05_01
!ls -lh *.csv
```
# Join Datasets
## Goal: Reproduce this example
The first two orders for user id 1:
```
from IPython.display import display, Image
url = 'https://cdn-images-1.medium.com/max/1600/1*vYGFQCafJtGBBX5mbl0xyw.png'
example = Image(url=url, width=600)
display(example)
```
## Load data
Here's a list of all six CSV filenames
```
!ls -lh *.csv
```
For each CSV
- Load it with pandas
- Look at the dataframe's shape
- Look at its head (first rows)
- `display(example)`
- Which columns does it have in common with the example we want to reproduce?
### aisles
```
```
### departments
```
```
### order_products__prior
```
```
### order_products__train
```
```
### orders
```
```
### products
```
```
## Concatenate order_products__prior and order_products__train
```
```
## Get a subset of orders — the first two orders for user id 1
From the `orders` dataframe (a possible sketch follows this list):
- user_id
- order_id
- order_number
- order_dow
- order_hour_of_day
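The CSVs above are loaded in the blank cells left for the walkthrough, so here is only one possible sketch of this subset step. It assumes the files were read into dataframes named after them (e.g. `orders = pd.read_csv('orders.csv')`); the column names come from the list above.
```
# Possible sketch: keep only user 1's first two orders (assumes `orders` is loaded)
cols = ['user_id', 'order_id', 'order_number', 'order_dow', 'order_hour_of_day']
orders_subset = orders.loc[orders['user_id'] == 1, cols].sort_values('order_number').head(2)
orders_subset
```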
## Merge dataframes
Merge the subset from `orders` with columns from `order_products`
```
```
Merge with columns from `products`
```
```
# Reshape Datasets
## Why reshape data?
#### Some libraries prefer data in different formats
For example, the Seaborn data visualization library prefers data in "Tidy" format often (but not always).
> "[Seaborn will be most powerful when your datasets have a particular organization.](https://seaborn.pydata.org/introduction.html#organizing-datasets) This format ia alternately called โlong-formโ or โtidyโ data and is described in detail by Hadley Wickham. The rules can be simply stated:
> - Each variable is a column
- Each observation is a row
> A helpful mindset for determining whether your data are tidy is to think backwards from the plot you want to draw. From this perspective, a “variable” is something that will be assigned a role in the plot."
#### Data science is often about putting square pegs in round holes
Here's an inspiring [video clip from _Apollo 13_](https://www.youtube.com/watch?v=ry55--J4_VQ): “Invent a way to put a square peg in a round hole.” It's a good metaphor for data wrangling!
## Hadley Wickham's Examples
From his paper, [Tidy Data](http://vita.had.co.nz/papers/tidy-data.html)
```
%matplotlib inline
import pandas as pd
import numpy as np
import seaborn as sns
table1 = pd.DataFrame(
[[np.nan, 2],
[16, 11],
[3, 1]],
index=['John Smith', 'Jane Doe', 'Mary Johnson'],
columns=['treatmenta', 'treatmentb'])
table2 = table1.T
```
"Table 1 provides some data about an imaginary experiment in a format commonly seen in the wild.
The table has two columns and three rows, and both rows and columns are labelled."
```
table1
```
"There are many ways to structure the same underlying data.
Table 2 shows the same data as Table 1, but the rows and columns have been transposed. The data is the same, but the layout is different."
```
table2
```
"Table 3 reorganises Table 1 to make the values, variables and obserations more clear.
Table 3 is the tidy version of Table 1. Each row represents an observation, the result of one treatment on one person, and each column is a variable."
| name | trt | result |
|--------------|-----|--------|
| John Smith | a | - |
| Jane Doe | a | 16 |
| Mary Johnson | a | 3 |
| John Smith | b | 2 |
| Jane Doe | b | 11 |
| Mary Johnson | b | 1 |
## Table 1 --> Tidy
We can use the pandas `melt` function to reshape Table 1 into Tidy format.
```
```
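The cell above is left for the in-class walkthrough; one possible sketch of the `melt` step is below (the `name`/`trt`/`result` column names are my own choice, picked to match Table 3).
```
# Possible sketch: Table 1 -> tidy with melt
tidy = table1.reset_index().melt(id_vars='index')
tidy = tidy.rename(columns={'index': 'name', 'variable': 'trt', 'value': 'result'})
tidy['trt'] = tidy['trt'].str.replace('treatment', '')  # 'treatmenta' -> 'a', 'treatmentb' -> 'b'
tidy
```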
## Table 2 --> Tidy
```
##### LEAVE BLANK --an assignment exercise #####
```
## Tidy --> Table 1
The `pivot_table` function is the inverse of `melt`.
```
```
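Again left blank for the walkthrough; here is a possible sketch going back from the tidy frame to the Table 1 layout (it assumes `tidy` has the `name`/`trt`/`result` columns from the melt sketch above).
```
# Possible sketch: tidy -> Table 1 with pivot_table
wide = tidy.pivot_table(index='name', columns='trt', values='result')
wide.columns = ['treatment' + c for c in wide.columns]  # back to 'treatmenta'/'treatmentb'
wide
```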
## Tidy --> Table 2
```
##### LEAVE BLANK --an assignment exercise #####
```
# Seaborn example
The rules can be simply stated:
- Each variable is a column
- Each observation is a row
A helpful mindset for determining whether your data are tidy is to think backwards from the plot you want to draw. From this perspective, a “variable” is something that will be assigned a role in the plot.
```
sns.catplot(x='trt', y='result', col='name',
kind='bar', data=tidy, height=2);
```
## Now with Instacart data
```
products = pd.read_csv('products.csv')
order_products = pd.concat([pd.read_csv('order_products__prior.csv'),
pd.read_csv('order_products__train.csv')])
orders = pd.read_csv('orders.csv')
```
## Goal: Reproduce part of this example
Instead of a plot with 50 products, we'll just do two — the first products from each list
- Half And Half Ultra Pasteurized
- Half Baked Frozen Yogurt
```
from IPython.display import display, Image
url = 'https://cdn-images-1.medium.com/max/1600/1*wKfV6OV-_1Ipwrl7AjjSuw.png'
example = Image(url=url, width=600)
display(example)
```
So, given a `product_name` we need to calculate its `order_hour_of_day` pattern.
## Subset and Merge
One challenge of performing a merge on this data is that the `products` and `orders` datasets do not have any common columns that we can merge on. Due to this we will have to use the `order_products` dataset to provide the columns that we will use to perform the merge.
```
```
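One way this bridge could look (a sketch only — the column names follow the Instacart CSV headers, and the two product names come from the goal above):
```
# Possible sketch: bridge products -> order_products -> orders
product_names = ['Half And Half Ultra Pasteurized', 'Half Baked Frozen Yogurt']
subset = products.loc[products['product_name'].isin(product_names), ['product_id', 'product_name']]
merged = (subset
          .merge(order_products[['order_id', 'product_id']], on='product_id')
          .merge(orders[['order_id', 'order_hour_of_day']], on='order_id'))
merged.head()
```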
## 4 ways to reshape and plot
### 1. value_counts
```
```
### 2. crosstab
```
```
### 3. Pivot Table
```
```
### 4. melt
```
```
# Linear Regression Hardcoded
In this notebook, we are going to hardcode the linear regression algorithm (no use of libraries like scikit-learn and such).
## imports + configurations
Here we import our dependencies.
```
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from math import sqrt
```
Here we import our data.<br>
The dataset contains temperature measurements recorded during World War II.<br>
I got the data from Kaggle: __[Weather World War 2](https://www.kaggle.com/smid80/weatherww2)__.
```
data = pd.read_csv("datasets/weather_ww2/Summary of Weather.csv", delimiter=',', low_memory=False)
data.fillna(0,inplace=True)
data = data.sample(frac=1)
data.head()
data.describe()
```
## Representing the data
Now we select the rows we are going to use.
```
columns = ['MaxTemp','MinTemp']
filteredColumns = data[columns]
filteredColumns.describe()
```
We print a plot graph representing the data.
```
plt.figure(figsize=(15,10))
x = filteredColumns['MinTemp']
y = filteredColumns['MaxTemp']
plt.plot(x,y,'mo')
```
And we print a nice graph representing the Minimum temperature / Mean(Maximum temperature).
```
#group the data by MinTemp (x axis)
#calculate the mean of the MaxTemp (y axis)
#reset the dataframe with the column names as indexes
dataWithYAsMean = filteredColumns.groupby('MinTemp').mean().reset_index()
x_grouped = dataWithYAsMean['MinTemp']
y_grouped = dataWithYAsMean['MaxTemp']
plt.figure(figsize=(15,10))
plt.title('Minimum temperature / Mean(Maximum temperature)')
plt.xlabel('Minimum temperature')
plt.ylabel('Mean (Maximum temperature)')
plt.plot(x_grouped,y_grouped,'mo')
```
## Simple linear regression algorithm
We first define our error function. We are going to use the __[Root Mean Squared Error (RMSE)](http://www.statisticshowto.com/rmse/)__ to evaluate our predictions.<br>
The Root Mean Squared Error equation is:<br>
$$
\begin{align*}
RMSE &= \sqrt{\frac{1}{N}\sum_{i=1}^N(Y_i - \hat{Y})^2}
\end{align*}
$$
```
# Calculate the mean of a list of numbers
def calculate_mean(values):
return sum(values)/float(len(values))
# Calculate root mean squared error
def root_mean_squared_error(actual_values, predicted_values):
sum_value = .0
for i in range(len(actual_values)):
sum_value += ((predicted_values[i] - actual_values[i]) ** 2)
return sqrt(sum_value/float(len(actual_values)))
```
We then define our simple linear regression function. To get our line, we need to minimize our loss function. <br>
You can find out more about it in this nice __[document](https://www.amherst.edu/system/files/media/1287/SLR_Leastsquares.pdf)__. <br>
To summarize, the optimal b1 and b0 values can be found using these equations: <br><br>
$$
\begin{align*}
\hat{\beta}_1 &= \frac{\sum_{i=1}^N(X_i - \bar{X})(Y_i-\bar{Y})}{\sum_{i=1}^N(X_i - \bar{X})^2} \\
\hat{\beta}_0 &= \bar{Y} - \hat{\beta}_1 \bar{X} \\
\end{align*}
$$
We can rewrite b1 using the covariance and variance formulas:
$$
\DeclareMathOperator{\Var}{Var}
\DeclareMathOperator{\Cov}{Cov}
\begin{align*}
\hat{\beta}_1 &= \frac{\Cov(X,Y)}{\Var(X)}
\end{align*}
$$
Note that the covariance and variance formulas are:
$$
\begin{align*}
\Cov(X,Y) &= \frac{1}{N}\sum_{i=1}^N(X_i - \bar{X})(Y_i-\bar{Y}) \\
\Var(X) &= \frac{1}{N}\sum_{i=1}^N(X_i - \bar{X})^2
\end{align*}
$$
```
# Covariance function
def covariance(x,y):
# calculate the mean of x and y
mean_x = calculate_mean(x)
mean_y = calculate_mean(y)
# calculate covariance
cov = .0
for i in range(len(x)):
cov += (x[i] - mean_x) * (y[i] - mean_y)
return cov / float(len(x))
# Variance function
def variance(x):
# calculate the mean of x
mean_x = calculate_mean(x)
# calculate variance
var = .0
for x_i in x:
var += ((x_i - mean_x) ** 2)
return var / float(len(x))
# Simple linear regression algorithm
# For our example, we are going to try to find the equation of our line
# y = b0 + b1*x
def simple_linear_regression(x, y):
# calculate the mean of x and y
mean_x = calculate_mean(x)
mean_y = calculate_mean(y)
# calculate the regression coefficients
b1 = covariance(x,y) / variance(x)
b0 = mean_y - (b1 * mean_x)
return b0, b1
# linear regression plot function
def plot_linear_regression_result(x,y,predicted_y):
# plotting the actual points as a scatter
plt.figure(figsize=(12,8))
plt.scatter(x,y,color = "y", marker = "o",s=30)
# plotting the regression line
plt.plot(x, predicted_y, color = "m", linewidth=2)
# putting labels
plt.title('Linear regression of Min/Max Temperature during World War 2')
plt.xlabel('Min Temperature')
plt.ylabel('Max Temperature')
# function to show plot
plt.show()
```
We now create a function to split our data into training and testing data.
```
# function for splitting the data into training and testing
def split_data(x,train_split_rate = .5):
train_x = x.iloc[:int(round(len(x)*train_split_rate))]
test_x = x.iloc[int(round(len(x)*train_split_rate)):]
train_x.index = range(len(train_x))
test_x.index = range(len(test_x))
return train_x, test_x
```
Plotting a nice representation of our simple linear regression result
```
# splitting the data
train_data, test_data = split_data(data,train_split_rate=.5)
train_x, test_x = train_data['MinTemp'], test_data['MinTemp']
train_y, test_y = train_data['MaxTemp'], test_data['MaxTemp']
# getting the predicted y values
b0, b1 = simple_linear_regression(train_x,train_y)
# predicted vector
predicted_y = b0 + (b1 * test_x)
# calling the plot function
plot_linear_regression_result(test_x,test_y,predicted_y)
```
The predicted line equation :
```
print("Y = {} + ({} * X)".format(b0,b1))
```
And finally, we print our loss.
```
rmse = root_mean_squared_error(test_y,predicted_y)
print("Root Mean Squared Error value = {}".format(rmse))
```
## Implementing simple linear regression using sklearn
We are now going to implement the same algorithm using sklearn
### We import our dependencies
```
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.linear_model import LinearRegression
```
Now we split the data into training and testing sets (with shuffling, of course) and reshape the training and testing X values.
```
train_x, test_x, train_y, test_y = train_test_split(x,y,test_size=0.5,shuffle=True)
train_x = train_x.values.reshape(-1,1)
test_x = test_x.values.reshape(-1,1)
```
Then we fit our linear regression model.
```
simple_linear_regression = LinearRegression()
simple_linear_regression.fit(train_x,train_y)
```
We can then test our model with the predict function and see how well it did with the mean_squared_error function
```
predicted_y = simple_linear_regression.predict(test_x)
rmse = sqrt(mean_squared_error(test_y,predicted_y))
print("Root Mean Squared Error value = {}".format(rmse))
```
And lastly, a little graph representing the linear regression.
```
plot_linear_regression_result(test_x,test_y,predicted_y)
```
# Holt's linear trend method
Using Holt's exponential smoothing model with a linear trend to forecast Brent crude oil prices
* Importing libraries
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
```
* Reading the CSV file
```
dados = pd.read_csv('BrentOilPrices.csv')
```
* Displaying the first five rows
```
dados.head()
dados['Date'] = pd.to_datetime(dados['Date'])
```
* Setting the date as the index
```
dados = dados.set_index('Date')
```
* Plotting the time series
```
dados.plot()
plt.xlabel('Ano')
plt.ylabel('Preรงo')
plt.tight_layout()
```
* Creating training and test samples
```
t_treino = dados[dados.index<'2016-01-01'].index.values
t_teste = dados[dados.index>='2016-01-01'].index.values
X_treino = dados[dados.index<'2016-01-01'].values
X_teste = dados[dados.index>='2016-01-01'].values
```
* Applying the Exponential Smoothing (Holt) model
```
from statsmodels.tsa.api import ExponentialSmoothing, Holt, SimpleExpSmoothing
fit1 = Holt(X_treino).fit(smoothing_level = 0.3,smoothing_slope = 0.1)
fit1 = fit1.forecast(len(X_teste))
fit2 = Holt(X_treino).fit(smoothing_level = 0.3,smoothing_slope = 0.3)
fit2 = fit2.forecast(len(X_teste))
fit3 = Holt(X_treino).fit(smoothing_level = 0.3,smoothing_slope = 0.5)
fit3 = fit3.forecast(len(X_teste))
fit4 = Holt(X_treino).fit(smoothing_level = 0.3,smoothing_slope = 0.7)
fit4 = fit4.forecast(len(X_teste))
plt.plot(t_treino,X_treino, marker='o', color='black',label='Treino')
plt.plot(t_teste,X_teste, marker='o', color='red',label='Teste')
plt.plot(t_teste,fit1, marker='o', color='blue',label='Fit1')
plt.plot(t_teste,fit2, marker='o', color='green',label='Fit2')
plt.plot(t_teste,fit3, marker='o', color='orange',label='Fit3')
plt.plot(t_teste,fit4, marker='o', color='cyan',label='Fit4')
plt.xlabel('Ano')
plt.ylabel('Preรงo')
plt.legend()
fit4a = Holt(X_treino).fit(smoothing_level = 0.1,smoothing_slope = 0.7)
fit4a = fit4a.forecast(len(X_teste))
fit4b = Holt(X_treino).fit(smoothing_level = 0.3,smoothing_slope = 0.7)
fit4b = fit4b.forecast(len(X_teste))
fit4c = Holt(X_treino).fit(smoothing_level = 0.5,smoothing_slope = 0.7)
fit4c = fit4c.forecast(len(X_teste))
fit4d = Holt(X_treino).fit(smoothing_level = 0.7,smoothing_slope = 0.7)
fit4d = fit4d.forecast(len(X_teste))
plt.plot(t_treino,X_treino, marker='o', color='black',label='Treino')
plt.plot(t_teste,X_teste, marker='o', color='red',label='Teste')
plt.plot(t_teste,fit4a, marker='o', color='blue',label='Fit4a')
plt.plot(t_teste,fit4b, marker='o', color='green',label='Fit4b')
plt.plot(t_teste,fit4c, marker='o', color='orange',label='Fit4c')
plt.plot(t_teste,fit4d, marker='o', color='cyan',label='Fit4d')
plt.xlabel('Ano')
plt.ylabel('Preรงo')
plt.legend()
```
* Computing the errors
```
from sklearn.metrics import mean_squared_error
MSE_fit4a = mean_squared_error(X_teste,fit4a)
MSE_fit4b = mean_squared_error(X_teste,fit4b)
MSE_fit4c = mean_squared_error(X_teste,fit4c)
MSE_fit4d = mean_squared_error(X_teste,fit4d)
RMSE_fit4a = np.sqrt(MSE_fit4a)
RMSE_fit4b = np.sqrt(MSE_fit4b)
RMSE_fit4c = np.sqrt(MSE_fit4c)
RMSE_fit4d = np.sqrt(MSE_fit4d)
print("RMSE (SL=0.1) = {:0.2f}".format(RMSE_fit4a))
print("RMSE (SL=0.3) = {:0.2f}".format(RMSE_fit4b))
print("RMSE (SL=0.5) = {:0.2f}".format(RMSE_fit4c))
print("RMSE (SL=0.7) = {:0.2f}".format(RMSE_fit4d))
best = 1e6
i_best = 0
j_best = 0
for i in range(1,10):
for j in range(50,90):
ind1 = i/10
ind2 = j/100
fit_final = Holt(X_treino).fit(smoothing_level=ind1,smoothing_slope=ind2)
fit_final = fit_final.forecast(len(X_teste))
MSE = mean_squared_error(X_teste,fit_final)
RMSE = np.sqrt(MSE)
if(RMSE<best):
best = RMSE
i_best = ind1
j_best = ind2
print("SS= {}, SL={}, RMSE={}".format(i_best,j_best,RMSE))
print("Melhor smoothing_level = {}".format(i_best/10.0))
print("Melhor smoothing_slope = {}".format(j_best/100))
```
# Best fit
```
fit_best = Holt(X_treino).fit(smoothing_level = 0.3,smoothing_slope = 0.74)
fit_best = fit_best.forecast(len(X_teste))
plt.plot(t_treino,X_treino, marker='o', color='black',label='Treino')
plt.plot(t_teste,X_teste, marker='o', color='red',label='Teste')
plt.plot(t_teste,fit_best, marker='o', color='blue',label='Best Fit')
plt.xlabel('Ano')
plt.ylabel('Preรงo')
plt.legend()
```
```
import numpy as np
from itertools import permutations
from collections import defaultdict
import random
```
# load and parse dataset
```
!file nations -I
raw_data = []
entities = set()
with open('nations', 'r') as to_read:
for i, line in enumerate(to_read.readlines()):
s, p, o = line.strip().split(' ')
entities.add(s)
entities.add(o)
raw_data += [(s,p,o)]
A_implies_B_rules = [
# body: head
('ngo', 'intergovorgs'),
('reldiplomacy', 'embassy'),
('relexports', 'embassy')
]
A_B_implies_C_rules = [
('commonbloc0', 'commonbloc2', 'independence'),
('commonbloc0', 'commonbloc2', 'timesinceally'),
('commonbloc0', 'commonbloc2', 'blockpositionindex')
]
# ('commonbloc0', 'commonbloc2'): 'blockpositionindex',
# ('commonbloc0', 'commonbloc2'): 'independence',
# ('commonbloc0', 'commonbloc2'): 'timesinceally',
# ('commonbloc0', 'relexports'): 'timesinceally',
# ('commonbloc1', 'eemigrants'): 'ngoorgs3',
# ('commonbloc1', 'eemigrants'): 'relngo',
# ('commonbloc1', 'emigrants3'): 'ngoorgs3',
# ('commonbloc1', 'emigrants3'): 'relngo',
# ('commonbloc2', 'commonbloc0'): 'blockpositionindex',
# ('commonbloc2', 'commonbloc0'): 'independence',
# ('commonbloc2', 'commonbloc0'): 'timesinceally',
# ('commonbloc1', 'eemigrants'): 'ngoorgs3',
# ('commonbloc1', 'eemigrants'): 'relngo',
# ('reldiplomacy', 'eemigrants'): 'ngoorgs3',
# ('reldiplomacy', 'eemigrants'): 'relngo',
# ('commonbloc1', 'emigrants3'): 'relngo',
#java -jar /Users/simon/Office/Dokumente/Uni/Data\ Science\ and\ Machine\ Learning\ Master/Masters\ Project/Libraries/amie-dev.jar -d " " -minc 1 -minpca 1 -mins 30 -maxad 3 nations | grep '?h ?h'
#java -jar /Users/simon/Office/Dokumente/Uni/Data\ Science\ and\ Machine\ Learning\ Master/Masters\ Project/Libraries/amie-dev.jar -d " " -minc 1 -minpca 1 -mins 40 -maxad 2 nations | grep '?b =>'
train = set()
valid = set()
test = set()
entities = set()
A_implies_B_rule_examples = defaultdict(lambda: [])
A_B_implies_C_rule_examples = defaultdict(lambda: [])
counter_A_implies_B_rules = defaultdict(lambda: 0)
counter_A_B_implies_C_rules = defaultdict(lambda: 0)
for s,p,o in raw_data:
entities.add(s)
entities.add(o)
for x1, x2 in permutations(entities, 2):
for (A, B) in A_implies_B_rules:
if (x1, A, x2) in raw_data and (x1, B, x2) in raw_data:
valid.add((x1, B, x2))
A_implies_B_rule_examples[(A, B)] += [(x1, x2)]
counter_A_implies_B_rules[(A, B)] += 1
for x1, x2, x3 in permutations(entities, 3):
for (A, B, C) in A_B_implies_C_rules:
if (x1, A, x2) in raw_data and (x2, B, x3) in raw_data and (x1, C, x3) in raw_data:
valid.add((x1, C, x3))
A_B_implies_C_rule_examples[(A, B, C)] += [(x1, x2, x3)]
counter_A_B_implies_C_rules[(A, B, C)] += 1
for s,p,o in raw_data:
if (s,p,o) not in valid:
train.add((s,p,o))
train = list(train)
valid = list(valid)
random.Random(42).shuffle(valid)
valid, test = valid[:len(valid) // 2], valid[len(valid) // 2:]
print(len(train))
print(len(valid))
print(len(test))
```
# check that splits are mutually exclusive
```
for triple in train:
if triple in valid:
print("valid", triple)
if triple in test:
print("valid", triple)
for triple in valid:
if triple in train:
print("train", triple)
if triple in test:
print("test", triple)
for triple in test:
if triple in train:
print("train", triple)
if triple in valid:
print("valid", triple)
```
# save splits as .tsv
```
with open("train.tsv", "w", encoding='utf-8') as f:
for triple in train:
f.write("{}\t{}\t{}\n".format(*triple))
with open("valid.tsv", "w", encoding='utf-8') as f:
for triple in valid:
f.write("{}\t{}\t{}\n".format(*triple))
with open("test.tsv", "w", encoding='utf-8') as f:
for triple in test:
f.write("{}\t{}\t{}\n".format(*triple))
for (A, B), examples in A_implies_B_rule_examples.items():
with open("{}=>{}.tsv".format(A,B), "w", encoding='utf-8') as f:
for (x1, x2) in examples:
f.write("{}\t{}\t{}\n".format(x1, B, x2))
for (A, B, C), examples in A_B_implies_C_rule_examples.items():
with open("{},{}=>{}.tsv".format(A,B,C), "w", encoding='utf-8') as f:
for (x1, x2, x3) in examples:
f.write("{}\t{}\t{}\n".format(x1, C, x3))
```
# Overview
One of the most powerful features of spaCy is the ability to add [custom attributes and extensions](https://spacy.io/usage/processing-pipelines#custom-components-attributes) to `Doc`, `Span`, and `Token` classes. These extensions are stored in the underscore attribute (ie., `token._`). This allows us to store custom data and implement custom methods which are useful to medspaCy while still using the spaCy API.
MedspaCy adds a number of methods to the underscore attribute for each class. This notebook will walk through what these extensions are and how they can be used.
```
%load_ext autoreload
%autoreload 2
import sys
sys.path.insert(0, "..")
```
## Set up example data
First, we'll load a pipeline and set up a simple example of text to process with some entities.
```
import medspacy
from medspacy.target_matcher import TargetRule
nlp = medspacy.load(enable=["pyrush", "target_matcher", "context", "sectionizer"])
target_rules = [
TargetRule("afib", "CONDITION"),
TargetRule("pneumonia", "CONDITION", pattern=r"community[- ]acquired pneumonia"),
TargetRule("acute stroke", "CONDITION")
]
nlp.get_pipe("medspacy_target_matcher").add(target_rules)
text = """Past Medical History: Afib and community-acquired pneumonia.
Assessment/Plan: Acute stroke
"""
doc = nlp(text)
doc.ents
```
# All extensions
You can get a dict containing the extension names and default values or getter/setters for each by the top-level `get_extensions` method:
```
medspacy.get_extensions()
```
In the rest of the notebook, we'll walk through the 3 classes and show each of the extensions.
# I. Doc
```
medspacy.get_doc_extensions().keys()
```
## Sections
The only default `Doc` extensions relate to the sections of the doc which are identified by the `Sectionizer` class.
#### `doc._.sections`
A list of named tuples representing the different sections in a doc. Each tuple contains:
- `category`: A string representing the normalized name of the section
- `title_span`: The Span of the Doc which begins the section header
- `body_span`: The Span of the Section which begins after the section header
- `section_span`: The entire of the Section (title + body)
- `section_parent`: A parent section of the specific section, if any
```
for section in doc._.sections:
print(section)
print()
section.body_span
```
Each of the section attributes can be accessed as a list individually:
```
doc._.section_categories
doc._.section_titles
doc._.section_bodies
doc._.section_spans
doc._.section_parents
```
# II. Span
The `Span` class contains extensions related to the TargetRule used to extract an entity, ConText assertion attributes, and section attributes.
```
medspacy.get_span_extensions().keys()
```
We'll use this ent as an example:
```
span = doc.ents[1]
span
```
## `span._.target_rule`
The `TargetMatcher` class uses instances of `TargetRule` to define entities to extract from the doc. When an entity is created, the rule which matched the Span is referenced in `span._.target_rule`. This allows you to see which rule generated an entity and to access the metadata from the original rule.
```
span._.target_rule
span._.target_rule.literal
```
## ConText Attributes
An important part of clinical NLP is identifying whether concepts were actually experienced by the patient or whether they were negated, historical, experienced by someone else, or uncertain. These attributes are asserted using the `ConTextComponent` in medspaCy and added as attributes for each entity, but they can also be set manually or by the `Sectionizer`.
#### `span._.context_attributes`
Get a dict mapping each ConText assertion attribute to its value (default is always `False`).
```
span._.context_attributes
```
#### `span._.any_context_attributes`
Often, you want to know if any of these values are True, as this is an indicator to exclude or ignore an entity. `any_context_attributes` is `True` if any of these values have been asserted to be True.
```
span._.any_context_attributes
```
You can also access each of these attributes individually:
```
span._.is_negated
span._.is_historical
span._.is_hypothetical
span._.is_family
span._.is_uncertain
```
## Sections
Similar to the section attributes in `Doc`, `Span` includes attributes indicating which section of a note it occurs in.
```
span._.section
span._.section_category
span._.section_title
span._.section_body
span._.section_span
span._.section_parent
span._.section_rule
```
## Window
#### `span._.window(n=1, left=True, right=True)`
You often want to look at the context surrounding a concept. One way to do this is looking at the sentence (`span.sent`), but sentence splitting is expensive. An alternative is looking at a fixed window surrounding a concept. You can do this using the custom method `span._.window()`, which returns the superspan surrounding a given span.
By default this method will return a window of one token on each side of the span, but this can be modified to be larger and to exclude either the left or right side.
```
span
span._.window()
span._.window(2)
span._.window(2, left=False)
span._.window(2, right=False)
```
## Contains
#### `span._.contains(target, regex=True, case_insensitive=True)`
Returns True if a target phrase is contained in the text underlying a span (ie., `span.text`). `target` can be either a string or list of strings. `regex` and `case_insensitive` define whether to search using regular expressions and whether to ignore case.
```
span
span._.contains(r"community[- ]acquired")
span._.contains("community acquired", regex=False)
span._.contains(["pneumonia", "pna"])
```
# III. Token
Token extensions correspond to section and window attributes of `Span`.
```
medspacy.get_token_extensions().keys()
token = doc[8]
token
```
## Section
```
token._.section
token._.section_category
token._.section_title
token._.section_body
token._.section_span
token._.section_parent
token._.section_rule
```
## Window
```
token
token._.window()
token._.window(2)
token._.window(2, left=False)
token._.window(2, right=False)
```
# ADMM (alternating direction method of multipliers)
## On the previous seminar
- Subgradient method: the basic method for solving nonsmooth problems
- Proximal method and its properties: an alternative to gradient descent
- Proximal gradient method: a look inside the black box
- Acceleration of the proximal gradient method, ISTA and FISTA
## Plan for today
- Using the Lagrangian as a model of the objective function in constrained optimization
- Alternating descent and ascent steps to solve the minimax problem
- Regularization of the Lagrangian
- ADMM
## Dual problem: a reminder
- Original problem
\begin{align*}
& \min f(x) \\
\text{s.t. } & Ax = b
\end{align*}
- Lagrangian
$$
L(x, \lambda) = f(x) + \lambda^{\top}(Ax - b)
$$
- Dual problem
$$
\max_{\lambda} g(\lambda),
$$
where $g(\lambda) = \inf_x L(x, \lambda)$
- Recovering the solution of the original problem
$$
x^* = \arg\min_x L(x, \lambda^*)
$$
## Solving the dual problem
- Gradient ascent, since the problem is unconstrained
$$
\lambda_{k+1} = \lambda_k + \alpha_k g'(\lambda_k)
$$
- Here the gradient of the dual function is
$$
g'(\lambda_k) = A\hat{x} - b,
$$
where $\hat{x} = \arg\min_x L(x, \lambda_k)$
- Combining the two steps into one, we obtain
\begin{align*}
& x_{k+1} = \arg\min_x L(x, \lambda_k)\\
& \lambda_{k+1} = \lambda_k + \alpha_k (Ax_{k+1} - b)
\end{align*}
```
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
plt.rc("text", usetex=True)
import cvxpy as cvx
def dual_ascent(update_x, A, b, alpha, x0, lambda0, max_iter):
x = x0.copy()
lam = lambda0.copy()
conv_x = [x]
conv_lam = [lam]
for i in range(max_iter):
x = update_x(x, lam, A, b)
lam = lam + alpha * (A @ x - b)
conv_x.append(x.copy())
conv_lam.append(lam.copy())
return x, lam, conv_x, conv_lam
```
### Model example
\begin{align*}
& \min \frac{1}{2}x^{\top}Px - c^{\top}x\\
\text{s.t. } & Ax = b
\end{align*}
- Lagrangian $L(x, \lambda) = \frac{1}{2}x^{\top}Px - c^{\top}x + \lambda^{\top}(Ax - b)$
- Update of the primal variable
$$
x_{k+1} = P^{-1}(c - A^{\top}\lambda_k)
$$
```
m, n = 10, 20
A = np.random.randn(m, n)
b = np.random.randn(m)
P = np.random.randn(n, n)
P = P.T @ P
c = np.random.randn(n)
spec = np.linalg.eigvalsh(P)
mu = spec.min()
print(mu)
x = cvx.Variable(n)
obj = 0.5 * cvx.quad_form(x, P) - c @ x
problem = cvx.Problem(cvx.Minimize(obj), [A @ x == b])
problem.solve(verbose=True)
print(np.linalg.norm(A @ x.value - b))
print(problem.value)
x0 = np.random.randn(n)
lam0 = np.random.randn(m)
max_iter = 100000
alpha = mu / 10
def f(x):
return 0.5 * x @ P @ x - c @ x
def L(x, lam):
return f(x) + lam @ (A @ x - b)
def update_x(x, lam, A, b):
return np.linalg.solve(P, c - A.T @ lam)
x_da, lam_da, conv_x_da, conv_lam_da = dual_ascent(update_x, A, b, alpha, x0, lam0, max_iter)
print(np.linalg.norm(A @ x_da - b))
print(0.5 * x_da @ P @ x_da - c @ x_da)
plt.plot([f(x) for x in conv_x_da], label="Objective")
plt.plot(problem.value * np.ones(len(conv_x_da)), label="Target value")
# plt.yscale("log")
plt.xscale("log")
plt.legend(fontsize=20)
plt.xlabel("\# iterations", fontsize=20)
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
plt.plot([L(x, lam) for x, lam in zip(conv_x_da, conv_lam_da)],
label="Lagrangian")
plt.legend(fontsize=20)
plt.xlabel("\# iterations", fontsize=20)
plt.semilogy([np.linalg.norm(A @ x - b) for x in conv_x_da], label="$\|Ax - b\|_2$")
plt.legend(fontsize=20)
plt.xlabel("\# iterations", fontsize=20)
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
plt.grid(True)
```
### An important special case
- The function $f$ is separable
- The $x$ update then splits into independent problems over each coordinate that can be solved in parallel (see the sketch below)
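A minimal sketch of what this means for the quadratic example above, assuming $P = \mathrm{diag}(d)$ is diagonal (which is not the case for the random $P$ generated earlier — this is only to illustrate the coordinate-wise split):
```
# Illustration only: if P = diag(d), then f(x) = 0.5*x@P@x - c@x is separable and the
# x-update of dual ascent splits into n independent scalar problems
# x_i = argmin_{x_i} 0.5*d_i*x_i**2 - c_i*x_i + (A.T @ lam)_i * x_i
def update_x_separable(x, lam, A, b, d):
    return (c - A.T @ lam) / d  # elementwise, so every coordinate can be updated in parallel
```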
## Accounting for the constraints explicitly: regularizing the Lagrangian
$$
L_{\rho}(x, \lambda) = f(x) + \lambda^{\top}(Ax - b) + \frac{\rho}{2} \|Ax - b\|_2^2
$$
- The method now takes the form
\begin{align*}
& x_{k+1} = \arg\min_x L_{\rho}(x, \lambda)\\
& \lambda_{k+1} = \lambda_k + \rho (Ax_{k+1} - b)
\end{align*}
- The parameter $\rho$ can also be adjusted in the course of the iterations
- Replacing the step size $\alpha_k$ with $\rho$ is related to the optimality conditions
```
def augmented_lagrangian(update_x, A, b, rho0, x0, lambda0, max_iter):
x = x0.copy()
lam = lambda0.copy()
conv_x = [x]
conv_lam = [lam]
rho = rho0
for i in range(max_iter):
x = update_x(x, lam, A, b)
lam = lam + rho * (A @ x - b)
conv_x.append(x.copy())
conv_lam.append(lam.copy())
return x, lam, conv_x, conv_lam
def update_x_al(x, lam, A, b):
return np.linalg.solve(P + rho * A.T @ A, c - A.T @ lam + A.T @ b)
rho = 10
max_iter = 1000
x_al, lam_al, conv_x_al, conv_lam_al = augmented_lagrangian(update_x_al, A, b, rho, x0, lam0, max_iter)
print(np.linalg.norm(A @ x_al - b))
print(0.5 * x_al @ P @ x_al - c @ x_al)
plt.plot([f(x) for x in conv_x_da], label="DA")
plt.plot([f(x) for x in conv_x_al], label="AL")
# plt.yscale("log")
plt.xscale("log")
plt.legend(fontsize=20)
plt.xlabel("\# iterations", fontsize=20)
plt.ylabel("Objective", fontsize=20)
plt.plot([L(x, lam) for x, lam in zip(conv_x_da, conv_lam_da)],
label="DA")
plt.plot([L(x, lam) for x, lam in zip(conv_x_al, conv_lam_al)],
label="AL")
plt.legend(fontsize=20)
plt.xscale("log")
plt.xlabel("\# iterations", fontsize=20)
plt.xlabel("Lagrangian", fontsize=20)
plt.semilogy([np.linalg.norm(A @ x - b) for x in conv_x_da], label="DA")
plt.semilogy([np.linalg.norm(A @ x - b) for x in conv_x_al], label="AL")
plt.legend(fontsize=20)
plt.xscale("log")
plt.xlabel("\# iterations", fontsize=20)
plt.ylabel("$\|Ax - b\|_2$", fontsize=20)
plt.yticks(fontsize=20)
plt.xticks(fontsize=20)
```
### ะกััะตััะฒะตะฝะฝะฐั ะฟัะพะฑะปะตะผะฐ
- ะกะปะฐะณะฐะตะผะพะต $\|Ax - b\|_2^2$ ัะดะตะปะฐะปะพ ะปะฐะณัะฐะฝะถะธะฐะฝ ะะัะตะฟะฐัะฐะฑะตะปัะฝัะผ!
## ะกะดะตะปะฐะตะผ ะตะณะพ ัะตะฟะฐัะฐะฑะตะปัะฝัะผ ะธ ะฟะพะปััะธะผ ADMM
ะะฐะดะฐัะฐ ััะฐะฝะตั ัะฐะบะพะน
\begin{align*}
& \min f(x) + I_{Ax = b} (z)\\
\text{s.t. } & x = z
\end{align*}
ะะปั ะฝะตั ะผะพะดะธัะธัะธัะพะฒะฐะฝะฝัะน ะปะฐะณัะฐะฝะถะธะฐะฝ ะฟัะธะผะตั ะฒะธะด
$$
L_{\rho}(x, z, \lambda) = f(x) + I_{Ax = b} (z) + \lambda^{\top}(x - z) + \frac{\rho}{2}\|x - z\|_2^2
$$
- ะขะตะฟะตัั ะผะตัะพะด ะฟัะธะผะตั ะฒะธะด
\begin{align*}
& x_{k+1} = \arg\min_x L_{\rho}(x, z_k, \lambda_k)\\
& z_{k+1} = \arg\min_z L_{\rho}(x_{k+1}, z, \lambda_k) \\
& \lambda_{k+1} = \lambda_k + \rho (x_{k+1} - z_{k+1})
\end{align*}
- The $z$-update is equivalent to the projection $\pi_{Ax = b}\left(x_{k+1} + \frac{\lambda_k}{\rho}\right)$
```
def admm(update_x, update_z, rho0, x0, z0, lambda0, max_iter):
x = x0.copy()
z = z0.copy()
lam = lambda0.copy()
conv_x = [x]
conv_z = [z]
conv_lam = [lam]
rho = rho0
for i in range(max_iter):
x = update_x(x, z, lam, A, b)
z = update_z(x, z, lam, A, b)
lam = lam + rho * (x - z)
conv_x.append(x.copy())
conv_z.append(z.copy())
conv_lam.append(lam.copy())
return x, z, lam, conv_x, conv_z, conv_lam
def update_x_admm(x, z, lam, A, b):
n = x.shape[0]
return np.linalg.solve(P + rho*np.eye(n), -lam + c + rho * z)
def update_z_admm(x, z, lam, A, b):
x_hat = lam / rho + x
return x_hat - A.T @ np.linalg.solve(A @ A.T, A @ x_hat - b)
z0 = np.random.randn(n)
lam0 = np.random.randn(n)
rho = 1
x_admm, z_admm, lam_admm, conv_x_admm, conv_z_admm, conv_lam_admm = admm(update_x_admm,
update_z_admm,
rho, x0, z0, lam0,
max_iter=10000)
plt.figure(figsize=(10, 8))
plt.plot([f(x) for x in conv_x_da], label="DA")
plt.plot([f(x) for x in conv_x_al], label="AL")
plt.plot([f(x) for x in conv_x_admm], label="ADMM x")
plt.plot([f(z) for z in conv_z_admm], label="ADMM z")
# plt.yscale("log")
plt.xscale("log")
plt.legend(fontsize=20)
plt.xlabel("\# iterations", fontsize=20)
plt.ylabel("Objective", fontsize=20)
plt.grid(True)
plt.yticks(fontsize=20)
plt.xticks(fontsize=20)
plt.semilogy([np.linalg.norm(A @ x - b) for x in conv_x_da], label="DA")
plt.semilogy([np.linalg.norm(A @ x - b) for x in conv_x_al], label="AL")
plt.semilogy([np.linalg.norm(A @ x - b) for x in conv_x_admm], label="ADMM")
plt.legend(fontsize=20)
plt.xscale("log")
plt.xlabel("\# iterations", fontsize=20)
plt.ylabel("$\|Ax - b\|_2$", fontsize=20)
plt.grid(True)
plt.yticks(fontsize=20)
plt.show()
plt.semilogy([np.linalg.norm(x - z) for x, z in zip(conv_x_admm, conv_z_admm)])
plt.grid(True)
plt.xlabel("\# iterations", fontsize=20)
plt.ylabel("$\|x_k - z_k\|_2$", fontsize=20)
plt.yticks(fontsize=20)
plt.show()
```
### All of these properties are preserved under affine transformations
- The problem in its general form can therefore be written as
\begin{align*}
& \min f(x) + g(z)\\
\text{s.t. } & Ax + Bz = d
\end{align*}
- The corresponding augmented Lagrangian is
$$
L_{\rho}(x, z, \lambda) = f(x) + g(z) + \lambda^{\top}(Ax + Bz - d) + \frac{\rho}{2}\|Ax + Bz - d\|_2^2
$$
- In this case we get separability between $x$ and $z$, but not within each of these blocks of variables
- Finally, after absorbing the linear term into the quadratic one, we obtain the scaled form
\begin{align*}
& x_{k+1} = \arg\min_x \left( f(x) + \frac{\rho}{2}\|Ax + Bz_k - d + u_k \|_2^2 \right)\\
& z_{k+1} = \arg\min_z \left( g(z) + \frac{\rho}{2}\|Ax_{k+1} + Bz - d + u_k \|_2^2 \right)\\
& u_{k+1} = u_k + Ax_{k+1} + Bz_{k+1} - d,
\end{align*}
where $u_k = \lambda_k / \rho$ is the scaled dual variable
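A generic sketch of this scaled form is shown below (it is not code from the lecture): the subproblem solvers `prox_f` and `prox_g` are assumed to be supplied by the user and to solve the $x$- and $z$-subproblems above exactly.
```
import numpy as np

def admm_scaled(prox_f, prox_g, A, B, d, x0, z0, rho, max_iter=100):
    """Scaled-form ADMM sketch for min f(x) + g(z) s.t. Ax + Bz = d.

    prox_f(v, rho) must return argmin_x f(x) + rho/2 * ||A @ x - v||^2,
    prox_g(v, rho) must return argmin_z g(z) + rho/2 * ||B @ z - v||^2.
    """
    x, z = x0.copy(), z0.copy()
    u = np.zeros_like(d)                  # scaled dual variable, u = lambda / rho
    for _ in range(max_iter):
        x = prox_f(d - B @ z - u, rho)    # x-update
        z = prox_g(d - A @ x - u, rho)    # z-update
        u = u + A @ x + B @ z - d         # scaled dual update
    return x, z, u
```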
### How can all of this be used in practice?
- Casting your problem into the standard form from the previous slide is often inconvenient
- It is therefore usually better to reformulate the specific problem by hand into a form amenable to ADMM
- Derive the solutions of the auxiliary subproblems analytically
- Implement these computations as efficiently as possible, e.g. precompute factorizations of matrices that do not change across iterations (see the sketch below)
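For instance, in the equality-constrained QP above, the $x$-update solves a linear system with the fixed matrix $P + \rho I$ at every iteration, so its Cholesky factorization can be computed once up front. Here is a sketch under the assumption that $P + \rho I$ is positive definite; `make_update_x` is an illustrative helper, not part of the original code.
```
import numpy as np
from scipy.linalg import cho_factor, cho_solve

def make_update_x(P, c, rho):
    # Factorize P + rho * I once; each ADMM iteration then only needs
    # a cheap triangular solve instead of a full np.linalg.solve call.
    chol = cho_factor(P + rho * np.eye(P.shape[0]))
    def update_x(x, z, lam, A, b):
        return cho_solve(chol, c - lam + rho * z)
    return update_x

# Usage: the returned closure has the same signature as update_x_admm and can
# be passed to the admm driver defined earlier, e.g.
# admm(make_update_x(P, c, rho), update_z_admm, rho, x0, z0, lam0, max_iter=10000)
```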
## Linear programming
\begin{align*}
& \min c^{\top}x\\
\text{s.t. } & Ax = b\\
& x \geq 0
\end{align*}
- The augmented Lagrangian is
$$
L_{\rho}(x, z, \lambda) = c^{\top}x + I_{z \geq 0}(z) + \lambda^{\top}(x - z) + \frac{\rho}{2}\|x - z\|_2^2,
$$
where $c^{\top}x$ is restricted to the set $Ax = b$.
- The $x$-update step takes the form
$$
x_{k+1} = \arg\min_{x: \; Ax = b} c^{\top}x +\lambda^{\top}x + \frac{\rho}{2}\|x - z\|_2^2
$$
- The optimality conditions yield the linear system
$$
\begin{bmatrix}
\rho I & A^{\top} \\
A & 0
\end{bmatrix}
\begin{bmatrix}
x_{k+1}\\
\mu
\end{bmatrix}
=
\begin{bmatrix}
-\lambda_k - c + \rho z_k\\
b
\end{bmatrix}
$$
```
import scipy.optimize as scopt
m, n = 10, 200
A = np.random.rand(m, n)
b = np.random.rand(m)
c = np.random.rand(n)
scipy_linprog_conv = []
def callback_splin(cur_res):
scipy_linprog_conv.append(cur_res)
res = scopt.linprog(c, A_eq=A, b_eq=b,
bounds=[(0, None) for i in range(n)],
callback=callback_splin, method="simplex")
print(res)
def update_x_admm(x, z, lam, A, b):
n = x.shape[0]
m = A.shape[0]
C = np.block([[rho * np.eye(n), A.T], [A, np.zeros((m, m))]])
rhs = np.block([-lam - c + rho * z, b])
return np.linalg.solve(C, rhs)[:n]
def update_z_admm(x, z, lam, A, b):
x_hat = lam / rho + x
return np.clip(x_hat, 0, np.max(x_hat))
x0 = np.random.randn(n)
z0 = np.random.randn(n)
lam0 = np.random.randn(n)
rho = 1
x_admm, z_admm, lam_admm, conv_x_admm, conv_z_admm, conv_lam_admm = admm(update_x_admm,
update_z_admm,
rho, x0, z0, lam0, max_iter=100)
print(c @ x_admm - res.fun, np.linalg.norm(x_admm - res.x))
plt.figure(figsize=(10, 8))
plt.plot([c @ x for x in conv_x_admm], label="ADMM")
plt.plot([c @ res.x for res in scipy_linprog_conv], label="Scipy")
plt.legend(fontsize=20)
plt.grid(True)
plt.xlabel("\# iterations", fontsize=20)
plt.ylabel("$c^{\\top}x_k$", fontsize=20)
plt.yticks(fontsize=20)
plt.xticks(fontsize=20)
```
## Remarks
- Convergence per iteration is slower, but the cost of a single iteration is also lower
- The main benefit of ADMM is obtaining a moderately accurate solution **in parallel** and very quickly
- Different ways of casting a problem into an ADMM-friendly form give rise to different methods with different properties
- For example, [this paper](https://papers.nips.cc/paper/6746-a-new-alternating-direction-method-for-linear-programming.pdf) proposes an alternative way of solving linear programs via ADMM
- The [SCS method](https://stanford.edu/~boyd/papers/pdf/scs_long.pdf), used by default in CVXPy, is based on applying ADMM to a conic reformulation of the original problem
# Machine translation
Machine translation refers to automatically translating a piece of text from one language into another. Because a text sequence does not necessarily have the same length in different languages, we use machine translation as an example to introduce the application of the encoder-decoder architecture and the attention mechanism.
## Reading and preprocessing the dataset
We first define some special tokens. The "<pad>" (padding) token is appended after shorter sequences until every sequence has the same length, while the "<bos>" and "<eos>" tokens mark the beginning and the end of a sequence, respectively.
```
import collections
import io
import math
from mxnet import autograd, gluon, init, nd
from mxnet.contrib import text
from mxnet.gluon import data as gdata, loss as gloss, nn, rnn
PAD, BOS, EOS = '<pad>', '<bos>', '<eos>'
```
Next we define two helper functions to preprocess the data that is read below.
```
# Record all tokens of a sequence in all_tokens so that a vocabulary can be built
# later; then append EOS and PAD tokens until the sequence length reaches
# max_seq_len, and store the padded sequence in all_seqs
def process_one_seq(seq_tokens, all_tokens, all_seqs, max_seq_len):
    all_tokens.extend(seq_tokens)
    seq_tokens += [EOS] + [PAD] * (max_seq_len - len(seq_tokens) - 1)
    all_seqs.append(seq_tokens)
# Build the vocabulary from all tokens, convert the tokens of every sequence into
# word indices, and construct an NDArray instance
def build_data(all_tokens, all_seqs):
vocab = text.vocab.Vocabulary(collections.Counter(all_tokens),
reserved_tokens=[PAD, BOS, EOS])
indices = [vocab.to_indices(seq) for seq in all_seqs]
return vocab, nd.array(indices)
```
For ease of demonstration, we use a very small French-English dataset here. Each line of this dataset is a pair consisting of a French sentence and its corresponding English sentence, separated by `'\t'`. When reading the data, we append the "<eos>" token at the end of each sentence and, if necessary, add "<pad>" tokens so that each sequence has length `max_seq_len`. We build separate vocabularies for the French and the English tokens; the indices of French words and English words are independent of each other.
```
def read_data(max_seq_len):
    # 'in' and 'out' are short for 'input' and 'output', respectively
in_tokens, out_tokens, in_seqs, out_seqs = [], [], [], []
with io.open('../data/fr-en-small.txt') as f:
lines = f.readlines()
for line in lines:
in_seq, out_seq = line.rstrip().split('\t')
in_seq_tokens, out_seq_tokens = in_seq.split(' '), out_seq.split(' ')
if max(len(in_seq_tokens), len(out_seq_tokens)) > max_seq_len - 1:
            continue  # Skip this sample if it would be longer than max_seq_len after appending EOS
process_one_seq(in_seq_tokens, in_tokens, in_seqs, max_seq_len)
process_one_seq(out_seq_tokens, out_tokens, out_seqs, max_seq_len)
in_vocab, in_data = build_data(in_tokens, in_seqs)
out_vocab, out_data = build_data(out_tokens, out_seqs)
return in_vocab, out_vocab, gdata.ArrayDataset(in_data, out_data)
```
We set the maximum sequence length to 7 and then look at the first sample that was read. This sample contains a sequence of French word indices and a sequence of English word indices.
```
max_seq_len = 7
in_vocab, out_vocab, dataset = read_data(max_seq_len)
dataset[0]
```
## Encoder-decoder with attention
We will use an encoder-decoder with attention to translate short French sentences into English. Below we walk through the implementation of the model.
### Encoder
In the encoder, the word indices of the input language are mapped to word representations through an embedding layer and then fed into a multi-layer gated recurrent unit. As mentioned in the ["Concise implementation of recurrent neural networks"](../chapter_recurrent-neural-networks/rnn-gluon.ipynb) section, Gluon's `rnn.GRU` instance also returns, after the forward computation, both the output and the multi-layer hidden state at the final time step. Here the output refers to the hidden states of the last hidden layer at each time step and does not involve any output-layer computation. The attention mechanism uses these outputs as its keys and values.
```
class Encoder(nn.Block):
def __init__(self, vocab_size, embed_size, num_hiddens, num_layers,
drop_prob=0, **kwargs):
super(Encoder, self).__init__(**kwargs)
self.embedding = nn.Embedding(vocab_size, embed_size)
self.rnn = rnn.GRU(num_hiddens, num_layers, dropout=drop_prob)
def forward(self, inputs, state):
        # The input shape is (batch size, number of time steps); swap the sample and time-step dimensions of the output
embedding = self.embedding(inputs).swapaxes(0, 1)
return self.rnn(embedding, state)
def begin_state(self, *args, **kwargs):
return self.rnn.begin_state(*args, **kwargs)
```
Below we create a mini-batch sequence input with batch size 4 and 7 time steps. We set the number of hidden layers of the gated recurrent unit to 2 and the number of hidden units to 16. After the encoder performs the forward computation on this input, the returned output has shape (number of time steps, batch size, number of hidden units). The multi-layer hidden state of the gated recurrent unit at the final time step has shape (number of hidden layers, batch size, number of hidden units). For a gated recurrent unit, the `state` list contains only a single element, the hidden state; if a long short-term memory were used instead, the `state` list would also contain a second element, the memory cell.
```
encoder = Encoder(vocab_size=10, embed_size=8, num_hiddens=16, num_layers=2)
encoder.initialize()
output, state = encoder(nd.zeros((4, 7)), encoder.begin_state(batch_size=4))
output.shape, state[0].shape
```
### Attention mechanism
Before describing how to implement the vectorized computation of the attention mechanism, let us first look at the `flatten` option of a `Dense` instance. When the input has more than two dimensions, by default a `Dense` instance treats all dimensions other than the first (sample) dimension as feature dimensions that require an affine transformation, and automatically reshapes the input into a two-dimensional matrix whose rows are samples and whose columns are features. After the computation, the output matrix has shape (number of samples, number of outputs). If we want the fully connected layer to apply the affine transformation only to the last dimension of the input while keeping the shapes of the other dimensions unchanged, we need to set the `flatten` option of the `Dense` instance to `False`. In the example below, the fully connected layer transforms only the last dimension of the input, so only the last dimension of the output shape changes, becoming the number of outputs of the fully connected layer, 2.
```
dense = nn.Dense(2, flatten=False)
dense.initialize()
dense(nd.zeros((3, 5, 7))).shape
```
We implement the function $a$ defined in the ["Attention mechanism"](./attention.ipynb) section: the concatenated inputs are transformed by a multilayer perceptron with a single hidden layer. The input of the hidden layer is the one-to-one concatenation of the decoder's hidden state with the encoder's hidden states at all time steps, and the tanh function is used as the activation function. The output layer has a single output. Neither `Dense` instance uses a bias, and both set `flatten=False`. The length of the vector $\boldsymbol{v}$ in the definition of the function $a$ is a hyperparameter, namely `attention_size`.
```
def attention_model(attention_size):
model = nn.Sequential()
model.add(nn.Dense(attention_size, activation='tanh', use_bias=False,
flatten=False),
nn.Dense(1, use_bias=False, flatten=False))
return model
```
The inputs of the attention mechanism consist of queries, keys, and values. We assume that the encoder and the decoder have the same number of hidden units. Here the query is the decoder's hidden state at the previous time step, with shape (batch size, number of hidden units); the keys and the values are both the encoder's hidden states at all time steps, with shape (number of time steps, batch size, number of hidden units). The attention mechanism returns the context variable of the current time step, with shape (batch size, number of hidden units).
```
def attention_forward(model, enc_states, dec_state):
    # Broadcast the decoder hidden state to the same shape as the encoder hidden states, then concatenate them
    dec_states = nd.broadcast_axis(
        dec_state.expand_dims(0), axis=0, size=enc_states.shape[0])
    enc_and_dec_states = nd.concat(enc_states, dec_states, dim=2)
    e = model(enc_and_dec_states)  # Shape: (number of time steps, batch size, 1)
    alpha = nd.softmax(e, axis=0)  # Softmax over the time-step dimension
    return (alpha * enc_states).sum(axis=0)  # Return the context variable
```
In the example below, the encoder has 10 time steps and a batch size of 4, and both the encoder and the decoder have 8 hidden units. The attention mechanism returns a mini-batch of context vectors, each of which has a length equal to the number of hidden units of the encoder. The output shape is therefore (4, 8).
```
seq_len, batch_size, num_hiddens = 10, 4, 8
model = attention_model(10)
model.initialize()
enc_states = nd.zeros((seq_len, batch_size, num_hiddens))
dec_state = nd.zeros((batch_size, num_hiddens))
attention_forward(model, enc_states, dec_state).shape
```
### Decoder with attention
We directly use the encoder's hidden state at the final time step as the initial hidden state of the decoder. This requires that the recurrent neural networks of the encoder and the decoder have the same number of hidden layers and the same number of hidden units.
In the forward computation of the decoder, we first compute the context vector of the current time step through the attention mechanism introduced above. Since the decoder's input comes from the word indices of the output language, we obtain the representation of the input through an embedding layer and then concatenate it with the context vector along the feature dimension. We feed this concatenation together with the hidden state of the previous time step into a gated recurrent unit to compute the output and the hidden state of the current time step. Finally, the output is transformed by a fully connected layer into predictions over the output words, with shape (batch size, output vocabulary size).
```
class Decoder(nn.Block):
def __init__(self, vocab_size, embed_size, num_hiddens, num_layers,
attention_size, drop_prob=0, **kwargs):
super(Decoder, self).__init__(**kwargs)
self.embedding = nn.Embedding(vocab_size, embed_size)
self.attention = attention_model(attention_size)
self.rnn = rnn.GRU(num_hiddens, num_layers, dropout=drop_prob)
self.out = nn.Dense(vocab_size, flatten=False)
def forward(self, cur_input, state, enc_states):
        # Use the attention mechanism to compute the context vector
        c = attention_forward(self.attention, enc_states, state[0][-1])
        # Concatenate the embedded input and the context vector along the feature dimension
        input_and_c = nd.concat(self.embedding(cur_input), c, dim=1)
        # Add a time-step dimension (with a single time step) to the concatenation of the input and the context vector
        output, state = self.rnn(input_and_c.expand_dims(0), state)
        # Remove the time-step dimension; the output shape is (batch size, output vocabulary size)
        output = self.out(output).squeeze(axis=0)
        return output, state
    def begin_state(self, enc_state):
        # Directly use the encoder's hidden state at the final time step as the decoder's initial hidden state
return enc_state
```
## Training the model
We first implement the `batch_loss` function, which computes the loss of a mini-batch. The decoder's input at the initial time step is the special token `BOS`. After that, the decoder's input at each time step is the word of the ground-truth output sequence at the previous time step, i.e. teacher forcing. In addition, just as in the ["Implementation of word2vec"](word2vec-gluon.ipynb) section, we use a mask variable here to keep padding tokens from affecting the loss computation.
```
def batch_loss(encoder, decoder, X, Y, loss):
batch_size = X.shape[0]
enc_state = encoder.begin_state(batch_size=batch_size)
enc_outputs, enc_state = encoder(X, enc_state)
    # Initialize the decoder's hidden state
    dec_state = decoder.begin_state(enc_state)
    # The decoder's input at the initial time step is BOS
    dec_input = nd.array([out_vocab.token_to_idx[BOS]] * batch_size)
    # Use the mask variable to ignore the loss of labels that are PAD padding tokens
    mask, num_not_pad_tokens = nd.ones(shape=(batch_size,)), 0
    l = nd.array([0])
    for y in Y.T:
        dec_output, dec_state = decoder(dec_input, dec_state, enc_outputs)
        l = l + (mask * loss(dec_output, y)).sum()
        dec_input = y  # Teacher forcing
        num_not_pad_tokens += mask.sum().asscalar()
        # Once EOS is encountered, all subsequent words in the sequence are PAD, so set the corresponding mask entries to 0
        mask = mask * (y != out_vocab.token_to_idx[EOS])
return l / num_not_pad_tokens
```
In the training function, we need to update the model parameters of both the encoder and the decoder.
```
def train(encoder, decoder, dataset, lr, batch_size, num_epochs):
encoder.initialize(init.Xavier(), force_reinit=True)
decoder.initialize(init.Xavier(), force_reinit=True)
enc_trainer = gluon.Trainer(encoder.collect_params(), 'adam',
{'learning_rate': lr})
dec_trainer = gluon.Trainer(decoder.collect_params(), 'adam',
{'learning_rate': lr})
loss = gloss.SoftmaxCrossEntropyLoss()
data_iter = gdata.DataLoader(dataset, batch_size, shuffle=True)
for epoch in range(num_epochs):
l_sum = 0.0
for X, Y in data_iter:
with autograd.record():
l = batch_loss(encoder, decoder, X, Y, loss)
l.backward()
enc_trainer.step(1)
dec_trainer.step(1)
l_sum += l.asscalar()
if (epoch + 1) % 10 == 0:
print("epoch %d, loss %.3f" % (epoch + 1, l_sum / len(data_iter)))
```
Next, we create the model instances and set the hyperparameters. Then we can train the model.
```
embed_size, num_hiddens, num_layers = 64, 64, 2
attention_size, drop_prob, lr, batch_size, num_epochs = 10, 0.5, 0.01, 2, 50
encoder = Encoder(len(in_vocab), embed_size, num_hiddens, num_layers,
drop_prob)
decoder = Decoder(len(out_vocab), embed_size, num_hiddens, num_layers,
attention_size, drop_prob)
train(encoder, decoder, dataset, lr, batch_size, num_epochs)
```
## Predicting variable-length sequences
In the ["Beam search"](beam-search.ipynb) section we introduced three methods for generating the decoder's output at each time step. Here we implement the simplest of them, greedy search.
```
def translate(encoder, decoder, input_seq, max_seq_len):
in_tokens = input_seq.split(' ')
in_tokens += [EOS] + [PAD] * (max_seq_len - len(in_tokens) - 1)
enc_input = nd.array([in_vocab.to_indices(in_tokens)])
enc_state = encoder.begin_state(batch_size=1)
enc_output, enc_state = encoder(enc_input, enc_state)
dec_input = nd.array([out_vocab.token_to_idx[BOS]])
dec_state = decoder.begin_state(enc_state)
output_tokens = []
for _ in range(max_seq_len):
dec_output, dec_state = decoder(dec_input, dec_state, enc_output)
pred = dec_output.argmax(axis=1)
pred_token = out_vocab.idx_to_token[int(pred.asscalar())]
        if pred_token == EOS:  # The output sequence is complete once EOS is predicted at any time step
break
else:
output_tokens.append(pred_token)
dec_input = pred
return output_tokens
```
Let us run a simple test of the model. For the input French sentence "ils regardent .", the translated English sentence should be "they are watching .".
```
input_seq = 'ils regardent .'
translate(encoder, decoder, input_seq, max_seq_len)
```
## Evaluating the translation results
The BLEU (Bilingual Evaluation Understudy) metric [1] is commonly used to evaluate machine translation results. For any subsequence of the predicted sequence, BLEU checks whether this subsequence appears in the label sequence.
Specifically, let the precision of subsequences with $n$ words be $p_n$. It is the ratio of the number of $n$-word subsequences of the prediction that also occur in the label sequence to the number of $n$-word subsequences in the predicted sequence. As an example, suppose the label sequence is $A$, $B$, $C$, $D$, $E$, $F$ and the predicted sequence is $A$, $B$, $B$, $C$, $D$; then $p_1 = 4/5,\ p_2 = 3/4,\ p_3 = 1/3,\ p_4 = 0$. Let $len_{\text{label}}$ and $len_{\text{pred}}$ be the numbers of words in the label sequence and the predicted sequence. BLEU is then defined as
$$ \exp\left(\min\left(0, 1 - \frac{len_{\text{label}}}{len_{\text{pred}}}\right)\right) \prod_{n=1}^k p_n^{1/2^n},$$
where $k$ is the maximum subsequence length we wish to match. BLEU equals 1 when the predicted sequence is identical to the label sequence.
Because matching longer subsequences is harder than matching shorter ones, BLEU assigns a larger weight to the precision of longer subsequences. For instance, when $p_n$ is fixed at 0.5, we have $0.5^{1/2} \approx 0.7, 0.5^{1/4} \approx 0.84, 0.5^{1/8} \approx 0.92, 0.5^{1/16} \approx 0.96$ as $n$ grows. In addition, predicting shorter sequences tends to yield higher $p_n$ values; the exponential factor in front of the product above is therefore designed to penalize short outputs. For example, when $k=2$, suppose the label sequence is $A$, $B$, $C$, $D$, $E$, $F$ and the predicted sequence is $A$, $B$. Although $p_1 = p_2 = 1$, the penalty factor is $\exp(1-6/2) \approx 0.14$, so BLEU is also close to 0.14.
Let us now implement the BLEU computation.
```
def bleu(pred_tokens, label_tokens, k):
len_pred, len_label = len(pred_tokens), len(label_tokens)
score = math.exp(min(0, 1 - len_label / len_pred))
for n in range(1, k + 1):
num_matches, label_subs = 0, collections.defaultdict(int)
for i in range(len_label - n + 1):
label_subs[''.join(label_tokens[i: i + n])] += 1
for i in range(len_pred - n + 1):
if label_subs[''.join(pred_tokens[i: i + n])] > 0:
num_matches += 1
label_subs[''.join(pred_tokens[i: i + n])] -= 1
score *= math.pow(num_matches / (len_pred - n + 1), math.pow(0.5, n))
return score
```
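As a quick sanity check (this cell is not part of the original notebook), we can evaluate `bleu` on the worked example from the text: with label sequence $A$, $B$, $C$, $D$, $E$, $F$ and prediction $A$, $B$, $B$, $C$, $D$, the result for $k=2$ should be $\exp(1 - 6/5)\,(4/5)^{1/2}(3/4)^{1/4} \approx 0.68$.
```
# Sanity check of bleu() against the worked example in the text above.
label_tokens = ['A', 'B', 'C', 'D', 'E', 'F']
pred_tokens = ['A', 'B', 'B', 'C', 'D']
# Expected: exp(1 - 6/5) * (4/5)**0.5 * (3/4)**0.25, roughly 0.68
print(bleu(pred_tokens, label_tokens, k=2))
```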
Next, we define a helper function that prints the evaluation score.
```
def score(input_seq, label_seq, k):
pred_tokens = translate(encoder, decoder, input_seq, max_seq_len)
label_tokens = label_seq.split(' ')
print('bleu %.3f, predict: %s' % (bleu(pred_tokens, label_tokens, k),
' '.join(pred_tokens)))
```
The prediction is correct, so the score is 1.
```
score('ils regardent .', 'they are watching .', k=2)
```
Let us test a sample that is not in the training set.
```
score('ils sont canadiens .', 'they are canadian .', k=2)
```
## Summary
* The encoder-decoder architecture and the attention mechanism can be applied to machine translation.
* BLEU can be used to evaluate translation results.
## Exercises
* If the encoder and the decoder have different numbers of hidden units or hidden layers, how could the initialization of the decoder's hidden state be improved?
* During training, replace teacher forcing with using the decoder's output at the previous time step as the decoder's input at the current time step. Does the result change?
* Try training the model with a larger translation dataset, such as WMT [2] or the Tatoeba Project [3].
## References
[1] Papineni, K., Roukos, S., Ward, T., & Zhu, W. J. (2002, July). BLEU: a method for automatic evaluation of machine translation. In Proceedings of the 40th annual meeting on association for computational linguistics (pp. 311-318). Association for Computational Linguistics.
[2] WMT. http://www.statmt.org/wmt14/translation-task.html
[3] Tatoeba Project. http://www.manythings.org/anki/
## Scan the QR code to access the [discussion forum](https://discuss.gluon.ai/t/topic/4689)

```
import subprocess
import os
import pandas as pd
import requests
from bs4 import BeautifulSoup
import json
pd.set_option('display.max_rows', 300)
```

# Data Understanding
* The data has been taken from the following:
* Johns Hopkins (GitHub): https://github.com/CSSEGISandData/COVID-19
* RKI, via web scraping: https://www.rki.de/DE/Content/InfAZ/N/Neuartiges_Coronavirus/Fallzahlen.html
* REST API services to retrieve data
# GitHub CSV data
The data is obtained via `git clone`/`git pull` of https://github.com/CSSEGISandData/COVID-19.
```
git_pull = subprocess.Popen( "/usr/bin/git pull" ,
cwd = os.path.dirname( '../data/raw/COVID-19/' ),
shell = True,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE )
(out, error) = git_pull.communicate()
print("Error : " + str(error))
print("out : " + str(out))
data_path='../data/raw/COVID-19/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv'
pd_raw=pd.read_csv(data_path)
pd_raw.head()
```
# Web scraping
```
page = requests.get('https://www.rki.de/DE/Content/InfAZ/N/Neuartiges_Coronavirus/Fallzahlen.html')
soup = BeautifulSoup(page.content, 'html.parser')
html_table=soup.find('table')
all_rows=html_table.find_all('tr')
final_data_list=[]
for pos,rows in enumerate(all_rows):
col_list=[each_col.get_text(strip=True) for each_col in rows.find_all('td')] #td for data element
final_data_list.append(col_list)
pd_daily_status=pd.DataFrame(final_data_list).dropna().rename(columns={0:'state',
1:'cases',
2:'changes',
3:'cases_per_100k',
4:'fatal',
5:'comment'})
pd_daily_status.head()
```
## REST API calls
```
data=requests.get('https://services7.arcgis.com/mOBPykOjAyBO2ZKk/arcgis/rest/services/Coronaf%C3%A4lle_in_den_Bundesl%C3%A4ndern/FeatureServer/0/query?where=1%3D1&outFields=*&outSR=4326&f=json')
import json
json_object=json.loads(data.content)
type(json_object)
json_object.keys()
full_list=[]
for pos,each_dict in enumerate (json_object['features'][:]):
full_list.append(each_dict['attributes'])
pd_full_list=pd.DataFrame(full_list)
pd_full_list.head()
pd_full_list.to_csv('../data/raw/NPGEO/GER_state_data.csv',sep=';')
pd_full_list.shape[0]
```
# `Nuqleon.Memory`
Provides object pools, function memoization, and caching utilities.
## Reference the library
### Option 1 - Use a local build
If you have built the library locally, run the following cell to load the latest build.
```
#r "bin/Debug/net50/Nuqleon.Memory.dll"
```
### Option 2 - Use NuGet packages
If you want to use the latest published package from NuGet, run the following cell.
```
#r "nuget:Nuqleon.Memory,*-*"
```
## (Optional) Attach a debugger
If you'd like to step through the source code of the library while running samples, run the following cell, and follow instructions to start a debugger (e.g. Visual Studio). Navigate to the source code of the library to set breakpoints.
```
System.Diagnostics.Debugger.Launch();
```
## Using object pools
Object pools can be used to reduce the overhead of allocating fresh objects. This library supports object pooling for arbitrary types but also has built-in support for commonly used types, such as `StringBuilder` and various collection types.
### Using an object pool for a well-known collection type
First, we'll explore support for built-in types by having a look at pooling for `Stack<T>` objects. Support for other types is completely analogous.
#### Step 1 - Create a pool
The first step is to create a pool using one of the `Create` static method overloads on the pool type. In this example, we'll use `StackPool<T>` and create a pool that can grow up to `8` instances.
```
var pool = StackPool<int>.Create(size: 8);
```
#### Step 2 - Inspect the pool using `DebugView`
At any time we can have a look at the pool's internals using the `DebugView` property. This shows statistics of the pool, as well as call stacks for allocations and deallocations in `DEBUG` builds.
**Note:** Because the `DebugView` contains quite lengthy stack traces due to invocations taking place in .NET Interactive underneath the Notebook, we use a simple helper function to trim stack traces below. In regular use cases outside Notebooks, `DebugView` is typically accessed in the Visual Studio debugger Watch window.
```
using System.IO;
using System.Text.RegularExpressions;
// Regular expression to match stack trace lines with any amount of leading whitespace.
var isStackTraceLine = new Regex("^([ \t]*)at (.*)$");
// Eat our own object pooling dogfood here as well :-).
var stringBuilderPool = StringBuilderPool.Create(size: 8);
string TrimDebugView(string debugView)
{
using (var sb = stringBuilderPool.New())
using (var sr = new StringReader(debugView))
{
var skip = false;
string line;
while ((line = sr.ReadLine()) != null)
{
var match = isStackTraceLine.Match(line);
if (skip && !match.Success)
{
skip = false;
}
if (!skip)
{
if (match.Success && match.Groups[2].Value.StartsWith("Submission"))
{
sb.StringBuilder.AppendLine(match.Groups[1].Value + "at <Notebook>");
skip = true;
}
else
{
sb.StringBuilder.AppendLine(line);
}
}
}
return sb.StringBuilder.ToString();
}
}
void PrintDebugView()
{
Console.WriteLine(TrimDebugView(pool.DebugView));
}
PrintDebugView();
```
#### Step 3 - Allocate an object from the pool
One way to allocate an object from the pool is by using `Allocate`. Once we're done using the object, we call `Free`. This is typically done in a safe manner, e.g. using a `try...finally...` statement. In case an object is not returned to the pool, it will just get garbage collected and the pool's performance will be degraded. However, there won't be a memory leak.
```
PooledStack<int> stack = pool.Allocate();
PrintDebugView();
try
{
// Use the object here.
stack.Push(1);
}
finally
{
pool.Free(stack);
}
```
Now that we've returned the object back to the pool, let's inspect the pool again.
```
PrintDebugView();
```
#### Step 4 - An alternative way to allocate from the pool
An alternative way to allocate an object from the pool is by using `New` which returns a holder object that implements `IDisposable` and can be used with a `using` statement. This makes it easier to ensure returning the object to the pool, even in exceptional circumstances.
```
using (PooledStackHolder<int> h = pool.New())
{
PrintDebugView();
var s = h.Stack;
// Use the object here.
s.Push(1);
}
```
Now that we've returned the object back to the pool, let's inspect the pool again.
```
PrintDebugView();
```
### Use an object pool for a custom type
To understand how object pools work at the next level of detail, let's use object pooling for a custom object type. There are a few ways to work with object pools, including deriving from `ObjectPoolBase<T>` or by using `ObjectPool<T>` directly. We'll explore the latter.
#### Step 1 - Create a custom type
To illustrate the behavior of pooling, we'll start by defining a custom type for which we'll pool instances.
```
class MyObject
{
    // Demonstrates the expense of the object which may warrant pooling to reuse these array allocations.
private readonly int[] _values = new int[16];
public int this[int i]
{
get => _values[i];
set => _values[i] = value;
}
public override string ToString() => string.Join(", ", _values);
}
```
#### Step 2 - Create an object pool
Next, rather than allocating `MyObject` instances directly, we'll use an `ObjectPool<MyObject>` which gets parameterized on a `Func<MyObject>` factory. For illustration purposes, we'll include a side-effect to indicate an allocation has taken place. The second `size` parameter passed to the constructor indicates the maximum number of instances held by the pool.
```
using System.Memory;
var myPool = new ObjectPool<MyObject>(() =>
{
var res = new MyObject();
Console.WriteLine($"Allocated a new MyObject instance. Hash code = {res.GetHashCode()}");
return res;
}, size: 4);
```
#### Step 3 - Use the pool to allocate instances
Let's now use our freshly created pool to allocate an instance of `MyObject` and witness the invocation of the factory.
```
MyObject myObj1 = myPool.Allocate();
myObj1[0] = 42;
Console.WriteLine(myObj1);
```
#### Step 4 - Create a second object from the pool
When we request another instance of `MyObject` from the pool while `myObj1` is in use, another allocation will take place. In this example, we use `New` which returns a `PooledObject<T>` which implements `IDisposable` to return the object to the pool. By using a `using` statement, the object gets returned to the pool automatically.
```
using (PooledObject<MyObject> myObj2 = myPool.New())
{
myObj2.Object[0] = 43;
Console.WriteLine(myObj2.Object);
}
```
#### Step 5 - Witness the reuse of objects
To illustrate the reuse of objects from the pool, let's allocate yet another object from the pool. Because we still are holding on to `myObj1` but have used and released `myObj2`, the latter object can be reused. Note that the hash code of the object returned from the pool matches `myObj2` in the cell above.
```
using (PooledObject<MyObject> myObj3 = myPool.New())
{
Console.WriteLine($"Hash code = {myObj3.Object.GetHashCode()}");
Console.WriteLine(myObj3.Object);
}
```
Note that the instance that was returned still has the mutated state in the array from the last time the object was used. This may pose a security issue in some cases where it may be warranted to clear the contents of an object prior to returning it to the pool. To support this, we can implement additional interfaces on the pooled object type. We'll get to this in a moment.
#### Step 6 - Exploring pooling behavior a bit more
What happens if we allocate more objects than the specified `size` of the object pool? To figure this out, let's allocate a bunch more objects from the pool. But first, let's return `myObj1` to the pool.
```
myPool.Free(myObj1);
```
It goes without saying that `myObj1` should no longer be used after it has been returned to the pool because it can be used by some other piece of code at any time after having been returned. The use of `New` with a `using` statement makes it a bit harder to have this type of *use-after-free* bugs, but one should remain cautious and carefully review usage patterns of pooled objects.
Now, let's allocate more objects than fit in our pool. The pool's size is `4`, so let's allocate `5` objects.
```
var objs = new MyObject[5];
for (var i = 0; i < objs.Length; i++)
{
objs[i] = myPool.Allocate();
Console.WriteLine($"objs[{i}].GetHashCode() = {objs[i].GetHashCode()}");
}
```
Note that the first two objects are reused from the `myObj1` and `myObj2` allocations we did earlier. The remaining three objects were freshly allocated. Now, we'll return all of them back to the pool.
```
for (var i = 0; i < objs.Length; i++)
{
myPool.Free(objs[i]);
}
```
If we allocate another `5` objects now, we'll get four that get reused from the pool (because of its maximum size of `4`), while the fifth one will be new.
```
var objs = new MyObject[5];
for (var i = 0; i < objs.Length; i++)
{
objs[i] = myPool.Allocate();
Console.WriteLine($"objs[{i}].GetHashCode() = {objs[i].GetHashCode()}");
}
```
#### Step 7 - Support clearing an instance upon returning to the pool
To support clearing an instance prior to returning it to the pool, we can implement the `IClearable` interface on `MyObject`.
```
class MyObject : IClearable
{
    // Demonstrates the expense of the object which may warrant pooling to reuse these array allocations.
private readonly int[] _values = new int[16];
public int this[int i]
{
get => _values[i];
set => _values[i] = value;
}
public void Clear() => Array.Clear(_values, 0, _values.Length);
public override string ToString() => string.Join(", ", _values);
}
```
Let's create a new pool, just like we did before. We'll keep the side-effect in the factory delegate to spot reuse of objects further on.
```
using System.Memory;
var myPool = new ObjectPool<MyObject>(() =>
{
var res = new MyObject();
Console.WriteLine($"Allocated a new MyObject instance. Hash code = {res.GetHashCode()}");
return res;
}, size: 4);
```
Finally, let's allocate an object, mutate it, return it to the pool, and then allocate another object. This will cause reuse of the object. However, this time around we should not see the result of mutating the array upon reusing the same instance, because `IClearable.Clear` has been called by the pool.
```
Console.WriteLine("First usage of the object");
using (var obj = myPool.New())
{
Console.WriteLine($"obj#{obj.Object.GetHashCode()} = {obj.Object}");
obj.Object[0] = 42;
Console.WriteLine($"obj#{obj.Object.GetHashCode()} = {obj.Object}");
}
Console.WriteLine("Second usage of the object");
using (var obj = myPool.New())
{
Console.WriteLine($"obj#{obj.Object.GetHashCode()} = {obj.Object}"); // Contents should be clear!
obj.Object[0] = 42;
Console.WriteLine($"obj#{obj.Object.GetHashCode()} = {obj.Object}");
}
```
## Function memoization
Function memoization is a technique to cache the results of evaluating a pure function in order to speed up future invocations.
### A trivial example using the Fibonacci sequence
As an example, consider the well-known recursive Fibonacci generator.
```
Func<long, long> fib = null;
fib = n => n <= 1 ? 1 : checked(fib(n - 1) + fib(n - 2));
```
Evaluating the Fibonacci generator causes repeated evaluation of the same function with the same argument. For example:
```
fib(3) = fib(2) + fib(1)
fib(2) = fib(1) + fib(0)
```
Let's run the Fibonacci generator for a few values and time the execution.
```
using System.Diagnostics;
void PrintFibonacci(int max, TimeSpan maxTimeToCompute)
{
var sw = new Stopwatch();
for (int i = 0; i < max; i++)
{
sw.Restart();
long res = 0L;
try
{
res = fib(i);
}
catch (OverflowException)
{
Console.WriteLine($"fib({i}) = Overflow");
return;
}
sw.Stop();
Console.WriteLine($"fib({i}) = {res} - Took {sw.Elapsed}");
// Stop if it starts taking too long.
if (sw.Elapsed > maxTimeToCompute)
{
Console.WriteLine($"Aborted at iteration {i}. This is starting to take too long.");
break;
}
}
}
PrintFibonacci(100, TimeSpan.FromSeconds(5));
```
Most likely, you didn't get much further than some `40`-ish iterations. Let's use this example to illustrate memoization for function evaluation.
The first step to make memoization work is to create a so-called *memoization cache factory*. Each memoized function will have an associated memoization cache. Factories for such caches determine the policy of the cache. In the sample below we'll use an `Unbounded` cache which does not limit the number of entries in the cache. Other options are caches with least-recently-used (LRU) policies or other eviction policies.
```
using System.Memory;
IMemoizationCacheFactory factory = MemoizationCacheFactory.Unbounded;
```
Now that we have a cache factory, we can create a *memoizer* that will be used to memoize functions.
```
IMemoizer mem = Memoizer.Create(factory);
```
Finally, we use the memoizer to `Memoize` the function. After doing so, we end up with a pair of a cache and a memoized function of the same delegate type.
```
IMemoizedDelegate<Func<long, long>> memoizedDelegateFib = mem.Memoize(fib);
// The cache and delegate pair.
IMemoizationCache cache = memoizedDelegateFib.Cache;
Func<long, long> fibMemoized = memoizedDelegateFib.Delegate;
// Let's replace the original delegate by the memoized one, which was also used in the body of the recursive definition of fib.
fib = fibMemoized;
// Now we should get much further along.
PrintFibonacci(100, TimeSpan.FromSeconds(5));
```
To see what's going on, let's explore the cache.
```
cache.DebugView
```
While the output of `DebugView` is a bit spartan, note that we have `92` entries which contain the values of evaluating `fib(0)` through `fib(91)`. We can also go ahead and clear the cache manually, using the `Clear` method.
**Note:** The use of `Clear` is atypical but is sometimes useful after performing a lot of operations in a certain "phase" of execution in a program and where it makes sense to reclaim resources. Caches in Nuqleon are not actively maintained; there are no background threads or timers to prune caches when they're not in use. However, simply dropping the reference to the memoized delegate will also cause the cache to get garbage collected. That's often a more convenient approach to manage caches.
```
cache.Clear();
cache.DebugView
```
### Exploring memoization in more detail
To explore what's going on, let's craft a more sophisticated example using an instrumented function.
```
using System.Threading;
static double GetRadius(double x, double y)
{
Console.WriteLine($"GetRadius({x}, {y}) was called");
Thread.Sleep(1000);
return Math.Sqrt(x * x + y * y);
}
```
Invoking this function directly takes a little over a second to complete, mimicking the expense of a real function.
```
var sw = Stopwatch.StartNew();
Console.WriteLine($"GetRadius(3, 4) = {GetRadius(3, 4)} in {sw.Elapsed}");
```
By using memoization, we can cache and reuse the result. This time around, we'll create an LRU cache to explore cache policies.
```
using System.Memory;
IMemoizationCacheFactory factory = MemoizationCacheFactory.CreateLru(maxCapacity: 4);
IMemoizer memoizer = Memoizer.Create(factory);
```
Unlike our Fibonacci example, we start off with a method here, rather than a delegate. Furthermore, we have more than one parameter in this case. To pick the right overload of `Memoize`, we will be explicit about the parameter and result types. As a result, we'll end up with a `Func<double, double, double>` delegate that represents the memoized `GetRadius` method. While we're at it, we'll also explore other parameters of `Memoize`, all of which are optional and have suitable defaults.
```
IMemoizedDelegate<Func<double, double, double>> getRadiusMemoized = memoizer.Memoize<double, double, double>(GetRadius, MemoizationOptions.CacheException, EqualityComparer<double>.Default, EqualityComparer<double>.Default);
```
The first additional parameter is a `MemoizationOptions` enum which enables turning on caching of exceptions in case the function throws. This is off by default. In our example, this is obviously quite useless. The additional two parameters are `IEqualityComparer<T>` instances for the two inputs of the `GetRadius` function. These are used to look up existing `x, y` pairs in the cache when trying to find a match. An example where this can be useful is for functions that take in an array and one wants to check the array for element-wise equality.
**Note:** A concrete example of memoization is in `Nuqleon.Reflection.Virtualization` where expensive reflection calls get memoized. This touches on various design points mentioned here. For example, some APIs may throw an exception, and we may want to cache these. Also, APIs like `MakeGenericType` take in a `Type[]` and memoization requires a way to compare two such arrays for element-wise equality.
With the resulting memoized delegate, we can now see the behavior of repeated invocation of `GetRadius` with memoization applied.
```
var sw = Stopwatch.StartNew();
Console.WriteLine($"GetRadius(3, 4) = {getRadiusMemoized.Delegate(3, 4)} in {sw.Elapsed}");
sw.Restart();
Console.WriteLine($"GetRadius(3, 4) = {getRadiusMemoized.Delegate(3, 4)} in {sw.Elapsed}");
```
Note that the second invocation did not trigger the invocation of `GetRadius` and served up the result from the cache. Let's now print the cache's `DebugView`, clear the cache, and try to invoke the memoized delegate again.
```
Console.WriteLine(getRadiusMemoized.Cache.DebugView);
getRadiusMemoized.Cache.Clear();
sw.Restart();
Console.WriteLine($"GetRadius(3, 4) = {getRadiusMemoized.Delegate(3, 4)} in {sw.Elapsed}");
sw.Restart();
Console.WriteLine($"GetRadius(3, 4) = {getRadiusMemoized.Delegate(3, 4)} in {sw.Elapsed}");
```
Note that the `DebugView` output is much more verbose. This is because we're now using an LRU cache which has a much more elaborate `DebugView` to analyze what's going on. For our initial exploration, keep an eye on `Eviction count`, which will reflect the LRU behavior where the least recently used entry gets evicted from the cache. To illustrate this, let's invoke the memoized delegate with various inputs.
```
for (int i = 0; i < 2; i++)
{
foreach (var (x, y) in new[] {
(1, 2),
(2, 3),
(3, 4),
})
{
sw.Restart();
Console.WriteLine($"GetRadius({x}, {y}) = {getRadiusMemoized.Delegate(x, y)} in {sw.Elapsed}");
}
}
Console.WriteLine(getRadiusMemoized.Cache.DebugView);
```
Because we've only invoked the function with three distinct input pairs, we never ended up causing any eviction. The order of the entries in the `DebugView` shows the latest invocation at the top. Let's make another invocation for a unique input.
```
sw.Restart();
Console.WriteLine($"GetRadius(4, 5) = {getRadiusMemoized.Delegate(4, 5)} in {sw.Elapsed}");
Console.WriteLine(getRadiusMemoized.Cache.DebugView);
```
Now, the cache is full. To see the order of the entries change, we can make some more invocations with these input pairs. All accesses will be sped up because they get served from the cache.
```
foreach (var (x, y) in new[] {
(4, 5),
(1, 2),
(2, 3),
(3, 4),
})
{
sw.Restart();
Console.WriteLine($"GetRadius({x}, {y}) = {getRadiusMemoized.Delegate(x, y)} in {sw.Elapsed}");
}
Console.WriteLine(getRadiusMemoized.Cache.DebugView);
```
Now, the input pair `(4, 5)` is the least recently used one. Let's try to invoke the memoized function with a new unique input pair, and see this entry getting evicted.
```
sw.Restart();
Console.WriteLine($"GetRadius(5, 6) = {getRadiusMemoized.Delegate(5, 6)} in {sw.Elapsed}");
Console.WriteLine(getRadiusMemoized.Cache.DebugView);
```
The eviction count is `1` now. If we try to invoke the delegate with inputs `(4, 5)` again, we'll see `GetRadius` getting invoked again. This time, `(1, 2)` is the least recently used entry which will get evicted.
```
sw.Restart();
Console.WriteLine($"GetRadius(4, 5) = {getRadiusMemoized.Delegate(4, 5)} in {sw.Elapsed}");
Console.WriteLine(getRadiusMemoized.Cache.DebugView);
```
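As an aside (not part of Nuqleon itself), the same LRU-bounded memoization idea can be sketched in Python with `functools.lru_cache`, where `maxsize=4` plays the role of `maxCapacity: 4` above. This is only an illustrative analogy, not the Nuqleon API.
```
# Illustrative analogy only (Python, not Nuqleon): LRU-bounded memoization.
from functools import lru_cache
import math, time

@lru_cache(maxsize=4)  # keep at most 4 distinct (x, y) results; evict least recently used
def get_radius(x, y):
    time.sleep(1)      # mimic an expensive computation
    return math.sqrt(x * x + y * y)

get_radius(3, 4)                 # slow: computed and cached
get_radius(3, 4)                 # fast: served from the cache
print(get_radius.cache_info())   # hits, misses, maxsize, currsize
```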
### More advanced cache policies
In the samples above we've seen the use of an unbounded and an LRU-based cache for memoization. The `Nuqleon.Memory` library also supports more advanced cache management schemes.
A first example is the use of `CreateEvictedBy[Highest|Lowest]` for any metric on `IMemoizationCacheEntryMetrics`. In fact, the LRU policy is merely performing an eviction based on the `LastAccessTime` metric that's kept for entries in the cache. In the sample below, we'll use the `SpeedupFactor` metric, which represents the ratio between the time it took to invoke the function for the given arguments, prior to caching the result, and the time taken by subsequent invocations served from the cache.
```
static int GetValueDelayed(int x, int ms)
{
Thread.Sleep(ms);
return x;
}
```
The `GetValueDelayed` function illustrates the difference in time needed to invoke a function based on its arguments. We can now memoize the function using `CreateEvictedByLowest` with the `SpeedupFactor` metric. We'll also limit the cache to 4 entries using the `maxCapacity` parameter.
**Note:** The `ageThreshold` parameter is slightly more complex. Every time the memoization cache gets accessed, the cache entry that was used to satisfy the request (i.e. either an existing entry or a freshly created one) is moved to the top of an internal data structure. This keeps the entries ordered by last access time, which is directly usable for LRU policies. When an eviction has to be made based on another metric, the tail of this list of entries is used to find a candidate, excluding the most recent items. This is done to give recent items a chance to gather more statistically relevant data, especially for new entries that shouldn't get evicted immediately. The `ageThreshold` specifies this cut-off point. By setting it to `1.0` rather than the default of `0.9`, we consider all cache entries as valid eviction candidates.
```
var factory = MemoizationCacheFactory.CreateEvictedByLowest(metric => metric.SpeedupFactor, maxCapacity: 4, ageThreshold: 1.0);
var memoizer = Memoizer.Create(factory);
var getValueDelayedMemoized = memoizer.Memoize<int, int, int>(GetValueDelayed);
```
Also note that the delegate passed to `CreateEvictedByLowest` can contain any computation based on the given metrics, so users are free to compute other derived metrics in case the built-in ones do not meet certain criteria. Using this delegate it's also possible to create a random eviction policy, simply by returning a random number.
Now we'll go ahead and invoke the memoized function a number of times with different inputs, which will cause the computation of metrics for each entry, as shown by dumping the cache's `DebugView`.
```
for (int i = 0; i < 100; i++)
{
for (int j = 1; j <= 4; j++)
{
getValueDelayedMemoized.Delegate(42, j * 10);
}
}
Console.WriteLine(getValueDelayedMemoized.Cache.DebugView);
```
The reported speed up factor for the different entries will differ slightly after the decimal point, where the 40ms invocation of the function has the highest speed up and the 10ms invocation of the function has the lowest speed up. Upon doing a new invocation that requires the eviction of an entry, the entry with the lowest speed up will be evicted. In the cell below, we invoke the function with a different argument value to cause eviction.
```
getValueDelayedMemoized.Delegate(43, 10);
Console.WriteLine(getValueDelayedMemoized.Cache.DebugView);
```
Memoization caches also support direct trimming using an interface called `ITrimmable`. Different types of trimming are possible, for example based on metrics. This is illustrated in the cell below where we drop cache entries with a `HitCount` less than `10`.
```
int trimCount = getValueDelayedMemoized.Cache.ToTrimmableByMetrics().Trim(metric => metric.HitCount < 10);
Console.WriteLine($"Trimmed {trimCount} entries.");
Console.WriteLine(getValueDelayedMemoized.Cache.DebugView);
```
### Thread safety of memoization caches
By default, memoization caches returned from memoization cache factories are **not** thread-safe. This is a deliberate design choice in order to avoid overheads for single-threaded scenarios. In order to create thread-safe memoizers, one can use a few different approaches.
* Use `ConcurrentMemoizationCache` instead of `MemoizationCache`.
* Use the `Synchronized` extension method on memoization cache factories.
* Use the `WithThreadLocal` extension method on memoization cache factories.
All of these return an `IMemoizationCacheFactory` that produces caches with thread-safe behavior. Alternatively, one can memoize the same function multiple times, on different threads, and ensure that each memoized delegate is only invoked from the thread it was created on.
In the sample below, we use `WithThreadLocal` to cause memoization caches to be allocated on each distinct thread.
```
using System.Memory;
var factory = MemoizationCacheFactory.CreateLru(maxCapacity: 8).WithThreadLocal();
var memoizer = Memoizer.Create(factory);
var f = memoizer.Memoize((int x) =>
{
Console.WriteLine($"~{Environment.CurrentManagedThreadId} - f({x})");
return x + 1;
});
```
Next, let's use the cache from two different threads. Each of them will have its own cache.
```
using System.Threading;
var t1 = new Thread(() =>
{
f.Delegate(1);
f.Delegate(2);
f.Delegate(1); // used from thread-local cache
Console.WriteLine(f.Cache.DebugView);
});
t1.Start();
t1.Join();
var t2 = new Thread(() =>
{
f.Delegate(1); // unique cache on this thread
f.Delegate(2);
f.Delegate(1); // used from thread-local cache
Console.WriteLine(f.Cache.DebugView);
});
t2.Start();
t2.Join();
```
### Intern caches
Intern caches are often used to deduplicate instances of immutable objects based on value equality. The best-known example is `string.Intern(string)`, which deduplicates strings. For example, the result of calling `"BAR".ToLower()` can get deduplicated by `string.Intern` if an existing string with contents `"bar"` exists. The old copy with identical contents can then be garbage collected.
Memoization caches can be used to construct intern caches, simply by memoizing an identity function `(T x) => x` using an `IEqualityComparer<T>` that checks for value equality. As an example, let's build an intern cache for `ReadOnlyCollection<T>` objects. First, we'll create an `IEqualityComparer<ReadOnlyCollection<T>>` implementation for such immutable collections, using pairwise element equality.
```
using System.Collections.Generic;
using System.Linq;
class SequenceEqualityComparer<T> : IEqualityComparer<IEnumerable<T>>
{
public bool Equals(IEnumerable<T> xs, IEnumerable<T> ys)
{
if (xs is null)
{
return ys is null;
}
if (ys is null)
{
return false;
}
return xs.SequenceEqual(ys);
}
public int GetHashCode(IEnumerable<T> xs)
{
HashCode h = new();
if (xs is not null)
{
foreach (var x in xs)
{
h.Add(x);
}
}
return h.ToHashCode();
}
}
```
Next, we'll create an intern cache.
```
using System.Collections.ObjectModel;
using System.Memory;
IInternCache<ReadOnlyCollection<int>> cache = MemoizationCacheFactory.Unbounded.CreateInternCache<ReadOnlyCollection<int>>(new SequenceEqualityComparer<int>());
```
Finally, we can try out our cache by instantiating multiple copies of a `ReadOnlyCollection<int>` with the same contents and running them through `Intern`.
```
var xs = new ReadOnlyCollection<int>(Enumerable.Range(0, 10).ToArray());
Console.WriteLine($"xs.GetHashCode() = {xs.GetHashCode()}");
xs = cache.Intern(xs);
Console.WriteLine($"xs.GetHashCode() = {xs.GetHashCode()} after interning");
var ys = new ReadOnlyCollection<int>(Enumerable.Range(0, 10).ToArray());
Console.WriteLine($"ys.GetHashCode() = {ys.GetHashCode()}");
ys = cache.Intern(ys);
Console.WriteLine($"ys.GetHashCode() = {ys.GetHashCode()} after interning");
```
```
%matplotlib inline
```
# Scaling the regularization parameter for SVCs
The following example illustrates the effect of scaling the
regularization parameter when using an SVM for classification.
For SVC classification, we are interested in a risk minimization for the
equation:
\begin{align}C \sum_{i=1}^{n} \mathcal{L} (f(x_i), y_i) + \Omega (w)\end{align}
where
- $C$ is used to set the amount of regularization
- $\mathcal{L}$ is a `loss` function of our samples
and our model parameters.
- $\Omega$ is a `penalty` function of our model parameters
If we consider the loss function to be the individual error per
sample, then the data-fit term, or the sum of the error for each sample, will
increase as we add more samples. The penalization term, however, will not
increase.
When using, for example, cross-validation to set the amount of
regularization with `C`, there will be a different number of samples
between the main problem and the smaller problems within the folds of the
cross-validation.
Since our loss function is dependent on the number of samples, the latter
will influence the selected value of `C`.
The question that arises is: how do we optimally adjust `C` to
account for the different number of training samples?
The figures below are used to illustrate the effect of scaling our
`C` to compensate for the change in the number of samples, in the
case of using an `l1` penalty, as well as the `l2` penalty.
l1-penalty case
-----------------
In the `l1` case, theory says that prediction consistency
(i.e. that under a given hypothesis the learned estimator
predicts as well as a model knowing the true distribution)
is not possible because of the bias of the `l1` penalty. It does say, however,
that model consistency, in terms of finding the right set of non-zero
parameters as well as their signs, can be achieved by scaling
`C`.
l2-penalty case
-----------------
The theory says that in order to achieve prediction consistency, the
penalty parameter should be kept constant
as the number of samples grow.
Simulations
------------
The two figures below plot the values of `C` on the `x-axis` and the
corresponding cross-validation scores on the `y-axis`, for several different
fractions of a generated data-set.
In the `l1` penalty case, the cross-validation error correlates best with
the test error when scaling our `C` with the number of samples, `n`,
as can be seen in the first figure.
For the `l2` penalty case, the best result comes from the case where `C`
is not scaled.
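As a minimal sketch of that scaling rule (not part of the original example; `C_base` and the train fractions below are arbitrary illustrative choices), one could grow `C` with the training-set size for the `l1`-penalized estimator:
```
# Sketch only: scale C with the number of training samples in the l1 case.
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.svm import LinearSVC

X, y = make_classification(n_samples=100, n_features=300, n_informative=5, random_state=1)

C_base = 0.01
for train_fraction in (0.3, 0.5, 0.7):
    X_tr, X_te, y_tr, y_te = train_test_split(X, y, train_size=train_fraction, random_state=1)
    C_scaled = C_base * len(X_tr)  # grow C with the training-set size
    clf = LinearSVC(penalty='l1', loss='squared_hinge', dual=False, C=C_scaled, tol=1e-3)
    clf.fit(X_tr, y_tr)
    print(len(X_tr), round(C_scaled, 3), round(clf.score(X_te, y_te), 3))
```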
**Note:** Two separate datasets are used for the two different plots. The reason
behind this is that the `l1` case works better on sparse data, while `l2`
is better suited to the non-sparse case.
--
**NOTE:** This is sourced from the ```scikit-learn``` examples gallery, found here:
https://scikit-learn.org/stable/auto_examples/svm/plot_svm_scale_c.html#sphx-glr-auto-examples-svm-plot-svm-scale-c-py
--
```
print(__doc__)
# Author: Andreas Mueller <[email protected]>
# Jaques Grobler <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import LinearSVC
from sklearn.model_selection import ShuffleSplit
from sklearn.model_selection import GridSearchCV
from sklearn.utils import check_random_state
from sklearn import datasets
rnd = check_random_state(1)
# set up dataset
n_samples = 100
n_features = 300
# l1 data (only 5 informative features)
X_1, y_1 = datasets.make_classification(n_samples=n_samples,
n_features=n_features, n_informative=5,
random_state=1)
# l2 data: non sparse, but less features
y_2 = np.sign(.5 - rnd.rand(n_samples))
X_2 = rnd.randn(n_samples, n_features // 5) + y_2[:, np.newaxis]
X_2 += 5 * rnd.randn(n_samples, n_features // 5)
clf_sets = [(LinearSVC(penalty='l1', loss='squared_hinge', dual=False,
tol=1e-3),
np.logspace(-2.3, -1.3, 10), X_1, y_1),
(LinearSVC(penalty='l2', loss='squared_hinge', dual=True,
tol=1e-4),
np.logspace(-4.5, -2, 10), X_2, y_2)]
colors = ['navy', 'cyan', 'darkorange']
lw = 2
for clf, cs, X, y in clf_sets:
# set up the plot for each regressor
fig, axes = plt.subplots(nrows=2, sharey=True, figsize=(9, 10))
for k, train_size in enumerate(np.linspace(0.3, 0.7, 3)[::-1]):
param_grid = dict(C=cs)
# To get nice curve, we need a large number of iterations to
# reduce the variance
grid = GridSearchCV(clf, refit=False, param_grid=param_grid,
cv=ShuffleSplit(train_size=train_size,
test_size=.3,
n_splits=250, random_state=1))
grid.fit(X, y)
scores = grid.cv_results_['mean_test_score']
scales = [(1, 'No scaling'),
((n_samples * train_size), '1/n_samples'),
]
for ax, (scaler, name) in zip(axes, scales):
ax.set_xlabel('C')
ax.set_ylabel('CV Score')
grid_cs = cs * float(scaler) # scale the C's
ax.semilogx(grid_cs, scores, label="fraction %.2f" %
train_size, color=colors[k], lw=lw)
ax.set_title('scaling=%s, penalty=%s, loss=%s' %
(name, clf.penalty, clf.loss))
plt.legend(loc="best")
plt.show()
```
## 1. Importing required modules
```
import matplotlib.pyplot as plt
from matplotlib import style
import seaborn as sns
#configure
#sets matplotlib to inline and displays graphs below the corresponding cell.
%matplotlib inline
style.use('fivethirtyeight')
sns.set(style='whitegrid',color_codes=True)
#model selection
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from sklearn.metrics import accuracy_score,precision_score,recall_score,confusion_matrix,roc_curve,roc_auc_score
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import LabelEncoder
#preprocess.
from keras.preprocessing.image import ImageDataGenerator
#dl libraries
from keras import backend as K
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import SGD, Adam
from keras.utils import to_categorical
# specifically for cnn
from keras.layers import Dropout, Flatten,Activation
from keras.layers import Conv2D, MaxPooling2D, BatchNormalization
import tensorflow as tf
import random as rn
# specifically for manipulating zipped images and getting numpy arrays of pixel values of images.
import cv2
import numpy as np
from tqdm import tqdm
import os
from random import shuffle
from zipfile import ZipFile
from PIL import Image
```
## 2. Preparing the data
### 2.1 Making the functions to get the training and validation set from the Images
```
X = []
Z = []
IMG_SIZE = 30
classes = 43
def make_train_data(classes):
    for i in range(classes):
        path = "../input/gtsrb-german-traffic-sign/Train/{}/".format(i)
        class_image = os.listdir(path)
        for j in class_image:
            img = cv2.imread(path+j, cv2.IMREAD_COLOR)
            # Skip files that OpenCV could not read before converting them.
            if img is not None:
                train_image_from_array = Image.fromarray(img, 'RGB')
                train_label = int(i)
                resized_train_img = train_image_from_array.resize((IMG_SIZE,IMG_SIZE))
                X.append(np.array(resized_train_img))
                Z.append(train_label)
make_train_data(classes)
print(len(X))
print(len(Z))
```
### 2.2 Visualizing random images
```
fig,ax=plt.subplots(5,3)
fig.set_size_inches(15,15)
for i in range(5):
for j in range(3):
        l = rn.randint(0, len(Z)-1)
ax[i,j].imshow(X[l])
ax[i,j].set_title('Sign ID : '+ str(Z[l]))
plt.tight_layout()
```
### 2.3 Label Encoding the Y array & then One Hot Encoding
```
le = LabelEncoder()
Y = le.fit(Z)
Y = le.transform(Z)
Y = to_categorical(Y, classes)
X = np.array(X)
X = X / 255
print("Shape of X:{}".format(X.shape))
print("Shape of Y:{}".format(Y.shape))
```
### 2.4 Splitting into Training and Validation Sets
```
x_train,x_val,y_train,y_val = train_test_split(X, Y, test_size=0.25, random_state=42)
print("Shape of x_train:{}".format(x_train.shape))
print("Shape of y_train:{}".format(y_train.shape))
print("Shape of x_val:{}".format(x_val.shape))
print("Shape of y_val:{}".format(y_val.shape))
```
### 2.5 Setting the random seeds
```
np.random.seed(42)
rn.seed(42)
tf.random.set_seed(42)
```
## 3. Building the model
### 3.1 Specifying the VGG-16 model in Keras
```
model = Sequential()
model.add(Conv2D(filters=32, kernel_size=(3,3),strides=(1,1), padding='same', activation='relu',input_shape=(IMG_SIZE,IMG_SIZE,3)))
model.add(Conv2D(filters=32, kernel_size=(3,3),strides=(1,1), padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2), strides=2, padding='same'))
model.add(Dropout(rate=0.25))
model.add(Conv2D(filters=64, kernel_size=(3,3),strides=(1,1), padding='same', activation='relu'))
model.add(Conv2D(filters=64, kernel_size=(3,3),strides=(1,1), padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2), strides=2, padding='same'))
model.add(Dropout(rate=0.25))
model.add(Conv2D(filters=128, kernel_size=(3,3),strides=(1,1), padding='same', activation='relu'))
model.add(Conv2D(filters=128, kernel_size=(3,3),strides=(1,1), padding='same', activation='relu'))
model.add(Conv2D(filters=128, kernel_size=(3,3),strides=(1,1), padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2), strides=2, padding='same'))
model.add(Dropout(rate=0.25))
# model.add(Conv2D(filters=256, kernel_size=(3,3),strides=(1,1), padding='same', activation='relu'))
# model.add(Conv2D(filters=256, kernel_size=(3,3),strides=(1,1), padding='same', activation='relu'))
# model.add(Conv2D(filters=256, kernel_size=(3,3),strides=(1,1), padding='same', activation='relu'))
# model.add(MaxPooling2D(pool_size=(2,2), strides=2, padding='same'))
# model.add(Dropout(rate=0.25))
# model.add(Conv2D(filters=256, kernel_size=(3,3),strides=(1,1), padding='same', activation='relu'))
# model.add(Conv2D(filters=256, kernel_size=(3,3),strides=(1,1), padding='same', activation='relu'))
# model.add(Conv2D(filters=256, kernel_size=(3,3),strides=(1,1), padding='same', activation='relu'))
# model.add(MaxPooling2D(pool_size=(2,2), strides=2, padding='same'))
model.add(Flatten())
model.add(Dense(4096))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(4096))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(43, activation='softmax'))
```
### 3.2 Using LR annealer
```
from keras.callbacks import ReduceLROnPlateau, EarlyStopping
# datagen = ImageDataGenerator(
# featurewise_center=False, # set input mean to 0 over the dataset
# samplewise_center=False, # set each sample mean to 0
# featurewise_std_normalization=False, # divide inputs by std of the dataset
# samplewise_std_normalization=False, # divide each input by its std
# zca_whitening=False, # apply ZCA whitening
# rotation_range=10, # randomly rotate images in the range (degrees, 0 to 180)
# zoom_range = 0.1, # Randomly zoom image
# width_shift_range=0.2, # randomly shift images horizontally (fraction of total width)
# height_shift_range=0.2, # randomly shift images vertically (fraction of total height)
# horizontal_flip=True, # randomly flip images
# vertical_flip=False) # randomly flip images
# datagen.fit(x_train)
red_lr = ReduceLROnPlateau(monitor='val_accuracy',patience=3,verbose=1,factor=0.1)
```
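`EarlyStopping` is imported above but never used. As an optional sketch (the patience value is an arbitrary choice, not from the original notebook), it could be combined with the LR annealer:
```
# Optional sketch: stop training once val_accuracy stops improving.
from keras.callbacks import EarlyStopping

early_stop = EarlyStopping(monitor='val_accuracy', patience=6, verbose=1, restore_best_weights=True)
# Then pass both callbacks to fit, e.g. callbacks=[red_lr, early_stop].
```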
### 3.3 Specifying the batch size and epochs and then compiling the model.
```
batch_size=128
epochs=20
#model.compile(optimizer=SGD(lr=0.01, momentum=0.9),loss='categorical_crossentropy',metrics=['accuracy']) # Original VGG Parameters
model.compile(optimizer=Adam(lr=0.001),loss='categorical_crossentropy',metrics=['accuracy'])
```
### 3.4 Model summary
```
model.summary()
```
### 3.5 Training the model and making predictions on the validation set.
```
# History = model.fit_generator(datagen.flow(x_train,y_train, batch_size=batch_size),
# epochs=epochs, validation_data = (x_val,y_val),
# verbose = 1, steps_per_epoch=x_train.shape[0] // 128, callbacks=[red_lr])
History = model.fit(x_train,y_train, batch_size=batch_size,
epochs=epochs, validation_data = (x_val,y_val),
verbose = 1, callbacks=[red_lr])
```
## 4. Plotting and evaluating the model performance
### 4.1 Accuracy graph against the number of epochs
```
plt.plot(History.history['accuracy'])
plt.plot(History.history['val_accuracy'])
plt.title('Model Accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epochs')
plt.legend(['train', 'test'])
plt.show()
```
### 4.2 Loss function graph against number of epochs
```
plt.figure(1)
plt.plot(History.history['loss'], label='training loss')
plt.plot(History.history['val_loss'], label='val loss')
plt.title('Loss')
plt.xlabel('epochs')
plt.ylabel('loss')
plt.legend()
```
## 5. Evaluating Predictions on the Test Set
```
#Predicting with the test data
import pandas as pd
y_test=pd.read_csv("../input/gtsrb-german-traffic-sign/Test.csv")
labels=y_test['Path'].to_numpy()
y_test=y_test['ClassId'].values
data=[]
for f in labels:
image=cv2.imread('../input/gtsrb-german-traffic-sign/test/'+f.replace('Test/', ''))
image_from_array = Image.fromarray(image, 'RGB')
size_image = image_from_array.resize((IMG_SIZE, IMG_SIZE))
data.append(np.array(size_image))
x_test=np.array(data)
x_test = x_test.astype('float32')/255
pred = model.predict_classes(x_test)
from sklearn.metrics import accuracy_score
accuracy_score(y_test, pred)
```
## 6. Calculating the number of correctly and incorrectly classified images.
```
i=0
prop_class=[]
mis_class=[]
for i in range(len(y_test)):
if(y_test[i] == pred[i]):
prop_class.append(i)
for i in range(len(y_test)):
if(y_test[i] != pred[i]):
mis_class.append(i)
print("Number of test images: {}".format(len(y_test)))
print("Number of correctly classified images: %d" %(len(prop_class)))
print("Number of incorrectly classified images: %d" %(len(mis_class)))
```
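As an optional sketch (using the `confusion_matrix` already imported at the top, the `y_test` and `pred` arrays computed above, and assuming all 43 sign classes appear in the test set), a per-class accuracy breakdown could look like this:
```
# Optional sketch: per-class accuracy from the confusion matrix.
from sklearn.metrics import confusion_matrix
import numpy as np

cm = confusion_matrix(y_test, pred)
per_class_acc = cm.diagonal() / cm.sum(axis=1)
for sign_id, acc in enumerate(per_class_acc):
    print("Sign ID {:2d}: accuracy {:.3f}".format(sign_id, acc))
```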
## 7. Correctly classified images visualized
```
fig,ax=plt.subplots(5,3)
fig.set_size_inches(15,15)
count=0
for i in range(5):
for j in range(3):
ax[i,j].imshow(x_test[prop_class[count]])
ax[i,j].set_title("Predicted Sign ID : "+str(pred[prop_class[count]])+"\n"+"Actual Sign ID : "+str(y_test[prop_class[count]]))
plt.tight_layout()
count+=1
```
## 8. Incorrectly classified images visualized
```
fig,ax=plt.subplots(5,3)
fig.set_size_inches(15,15)
count=0
for i in range(5):
for j in range(3):
ax[i,j].imshow(x_test[mis_class[count]])
ax[i,j].set_title("Predicted Sign ID : "+str(pred[mis_class[count]])+"\n"+"Actual Sign ID : "+str(y_test[mis_class[count]]))
plt.tight_layout()
count+=1
```
# Convolutional Neural Networks
---
In this notebook, we'll show you how to calculate VGG-16 bottleneck features on a toy dataset. Note that unless you have a powerful GPU, computing the bottleneck features takes a significant amount of time.
This is an example of how to compute the actual bottleneck features, whereas in the subsequent notebook we download and use premade bottleneck features.
### 1. Load and Preprocess Sample Images
Before supplying an image to a pre-trained network in Keras, there are some required preprocessing steps. You will learn more about this in the project; for now, we have implemented this functionality for you in the first code cell of the notebook. We have imported a very small dataset of 8 images and stored the preprocessed image input as `img_input`. Note that the dimensionality of this array is `(8, 224, 224, 3)`. In this case, each of the 8 images is a 3D tensor, with shape `(224, 224, 3)`.
```
from keras.applications.vgg16 import preprocess_input
from keras.preprocessing import image
import numpy as np
import glob
img_paths = glob.glob("images/*.jpg")
def path_to_tensor(img_path):
# loads RGB image as PIL.Image.Image type
img = image.load_img(img_path, target_size=(224, 224))
# convert PIL.Image.Image type to 3D tensor with shape (224, 224, 3)
x = image.img_to_array(img)
# convert 3D tensor to 4D tensor with shape (1, 224, 224, 3) and return 4D tensor
return np.expand_dims(x, axis=0)
def paths_to_tensor(img_paths):
list_of_tensors = [path_to_tensor(img_path) for img_path in img_paths]
return np.vstack(list_of_tensors)
# calculate the image input. you will learn more about how this works the project!
img_input = preprocess_input(paths_to_tensor(img_paths))
print(img_input.shape)
```
### 2. Recap How to Import VGG-16
Recall how we import the VGG-16 network (including the final classification layer) that has been pre-trained on ImageNet.

```
from keras.applications.vgg16 import VGG16
model = VGG16()
model.summary()
```
For this network, `model.predict` returns a 1000-dimensional probability vector containing the predicted probability that an image belongs to each of the 1000 ImageNet categories. The dimensionality of the obtained output from passing `img_input` through the model is `(8, 1000)`. The first value of `8` merely denotes that 8 images were passed through the network.
```
model.predict(img_input).shape
```
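As an optional aside, Keras ships a `decode_predictions` helper that maps these 1000 probabilities to human-readable ImageNet labels. A short sketch, reusing the `model`, `img_input`, and `img_paths` defined above:
```
# Optional sketch: show the top-3 ImageNet labels for each of the 8 images.
from keras.applications.vgg16 import decode_predictions

preds = model.predict(img_input)
for img_path, top in zip(img_paths, decode_predictions(preds, top=3)):
    print(img_path, [(label, float(score)) for (_, label, score) in top])
```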
### 3. Import the VGG-16 Model, with the Final Fully-Connected Layers Removed
When performing transfer learning, we need to remove the final layers of the network, as they are too specific to the ImageNet database. This is accomplished in the code cell below.

```
from keras.applications.vgg16 import VGG16
model = VGG16(include_top=False)
model.summary()
```
### 4. Extract Output of Final Max Pooling Layer
Now, the network stored in `model` is a truncated version of the VGG-16 network, where the final three fully-connected layers have been removed. In this case, `model.predict` returns, for each image, a 3D array (with dimensions $7\times 7\times 512$) corresponding to the final max pooling layer of VGG-16. The dimensionality of the obtained output from passing `img_input` through the model is `(8, 7, 7, 512)`. The first value of `8` merely denotes that 8 images were passed through the network.
```
print(model.predict(img_input).shape)
```
This is exactly how we calculate the bottleneck features for the project!
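If you want to reuse these features later, for example to train a small classifier on top of them, one simple option is to save the array to disk. A sketch (the file name is an arbitrary choice):
```
# Sketch: persist the bottleneck features for later reuse.
import numpy as np

bottleneck_features = model.predict(img_input)  # shape (8, 7, 7, 512)
np.savez('bottleneck_features_sample.npz', features=bottleneck_features)
```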
<em><sub>This page is available as an executable or viewable <strong>Jupyter Notebook</strong>:</sub></em>
<br/><br/>
<a href="https://mybinder.org/v2/gh/JetBrains/lets-plot/v1.5.2demos1?filepath=docs%2Fexamples%2Fjupyter-notebooks%2Ferror_bars.ipynb"
target="_parent">
<img align="left"
src="https://mybinder.org/badge_logo.svg">
</a>
<a href="https://nbviewer.jupyter.org/github/JetBrains/lets-plot/blob/master/docs/examples/jupyter-notebooks/error_bars.ipynb"
target="_parent">
<img align="right"
src="https://raw.githubusercontent.com/jupyter/design/master/logos/Badges/nbviewer_badge.png"
width="109" height="20">
</a>
<br/>
<br/>
```
from lets_plot import *
LetsPlot.setup_html()
```
### Plotting means and error ranges.
There are several ways to show error ranges on a plot. Among them are
- *geom_errorbar*
- *geom_crossbar*
- *geom_linerange*
- *geom_pointrange*
```
# This example was found at: www.cookbook-r.com/Graphs/Plotting_means_and_error_bars_(ggplot2)
data = dict(
supp = ['OJ', 'OJ', 'OJ', 'VC', 'VC', 'VC'],
dose = [0.5, 1.0, 2.0, 0.5, 1.0, 2.0],
length = [13.23, 22.70, 26.06, 7.98, 16.77, 26.14],
len_min = [11.83, 21.2, 24.50, 4.24, 15.26, 23.35],
len_max = [15.63, 24.9, 27.11, 10.72, 19.28, 28.93]
)
p = ggplot(data, aes(x='dose', color='supp'))
```
### Error-bars with lines and points.
```
p + geom_errorbar(aes(ymin='len_min', ymax='len_max'), width=.1) \
+ geom_line(aes(y='length')) \
+ geom_point(aes(y='length'))
# The errorbars overlapped, so use position_dodge to move them horizontally
pd = position_dodge(0.1) # move them .05 to the left and right
p + geom_errorbar(aes(ymin='len_min', ymax='len_max'), width=.1, position=pd) \
+ geom_line(aes(y='length'), position=pd) \
+ geom_point(aes(y='length'), position=pd)
# Black errorbars - notice the mapping of 'group=supp'
# Without it, the errorbars won't be dodged!
p + geom_errorbar(aes(ymin='len_min', ymax='len_max', group='supp'), color='black', width=.1, position=pd) \
+ geom_line(aes(y='length'), position=pd) \
+ geom_point(aes(y='length'), position=pd, size=5)
# Finished graph
# - fixed size
# - point shape # 21 is filled circle
# - position legend in bottom right
p1 = p \
+ xlab("Dose (mg)") \
+ ylab("Tooth length (mm)") \
+ scale_color_manual(['orange', 'dark_green'], na_value='gray') \
+ ggsize(700, 400)
p1 + geom_errorbar(aes(ymin='len_min', ymax='len_max', group='supp'), color='black', width=.1, position=pd) \
+ geom_line(aes(y='length'), position=pd) \
+ geom_point(aes(y='length'), position=pd, size=5, shape=21, fill="white") \
+ theme(legend_justification=[1,0], legend_position=[1,0]) \
+ ggtitle("The Effect of Vitamin C on Tooth Growth in Guinea Pigs")
```
### Error-bars on bar plot.
```
# Plot error ranges on Bar plot
p1 \
+ geom_bar(aes(y='length', fill='supp'), stat='identity', position='dodge', color='black') \
+ geom_errorbar(aes(ymin='len_min', ymax='len_max', group='supp'), color='black', width=.1, position=position_dodge(0.9)) \
+ theme(legend_justification=[0,1], legend_position=[0,1])
```
### Crossbars.
```
# Thickness of the horizontal mid-line can be adjusted using the `fatten` parameter.
p1 + geom_crossbar(aes(ymin='len_min', ymax='len_max', middle='length', color='supp'), fatten=5)
```
### Line-range.
```
p1 \
+ geom_linerange(aes(ymin='len_min', ymax='len_max', color='supp'), position=pd) \
+ geom_line(aes(y='length'), position=pd)
```
### Point-range
```
# Point-range is the same as line-range but with an added mid-point.
p1 \
+ geom_pointrange(aes(y='length', ymin='len_min', ymax='len_max', color='supp'), position=pd) \
+ geom_line(aes(y='length'), position=pd)
# Size of the mid-point can be adjusted using the `fatten` parameter - a multiplication factor relative to the line size.
p1 \
+ geom_line(aes(y='length'), position=pd) \
+ geom_pointrange(aes(y='length', ymin='len_min', ymax='len_max', fill='supp'), position=pd, color='rgb(230, 230, 230)', size=5, shape=23, fatten=1) \
+ scale_fill_manual(['orange', 'dark_green'], na_value='gray')
```
## This notebook will help you train a vanilla Point-Cloud AE with the basic architecture we used in our paper.
(it assumes latent_3d_points is in the PYTHONPATH and the structural losses have been compiled)
```
!which python
import sys
sys.path.append("/home/ubuntu/")
import os.path as osp
from latent_3d_points.src.ae_templates import mlp_architecture_ala_iclr_18, default_train_params
from latent_3d_points.src.autoencoder import Configuration as Conf
from latent_3d_points.src.point_net_ae import PointNetAutoEncoder
from latent_3d_points.src.in_out import snc_category_to_synth_id, create_dir, PointCloudDataSet, \
load_all_point_clouds_under_folder
from latent_3d_points.src.tf_utils import reset_tf_graph
from latent_3d_points.src.general_utils import plot_3d_point_cloud
%load_ext autoreload
%autoreload 2
%matplotlib inline
```
Define Basic Parameters
```
top_out_dir = '../data/' # Use to save Neural-Net check-points etc.
top_in_dir = '../data/shape_net_core_uniform_samples_2048/' # Top-dir of where point-clouds are stored.
experiment_name = 'single_class_ae'
n_pc_points = 2048 # Number of points per model.
bneck_size = 128 # Bottleneck-AE size
ae_loss = 'chamfer' # Loss to optimize: 'emd' or 'chamfer'
class_name = raw_input('Give me the class name (e.g. "chair"): ').lower()
```
Load Point-Clouds
```
syn_id = snc_category_to_synth_id()[class_name]
class_dir = osp.join(top_in_dir , syn_id)
all_pc_data = load_all_point_clouds_under_folder(class_dir, n_threads=8, file_ending='.ply', verbose=True)
```
==================================EXPERIMENTS======================================
```
all_pc_data.point_clouds[0].shape
plot_3d_point_cloud(all_pc_data.point_clouds[0][:,0], all_pc_data.point_clouds[0][:,1], all_pc_data.point_clouds[0][:,2])
from latent_3d_points.src.in_out import load_point_clouds_from_filenames, pc_loader
fnames = ["../data/shapenet_dim32_sdf_pc/04379243/fff7f07d1c4042f8a946c24c4f9fb58e__7__.ply"]
pclouds, model_ids, syn_ids = load_point_clouds_from_filenames(fnames, 1, loader=pc_loader, verbose=True)
pclouds.shape
plot_3d_point_cloud(pclouds[0][:,0], pclouds[0][:,1], pclouds[0][:,2])
from latent_3d_points.src.in_out import load_point_clouds_from_filenames2, pc_loader, files_in_subdirs
from latent_3d_points.src.in_out import PointCloudDataSet
def load_all_point_clouds_under_folder2(top_dir, n_threads=20, file_ending='.ply', verbose=False):
file_names = [f for f in files_in_subdirs(top_dir, file_ending)]
pclouds, model_ids, syn_ids = load_point_clouds_from_filenames(file_names, n_threads, loader=pc_loader, verbose=verbose)
return PointCloudDataSet(np.array(pclouds), labels=syn_ids + '_' + model_ids, init_shuffle=False)
file_names = [f for f in files_in_subdirs("../data/shapenet_dim32_sdf_pc/04379243", ".ply")]
pclouds, model_ids, syn_ids = load_point_clouds_from_filenames2(file_names, 8, loader=pc_loader, verbose=True)
import numpy as np
np.array(pclouds).shape
sizes=[]
for p in pclouds:
sizes.append(p.shape[0])
print min(sizes)
print max(sizes)
print np.mean(sizes)
print np.median(sizes)
print sum([1 for s in sizes if s >= 2048])
```
====================================================================================
Load default training parameters (some of which are listed below). For more details please print the configuration object.
'batch_size': 50
'denoising': False (# by default AE is not denoising)
'learning_rate': 0.0005
'z_rotate': False (# randomly rotate models of each batch)
'loss_display_step': 1 (# display loss at end of these many epochs)
'saver_step': 10 (# over how many epochs to save neural-network)
```
train_params = default_train_params()
encoder, decoder, enc_args, dec_args = mlp_architecture_ala_iclr_18(n_pc_points, bneck_size)
train_dir = create_dir(osp.join(top_out_dir, experiment_name))
conf = Conf(n_input = [n_pc_points, 3],
loss = ae_loss,
training_epochs = train_params['training_epochs'],
batch_size = train_params['batch_size'],
denoising = train_params['denoising'],
learning_rate = train_params['learning_rate'],
train_dir = train_dir,
loss_display_step = train_params['loss_display_step'],
saver_step = train_params['saver_step'],
z_rotate = train_params['z_rotate'],
encoder = encoder,
decoder = decoder,
encoder_args = enc_args,
decoder_args = dec_args
)
conf.experiment_name = experiment_name
conf.held_out_step = 5 # How often to evaluate/print out loss on
# held_out data (if they are provided in ae.train() ).
conf.save(osp.join(train_dir, 'configuration'))
```
If you ran the above lines, you can reload a saved model like this:
```
load_pre_trained_ae = False
restore_epoch = 500
if load_pre_trained_ae:
conf = Conf.load(train_dir + '/configuration')
reset_tf_graph()
ae = PointNetAutoEncoder(conf.experiment_name, conf)
ae.restore_model(conf.train_dir, epoch=restore_epoch)
```
Build AE Model.
```
reset_tf_graph()
ae = PointNetAutoEncoder(conf.experiment_name, conf)
```
Train the AE (save output to train_stats.txt)
```
buf_size = 1 # Make 'training_stats' file to flush each output line regarding training.
fout = open(osp.join(conf.train_dir, 'train_stats.txt'), 'a', buf_size)
train_stats = ae.train(all_pc_data, conf, log_file=fout)
fout.close()
```
Get a batch of reconstructions and their latent-codes.
```
feed_pc, feed_model_names, _ = all_pc_data.next_batch(10)
reconstructions = ae.reconstruct(feed_pc)[0]
latent_codes = ae.transform(feed_pc)
```
Use any plotting mechanism such as matplotlib to visualize the results.
```
i = 2
plot_3d_point_cloud(reconstructions[i][:, 0],
reconstructions[i][:, 1],
reconstructions[i][:, 2], in_u_sphere=True);
i = 4
plot_3d_point_cloud(reconstructions[i][:, 0],
reconstructions[i][:, 1],
reconstructions[i][:, 2], in_u_sphere=True);
```
```
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
print(tf.__version__)
```
# 1. Load Data
```
# overall_summary = pd.read_csv('overall_summary.csv')
overall_summary = pd.read_csv('CR.csv') #Change rate of ISSRs Volume
time = overall_summary['datetime_id']
time = time.to_numpy()[1:]
volume_series = overall_summary['CR_lag1']
volume_series = volume_series.to_numpy()[1:]
overall_summary
# overall_summary['volume_of_ISSR'].plot(figsize=(18,6));
overall_summary['CR_lag1'].plot(figsize=(18,6));
```
**Decompose Time-Series to see Individual Components (trend + seasonality + noise)**
https://coderzcolumn.com/tutorials/data-science/how-to-remove-trend-and-seasonality-from-time-series-data-using-python-pandas
```
from statsmodels.tsa.seasonal import seasonal_decompose
decompose_result1 = seasonal_decompose(overall_summary['CR_lag1'][1:], period = 24, model="additive")
trend1 = decompose_result1.trend
seasonal1 = decompose_result1.seasonal
residual1 = decompose_result1.resid
decompose_result2 = seasonal_decompose(overall_summary['CR_lag1'][1:], period = 24*7, model="additive")
trend2 = decompose_result2.trend
seasonal2 = decompose_result2.seasonal
residual2 = decompose_result2.resid
decompose_result3 = seasonal_decompose(overall_summary['CR_lag1'][1:], period = 24*30, model="additive")
trend3 = decompose_result3.trend
seasonal3 = decompose_result3.seasonal
residual3 = decompose_result3.resid
decompose_result4 = seasonal_decompose(overall_summary['CR_lag1'][1:], period = 24*90, model="additive")
trend4 = decompose_result4.trend
seasonal4 = decompose_result4.seasonal
residual4 = decompose_result4.resid
decompose_result1.plot()
decompose_result2.plot()
decompose_result3.plot()
decompose_result4.plot();
```
**Dickey-Fuller Test for Stationarity**
- `p-value > 0.05`: This implies that time-series is non-stationary.
- `p-value <=0.05`: This implies that time-series is stationary.
```
from statsmodels.tsa.stattools import adfuller
dftest = adfuller(overall_summary['CR_lag1'][1:], autolag = "BIC" )
print("1. ADF : ",dftest[0])
print("2. P-Value : ", dftest[1])
print("3. Num Of Lags : ", dftest[2])
print("4. Method to use when automatically determining the lag length : ", 'BIC' )
print("5. Num Of Observations Used For ADF Regression and Critical Values Calculation :", dftest[3])
print("6. Critical Values :")
for key, val in dftest[4].items():
print("\t",key, ": ", val)
```
**Conclusion:**
- The Dickey-Fuller test shows that **our data is stationary**, so there is no need to remove trend and seasonality.
- But when we decompose the time series by seasonal period, **there is a clear trend.**
- The reason why **there is a conflict between the Dickey-Fuller test and the visualization** is that **the dataset is too small to capture the long-term seasonality and trend**. To analyze the trend and seasonality in depth, we would have to collect at least two years of data.
- Therefore, **our data tends to behave like a non-stationary time series** and we would need to remove trend and seasonality. But it is impossible to recover the correct trend and seasonality from such a dataset.
- So, we will use this approach from the Coursera course:
> To predict on non-stationary time series, **we could just train for a limited period of time.** For example, here where I take just the last 100 steps. You'll probably get a better performance than if you had trained on the entire time series. But that's breaking the mold for typical machine learning, where we always assume that more data is better. But for time series forecasting it really depends on the time series. If it's stationary, meaning its behavior does not change over time, then great. The more data you have the better. But if it's not stationary then the optimal time window that you should use for training will vary.
# 2. Demo for MLP (Multilayer Perceptron) Model
- split the data into a training set and a validation set with a proportion of around 50% (to limit the period of time)
- define some variables
```
tf.keras.backend.clear_session()
tf.random.set_seed(1234) # Set the global random seed.
np.random.seed(1234)
split_time = 3000 # 1000, 2000, 3000, 4000 as a tuning parameter
time_train = time[:split_time]
x_train = volume_series[:split_time]
time_valid = time[split_time:]
x_valid = volume_series[split_time:]
window_size = 3
batch_size = 30
shuffle_buffer_size = 3000
model_index = 1
```
- Define functions for creating rolling windows and for visualizing the final results (a toy example of the windowing follows the function definitions below).
- The output of the rolling-window function includes both the predictors and the response variable.
```
def windowed_dataset(series, model_index, window_size, batch_size, shuffle_buffer_size):
dataset = tf.data.Dataset.from_tensor_slices(series)
dataset = dataset.window(window_size + model_index, shift=1, drop_remainder=True)
dataset = dataset.flat_map(lambda window: window.batch(window_size + model_index)) # Convert each window to numpy format
dataset = dataset.shuffle(shuffle_buffer_size, seed = 123).map(lambda window: (window[:-model_index], window[-1])) # Shuffle data and Retrieve features and response variable
dataset = dataset.batch(batch_size).prefetch(1) # Combines consecutive elements of this dataset into batches
return dataset
def plot_series(time, series, title, format="-", start=0, end=None):
plt.plot(time[start:end], series[start:end], format)
plt.xlabel("Time")
plt.ylabel("Value")
plt.title(title)
plt.grid(True)
```
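To make the windowing concrete, here is a small illustrative sketch (using a toy 0..9 series rather than the ISSR data) that prints one batch produced by the `windowed_dataset` function defined above:
```
# Illustrative only: a toy series instead of the real volume series
toy_series = np.arange(10, dtype=np.float32)
toy_ds = windowed_dataset(series=toy_series, model_index=1,
                          window_size=3, batch_size=2,
                          shuffle_buffer_size=10)
for features, label in toy_ds.take(1):
    # each row of `features` holds window_size consecutive values,
    # and `label` is the value that immediately follows that window
    print(features.numpy(), label.numpy())
```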
- Generate rolling windows
- Train the model
## Modeling MLP
```
dataset = windowed_dataset(series = x_train, model_index = model_index,
window_size = window_size,
batch_size = batch_size,
shuffle_buffer_size = shuffle_buffer_size)
print(dataset)
# units: Positive integer, dimensionality of the output space.
# Three Dense layers in a Sequential model: two hidden layers and one output layer.
# The first hidden layer has five neurons activated by tanh.
# The second is the same, and the third is a single Dense unit that provides the predicted value.
model = tf.keras.models.Sequential([
tf.keras.layers.Dense(units = 5, input_shape=[window_size], activation="tanh"), #Input layer # The number of nodes
tf.keras.layers.Dense(units = 5, activation="tanh"), # hidden layer
tf.keras.layers.Dense(units = 1)
])
model.compile(loss="mse", optimizer=tf.keras.optimizers.SGD(lr=1e-6, momentum=0.9))
model.fit(dataset,epochs=100,verbose=0)
print("Layer weights {}".format(model.get_weights()))
```
- Make prediction for the entire timeline (from timestamp 1 to timestamp 5496): **forecast**
- Retrieve the predicted value for test set: **results_valid**
- Retrieve the predicted value for train set: **results_train**
```
forecast = []
for timestamp in range(len(volume_series) - window_size- (model_index-1)):
forecast.append(model.predict(volume_series[timestamp:timestamp + window_size][np.newaxis]))
# predicted value on validate set
forecast_valid = forecast[split_time-window_size-(model_index-1):]
results_valid = np.array(forecast_valid)[:, 0, 0]
# predicted value on training set
forecast_train = forecast[:split_time-window_size-(model_index-1)]
results_train = np.array(forecast_train)[:, 0, 0]
```
- Visualization
```
plt.figure(figsize=(18, 6))
plot_series(time_valid, x_valid, title = "Plot for test set (Blue: observed; Orange: Predicted)")
plot_series(time_valid, results_valid, title = "Plot for test set (Blue: observed; Orange: Predicted)");
# Blue: x_valid (observed); Orange: results_valid(predicted)
plt.figure(figsize=(18, 6))
plot_series(time_train[window_size+(model_index-1):], x_train[window_size+(model_index-1):], title = "Plot for training set (Blue: observed; Orange: Predicted)")
plot_series(time_train[window_size+(model_index-1):], results_train, title = "Plot for training set (Blue: observed; Orange: Predicted)");
```
- Performance
```
print("MLP-MAE(test set) =", tf.keras.metrics.mean_absolute_error(x_valid, results_valid).numpy())
print("MLP-MSE(test set) =", tf.keras.metrics.mean_squared_error(x_valid, results_valid).numpy())
print("MLP-MAE(training set) =", tf.keras.metrics.mean_absolute_error(x_train[window_size:], results_train).numpy())
print("MLP-MSE(training set) =", tf.keras.metrics.mean_squared_error(x_train[window_size:], results_train).numpy())
```
## Image Inpainting using OpenVINO
This notebook demonstrates how to use the GMCNN image inpainting model with OpenVINO.
The following pipeline will be created in this notebook.
<img align='center' src="data/pipeline.png" alt="drawing" width="600"/>
This model is used to obtain something very similar to the original image given a tampered image.
More details about the [GMCNN model](https://github.com/shepnerd/inpainting_gmcnn)
```
from pathlib import Path
import os
from openvino.inference_engine import IECore
import cv2
import numpy as np
import matplotlib.pyplot as plt
```
### Downloading the Models
Models can be downloaded with `omz_downloader`, a command-line tool for downloading models from the Open Model Zoo.
`gmcnn-places2-tf` is the Open Model Zoo name of the model considered here. You can find the names of the available models [here](https://github.com/openvinotoolkit/open_model_zoo/blob/master/models/public/index.md) and [here](https://github.com/openvinotoolkit/open_model_zoo/blob/master/models/intel/index.md)
```
# Directory where model will be downloaded
base_model_dir = "model"
# Model name as named in Open Model Zoo
model_name = "gmcnn-places2-tf"
model_path = Path(f"{base_model_dir}/public/{model_name}/{model_name}/frozen_model.pb")
if not os.path.exists(model_path):
download_command = f"omz_downloader " \
f"--name {model_name} " \
f"--output_dir {base_model_dir}"
! $download_command
else:
print("Already downloaded")
ir_path = Path(model_path).with_suffix(".xml")
```
### Convert Tensorflow model to OpenVINO IR format
We will use the Model Optimizer command-line tool to convert the TensorFlow model to IR format.
We will extract the necessary information from [the overview of OpenVINO models](https://docs.openvino.ai/latest/omz_models_model_gmcnn_places2_tf.html) (alternatively you can check the openvino/open_model_zoo repository as well)
`input_model`: path to the TensorFlow model.
`input_shape`: input shape of the model; in this case we have 2 inputs, given with a comma separating the 2 shapes.
`input`: used to give the input names. This is essential here because we have 2 inputs.
`output_dir`: output directory/name.
For more details about the parameters please check [here](https://docs.openvino.ai/latest/openvino_docs_MO_DG_prepare_model_convert_model_Converting_Model.html)
```
mo_command = f"""mo
--input_model "{model_path}"
--input_shape "[1, 512, 680, 3], [1, 512, 680, 1]"
--input "Placeholder, Placeholder_1"
--output_dir "{model_path.parent}"
"""
mo_command = " ".join(mo_command.split())
# Run Model Optimizer if the IR model file does not exist
if not ir_path.exists():
print("Exporting TensorFlow model to IR... This may take a few minutes.")
! $mo_command
else:
print("IR model already exists.")
```
### Load the model
Now we will load the IR formatted model.
1. Initialize inference engine (IECore)
2. Read the network from *.bin and *.xml files (weights and architecture)
3. Load the model on the "CPU."
4. Get input and output names of nodes.
Only a few lines of code are required to run the model. Let's see it.
```
ie = IECore()
def model_init(model_path):
"""
Read the network and weights from file, load the
model on the CPU and get input and output names of nodes
:param: model: model architecture path *.xml
:returns:
exec_net: Encoder model network
input_key: Input node network
output_key: Output node network
"""
# Read the model.xml and weights file
net = ie.read_network(model_path)
# load the model on to the CPU
exec_net = ie.load_network(net, "CPU")
    # Store the input node names as a list because this model has 2 inputs
input_keys = list(exec_net.input_info)
output_keys = next(iter(exec_net.outputs.keys()))
return input_keys, output_keys, exec_net, net
input_keys, output_key, exec_net, net = model_init(ir_path)
```
### Determine the input shapes of the model
Let's save the input shapes into a list called `input_shapes`.
Note that both inputs have the same image dimensions; however, the second input has only 1 channel (monotone).
*Since the input dimensions are used for resizing, we copy them into the H and W variables.
```
input_shapes = []
for key in input_keys:
curr_shape = net.input_info[key].tensor_desc.dims
input_shapes.append(curr_shape)
print(key, ": ", curr_shape)
N, C, H, W = input_shapes[0]
```
### Create a square mask
Next we will create a single-channel mask that will be laid on top of the original image
```
def create_mask(image_height, image_width, sizey=30, sizex=30):
"""
create a square mask of defined size on a random location
:param: image_height: height of the image
:param: image_width: width of the image
:param: size: size in pixels of one side
:returns:
mask: monotone(grayscale) mask of size [image_width,image_height,1]
"""
mask = np.zeros((image_height, image_width, 1), dtype=np.float32)
start_x = np.random.randint(image_width - sizex)
start_y = np.random.randint(image_height - sizey)
cv2.rectangle(mask, (start_x, start_y), (start_x+sizex, start_y+sizey), (1, 1, 1), -1)
return mask
# Generate a square mask of size 40px x 40px
mask = create_mask(H, W, 40, 40)
# This mask will be laid over the input image as noise
plt.imshow(cv2.cvtColor(mask, cv2.COLOR_BGR2RGB));
```
### Preprocess the mask - Resizing
Note this step is not necessary since we passed the correct arguments to the create_mask function
```
# This cell is not necessary since we gave the correct dimentions when creating the mask.
resized_mask = cv2.resize(mask, (W, H))
resized_mask = np.expand_dims(resized_mask, axis=-1)
```
### Load and Resize the Image
This image will be altered using a mask.
```
# Load Image
image = cv2.imread("data/test_image.png")
# Resize image to meet network expected input sizes
resized_image = cv2.resize(image, (W, H))
plt.imshow(cv2.cvtColor(resized_image, cv2.COLOR_BGR2RGB));
```
### Generating the Masked Image
Multiplying the image by the inverted mask and filling the masked region with white gives us the masked image, i.e. the mask layered on top of the original image.
The `masked_image` will be the first input to the GMCNN model
```
# Generating Masked_image
masked_image = (resized_image*(1-mask)+255*mask).astype(np.uint8)
plt.imshow(cv2.cvtColor(masked_image, cv2.COLOR_BGR2RGB));
```
### Preprocessing function to align with the ```input_shapes```
The model expects the input dimensions to be NCHW.
- masked_image.shape = (512,680,3) -----> model expects = (1,3,512,680)
- resized_mask.shape = (512,680,1) -----> model expects = (1,1,512,680)
```
# Adding the batch size dimention and changing to NCHW format
def preprocess(inp):
"""
Adding the batch size dimention and changing to NCHW format
:param frame: input image
:returns: processed image
"""
inp = np.transpose(inp, (2, 0, 1))
inp = np.expand_dims(inp, axis=0)
return inp
masked_image = preprocess(masked_image)
resized_mask = preprocess(resized_mask)
result = exec_net.infer({input_keys[0]: masked_image, input_keys[1]:resized_mask})
plt.imshow(cv2.cvtColor(np.transpose(result[output_key].astype(np.uint8)[0], (1, 2, 0)), cv2.COLOR_BGR2RGB));
```
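If you want to keep the inpainted result, an optional final step (the output file name below is just an example) converts the network output back to HWC layout and writes it to disk:
```
# Optional: save the inpainted result to disk (file name is an example)
inpainted = np.transpose(result[output_key].astype(np.uint8)[0], (1, 2, 0))
cv2.imwrite("data/inpainted_result.png", inpainted)
```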
# Homework 01: Working with text
Questions 1-6 use the text in the variable `sentences`.
`string` and `re` are imported for you. You should not use any other imports for this exercise.
```
import string
import re
sentences = """
Eva, Can I Stab Bats In A Cave?
This is not a palindrome.
Madam In Eden, I'm Adam
Neither is this.
Mr. Owl Ate My Metal Worm
Do you think this is a palindrome?
Never Odd Or Even
Ouch! That hurts!!
Doc, Note: I Dissent. A Fast Never Prevents A Fatness. I Diet On Cod.
Pythons eat rats.
"""
```
**1**. (10 points)
- Write a function to identify palindromes. It should ignore spaces, punctuation and capitalization.
```
def identify_palindrome(sen):
clean = ''.join(sen.split(' ')).translate(str.maketrans('','',string.punctuation)).lower().split('\n')
palindrome = []
pos = []
for i, sen in enumerate(clean):
if sen == '':
continue
elif sen == sen[::-1]:
palindrome.append(sen)
pos.append(i)
raw = sen.split('\n')
return palindrome, pos
palindrome, pos = identify_palindrome(sentences)
print('palindrome in the sentences:\n', palindrome, end = '\n\n')
for row, sen in enumerate(sentences.split('\n')):
if row in pos:
print(row, sen)
```
**2**. (10 points)
Convert the given sentences into a list, and find the palindromes using
- a for loop
- a list comprehension
- the `filter` higher order function
```
sen_list = sentences.split('\n')
clean = ''.join(sentences.split(' ')).translate(str.maketrans('','',string.punctuation)).lower().split('\n')
palindrome2 = [sen for sen in clean if sen != '' and sen == sen[::-1]]
palindrome2
```
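For completeness, here is a sketch of the other two approaches the question asks for, an explicit `for` loop and the `filter` built-in, reusing the `clean` list defined above:
```
# Same palindrome test with an explicit for loop
palindrome_loop = []
for s in clean:
    if s != '' and s == s[::-1]:
        palindrome_loop.append(s)

# Same test with the filter higher-order function
palindrome_filter = list(filter(lambda s: s != '' and s == s[::-1], clean))

print(palindrome_loop)
print(palindrome_filter)
```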
**3**. (10 points)
Sort the same sentences in decreasing order of the number of vowels they contain.
```
num_vowels = []
for sen in sen_list:
cur_vowels = re.findall(r'[aeiouAEIOU]', sen)
num_vowels.append(len(cur_vowels))
num_vowels
[x for _, x in sorted(zip(num_vowels, sen_list), reverse=True)]
```
**4**. (10 points)
Count the number of times each word occurs in the `sentences` variable. Ignore punctuation and capitalization. Show the top 3 words together with their counts.
```
words_sen = sentences.translate(str.maketrans('','',string.punctuation)).lower()
words = re.split(r'[ \n]', words_sen)
dictionary = dict()
for word in set(words):
if word != '':
        dictionary[word] = words.count(word)  # count whole-word occurrences, not substring matches
t = sorted(dictionary, key = lambda x: dictionary[x], reverse = True)
count = 0
for i in t:
print(i, "\t", dictionary[i])
count += 1
if count == 3:
break
```
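As a side note, the same top-3 count can be obtained more concisely with `collections.Counter`; this needs an extra import, which the exercise disallows, so it is shown only for comparison:
```
from collections import Counter
word_counts = Counter(w for w in words if w != '')
print(word_counts.most_common(3))
```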
**5**. (10 points)
Convert the variable sentences into a string with no spaces or punctuation and all in lower case. Now find the most commonly occurring pair of characters (the character pairs in `abcde` are `ab`, `bc`, `cd`, `de`).
```
long_sen = ''.join(clean)
l_sen = long_sen[:-1]
r_sen = long_sen[1:]
char_pairs = dict()
for (b, f) in zip(list(l_sen), list(r_sen)):
temp_char = b + f
if temp_char in char_pairs:
char_pairs[temp_char] += 1
else:
char_pairs[temp_char] = 1
p = sorted(char_pairs, key = lambda x: char_pairs[x], reverse = True)
print(p[0], "\t", char_pairs[p[0]])
```
**6**. (10 points)
- Save the palindromes found in **1** to a file `data/palindromes.txt` with each palindrome in its own line.
- Read the file `data/palindromes.txt` and display the lines formatted so that
- the longest line is printed as is
- all other lines are right-aligned to the longest line
```
palindrome, pos = identify_palindrome(sentences)
with open("./data/palindromes.txt", 'w') as f:
for line in palindrome:
f.write(line)
f.write('\n')
temp_p = []
temp_len = []
with open("./data/palindromes.txt", 'r') as f:
for line in f:
temp_p.append(line.strip())
temp_len.append(len(line.strip()))
max_align = max(temp_len)
for sen in temp_p:
print(sen.rjust(max_align))
```
**7**. (20 points)
- Write a function called `encode` that uses the simple Caesar cipher with offset of $k$. A Caesar cipher moves a character $k$ positions forward with wraparound - an offset of 2 would give `a -> c, b -> d, ..., y -> a, z -> b`.
The encoder should preserve punctuation, spaces and the case of the characters as in the example below for an offset of 13.
```
(original) Why did the chicken cross the road?
(encoded) Jul qvq gur puvpxra pebff gur ebnq?
```
Write a function to encode and decode text using a Caesar cipher with an offset of 5 and test it on the `Why did the chicken cross the road?`
```
def encode(s, k):
ori_l = 'abcdefghijklmnopqrstuvwxyz'
ori_u = ori_l.upper()
ori_l2 = ori_l * 2
ori_u2 = ori_l2.upper()
tar_l = ori_l2[k:(26+k)]
tar_u = ori_u2[k:(26+k)]
return s.translate(str.maketrans(ori_l+ori_u, tar_l+tar_u))
def decode(s, k):
ori_l = 'abcdefghijklmnopqrstuvwxyz'
ori_u = ori_l.upper()
ori_l2 = ori_l * 2
ori_u2 = ori_l2.upper()
tar_l = ori_l2[(26-k):(52-k)]
tar_u = ori_u2[(26-k):(52-k)]
return s.translate(str.maketrans(ori_l+ori_u, tar_l+tar_u))
test = 'Why did the chicken cross the road?'
test_en = encode(test, 5)
test_de = decode(test_en, 5)
print(test)
print(test_en)
print(test_de)
```
**8**. (20 points)
A one-time pad is the same as a Caesar cipher, except that each character is given a random offset (the pad contains a list of offsets). Without the pad, the cipher is unbreakable. Write a function to encode and decode using the one-time pad of offsets provided in `data/pad.txt`. As usual, the encoder should ignore punctuation and spaces but preserve the case of the characters.
```
pad = []
with open("data/pad.txt", 'r') as f:
for line in f:
pad.append(int(line.strip()))
def one_pad_encode(s, pad):
output = ''
for cha, p in zip(s, pad):
char = ord(cha)
if char <= 122 and char >= 97:
temp = chr((char-97+p)%26+97)
elif char <= 90 and char >= 65:
temp = chr((char-65+p)%26+65)
else:
temp = cha
output += temp
return output
def one_pad_decode(s, pad):
output = ''
for cha, p in zip(s, pad):
char = ord(cha)
if char <= 122 and char >= 97:
temp = chr((char-97+26-p)%26+97)
elif char <= 90 and char >= 65:
temp = chr((char-65+26-p)%26+65)
else:
temp = cha
output += temp
return output
test = 'Why did the chicken cross the road?'
test_en = one_pad_encode(test, pad)
test_de = one_pad_decode(test_en, pad)
print(test)
print(test_en)
print(test_de)
```
<a href="https://colab.research.google.com/github/shadab4150/Hindi-News-Language-Model-and-Classification-indic-NLP/blob/master/Hindi_News_Language_Model_and_Classification_indic_NLP_v2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
from google.colab import drive
drive.mount('/content/drive')
!unzip '/content/drive/My Drive/Dataset/hindi2vec.zip' -d '/content/'
from fastai.text import *
import pandas as pd
import gc
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from pathlib import Path
df_trn = pd.read_csv('/content/hindi2vec/hindi-train.csv', sep="\t", encoding='utf-8', header=None)
df_val = pd.read_csv('/content/hindi2vec/hindi-test.csv', sep="\t", encoding='utf-8', header=None)
df_val.columns=['category','text']
df_trn.columns=['category','text']
df_trn.head()
df_val.head()
train = pd.concat([df_trn[['text']],df_val[['text']]])
train.dropna(inplace=True)
train_df = pd.concat([df_trn[['category','text']],df_val[['category','text']]])
train_df.dropna(inplace=True)
print(train.isna().sum())
train.dropna(inplace=True)
train.head()
bs = 128
path = Path('/content/')
data = (TextList.from_df(train,path=path)
.split_by_rand_pct(0.2, seed=42)
.label_for_lm()
.databunch(bs=bs, num_workers=1))
data.show_batch(rows=6)
data.vocab.itos[-40:-20]
```
## Mixed Precision Training
```
learn = language_model_learner(data, AWD_LSTM, drop_mult=0.5, pretrained=False,metrics=[accuracy,Perplexity()]).to_fp16()
print(learn.model)
lr = 1e-2
lr *= bs/48 # Scaling the learning rate by batch size
learn.unfreeze()
learn.fit_one_cycle(15, lr, moms=(0.8,0.7))
learn.save('H_Model1')
learn.load('H_Model1')
learn.lr_find()
learn.recorder.plot()
gc.collect()
learn.load('H_Model1')
learn.unfreeze()
learn.fit_one_cycle(5, 1e-3)
learn.save('final')
learn.save_encoder('ftenc')
gc.collect()
learn.load('/content/models/final');
```
# Let's try to complete actual news headlines

* Actual news headline from **BBC Hindi**.
* **Three** different takes
```
for i in range(3):
    print(learn.predict('सऊदी अरब की जेलों में बंद पाकिस्तानी क़ैदियों की कहानी', n_words=50, temperature=0.75))
```

* Actual news headline from **BBC Hindi**.
```
learn.predict('लंदन: मस्जिद में चाकू से हमला, एक गिरफ़्तार', n_words=50, temperature=0.65)
learn.export('hindiNews.pkl')
```
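If you later want to reuse the exported language model for inference, a minimal sketch (assuming the same fastai v1 environment and that `hindiNews.pkl` sits in `/content`, as above):
```
# Sketch: reload the exported learner and generate text (fastai v1 API)
from fastai.text import *
learn_inf = load_learner('/content', 'hindiNews.pkl')
print(learn_inf.predict('भारत', n_words=20, temperature=0.75))
```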
# Hindi News Classifier
```
train_df.head()
data_clas = (TextList.from_df(train_df, path, vocab=data.vocab, cols='text')
.split_by_rand_pct(0.1, seed=42)
.label_from_df(cols='category')
.databunch(bs=bs, num_workers=1))
data_clas.show_batch(rows=6)
```
### News Categories
```
data_clas.classes
```
## Metrics f1_score
* Because there is a class imbalance in the dataset
> average = **macro**
```
f2 = FBeta()
f2.average='macro'
learn_c = text_classifier_learner(data_clas, AWD_LSTM, pretrained=False , drop_mult=0.5, metrics=[accuracy,f2]).to_fp16()
learn_c.load_encoder('ftenc')
learn_c.freeze()
lr=2e-2
lr *= bs/48
learn_c.fit_one_cycle(10, lr, moms=(0.8,0.7))
learn_c.save('clf')
learn_c.load('clf')
learn_c.unfreeze()
learn_c.lr_find()
learn_c.recorder.plot()
learn_c.unfreeze()
learn_c.fit_one_cycle(5, slice(1e-5))
txt_interp = TextClassificationInterpretation.from_learner(learn_c)
txt_interp.show_top_losses(5)
learn_c.save('clf')
learn_c.load('clf')
learn_c.unfreeze()
```
## Training the last few layers
```
learn_c.freeze_to(-2)
learn_c.fit_one_cycle(3, slice(lr/(2.6**4),lr), moms=(0.8,0.7))
learn_c.freeze_to(-3)
learn_c.fit_one_cycle(2, slice(lr/2/(2.6**4),lr/2), moms=(0.8,0.7))
learn_c.unfreeze()
learn_c.fit_one_cycle(3, slice(lr/10/(2.6**4),lr/10), moms=(0.8,0.7))
```
# final f_beta score 0.789
# Saving fp32 version
```
learn.to_fp32().save('/content/hindi_model/hindiM1', with_opt=False)
learn.data.vocab.save('/content/hindi_model/HindiVocab.pkl')
```
```
r = range(1000) # sequence
g = (x for x in r) # generator
l = list(r) # sequence
i = iter(l) # iterator
```
Compare the size of sequence, generator and iterator
```
import sys
print('range:', sys.getsizeof(r))
print('iter :', sys.getsizeof(i))
print('gen :', sys.getsizeof(g))
print('seq :', sys.getsizeof(l))
```
Compare the attributes/methods of sequence, generator and iterator
```
def attrs(obj):
return ', '.join(a for a in ['__iter__', '__len__', 'send'] if hasattr(obj, a))
print('range:', attrs(r))
print('iter :', attrs(i))
print('gen :', attrs(g))
print('seq :', attrs(l))
# simple example for yield from and multiple yields
def gen():
yield from [1 , 2, 3]
yield from [4, 5]
list(gen())
# a grep-like function. Here the eager implementation.
def find(query, pat='*.*'):
from glob import glob
hits = []
for filepath in glob(pat):
with open(filepath) as lines:
for line in lines:
for word in line.split():
if query in word.lower():
hits.append(word)
return hits
find('generator', '*.md')
```
The same grep-like search implemented lazily as a pipeline of generator functions:
```
def filepaths(pat='*.*'):
from glob import glob
return glob(pat)
def lines(filepaths):
for filepath in filepaths:
with open(filepath) as lines:
yield from lines
def words(lines):
for line in lines:
yield from line.split()
def find(query, pat='*.*'):
ws = words(lines(filepaths(pat)))
return (w for w in ws if query in w)
list(find('generator', '*.md'))
```
The same code with nutsflow
```
from nutsflow import *
from glob import glob
@nut_processor
def lines(filepaths):
for filepath in filepaths:
yield from open(filepath)
@nut_processor
def words(lines):
for line in lines:
yield from line.split()
def find(query, pat='*.*'):
return glob('*.md') >> lines() >> words() >> Filter(lambda w: query in w) >> Collect()
find('generator', '*.md')
nums = [-2, -1, 0, 3, 4]
def absolutes(nums):
return (abs(n) for n in nums)
absolutes(nums)
def absolutes(nums):
for n in nums:
yield abs(n)
absolutes(nums)
class absolutes:
def __init__(self, nums):
self.nums = iter(nums)
def __iter__(self):
return self
def __next__(self):
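        # note: falsy values such as 0 are skipped by the loop below; StopIteration from next() ends the iteration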
while not (n:=next(self.nums)):
pass
return abs(n)
list(absolutes([-1, -2, 0, 3, 4]))
def flatten(xs):
if isinstance(xs, list):
for x in xs:
yield from flatten(x)
else:
yield xs
# flatten without "yield from"
def flatten2(xs):
if isinstance(xs, list):
for x in xs:
for r in flatten(x):
yield r
else:
yield xs
def take(iterable, n):
return [next(iterable) for _ in range(n)]
f = flatten([1, [2, 3, [4]], 5])
list(f)
# using list comprehension
def flatten(xs):
if isinstance(xs, list):
return [y for x in xs for y in flatten(x)]
return [xs]
flatten([1, [2, 3, [4]], 5])
```
# Read datafiles, merge, and lightly clean
```
import os
import json
import datetime
from pprint import pprint
from copy import deepcopy
import numpy as np
import pandas as pd
from tqdm.notebook import tqdm
```
#### Configuration
```
ROOT = '..'
datafile_date = '2020-04-10-v7'
PROCESS_SMALL_DOCS = False # True # Small docs are the individual paragraphs in the text
json_subdir = 'pdf_json/' # may want pmc (xml) for small docs (set to '' for dataversion <= 5)
SOURCE_FILES = {
'COMM-USE': ROOT + f'/data/raw/{datafile_date}/comm_use_subset/{json_subdir}',
'BioRxiv': ROOT + f'/data/raw/{datafile_date}/biorxiv_medrxiv/{json_subdir}',
'NONCOMM': ROOT + f'/data/raw/{datafile_date}/noncomm_use_subset/{json_subdir}',
'PMC': ROOT + f'/data/raw/{datafile_date}/custom_license/{json_subdir}',
}
metadata_file = ROOT + f'/data/raw/{datafile_date}/metadata.csv'
outdir = ROOT + f'/data/interim/{datafile_date}/'
outfile = f'{outdir}{datafile_date}-covid19-combined.jsonl'
outfile_small_docs = f'{outdir}{datafile_date}-covid19-combined-smalldocs.jsonl'
outfile_only_abstracts = f'{outdir}{datafile_date}-covid19-combined-only-abstracts.jsonl'
outfile_abstracts = f'{outdir}{datafile_date}-covid19-combined-abstracts.jsonl'
json_args = {'orient': 'records', 'lines': True}
out_json_args = {'date_format': 'iso', **json_args}
out_path_mode = 0o777
os.makedirs(outdir, mode = out_path_mode, exist_ok = True)
# os.makedirs(outdir_am, mode = out_path_mode, exist_ok = True)  # 'outdir_am' is never defined in this notebook, so this call is disabled
```
## Helper Functions
Some functions taken and modified from https://www.kaggle.com/xhlulu/cord-19-eda-parse-json-and-generate-clean-csv
```
def format_name(author):
middle_name = " ".join(author['middle'])
if author['middle']:
return " ".join([author['first'], middle_name, author['last']])
else:
return " ".join([author['first'], author['last']])
def format_affiliation(affiliation):
text = []
location = affiliation.get('location')
if location:
text.extend(list(affiliation['location'].values()))
institution = affiliation.get('institution')
if institution:
text = [institution] + text
return ", ".join(text)
def format_authors(authors, with_affiliation=False):
name_ls = []
for author in authors:
name = format_name(author)
if with_affiliation:
affiliation = format_affiliation(author['affiliation'])
if affiliation:
name_ls.append(f"{name} ({affiliation})")
else:
name_ls.append(name)
else:
name_ls.append(name)
return ", ".join(name_ls)
def format_body(body_text):
texts = [(di['section'], di['text']) for di in body_text]
texts_di = {di['section']: "" for di in body_text}
for section, text in texts:
texts_di[section] += text
body = ""
for section, text in texts_di.items():
body += section
body += "\n\n"
body += text
body += "\n\n"
return body
def format_bib(bibs):
if type(bibs) == dict:
bibs = list(bibs.values())
bibs = deepcopy(bibs)
formatted = []
for bib in bibs:
bib['authors'] = format_authors(
bib['authors'],
with_affiliation=False
)
doi = None
pmid = None
other_ids = bib.get('other_ids')
if other_ids:
doi = other_ids.get('DOI')
pmid = other_ids.get('PMID')
formatted_ls = [str(bib[k]) for k in ['title', 'authors', 'venue', 'year']]
if doi:
formatted_ls.extend(doi)
if pmid:
formatted_ls.extend(['PMID' + p for p in pmid])
formatted.append(", ".join(formatted_ls))
return "\n ".join(formatted)
def bib_titles(bibs):
result = {}
for key, bib in bibs.items():
result[key] = bib['title']
return result
def extract_small_docs(main_doc_id, body_text, bib_titles_dict):
result = []
for i, di in enumerate(body_text):
ref_titles = []
for ref in di['cite_spans']:
title = bib_titles_dict.get(ref['ref_id'])
if title:
ref_titles.append(title)
result.append((main_doc_id, i, di['text'], di['section'], ref_titles))
return result
def load_files(dirname):
filenames = os.listdir(dirname)
raw_files = []
for filename in tqdm(filenames):
filename = dirname + filename
file = json.load(open(filename, 'rb'))
raw_files.append(file)
return raw_files
def generate_clean_df(all_files, prepare_small_docs=False):
cleaned_files = []
small_docs = []
for file in tqdm(all_files):
if prepare_small_docs:
bib_titles_dict = bib_titles(file['bib_entries'])
docs = extract_small_docs(file['paper_id'], file['body_text'], bib_titles_dict)
else:
docs = []
features = [
file['paper_id'],
file['metadata']['title'],
format_authors(file['metadata']['authors']),
format_authors(file['metadata']['authors'],
with_affiliation=True),
format_body(file['abstract']),
format_body(file['body_text']),
format_bib(file['bib_entries']),
file['metadata']['authors'],
file['bib_entries'],
len(docs)
]
cleaned_files.append(features)
if prepare_small_docs:
small_docs.extend(docs)
col_names = ['paper_id', 'title', 'authors',
'affiliations', 'abstract', 'text',
'bibliography','raw_authors','raw_bibliography',
'num_small_docs']
clean_df = pd.DataFrame(cleaned_files, columns=col_names)
clean_df.head()
if prepare_small_docs:
small_docs_df = pd.DataFrame(small_docs,
columns=['paper_id', 'small_doc_num', 'text', 'section', 'ref_titles'])
return clean_df, small_docs_df
else:
return clean_df
```
## Load Data
### Load Metadata
```
metadata_df = pd.read_csv(metadata_file)
metadata_df.head()
```
### Clean Metadata
```
metadata_df[metadata_df['cord_uid'].duplicated(keep=False)].sort_values('cord_uid').head(10)
metadata_df['publish_year'] = metadata_df['publish_time'].astype(str).apply(lambda d:
d[:4] if d[0] in ('1', '2') else
'19xx' if d == 'nan' else
# d[2:6] if d.startswith("'[") else
'')
metadata_df['publish_year'].unique()
```
### Load Data Files
```
dfd = {}
small_docs = {}
for name, indir in SOURCE_FILES.items():
print(f'Loading {name} from {indir}')
data_files = load_files(indir)
print(f"Cleaning {name} {len(data_files)} files" )
if PROCESS_SMALL_DOCS:
dfd[name], small_docs[name] = generate_clean_df(data_files, prepare_small_docs=True)
else:
dfd[name] = generate_clean_df(data_files)
dfd['COMM-USE'].head()
```
### Combine data from text files
```
for name, df in dfd.items():
df['dataset'] = name
df_combined = pd.concat(dfd.values(), ignore_index=True, sort=False)
df_combined.head()
if PROCESS_SMALL_DOCS:
for name, df in small_docs.items():
df['dataset'] = name
df_combined_small_docs = pd.concat(small_docs.values(), ignore_index=True, sort=False)
print(df_combined_small_docs.shape)
if PROCESS_SMALL_DOCS:
print(df_combined_small_docs.columns)
```
### Join Metadata and Data Files
```
df = metadata_df.copy()
df_joined = df.join(df_combined.set_index('paper_id'), how='left', on='sha', rsuffix='_ft')
df_joined.head()
df_joined_ft = df_joined[~ df_joined['sha'].isnull()].copy()
df_joined_ft.shape
```
### Clean abstract
```
df_joined_ft['abstract_clean'] = df_joined_ft['abstract'].fillna('')
df_joined_ft['abstract_clean'] = df_joined_ft['abstract_clean'].apply(lambda x: x[9:] if x.lower().startswith('abstract') else x)
import re
mentions_covid = re.compile('COVID-19|SARS-CoV-2|2019-nCov|SARS Coronavirus 2|2019 Novel Coronavirus',
re.IGNORECASE)
df_joined_ft['abstract_mentions_covid'] = df_joined_ft['abstract_clean'].str.contains(mentions_covid)
df_joined_ft['abstract_mentions_covid'].sum()
```
### Create citation ref
```
def first_author_lastname(metadata_author_list):
if pd.isnull(metadata_author_list):
return 'UNKNOWN'
alist = metadata_author_list.split(';')
if len(alist) == 1 and alist[0].count(',') > 1:
# check if commas were used as delimiters
alist = alist[0].split(',')
first_author = alist[0]
if ',' in first_author:
split_char = ','
else:
split_char = ' '
first_author_split = first_author.split(split_char)
if split_char == ',':
if len(first_author_split[0]) <=3:
# short last name, use last name and first letter of first name
lastname = first_author_split[0].strip() + '_' + first_author_split[1].strip()[0]
else:
lastname = first_author_split[0].strip()
first_author_split = lastname.split(' ')
if len(first_author_split) > 3 and len([x for x in first_author_split if len(x) > 3]) > 4:
# probably a group name instead of a person's name
lastname = first_author_split[0].strip()
else:
if len(first_author_split) > 3 and len([x for x in first_author_split if len(x) > 2]) > 3:
# probably a group name instead of a person's name
lastname = first_author_split[0].strip()
elif len(first_author_split[-1]) <=3:
# short last name, use last name and first letter of first name
lastname = first_author_split[-1].strip() + '_' + first_author_split[0].strip()[0]
else:
lastname = first_author_split[-1].strip()
if ' ' in lastname:
lastname_split = lastname.split(' ')
if '.' in lastname_split[0] or '(' in lastname_split[0]:
# previously missed I. Last
lastname_split = lastname_split[1:]
elif '.' in lastname_split[1]:
# somehow missed first i. last
lastname_split = lastname_split[2:]
lastname = '_'.join(lastname_split)
return lastname
df_joined_ft['cite_ad'] = df_joined_ft['authors'].apply(first_author_lastname) + '_' + df_joined_ft['publish_year']
```
### Write data
```
df_joined_ft.columns
df_joined_ft.to_json(outfile, **out_json_args)
print(outfile)
if PROCESS_SMALL_DOCS:
df_combined_small_docs.to_json(outfile_small_docs, **out_json_args)
print(outfile_small_docs)
df_joined_ft.head()
df_joined_ft.loc[:, ['cord_uid', 'sha', 'abstract_clean', 'abstract_mentions_covid',
'cite_ad', 'title', 'authors', 'publish_year', 'publish_time', 'dataset',
'pmcid', 'pubmed_id', 'doi'
]].to_json(outfile_abstracts, **out_json_args)
print(outfile_abstracts)
df_joined_ft.loc[:, ['cord_uid', 'sha', 'abstract_clean']].to_json(outfile_only_abstracts, **out_json_args)
print(outfile_only_abstracts)
```
# 1000 Genomes analysis on BigQuery
Authors: Prathima Vembu <br>
Notebook kernel: R (https://irkernel.github.io/installation/)<br>
Recommended Google Cloud Platform machine type: n1-standard-1 <br>
Predicted runtime: ~8 minutes <br>
Predicted cost for analysis <br>
1. Variant optimized table : 83.40 USD per one month <br>
2. Sample optimized table : 0.00 USD per one month <br>
*Note: the first 1 TB of query data processed per month is free* <br>
This notebook includes methods for reproducing several figures in the 1000 Genomes phase 3 publication (https://doi.org/10.1038/nature15393)<br>
#### Variant optimized vs sample optimized tables
|Feature|Variant optimized|Sample optimized|
|---|---|---|
|User specified |No, generated by default|Yes, use ```--sample_lookup_optimized_output_table``` parameter|
|Number of tables created |25 (23 chromosome, sample_info, residual_info)|25 (23 chromosome, sample_info, residual_info)|
|Question answered|Variant lookup query - Find all variants in a given genomic region|Sample lookup query - Find all variants in Sample 'A'|
|Storage cost|No additional storage cost|Additional storage cost (for 25 tables), recovered when running queries for sample lookup analysis|
|Call field|Call column not flattened/unested |Call column flattened/unested|
|New field|No new column added|New column sample_id added which has the same value as ```call.sample_id``` column|
|1000 Genomes table size|1000_genomes_phase_3_variants_20150220 - 5.38 TB|1000_genomes_phase_3_optimized_schema_variants_20150220 - 1.94 TB|
#### Table location
Variant optimized table : ```bigquery-public-data.human_genome_variants.1000_genomes_phase_3_variants_20150220``` <br>
Sample optimized table : ```bigquery-public-data.human_genome_variants.1000_genomes_phase_3_optimized_schema_variants_20150220```
#### Query cost comparison
| Analysis | Variant table(1000Genomes variant table)| Data processed | Sample table(1000Genomes sample optimized table) | Data processed |
|---|---|---|---|---|
|Distinct sample_id's|3.70 USD |1.74 TB|0.00 USD|0.092 TB|
|Number of mapped and missing ID's|3.70 USD|1.74 TB|0.00 USD|0.092 TB|
|Number of variant sites per sample| 19.00 USD|4.8 TB|0.00 USD|0.255 TB|
|Average singletons|19.00 USD|4.8 TB|0.00 USD|0.256 TB|
|Average singletons and rare variants |19.00 USD|4.8 TB|0.00 USD|0.256 TB|
|Distribution of Allele Frequency|0.00 USD|0.001 TB|0.00 USD|0.001 TB|
|Z-score computation|19.00 USD|4.8 TB|0.00 USD|0.255 TB|
|Total|83.4 USD|22.68 TB|0.00 USD|1.207 TB|
Before starting the analysis install and load the below packages
```
#helps connecting R to DBMS
install.packages("DBI")
#helps to work with data on cloud storage
install.packages("bigrquery")
#package to perform multiple data manipulation functions
install.packages("pillar")
#package to restructure and aggregate dataframes
install.packages("reshape2")
#package for creating graphics
install.packages("ggplot2")
library(DBI)
library(bigrquery)
library(pillar)
library(reshape2)
library(ggplot2)
```
### Authenticate and connect to BigQuery
Connect to the Google BigQuery database using ```dbConnect```. Replace the billing project used below (```gbsc-gcp-project-mvp-dev```) with the name of the Google Cloud project you want charges to be billed to.
```
#dbconnect creates a connection to the DBMS
gcp_account<-dbConnect(bigquery(),"bigquery-public-data",
dataset="human_genome_variants",
billing = "gbsc-gcp-project-mvp-dev")
```
### Introductory analysis
Check to confirm the number of samples present in the BigQuery table (from the paper, n=2504). <br>
Two tables are available for the 1000 Genomes project which can be found under *bigquery-public-data.human_genome_variants* <br>
1. 1000_genomes_phase_3_variants_20150220 : The Variant table<br>
2. 1000_genomes_sample_info : The Sample table
```
#Number of samples in the Variant table - queries 1.74 TB
variant_sample_count_sql<-"SELECT
COUNT(distinct call.name) AS Sample_count
FROM `bigquery-public-data.human_genome_variants.1000_genomes_phase_3_variants_20150220`v, UNNEST(v.call) AS call"
variant_sample_count<-dbGetQuery(gcp_account,variant_sample_count_sql)
variant_sample_count
#Optimized table - queries 94.27 GB data
variant_optimized_sample_count_sql<-"SELECT
COUNT(distinct call.name) AS Sample_count
FROM `bigquery-public-data.human_genome_variants.1000_genomes_phase_3_optimized_schema_variants_20150220`v, UNNEST(v.call) AS call"
variant_optimized_sample_count<-dbGetQuery(gcp_account,variant_optimized_sample_count_sql)
variant_optimized_sample_count
#Number of samples in the Sample table
sample_table_count_sql<-"SELECT
COUNT(Sample) AS Sample_count
FROM `bigquery-public-data.human_genome_variants.1000_genomes_sample_info`"
sample_table_count<-dbGetQuery(gcp_account, sample_table_count_sql)
sample_table_count
#Compute the number of mapped ID's and the number of missing ID's - queries 1.74Tb
mapped_missing_values_sql<-"WITH cte1 AS (
SELECT DISTINCT call.name AS call_name
FROM `bigquery-public-data.human_genome_variants.1000_genomes_phase_3_variants_20150220`v,UNNEST(v.call) AS call
), cte2 AS (SELECT distinct Sample AS sample_name
FROM `bigquery-public-data.human_genome_variants.1000_genomes_sample_info`
),cte3 AS(SELECT call_name, sample_name
FROM cte1
RIGHT JOIN cte2
ON cte1.call_name=cte2.sample_name
)
SELECT SUM(CASE WHEN call_name IS NULL THEN 1 ELSE 0 END) AS missing_values,
SUM(CASE WHEN call_name IS NOT NULL THEN 1 ELSE 0 END) AS mapped_values FROM cte3"
mapped_missing_values<-dbGetQuery(gcp_account, mapped_missing_values_sql)
mapped_missing_values
```
Given that both these tables have different number of distinct sample ID's, it is important to make sure that the list of sample ID's in the variant table is a subset of the ID's from the sample table to prevent data loss. The query below computes the number of mapped ID's and number of missing ID's
```
#OPTIMIZED TABLE - queries 94.27 GB
mapped_optimized_missing_values_sql<-"WITH cte1 AS (
SELECT DISTINCT call.name AS call_name
FROM `bigquery-public-data.human_genome_variants.1000_genomes_phase_3_optimized_schema_variants_20150220`v,UNNEST(v.call) AS call
), cte2 AS (SELECT distinct Sample AS sample_name
FROM `bigquery-public-data.human_genome_variants.1000_genomes_sample_info`
),cte3 AS(SELECT call_name, sample_name
FROM cte1
RIGHT JOIN cte2
ON cte1.call_name=cte2.sample_name
)
SELECT SUM(CASE WHEN call_name IS NULL THEN 1 ELSE 0 END) AS missing_values,
SUM(CASE WHEN call_name IS NOT NULL THEN 1 ELSE 0 END) AS mapped_values FROM cte3"
mapped_optimized_missing_values<-dbGetQuery(gcp_account, mapped_optimized_missing_values_sql)
mapped_optimized_missing_values
```
#### 1.1: Number of variant sites per genome
Refer: [Figure 1b](https://www.nature.com/articles/nature15393/figures/1)
```
#Compute the number of variant sites per genome - queries 4.8 TB
variant_sites_per_genome_sql<-"SELECT s.Population,s.Sample,COUNT(reference_name) as variant_count,
FROM `bigquery-public-data.human_genome_variants.1000_genomes_phase_3_variants_20150220`v, UNNEST(v.call) AS call
INNER JOIN `bigquery-public-data.human_genome_variants.1000_genomes_sample_info`s
ON call.name=s.Sample
WHERE EXISTS (SELECT gt FROM UNNEST(call.genotype) gt WHERE gt > 0 AND reference_name NOT IN ('X','Y','MT'))
GROUP BY 1,2 ORDER BY variant_count DESC"
variant_sites_per_genome<-dbGetQuery(gcp_account, variant_sites_per_genome_sql)
head(variant_sites_per_genome)
#OPTIMIZED TABLE - queries 261.17 GB
variant_sites_per_genome_optimized_sql<-"SELECT s.Population,s.Sample,COUNT(reference_name) as variant_count,
FROM `bigquery-public-data.human_genome_variants.1000_genomes_phase_3_optimized_schema_variants_20150220`v, UNNEST(v.call) AS call
INNER JOIN `bigquery-public-data.human_genome_variants.1000_genomes_sample_info`s
ON call.name=s.Sample
WHERE EXISTS (SELECT gt FROM UNNEST(call.genotype) gt WHERE gt > 0 AND reference_name NOT IN ('X','Y','MT'))
GROUP BY 1,2 ORDER BY variant_count DESC"
variant_sites_per_optimized_genome<-dbGetQuery(gcp_account, variant_sites_per_genome_optimized_sql)
head(variant_sites_per_optimized_genome)
#Set the order of the labels
variant_sites_per_genome$Population<-factor(variant_sites_per_genome$Population,
levels = unique(variant_sites_per_genome$Population))
#Make plots wider on jupyter notebooks
options(repr.plot.width=15, repr.plot.height=8)
#Generate the plot
ggplot(variant_sites_per_genome, aes(y=variant_count,x=Population, color=Population))+
geom_point()+theme(axis.text.x = element_blank(),axis.ticks.x = element_blank())+
scale_y_continuous(breaks = c(3800000,4000000,4200000,4400000,4600000,4800000,5000000,5200000,5400000,5600000),
minor_breaks = NULL, labels = function(l) {paste0(round(l/1e6,1),"M")})+
labs(title = "Variant sites per genome across populations",x = "Individual",y = "Variant sites")+
theme(plot.title = element_text(size=22),axis.text=element_text(size=14),axis.title=element_text(size=18))
```
#### 1.2: The average number of singletons per genome
Refer: [Figure 1c](https://www.nature.com/articles/nature15393/figures/1)
```
#Compute the number of singletons per genome
singletons_per_genome_sql<-"WITH cte1 AS (SELECT s.Population,Sample,
COUNT(reference_name) as variant_sites,COUNTIF(AC=1) AS Singletons,
FROM `bigquery-public-data.human_genome_variants.1000_genomes_phase_3_variants_20150220`v,
UNNEST(v.alternate_bases) AS Alternate, UNNEST(v.call) AS call
INNER JOIN `bigquery-public-data.human_genome_variants.1000_genomes_sample_info`s
ON call.name=s.Sample
WHERE EXISTS(SELECT gt FROM UNNEST(call.genotype) gt WHERE gt>0 ) AND reference_name NOT IN ('X','Y','MT')
GROUP BY 1,2)
SELECT Population, AVG(Singletons) AS Average_Singletons FROM cte1 GROUP BY 1 ORDER BY 2 DESC"
singletons_per_genome<-dbGetQuery(gcp_account, singletons_per_genome_sql)
head(singletons_per_genome)
```
The output table is sorted in a descending order by the "Average_Singletons" column. Output dataframe has 2 columns, the sub-population group and the average singletons computed.
```
#Setting the labels, AC=1
singletons_per_genome$Population<-factor(singletons_per_genome$Population,
levels = unique(singletons_per_genome$Population))
#Make plots wider on jupyter notebooks
options(repr.plot.width=15, repr.plot.height=8)
#Generate the plot, AC=1
ggplot(singletons_per_genome,aes(y=Average_Singletons,x=Population))+
geom_bar(aes(fill=Population),stat = "identity",position="dodge")+
scale_y_continuous(breaks = c(0,2000,4000,6000,8000,10000,12000,14000,16000,18000,20000,22000),
minor_breaks = NULL, labels = function(l) {paste0(round(l/1e3,1),"K")})+
labs(title = "Average number of singletons per genome",x = "Population",y = "Singletons per genome")+
theme(plot.title = element_text(size=22),axis.text=element_text(size=14),axis.title=element_text(size=18))
```
#### 1.3: Allele frequency - range
Refer: Extended Figure 3a
```
#Extracting the allele frequency
af_sql<-"SELECT COUNT(AF) AF_Count, AF_bucket,
FROM (SELECT AF, CASE WHEN AF>=0 AND AF<=0.1 THEN 1
WHEN AF>0.1 AND AF<=0.2 THEN 2
WHEN AF>0.2 AND AF<=0.3 THEN 3
WHEN AF>0.3 AND AF<=0.4 THEN 4
WHEN AF>0.4 AND AF<=0.5 THEN 5
WHEN AF>0.5 AND AF<=0.6 THEN 6
WHEN AF>0.6 AND AF<=0.7 THEN 7
WHEN AF>0.7 AND AF<=0.8 THEN 8
WHEN AF>0.8 AND AF<=0.9 THEN 9
WHEN AF>0.9 AND AF<=1.0 THEN 10
ELSE -1 END AS AF_bucket
FROM `bigquery-public-data.human_genome_variants.1000_genomes_phase_3_variants_20150220`v,UNNEST(v.alternate_bases) AS Alternate )
GROUP BY AF_bucket
ORDER BY AF_bucket"
af<-dbGetQuery(gcp_account, af_sql)
af
#Make plots wider on jupyter notebooks
options(repr.plot.width=15, repr.plot.height=8)
#Generating the graph for variant tables
ggplot(af, aes(x=AF_bucket, y=AF_Count))+
geom_bar(stat = "identity",fill="red")+
labs(title = "Variant table - Allele Frequency distribution",x = "Allele Frequency bucket",y = "Count(allele frequency)")+
theme(plot.title = element_text(size=22),axis.text=element_text(size=14),axis.title=element_text(size=18),
legend.title = element_blank(),legend.text = element_blank())+
scale_x_continuous(breaks=c(1,2,3,4,5,6,7,8,9,10), minor_breaks=NULL,labels=c("0.1", "0.2", "0.3", "0.4", "0.5","0.6","0.7","0.8","0.9","1.0"))
```
#### 1.4: Number of rare variants and singletons per genome <br>
Refer: [Extended Figure 3b](https://www.nature.com/articles/nature15393/figures/7)
```
#Compute the number of rare variants and singletons per genome
rare_var_and_singletons_per_genome_sql<-"WITH cte1 AS (SELECT Population,Sample,
COUNT(reference_name) as variant_sites,
COUNTIF(AF<0.005) AS Rare_Variants,
COUNTIF(AC=1) AS Singletons,
FROM `bigquery-public-data.human_genome_variants.1000_genomes_phase_3_variants_20150220`v,
UNNEST(v.alternate_bases) AS Alternate, UNNEST(v.call) AS call
INNER JOIN `bigquery-public-data.human_genome_variants.1000_genomes_sample_info`s
ON call.name=s.Sample
WHERE EXISTS(SELECT gt FROM UNNEST(call.genotype) gt WHERE gt>0 ) AND reference_name NOT IN ('X','Y','MT')
GROUP BY 1,2) SELECT Population, AVG(Rare_variants) AS Average_rare_variants, AVG(Singletons) AS Average_singletons
FROM cte1 GROUP BY 1 ORDER BY Average_rare_variants,Average_singletons DESC"
rare_var_and_singletons_per_genome<-dbGetQuery(gcp_account, rare_var_and_singletons_per_genome_sql)
head(rare_var_and_singletons_per_genome)
```
The output table contains 3 columns: the sub-population, the computed average rare variants, and the computed average singletons. All 3 columns were used to generate the plot shown.
```
#Set the order of the labels
rare_var_and_singletons_per_genome$Population<-factor(rare_var_and_singletons_per_genome$Population,
levels = unique(rare_var_and_singletons_per_genome$Population))
#Make plots wider on jupyter notebooks
options(repr.plot.width=15, repr.plot.height=8)
#Generating the graph
ggplot(rare_var_and_singletons_per_genome,aes(x=Population, y=Average_rare_variants))+
geom_bar(stat = "identity", position = "stack", fill="grey")+
geom_bar(data = subset(rare_var_and_singletons_per_genome,variable=Average_singletons),
aes(x=Population, y=Average_singletons, fill=Population),stat = "identity")+
scale_y_continuous(breaks = c(0,20000,40000,60000,80000,100000,120000,140000,160000,180000,200000,220000,
240000,260000,280000),labels = function(l) {paste0(round(l/1e3,1),"K")})+
labs(title = "Average number of rare variants(grey) with average number of singletons(colours)",
y = "Rare variants per genome")+
theme(plot.title = element_text(size=20),axis.text=element_text(size=14),axis.title=element_text(size=18))
```
#### 1.5: Computing the z score for variant count for each population
The z score tells you how many standard deviations a data point lies from the mean. <br>
Z score was computed using the formula: 
Where:<br>
Z - standard score <br>
X - Observed value <br>
μ - Mean variant site count of all genomes (across populations included)<br>
σ - Standard deviation computed across all genomes (all populations included)<br>
Refer: [Extended Figure 4](https://www.nature.com/articles/nature15393/figures/8)
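Written out with the symbols defined above, the score computed for each population in the query below is
$$
Z = \frac{X - \mu}{\sigma}
$$
where $X$ is that population's mean variant-site count.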
```
#Compute number of variant sites per genome
z_score_sql<-"WITH cte1 AS (SELECT Population,Sample,
COUNT(reference_name) as variant_site,
FROM `bigquery-public-data.human_genome_variants.1000_genomes_phase_3_variants_20150220`v,UNNEST(v.alternate_bases) AS Alternate,
UNNEST(v.call) AS call
INNER JOIN `bigquery-public-data.human_genome_variants.1000_genomes_sample_info`s
ON call.name=s.Sample
WHERE EXISTS(SELECT gt FROM UNNEST(call.genotype) gt WHERE gt>0 ) AND reference_name NOT IN ('X','Y','MT')
GROUP BY 1,2), cte2 AS (SELECT AVG(variant_site) AS total_mean, STDDEV(variant_site) AS total_sd FROM cte1),
cte3 AS (SELECT Population, AVG(variant_site) AS sub_population_mean FROM cte1 GROUP BY 1)
SELECT Population, (sub_population_mean-total_mean)/total_sd AS Z_score FROM cte2 join cte3 on 1=1
GROUP BY 1,2 ORDER BY Z_score DESC"
z_score<-dbGetQuery(gcp_account, z_score_sql)
z_score
```
For the computation of the z score, the query has been broken into 4 sections:<br>
- Subquery 1: Join the variant and sample table based on sample id <br>
- Subquery 2: Compute the population mean and standard deviation across all samples (and sub-population groups)<br>
- Subquery 3: Compute the sample mean across variant sites per sub-population group <br>
- Finally, calculate the z score using the formula <br>
The output dataframe has 2 columns, the sub-population and the calculated z-score, and is used as the input dataframe to generate the graph below.
```
#Set the order of the labels
z_score$Population<-factor(z_score$Population, levels = unique(z_score$Population))
#Generating the graph
ggplot(z_score, aes(y=Z_score,x=Population))+
geom_bar(aes(fill=Population), stat = "identity", position = "dodge")+
theme(axis.text.x = element_blank(),axis.ticks.x = element_blank())+
scale_y_continuous(breaks = c(-2.0,-1.5,-1.0,-0.5,0,0.5,1.0,1.5,2.0),minor_breaks = NULL)+
labs(title = "Z score for variant count for each population",y = "Standardized number of sites/genome")+
theme(plot.title = element_text(size=20),axis.text=element_text(size=14),axis.title=element_text(size=18))
```
# 07 Numbers
* *Computational Physics*: Ch 2.4, 2.5, 3
* Python Tutorial [Floating Point Arithmetic: Issues and Limitations](https://docs.python.org/3/tutorial/floatingpoint.html)
## Binary representation
Computers store information with two-state logic. This can be represented in a binary system with numbers 0 and 1 (i.e. base 2)
Any number can be represented in any base as a polynomial (possibly with infinitely many terms): the digits are $0 \leq x_k < b$ and determine the contribution of the base $b$ raised to the $k$th power.
$$
q_b = \sum_{k=-\infty}^{+\infty} x_k b^k
$$
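For example, the base-2 digits $101_2$ evaluate to $1\cdot 2^2 + 0\cdot 2^1 + 1\cdot 2^0 = 5$. A quick way to check such an expansion in Python:
```
# evaluate digits x_k of 101 (base 2) with the polynomial above
digits = [1, 0, 1]                                       # most significant digit first
sum(x * 2**k for k, x in enumerate(reversed(digits)))    # -> 5
```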
## Integers
Convert 10 (base 10, i.e. $1 \times 10^1 + 0\times 10^0$) into binary
(Note: `divmod(x, 2)` is `x // 2, x % 2`, i.e. integer division and remainder):
```
divmod(10, 2)
divmod(5, 2)
divmod(2, 2)
```
The binary representation of $10_{10}$ is $1010_2$ (keep dividing until there's only 1 left, then collect the 1 and all remainders in reverse order, essentially long division).
Double check by multiplying out $1010_2$:
```
1*2**3 + 0*2**2 + 1*2**1 + 0*2**0
```
or in Python
```
int('0b1010', 2)
```
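The repeated `divmod` steps above can be wrapped into a small helper (a minimal sketch for non-negative integers):
```
def to_binary(n):
    """Collect remainders of repeated division by 2, least significant digit first."""
    digits = []
    while n > 0:
        n, r = divmod(n, 2)
        digits.append(str(r))
    return ''.join(reversed(digits)) or '0'

to_binary(10)   # '1010'
```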
### Summary: Integers in binary representation
**All integers are exactly representable in base 2 with a finite number of digits**.
* The sign (+ or โ) is represented by a single bit (0 = +, 1 = โ).
* The number of available "bits" (digits) determines the largest representable integer.
For example, with 8 bits available (a "*byte*"), what is the largest and smallest integer?
```
0b1111111 # 7 bits for number, 1 for sign (not included)
-0b1111111
```
### Sidenote: using numpy to quickly convert integers
If you want to properly sum all terms, use numpy arrays and the element-wise operations:
```
import numpy as np
nbits = 7
exponents = np.arange(nbits)
bases = 2*np.ones(nbits) # base 2
digits = np.ones(nbits) # all 1, for 1111111 (127 in binary)
exponents, bases, digits
np.sum(digits * bases**exponents)
```
### Examples: limits of integers
What is the smallest and largest integer that you can represent
1. if you have 4 bits available and only consider non-negative ("unsigned") integers?
2. if you have 32 bits and consider positive and negative integers?
3. if you have 64 bits and consider positive and negative integers?
Smallest and largest 4 bit unsigned integer:
```
0b0000
0b1111
```
Smallest and largest 32-bit signed integer (int32):
1 bit is sign, 31 bits are available, so the highest number has 31 ones (111...11111). The *next highest* number is 1000...000, a one with 32 bits and 31 zeroes, i.e., $2^{31}$.
Thus, the highest number is $2^{31} - 1$:
```
2**31 - 1
```
(and the smallest number is just $-(2^{31} - 1)$)
And int64 (signed):
```
max64 = 2**(64-1) - 1
print(-max64, max64)
```
### Python's arbitrary precision integers
In Python, integers *have arbitrary precision*: integer arithmetic (`+`, `-`, `*`, `//`) is exact and will not overflow. Thus the following code will run forever (until memory is exhausted); if you run it, you can stop the evaluation with the ''Kernel / Interrupt'' menu command in the notebook and then investigate `n` and `nbits`:
```
n = 1
nbits = 1
while True:
n *= 2
nbits += 1
type(n)
int.bit_length(n)
nbits
```
### NumPy has fixed precision integers
NumPy data types (dtypes) are fixed precision. Overflows "wrap around":
```
import numpy as np
np.array([2**15-1], dtype=np.int16)
np.array([2**15], dtype=np.int16)
np.array([2**15 + 1], dtype=np.int16)
```
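The representable range of a fixed-precision dtype can also be queried directly with `np.iinfo` instead of being computed by hand:
```
np.iinfo(np.int16).min, np.iinfo(np.int16).max   # (-32768, 32767)
np.iinfo(np.int64).max                           # 9223372036854775807
# note: NumPy integers use two's complement, so the minimum is -2**15,
# one lower than the -(2**15 - 1) suggested by the sign-bit picture above
```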
## Binary fractions
Decimal fractions can be represented as binary fractions:
Convert $0.125_{10}$ to base 2:
```
0.125 * 2 # 0.0
_ * 2 # 0.00
_ * 2 # 0.001
```
Thus the binary representation of $0.125_{10}$ is $0.001_2$.
General recipe:
1. multiply by 2
2. if you get a number < 1, add a digit 0 to the right
3. if you get a number โฅ 1, add a digit 1 to the right and then repeat the algorithm 1โ3 with the remainder
```
0.3125 * 2 # 0.0
_ * 2 # 0.01
_ - 1
_ * 2 # 0.010
_ * 2 # 0.0101
```
Thus, 0.3125 is $0.0101_2$.
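The recipe above can also be written as a small helper that emits binary digits after the binary point (a sketch, assuming $0 \le x < 1$):
```
def frac_to_binary(x, max_digits=20):
    """Apply the multiply-by-2 recipe and collect digits after the binary point."""
    digits = []
    for _ in range(max_digits):
        x *= 2
        if x >= 1:
            digits.append('1')
            x -= 1
        else:
            digits.append('0')
        if x == 0:
            break
    return '0.' + ''.join(digits)

frac_to_binary(0.3125)   # '0.0101'
frac_to_binary(0.1)      # '0.00011001100110011001' (truncated; the pattern repeats)
```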
What is the binary representation of decimal $0.1 = \frac{1}{10}$?
```
0.1 * 2 # 0.0
_ * 2 # 0.00
_ * 2 # 0.000
_ * 2 # 0.0001
_ - 1
_ * 2 # 0.00011
_ - 1
_ * 2 # 0.000110
_ * 2 # 0.0001100
_ * 2 # 0.00011001
```
... etc: this is an infinitely repeating fraction and the binary representation of $0.1_{10}$ is $0.000 1100 1100 1100 ..._2$.
**Thus, with a finite number of bits, 0.1 is not exactly representable in the computer.**
The number 0.1 is not stored exactly in the computer. `print` only shows you a convenient approximation:
```
print(0.1)
print("{0:.55f}".format(0.1))
```
## Problems with floating point arithmetic
Only a subset of all real numbers can be represented with **floating point numbers of finite bit size**. Almost all floating point numbers are not exact:
```
0.1 + 0.1 + 0.1 == 0.3
```
... which should have yielded `True`! But because the machine representation of 0.1 is not exact, the equality cannot be fulfilled.
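The usual remedy is to compare floats with a tolerance instead of `==`, for example with `math.isclose` from the standard library:
```
import math
math.isclose(0.1 + 0.1 + 0.1, 0.3)        # True
abs((0.1 + 0.1 + 0.1) - 0.3) < 1e-12      # manual tolerance check, also True
```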
## Representation of floats: IEEE 754
Floating point numbers are stored in "scientific notation": e.g. $c = 2.88792458 \times 10^8$ m/s
* **mantissa**: $2.88792458$
* **exponent**: $+8$
* **sign**: +
Format:
$$
x = (-1)^s \times 1.f \times 2^{e - \mathrm{bias}}
$$
($f$ is $M$ bits long. The leading 1 in the mantissa is assumed and not stored: "ghost" or "phantom" bit.)
Format:
$$
x = (-1)^s \times 1.f \times 2^{e - \mathrm{bias}}
$$
Note:
* In IEEE 754, the highest value of $e$ in the exponent is reserved and not used, e.g. for a 32-bit *float* (see below) the exponent has $(30 - 23) + 1 = 8$ bit and hence the highest number for $e$ is $(2^8 - 1) - 1 = 255 - 1 = 254$. Taking the *bias* into account (for *float*, *bias* = 127), the largest value for the exponent is $2^{254 - 127} = 2^{127}$.
* The case of $e=0$ is also special. In this case, the format is $$x = (-1)^s \times 0.f \times 2^{-\mathrm{bias}}$$ i.e. the "ghost 1" becomes a zero, gaining an additional order of magnitude.
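As a concrete example (with $\mathrm{bias} = 127$ as used for 32-bit floats below): the decimal number $0.15625 = 0.00101_2 = 1.01_2 \times 2^{-3}$ is stored with
$$
s = 0, \quad e = -3 + 127 = 124, \quad f = 0100\ldots0_2
$$
so that $x = (-1)^0 \times 1.01_2 \times 2^{124 - 127} = 1.25 \times 2^{-3} = 0.15625$.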
### IEEE float (32 bit)
IEEE *float* uses **32 bits**
* $\mathrm{bias} = 127_{10}$
* bits
<table>
<tr><td></td><td>s</td><td>e</td><td>f</td></tr>
<tr><td>bit position</td><td>31</td><td>30โ23</td><td>22โ0</td></tr>
</table>
* **six or seven decimal places of significance** (1 in $2^{23}$)
* range: $1.4 \times 10^{-45} \leq |x_{(32)}| \leq 3.4 \times 10^{38}$
```
1/2**23
```
### IEEE double (64 bit)
Python floating point numbers are 64-bit doubles. NumPy has dtypes `float32` and `float64`.
IEEE *double* uses **64 bits**
* $\mathrm{bias} = 1023_{10}$
* bits
<table>
<tr><td></td><td>s</td><td>e</td><td>f</td></tr>
<tr><td>bit position</td><td>63</td><td>62โ52</td><td>51โ0</td></tr>
</table>
* **about 16 decimal places of significance** (1 in $2^{52}$)
* range: $4.9 \times 10^{-324} \leq |x_{(64)}| \leq 1.8 \times 10^{308}$
```
1/2**52
```
For numerical calculations, *doubles* are typically required.
### Special numbers
IEEE 754 also introduces special "numbers" that can result from floating point arithmetic
* `NaN` (not a number)
* `+INF` and `-INF` (infinity)
* `-0` (signed zero)
Python itself does not use the IEEE special numbers
```
1/0
```
But numpy does:
```
np.array([1, -1])/np.zeros(2)
```
But beware, you cannot use `INF` to "take limits". It is purely a sign that something bad happened somewhere...
And **not a number**, `nan`
```
np.zeros(2)/np.zeros(2)
```
### Overflow and underflow
* underflow: typically just set to zero (and that works well most of the time)
* overflow: raises exception or just set to `inf`
```
big = 1.79e308
big
2 * big
2 * np.array([big], dtype=np.float64)
```
... but you can (maybe*) just use an even bigger data type:
```
2 * np.array([big], dtype=np.float128)
```
(*Note: float128 is 80 bit on Linux/macOS and 64 bit on Windows... see [Numpy data types: Extended Precisions](https://docs.scipy.org/doc/numpy/user/basics.types.html#extended-precision))
### Insignificant digits
```
x = 1000.2
A = 1000.2 - 1000.0
print(A)
A == 0.2
```
... oops
```
x = 700
y = 1e-14
x - y
x - y < 700
```
... ooops
## Machine precision
Only a limited number of floating point numbers can be represented. This *limited precision* affects calculations:
```
x = 5 + 1e-16
x
x == 5
```
... oops.
**Machine precision** $\epsilon_m$ is defined as the maximum number that can be added to 1 in the computer without changing that number 1:
$$
1_c + \epsilon_m := 1_c
$$
Thus, the *floating point representation* $x_c$ of an arbitrary number $x$ is "in the vicinity of $x$"
$$
x_c = x(1\pm\epsilon), \quad |\epsilon| \leq \epsilon_m
$$
where we don't know the true value of $\epsilon$.
Thus except for powers of 2 (which are represented exactly) **all floating point numbers contain an unknown error in the 6th decimal place (32 bit floats) or 15th decimal (64 bit doubles)**.
This error should be treated as a random error because we don't know its magnitude.
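NumPy reports this directly via `np.finfo`; note that its `eps` is defined as the gap between $1.0$ and the next representable float ($2^{-52}$ for doubles), which is twice the $\epsilon_m$ found by the search below:
```
np.finfo(np.float64).eps   # 2.220446049250313e-16  (= 2**-52)
np.finfo(np.float32).eps   # 1.1920929e-07          (= 2**-23)
```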
```
N = 100
eps = 1
for nbits in range(N):
eps /= 2
one_plus_eps = 1.0 + eps
# print("eps = {0}, 1 + eps = {1}".format(eps, one_plus_eps))
if one_plus_eps == 1.0:
print("machine precision reached for {0} bits".format(nbits))
print("eps = {0}, 1 + eps = {1}".format(eps, one_plus_eps))
break
```
Compare to our estimate for the precision of float64:
```
1/2**52
```
## Appendix
A quick hack to convert a floating point binary representation to a floating point number.
```
bits = "1010.0001100110011001100110011001100110011001100110011"
import math
def bits2number(bits):
if '.' in bits:
integer, fraction = bits.split('.')
else:
integer = bits
fraction = ""
powers = [int(bit) * 2**n for n, bit in enumerate(reversed(integer))]
powers.extend([int(bit) * 2**(-n) for n, bit in enumerate(fraction, start=1)])
return math.fsum(powers)
bits2number(bits)
bits2number('1111')
bits2number('0.0001100110011001100110011001100110011001100110011')
bits2number('0.0001100')
bits2number('0.0101')
bits2number("10.10101")
bits2number('0.0111111111111111111111111111111111111111')
bits2number('0.110011001100')
```
Python can convert to binary using the `struct` module:
```
x = 6.0e-8
import struct
fpack = struct.pack('f', x) # pack float into bytes
fint = struct.unpack('i', fpack)[0] # unpack to int
m_bits = bin(fint)[-23:] # mantissa bits
print(m_bits)
```
With phantom bit:
```
mantissa_bits = '1.' + m_bits
print(mantissa_bits)
import math
mn, ex = math.frexp(x)
print(mn, ex)
```
# SMA Crossover Strategy
* Buy at market price if the fast SMA is greater than the slow SMA
* If in the market, sell if the fast SMA is smaller than the slow SMA
* Only 1 active operation is allowed in the market
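Expressed as a custom backtrader strategy, these rules would look roughly like the sketch below (illustration only; the demo in this notebook uses the built-in `bt.strategies.MA_CrossOver` instead, and the 128/512 periods are simply the values used later):
```
import backtrader as bt

class SmaCross(bt.Strategy):
    params = dict(fast=128, slow=512)

    def __init__(self):
        sma_fast = bt.ind.SMA(period=self.p.fast)   # fast simple moving average
        sma_slow = bt.ind.SMA(period=self.p.slow)   # slow simple moving average
        self.crossover = bt.ind.CrossOver(sma_fast, sma_slow)

    def next(self):
        if not self.position:          # only one active operation at a time
            if self.crossover > 0:     # fast SMA crossed above slow SMA -> buy
                self.buy()
        elif self.crossover < 0:       # fast SMA crossed below slow SMA -> sell
            self.close()
```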
```
%matplotlib inline
from typing import List
import backtrader as bt
import pandas as pd
from matplotlib import pyplot as plt
from utils import CryptoPandasData
```
## 1. Reading Data
```
from utils import read_bars as read_bars_tmp
def read_bars(csv_file: str)->pd.DataFrame:
TIME_BEGIN = pd.to_datetime('2020-05-09T00:00:00.000Z')
TIME_END = pd.to_datetime('2020-05-15T00:00:00.000Z')
bars_df = read_bars_tmp(csv_file)
bars_df = bars_df[(bars_df['timestamp'] >= TIME_BEGIN) & (bars_df['timestamp_end'] < TIME_END)]
return bars_df
time_bars = read_bars('/data/bars/TimeBar/60000/TimeBar.60000.Binance.Swap.BTC_USDT.csv')
time_bars
from utils import CryptoPandasData
data_feed = CryptoPandasData(dataname=time_bars, timeframe=bt.TimeFrame.Minutes)
```
## 2. SMA Crossover Strategy Demo
```
def sma_cross_demo(data_feed: bt.feeds.PandasData)->None:
cerebro = bt.Cerebro()
cerebro.addstrategy(bt.strategies.MA_CrossOver, fast=128, slow=512)
cerebro.adddata(data_feed)
cerebro.broker.setcash(10000.0)
print('Starting Portfolio Value: %.2f' % cerebro.broker.getvalue())
cerebro.broker.setcommission(commission=0.0004) # Binance Swap taker fee
cerebro.addsizer(bt.sizers.FixedSize, stake=1)
cerebro.addanalyzer(bt.analyzers.SharpeRatio, timeframe=bt.TimeFrame.Days, compression=1, factor=365, annualize=True)
results = cerebro.run()
print('Final Portfolio Value: %.2f' % cerebro.broker.getvalue())
print('Sharpe Ratio: ', results[0].analyzers.sharperatio.get_analysis()['sharperatio'])
plt.rcParams['figure.figsize'] = (16, 8)
cerebro.plot(iplot=False)
sma_cross_demo(data_feed)
```
## 3. Searching for Optimal Parameters with Backtesting
```
def grid_search(data_feed: bt.feeds.PandasData, fast: List[int], slow: List[int])->List[List[bt.cerebro.OptReturn]]:
cerebro = bt.Cerebro()
cerebro.optstrategy(bt.strategies.MA_CrossOver, fast=fast, slow=slow)
cerebro.adddata(data_feed)
cerebro.broker.setcash(10000.0)
cerebro.broker.setcommission(commission=0.0004) # Binance Swap taker fee
cerebro.addsizer(bt.sizers.FixedSize, stake=1)
# Add analyzers
cerebro.addanalyzer(bt.analyzers.SharpeRatio, timeframe=bt.TimeFrame.Days, factor=365)
cerebro.addanalyzer(bt.analyzers.VWR, timeframe=bt.TimeFrame.Days, tann=365)
cerebro.addanalyzer(bt.analyzers.TimeReturn, timeframe=bt.TimeFrame.NoTimeFrame)
cerebro.addanalyzer(bt.analyzers.TimeReturn, timeframe=bt.TimeFrame.NoTimeFrame, data=data_feed, _name='buyandhold')
return cerebro.run()
def get_perf(results: List[List[bt.cerebro.OptReturn]])->pd.DataFrame:
stats = []
for i in results:
for j in i:
stats.append(
{'strategy': f'SMA_Cross_{j.params.fast}_{j.params.slow}',
'sharpe_ratio': j.analyzers.sharperatio.get_analysis()['sharperatio'],
'return': '{0:.2f}%'.format(list(j.analyzers.timereturn.get_analysis().values())[0] * 100),
'buy_and_hold': '{0:.2f}%'.format(list(j.analyzers.buyandhold.get_analysis().values())[0] * 100),
'vwr': j.analyzers.vwr.get_analysis()['vwr'],
}
)
df = pd.DataFrame(stats)
df.sort_values(by='sharpe_ratio', ascending=False, inplace=True)
df.set_index('strategy', inplace=True)
return df
results = grid_search(data_feed, fast=[32, 64, 128], slow=[256, 512, 1024])
get_perf(results)
```
## References
* [Quickstart Guide - Backtrader](https://www.backtrader.com/docu/quickstart/quickstart/)
* [Benchmarking - Backtrader](https://www.backtrader.com/blog/posts/2016-07-22-benchmarking/benchmarking/)
* [Analyzer - VWR - Backtrader](https://www.backtrader.com/blog/posts/2016-09-06-vwr/vwr/)
```
%%markdown
For the following exercises, write the equation in standard form and state the center, vertices, and foci.
%%markdown
1) $\frac{x^2}{9}+\frac{y^2}{4} = 1$
<br>2) $4x^2 + 9y^2 - 36 = 0$
<br>3) C:$(a-c)+(a-(-c)) = \sqrt{b^2+c^2}+\sqrt{b^2+(-c)^2}$
<br> Major vertices: ($\pm3, 0$), Minor Vertices:($0, \pm2$)
<br>4) $(3-c)+(3-(-c)) = 2\sqrt{2^2+c^2}$
<br>5) $3 = \sqrt{4+c^2}$
<br>6) $9= (4+c^2)$
<br>7) $5 = c^2$
<br>8) $c = \pm\sqrt{5}$
<br>9) Foci:$ (\pm2.24, 0)$
%%markdown
2) $9y^2 + 16x^2 - 36y + 32x - 92 = 0$
<br> transfer to the form of $\frac{(x-h)^2}{a^2} + \frac{(y-k)^2}{b^2} = 1$
<br> 1) $9y^2 + 16x^2 - 36y + 32x - 92 +36+16= 0 +36+16$
<br> 2) $(4x+4)^2+(3y-6)^2= 144$
<br> 3) $16(x+1)^2 +9(y-2)^2= 144$
<br> 4) $\frac{(x+1)^2}{9} +\frac{(y-2)^2}{16}= 1$
<br> Center: $(-1, 2)$; vertices: $(-1, 6)$ and $(-1, -2)$; co-vertices: $(-4, 2)$ and $(2, 2)$; $c^2 = 16-9 = 7$, so the foci are $(-1, 2\pm\sqrt{7})$
%%markdown
For the following exercises, sketch the graph, identifying the center, vertices, and foci.
%%markdown
4) $y^2 + 2x^2 - 6y + 8x - 72 = 0$
%%markdown
5. Write the standard form equation of an ellipse with a
center at (1, 2), vertex at (7, 2), and focus at (4, 2)
<br> The major axis is horizontal, because the center, the focus and the given vertex all lie on the line y = 2. Therefore the focal distance is $c = 4-1 = 3$ and the semi-major axis is $a = 7-1 = 6$
<br>1) $\frac{(x-1)^2}{36}+\frac{(y-2)^2}{b^2} = 1$
<br>2) $a = \sqrt{(b^2+c^2)}$
<br>3) $36 = b^2+9$
<br>4) $b^2 = 27 \Rightarrow b = 3\sqrt{3} \approx 5.2$
<br>5) $\frac{(x-1)^2}{36}+\frac{(y-2)^2}{27} = 1$
%%markdown
6. A whispering gallery is to be constructed with a length of 150 feet. If the foci are to be located 20 feet
away from the wall, how high should the ceiling be?
<br> assuming that the origin is located at the center of the gallery:
<br> 1) $2a = 150$
<br> 2) $a = 75$
<br> 3) $c = 150/2 -20 = 55$
<br> 4) Find the semi-minor axis $b$ (the ceiling height at the center):
<br> 5) $a^2 = b^2+c^2$
<br> 6) $5625 = 3025+b^2$
<br> 7) $2600 = b^2$
<br> 8) $b = 50.99$
<br> The ceiling should be 50.99ft high at its top.
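# Quick numeric check of exercise 6 (assumes the setup above: a = 75 ft, c = 75 - 20 = 55 ft,
# and b is the semi-minor axis, i.e. the ceiling height at the center).
import math
a, c = 75, 55
print(math.sqrt(a**2 - c**2))   # ~50.99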
%%markdown
For the following exercises, write the equation of the hyperbola in standard form, and give the center, vertices, foci,
and asymptotes.
%%markdown
7) $\frac{x^2}{49}-\frac{y^2}{81} = 1$
<br> $a = 7$ ; $b = 9$ ; vertices: $(\pm7, 0)$
<br> 1) $81x^2-49y^2 -3969 = 0$
<br> 2) $c^2 = a^2 + b^2$
<br> 3) $c = \sqrt{130}$, the foci are located at $\boxed{(\pm\sqrt{130}, 0)}$, the center is at the origin, the vertices are $\boxed{(\pm7, 0)}$, and the asymptotes are $\boxed{ y= \pm\frac{9}{7}x}$
%%markdown
8) $-9x^2+16y^2+128y+112 = 0$
<br> 1) $-9x^2+16y^2+128y+112+256 = 0+256$
<br> 2) $-9x^2+(4y+16)^2-144 = 0 $
<br> 3) $-9x^2+16(y+4)^2= 144$
<br> 4) $\frac{(y+4)^2}{9}-\frac{x^2}{16}= 1$
<br> The center is located at $(0, -4)$, the vertices are $(0, -1)$ and $(0, -7)$, and the co-vertices are $(\pm4, -4)$
<br> 5) $c^2 = a^2 + b^2 = 9 + 16 = 25$
<br> 6) $c = 5$
<br> Foci are $(0, 1)$ and $(0, -9)$. The asymptotes are $ y = -4 \pm\frac{3}{4}x$
%%markdown
For the following exercises, graph the hyperbola, noting its center, vertices, and foci. State the equations of the
asymptotes.
%%markdown
9) $\frac{(x-3)^2}{25}-\frac{(y+3)^2}{1} = 1$
%%markdown
10) $y^2-x^2+4y-4x-18 = 0$
%%markdown
11) Write the standard form equation of a hyperbola with foci at (1, 0) and (1, 6) and a vertex at (1, 2)
<br> Since the foci differ only in their $y$ coordinates, the hyperbola is of the form $\frac{(y-k)^2}{a^2} - \frac{(x-h)^2}{b^2} = 1$. The center is at $O: (1,\frac{6-0}{2})\Rightarrow \boxed{(1, 3)}$
<br> The value of $a$ is the distance from the center to the given vertex: $a = 3-2 \Rightarrow \boxed{a = 1}$, and $c$ is the distance from the center to a focus: $c = 3$
<br> Filling this in: $\frac{(y-3)^2}{1} - \frac{(x-1)^2}{b^2} = 1$
<br> 1) $c^2 = a^2 + b^2$
<br> 2) $b^2 = 9-1$
<br> 3) $b^2 = 8$
<br> 4) $\frac{(y-3)^2}{1} - \frac{(x-1)^2}{8} = 1$
%%markdown
For the following exercises, write the equation of the parabola in standard form, and give the vertex, focus, and
equation of the directrix.
%%markdown
12) $y^2 + 10x = 0$
<br> 1) $y^2 = -10x$
<br> Vertex at the origin,
<br> 2) $4p = -10$
<br> 3) $p = -\frac{5}{2}$
<br> 4) directrix: $x = -p \Rightarrow \boxed{x=\frac{5}{2}}$
<br> the vertex is equidistant from the focus and the directrix
<br> 5) Focus is $(-\frac{5}{2}, 0)$
%%markdown
13) $3x^2 - 12x - y + 11 = 0$
<br> 1) $3x^2 - 12x = y-11 $
<br> 2) $x^2 - 4x +4= y/3-11/3 +4$
<br> 3) $(x-2)^2= y/3-11/3 +4*3/3$
<br> 4) $(x-2)^2= \frac{y+1}{3}$ , Vertex: $(2, -1)$
<br> 5) $4p = \frac{1}{3}$
<br> 6) $p = \frac{1}{12}$, and since $p>0$ the parabola opens upward
<br> the vertex is equidistant from the focus and the directrix
<br> 7) Focus: $(2, -1+\frac{1}{12}) = (2, -\frac{11}{12})$
<br> 8) Directrix: $y = -1-\frac{1}{12} = -\frac{13}{12}$
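# Numeric sanity check for exercise 13 with the values above
# (vertex (2, -1), focus (2, -11/12), directrix y = -13/12).
import math
x = 3.0
y = 3*x**2 - 12*x + 11                      # a point on the parabola
d_focus = math.hypot(x - 2, y + 11/12)      # distance to the focus
d_directrix = y - (-13/12)                  # distance to the directrix
print(d_focus, d_directrix)                 # both ~3.0833, so the point is equidistant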
%%markdown
For the following exercises, graph the parabola, labeling the vertex, focus, and directrix.
%%markdown
14. $(x - 1)^2 = -4(y + 3)$
%%markdown
15. $y^2 + 8x - 8y + 40 = 0$
%%markdown
16. Write the equation of a parabola with a focus at
(2, 3) and directrix y = -1.
<br> 1) The vertex is the midpoint between the focus and the directrix: $(2, 1)$, and $p = 3-1 = 2$
<br> 2) $(x-2)^2 = 8(y-1)$
%%markdown
17. A searchlight is shaped like a paraboloid of
revolution. If the light source is located 1.5 feet from
the base along the axis of symmetry, and the depth
of the searchlight is 3 feet, what should the width of
the opening be?
%%markdown
<br> $x^2 = 4py$ ; directrix: $ y = -p$ ; F: $(0, \frac{3}{2})$, $A_{depth}:(\frac{w}{2}, 3)$
<br> 1) $\Delta d_{origin \rightarrow directrix} = \Delta d_{origin \rightarrow focus}$ ; $\Delta d_{origin \rightarrow directrix} = |-p|$ ; $\Delta d_{origin \rightarrow focus }= p = \frac{3}{2}$
<br> 2) $x_a^2 = 6y_a$
<br> 3) $x_a^2 = 18$
<br> 4) $x_a = 3\sqrt2 = \frac{w}{2}$
<br> 5) $w \approx 8.48$
%%markdown
For the following exercises, determine which conic section is represented by the given equation, and then determine
the angle ฮธ that will eliminate the xy term.
%%markdown
18. $3x^2 - 2xy + 3y^2 = 4$
<br>=====<br>
<br> 1) find the value of the discriminant $b^2 - 4ac$
<br> 2)$ (-2)^2 - 4*3*3 = -32 < 0$ The equation represents an ellipse because the value of the discriminant is less than 0.
<br> 3) $cot2\theta = \frac{a-c}{b}$
<br> 4) $cot2\theta = \frac{3-3}{-2} = 0$
<br> 5) $cot2\theta = \frac{1}{tan2\theta}$
<br> 6) $tan2\theta = \frac{1}{0} \rightarrow undefined$
<br> 7) $2\theta = 90^\circ$
<br> 8) $\theta = 45^\circ$
%%markdown
19. $x^2 + 4xy + 4y^2 + 6x - 8y = 0$
<br>=====<br>
<br> 1) find the value of the discriminant $b^2 - 4ac$
<br> 2) $ (4)^2 - 4*1*4 = 0 $ The equation represents a parabola
<br> 3) $ cot2\theta = \frac{1-4}{4} = \frac{-3}{4}$
<br> 4) $h^2 = (-3)^2 + 4^2$
<br> 5) $h = 5$
<br> 6) $cos2\theta = -\frac{3}{5}$, so $cos\theta = \sqrt{\frac{1+cos2\theta}{2}} = \frac{1}{\sqrt{5}}$
<br> 7) $\theta \approx 63.43^\circ$
%%markdown
For the following exercises, rewrite in the x'y' system without the x'y' term, and graph the rotated graph.
%%markdown
20. $11x^2 + 10\sqrt{3}xy + y^2 = 4$
<br>=====<br>
<br> 1) $cot2\theta = \frac{11-1}{10\sqrt{3}}$
<br> 2) $cot2\theta = \frac{1}{\sqrt3}$
<br> 3) $h^2 = 1^2 + \sqrt{3}^2$
<br> 4) $h = 2$
<br> 5) $cos\theta = \frac{\sqrt{3}}{2}$ ; $sin\theta = \frac{1}{2}$
<br> 6) $x = \frac{\sqrt{3}x'-y'}{2}$ ; $y = \frac{x'+\sqrt{3}y'}{2}$
<br> 7) $11(\frac{\sqrt{3}x'-y'}{2})^2 + 10\sqrt{3}(\frac{\sqrt{3}x'-y'}{2})(\frac{x'+\sqrt{3}y'}{2}) + (\frac{x'+\sqrt{3}y'}{2})^2 = 4$
<br> 8) $11(\frac{3x'^2-2\sqrt{3}x'y'+y'^2}{4}) + 10\sqrt{3}(\frac{\sqrt{3}x'^2+3x'y'-x'y'-\sqrt{3}y'^2}{4}) + (\frac{x'^2+2\sqrt{3}x'y'+3y'^2}{4}) = 4$
<br> 9) $\frac{33x'^2-22\sqrt{3}x'y'+11y'^2+30x'^2+20\sqrt{3}x'y'-30y'^2+x'^2+2\sqrt{3}x'y'+3y'^2}{4}= 4$
<br> 10) $64x'^2-16y'^2= 16 \Rightarrow 4x'^2-y'^2= 1$
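# Symbolic check of exercise 20 (assumes sympy is available): rotating by theta = 30 deg
# should eliminate the x'y' term and leave 4x'^2 - y'^2 = 1.
import sympy as sp
xp, yp = sp.symbols("xp yp")
x = (sp.sqrt(3)*xp - yp)/2      # x = x'cos(theta) - y'sin(theta)
y = (xp + sp.sqrt(3)*yp)/2      # y = x'sin(theta) + y'cos(theta)
print(sp.expand(11*x**2 + 10*sp.sqrt(3)*x*y + y**2 - 4))   # -> 16*xp**2 - 4*yp**2 - 4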
import numpy as np
# scratch arithmetic for the 7-24-25 right triangle used in exercise 21 below
print(24**2)
print(24*7*2)
print(24*7)
print(25**2)
%%markdown
21. $16x^2 + 24xy + 9y^2 - 125x = 0$
<br>=====<br>
<br> 1) $cot2\theta = \frac{16-9}{24}$
<br> 2) $cot2\theta = \frac{7}{24}$
<br> 3) $h^2 = 24^2+7^2$
<br> 4) $h = 25$
<br> 5) $cos\theta = \sqrt{\frac{1+cos2\theta}{2}}$ ; $sin\theta = \sqrt{\frac{1-cos2\theta}{2}}$
<br> 6) $cos\theta = \sqrt{\frac{1+\frac{7}{25}}{2}}$ ; $sin\theta = \sqrt{\frac{1-\frac{7}{25}}{2}}$
<br> 7) $cos\theta = \sqrt{\frac{32}{50}}$ ; $sin\theta = \sqrt{\frac{18}{50}}$
<br> 8) $cos\theta = \frac{4}{5}$ ; $sin\theta = \frac{3}{5}$
<br> 9) $x = \frac{4x'-3y'}{5}$ ; $y= \frac{3x'+4y'}{5}$
<br> 10) $16(\frac{4x'-3y'}{5})^2 + 24(\frac{4x'-3y'}{5})(\frac{3x'+4y'}{5}) + 9(\frac{3x'+4y'}{5})^2 - 125(\frac{4x'-3y'}{5}) = 0$
<br> 11) $16(16x'^2-24x'y'+9y'^2) + 24(12x'^2+7x'y'-12y'^2) + 9(9x'^2+24x'y'+16y'^2) - 625(4x'-3y') = 0$
<br> 12) $256x'^2-384x'y'+144y'^2 + 288x'^2+168x'y'-288y'^2 + 81x'^2+216x'y'+144y'^2 -2500x'+1875y' = 0$
<br> 13) $625x'^2-2500x'+1875y' = 0$
<br> 14) $x'^2-4x'+3y' = 0$
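# Same style of check for exercise 21 (sympy assumed): with cos(theta) = 4/5, sin(theta) = 3/5
# the x'y' and y'^2 terms should both vanish.
import sympy as sp
xp, yp = sp.symbols("xp yp")
x = (4*xp - 3*yp)/5
y = (3*xp + 4*yp)/5
print(sp.expand(16*x**2 + 24*x*y + 9*y**2 - 125*x))   # -> 25*xp**2 - 100*xp + 75*yp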
%%markdown
For the following exercises, identify the conic with focus at the origin, and then give the directrix and eccentricity.
%%markdown
22) $r = \frac{3}{2-sin\theta}$
<br> ===== <br>
<br> 1) $e = \frac{1}{2}$ ; since the eccentricity satisfies $0<e<1$ the conic section is an ellipse
<br> 2) $ep = \frac{3}{2}$
<br> 3) $p\cdot\frac{1}{2} = \frac{3}{2}$
<br> 4) $p = 3$ ; the directrix is $y=-3$
%%markdown
23) $r = \frac{5}{4+6cos\theta}$
<br> $r = \frac{\frac{5}{4}}{1+\frac{6}{4}cos\theta}$
<br> ===== <br>
<br> 1) $e = \frac{3}{2}$ ; since the eccentricity $e>1$ the conic section is a hyperbola
<br> 2) $e\cdot p = \frac{5}{4}$
<br> 3) $p\cdot\frac{3}{2} = \frac{5}{4}$
<br> 4) $p = \frac{5}{6}$ ; the directrix is $x=\frac{5}{6}$
%%markdown
For the following exercises, graph the given conic section. If it is a parabola, label vertex, focus, and directrix. If it is
an ellipse or a hyperbola, label vertices and foci.
%%markdown
24) $r = \frac{12}{4-8sin\theta}$
%%markdown
25) $r = \frac{2}{4+4sin\theta}$
%%markdown
26. Find a polar equation of the conic with focus at the
origin, eccentricity of e = 2, and directrix: x = 3.
<br>=====<br>
<br> 1) $f: (0, 0) \text{ ; } e= 2 \text{ ; directrix: }x=3$
<br> 2) a vertical directrix to the right of the pole gives $r=\frac{e\cdot p}{1+e\cdot cos\theta}$
<br> 3) $r=\frac{6}{1+2cos\theta}$
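# Numeric check of exercise 26 with r = 6/(1 + 2cos(theta)) (focus at the origin,
# directrix x = 3, e = 2): distance-to-focus / distance-to-directrix should equal 2.
import math
for theta in (0.0, math.pi/2, 2.0):
    r = 6/(1 + 2*math.cos(theta))
    x, y = r*math.cos(theta), r*math.sin(theta)
    print(round(math.hypot(x, y) / abs(3 - x), 6))   # -> 2.0 each time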
```
# Matrix Factorization via Singular Value Decomposition
Matrix factorization is the breaking down of one matrix in a product of multiple matrices. It's extremely well studied in mathematics, and it's highly useful. There are many different ways to factor matrices, but singular value decomposition is particularly useful for making recommendations.
So what is singular value decomposition (SVD)? At a high level, SVD is a factorization whose truncation gives the best lower rank (i.e. smaller/simpler) approximation of the original matrix $R$. Mathematically, it decomposes $R$ into two orthogonal matrices and a diagonal matrix:
$$\begin{equation}
R = U\Sigma V^{T}
\end{equation}$$
where $R$ is the users' ratings matrix, $U$ is the user "features" matrix, $\Sigma$ is the diagonal matrix of singular values (essentially weights), and $V^{T}$ is the movie "features" matrix. $U$ and $V^{T}$ are orthogonal, and represent different things. $U$ represents how much users "like" each feature and $V^{T}$ represents how relevant each feature is to each movie.
To get the lower rank approximation, we take these matrices and keep only the top $k$ features, which we think of as the underlying tastes and preferences vectors.
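As a toy illustration of what keeping only the top $k$ singular values does, here is a minimal numpy sketch on a random stand-in matrix (not the ratings data used below):
```
import numpy as np

rng = np.random.RandomState(0)
A = rng.rand(6, 5)                          # stand-in "ratings" matrix
U, s, Vt = np.linalg.svd(A, full_matrices=False)

k = 2                                       # keep only the top-2 singular values
A_k = U[:, :k] @ np.diag(s[:k]) @ Vt[:k, :]
print(np.linalg.norm(A - A_k))              # error of the best rank-2 approximation
```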
```
import pandas as pd
import numpy as np
r_cols = ['user_id', 'movie_id', 'rating']
m_cols = ['movie_id', 'title']
ratings_df = pd.read_csv('u.data',sep='\t', names=r_cols, usecols = range(3), dtype = int)
movies_df = pd.read_csv('u.item', sep='|', names=m_cols, usecols=range(2))
movies_df['movie_id'] = movies_df['movie_id'].apply(pd.to_numeric)
movies_df.head(3)
ratings_df.head(3)
```
These look good, but I want the format of my ratings matrix to be one row per user and one column per movie. I'll `pivot` `ratings_df` to get that and call the new variable `R`.
```
R_df = ratings_df.pivot(index = 'user_id', columns ='movie_id', values = 'rating').fillna(0)
R_df.head()
```
The last thing I need to do is de-mean the data (normalize by each user's mean) and convert it from a dataframe to a numpy array.
```
R = R_df.values  # .as_matrix() was removed in newer pandas releases; .values is equivalent here
user_ratings_mean = np.mean(R, axis = 1)
R_demeaned = R - user_ratings_mean.reshape(-1, 1)
```
# Singular Value Decomposition
Scipy and Numpy both have functions to do the singular value decomposition. I'm going to use the Scipy function `svds` because it lets me choose how many latent factors I want to use to approximate the original ratings matrix (instead of having to truncate it after).
```
from scipy.sparse.linalg import svds
U, sigma, Vt = svds(R_demeaned, k = 50)
```
Done. The function returns exactly what I detailed earlier in this post, except that the $\Sigma$ returned is just the values instead of a diagonal matrix. This is useful, but since I'm going to leverage matrix multiplication to get predictions I'll convert it to the diagonal matrix form.
```
sigma = np.diag(sigma)
```
# Making Predictions from the Decomposed Matrices
I now have everything I need to make movie ratings predictions for every user. I can do it all at once by following the math and matrix multiply $U$, $\Sigma$, and $V^{T}$ back to get the rank $k=50$ approximation of $R$.
I also need to add the user means back to get the actual star ratings prediction.
```
all_user_predicted_ratings = np.dot(np.dot(U, sigma), Vt) + user_ratings_mean.reshape(-1, 1)
preds_df = pd.DataFrame(all_user_predicted_ratings, columns = R_df.columns)
preds_df.head()
def recommend_movies(predictions_df, userID, movies_df, original_ratings_df, num_recommendations):
# Get and sort the user's predictions
user_row_number = userID - 1 # UserID starts at 1, not 0
    sorted_user_predictions = predictions_df.iloc[user_row_number].sort_values(ascending=False)  # use the DataFrame passed in as an argument
# Get the user's data and merge in the movie information.
user_data = original_ratings_df[original_ratings_df.user_id == (userID)]
user_full = (user_data.merge(movies_df, how = 'left', left_on = 'movie_id', right_on = 'movie_id').
sort_values(['rating'], ascending=False)
)
print('User {0} has already rated {1} movies.'.format(userID, user_full.shape[0]))
print('Recommending highest {0} predicted ratings movies not already rated.'.format(num_recommendations))
# Recommend the highest predicted rating movies that the user hasn't seen yet.
recommendations = (movies_df[~movies_df['movie_id'].isin(user_full['movie_id'])].
merge(pd.DataFrame(sorted_user_predictions).reset_index(), how = 'left',
left_on = 'movie_id',
right_on = 'movie_id').
rename(columns = {user_row_number: 'Predictions'}).
sort_values('Predictions', ascending = False).
iloc[:num_recommendations, :-1]
)
return user_full, recommendations
already_rated, predictions = recommend_movies(preds_df,276, movies_df, ratings_df, 10)
predictions
already_rated.head(10)
```
# Conclusion
We've seen that we can make good recommendations with raw data based collaborative filtering methods (neighborhood models) and latent features from low-rank matrix factorization methods (factorization models).
Low-dimensional matrix recommenders try to capture the underlying features driving the raw data (which we understand as tastes and preferences). From a theoretical perspective, if we want to make recommendations based on people's tastes, this seems like the better approach. This technique also scales **significantly** better to larger datasets.
However, we still likely lose some meaningful signal by using a lower-rank matrix. And though these factorization-based techniques work extremely well, there's research being done on new methods. These efforts have resulted in various types of probabilistic matrix factorization (which work and scale even better) and many other approaches.
# Multiclass classification with Amazon SageMaker XGBoost algorithm
_**Single machine and distributed training for multiclass classification with Amazon SageMaker XGBoost algorithm**_
---
---
## Contents
1. [Introduction](#Introduction)
2. [Prerequisites and Preprocessing](#Prerequisites-and-Preprocessing)
1. [Permissions and environment variables](#Permissions-and-environment-variables)
2. [Data ingestion](#Data-ingestion)
3. [Data conversion](#Data-conversion)
3. [Training the XGBoost model](#Training-the-XGBoost-model)
1. [Training on a single instance](#Training-on-a-single-instance)
2. [Training on multiple instances](#Training-on-multiple-instances)
4. [Set up hosting for the model](#Set-up-hosting-for-the-model)
1. [Import model into hosting](#Import-model-into-hosting)
2. [Create endpoint configuration](#Create-endpoint-configuration)
3. [Create endpoint](#Create-endpoint)
5. [Validate the model for use](#Validate-the-model-for-use)
---
## Introduction
This notebook demonstrates the use of Amazon SageMaker's implementation of the XGBoost algorithm to train and host a multiclass classification model. The MNIST dataset is used for training. It has a training set of 60,000 examples and a test set of 10,000 examples. To illustrate the use of libsvm training data format, we download the dataset and convert it to the libsvm format before training.
To get started, we need to set up the environment with a few prerequisites for permissions and configurations.
---
## Prerequisites and Preprocessing
### Permissions and environment variables
Here we set up the linkage and authentication to AWS services.
1. The roles used to give learning and hosting access to your data. See the documentation for how to specify these.
2. The S3 bucket that you want to use for training and model data.
```
%%time
import os
import boto3
import re
import copy
import time
from time import gmtime, strftime
from sagemaker import get_execution_role
role = get_execution_role()
region = boto3.Session().region_name
bucket='<bucket-name>' # put your s3 bucket name here, and create s3 bucket
prefix = 'sagemaker/DEMO-xgboost-multiclass-classification'
# customize to your bucket where you have stored the data
bucket_path = 'https://s3-{}.amazonaws.com/{}'.format(region,bucket)
```
### Data ingestion
Next, we read the dataset from the existing repository into memory, for preprocessing prior to training. This processing could be done *in situ* by Amazon Athena, Apache Spark in Amazon EMR, Amazon Redshift, etc., assuming the dataset is present in the appropriate location. Then, the next step would be to transfer the data to S3 for use in training. For small datasets, such as this one, reading into memory isn't onerous, though it would be for larger datasets.
```
%%time
import pickle, gzip, numpy, urllib.request, json
# Load the dataset
urllib.request.urlretrieve("http://deeplearning.net/data/mnist/mnist.pkl.gz", "mnist.pkl.gz")
f = gzip.open('mnist.pkl.gz', 'rb')
train_set, valid_set, test_set = pickle.load(f, encoding='latin1')
f.close()
```
### Data conversion
Since algorithms have particular input and output requirements, converting the dataset is also part of the process that a data scientist goes through prior to initiating training. In this particular case, the data is converted from pickle-ized numpy array to the libsvm format before being uploaded to S3. The hosted implementation of xgboost consumes the libsvm converted data from S3 for training. The following provides functions for data conversions and file upload to S3 and download from S3.
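For reference, each line of a libsvm file is just `label index:value index:value ...` with 1-based feature indices; a tiny hand-rolled illustration of the format (independent of the helper functions below):
```
label = 7
pixels = [0.0, 0.5, 0.0, 0.9]   # a pretend 4-pixel image
line = '{} {}'.format(label, ' '.join('{}:{}'.format(i + 1, v) for i, v in enumerate(pixels)))
print(line)                      # -> 7 1:0.0 2:0.5 3:0.0 4:0.9
```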
```
%%time
import struct
import io
import boto3
import botocore  # needed for the botocore.exceptions.ClientError check in download_from_s3 below
def to_libsvm(f, labels, values):
f.write(bytes('\n'.join(
['{} {}'.format(label, ' '.join(['{}:{}'.format(i + 1, el) for i, el in enumerate(vec)])) for label, vec in
zip(labels, values)]), 'utf-8'))
return f
def write_to_s3(fobj, bucket, key):
return boto3.Session(region_name=region).resource('s3').Bucket(bucket).Object(key).upload_fileobj(fobj)
def get_dataset():
import pickle
import gzip
with gzip.open('mnist.pkl.gz', 'rb') as f:
u = pickle._Unpickler(f)
u.encoding = 'latin1'
return u.load()
def upload_to_s3(partition_name, partition):
labels = [t.tolist() for t in partition[1]]
vectors = [t.tolist() for t in partition[0]]
num_partition = 5 # partition file into 5 parts
partition_bound = int(len(labels)/num_partition)
for i in range(num_partition):
f = io.BytesIO()
to_libsvm(f, labels[i*partition_bound:(i+1)*partition_bound], vectors[i*partition_bound:(i+1)*partition_bound])
f.seek(0)
key = "{}/{}/examples{}".format(prefix,partition_name,str(i))
url = 's3n://{}/{}'.format(bucket, key)
print('Writing to {}'.format(url))
write_to_s3(f, bucket, key)
print('Done writing to {}'.format(url))
def download_from_s3(partition_name, number, filename):
key = "{}/{}/examples{}".format(prefix,partition_name, number)
url = 's3n://{}/{}'.format(bucket, key)
print('Reading from {}'.format(url))
s3 = boto3.resource('s3', region_name = region)
s3.Bucket(bucket).download_file(key, filename)
try:
s3.Bucket(bucket).download_file(key, 'mnist.local.test')
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == "404":
print('The object does not exist at {}.'.format(url))
else:
raise
def convert_data():
train_set, valid_set, test_set = get_dataset()
partitions = [('train', train_set), ('validation', valid_set), ('test', test_set)]
for partition_name, partition in partitions:
print('{}: {} {}'.format(partition_name, partition[0].shape, partition[1].shape))
upload_to_s3(partition_name, partition)
%%time
convert_data()
```
## Training the XGBoost model
Now that we have our data in S3, we can begin training. We'll use the Amazon SageMaker XGBoost algorithm, and will actually fit two models in order to demonstrate single-machine and distributed training on SageMaker. In the first job, we'll use a single machine to train. In the second job, we'll use two machines and use the ShardedByS3Key mode for the train channel. Since we have 5 part files, one machine will train on three and the other on two part files. Note that the number of instances should not exceed the number of part files.
First let's set up a list of training parameters which are common across the two jobs.
```
from sagemaker.amazon.amazon_estimator import get_image_uri
container = get_image_uri(region, 'xgboost')
#Ensure that the train and validation data folders generated above are reflected in the "InputDataConfig" parameter below.
common_training_params = \
{
"AlgorithmSpecification": {
"TrainingImage": container,
"TrainingInputMode": "File"
},
"RoleArn": role,
"OutputDataConfig": {
"S3OutputPath": bucket_path + "/"+ prefix + "/xgboost"
},
"ResourceConfig": {
"InstanceCount": 1,
"InstanceType": "ml.m4.10xlarge",
"VolumeSizeInGB": 5
},
"HyperParameters": {
"max_depth":"5",
"eta":"0.2",
"gamma":"4",
"min_child_weight":"6",
"silent":"0",
"objective": "multi:softmax",
"num_class": "10",
"num_round": "10"
},
"StoppingCondition": {
"MaxRuntimeInSeconds": 86400
},
"InputDataConfig": [
{
"ChannelName": "train",
"DataSource": {
"S3DataSource": {
"S3DataType": "S3Prefix",
"S3Uri": bucket_path + "/"+ prefix+ '/train/',
"S3DataDistributionType": "FullyReplicated"
}
},
"ContentType": "libsvm",
"CompressionType": "None"
},
{
"ChannelName": "validation",
"DataSource": {
"S3DataSource": {
"S3DataType": "S3Prefix",
"S3Uri": bucket_path + "/"+ prefix+ '/validation/',
"S3DataDistributionType": "FullyReplicated"
}
},
"ContentType": "libsvm",
"CompressionType": "None"
}
]
}
```
Now we'll create two separate jobs, updating the parameters that are unique to each.
### Training on a single instance
```
#single machine job params
single_machine_job_name = 'DEMO-xgboost-classification' + strftime("%Y-%m-%d-%H-%M-%S", gmtime())
print("Job name is:", single_machine_job_name)
single_machine_job_params = copy.deepcopy(common_training_params)
single_machine_job_params['TrainingJobName'] = single_machine_job_name
single_machine_job_params['OutputDataConfig']['S3OutputPath'] = bucket_path + "/"+ prefix + "/xgboost-single"
single_machine_job_params['ResourceConfig']['InstanceCount'] = 1
```
### Training on multiple instances
You can also run the training job distributed over multiple instances. For larger datasets with multiple partitions, this can significantly boost the training speed. Here we'll still use the small/toy MNIST dataset to demo this feature.
```
#distributed job params
distributed_job_name = 'DEMO-xgboost-distrib-classification' + strftime("%Y-%m-%d-%H-%M-%S", gmtime())
print("Job name is:", distributed_job_name)
distributed_job_params = copy.deepcopy(common_training_params)
distributed_job_params['TrainingJobName'] = distributed_job_name
distributed_job_params['OutputDataConfig']['S3OutputPath'] = bucket_path + "/"+ prefix + "/xgboost-distributed"
#number of instances used for training
distributed_job_params['ResourceConfig']['InstanceCount'] = 2 # no more than 5 if there are total 5 partition files generated above
# data distribution type for train channel
distributed_job_params['InputDataConfig'][0]['DataSource']['S3DataSource']['S3DataDistributionType'] = 'ShardedByS3Key'
# data distribution type for validation channel
distributed_job_params['InputDataConfig'][1]['DataSource']['S3DataSource']['S3DataDistributionType'] = 'ShardedByS3Key'
```
Let's submit these jobs, taking note that the first will be submitted to run in the background so that we can immediately run the second in parallel.
```
%%time
sm = boto3.Session(region_name=region).client('sagemaker')
sm.create_training_job(**single_machine_job_params)
sm.create_training_job(**distributed_job_params)
status = sm.describe_training_job(TrainingJobName=distributed_job_name)['TrainingJobStatus']
print(status)
sm.get_waiter('training_job_completed_or_stopped').wait(TrainingJobName=distributed_job_name)
status = sm.describe_training_job(TrainingJobName=distributed_job_name)['TrainingJobStatus']
print("Training job ended with status: " + status)
if status == 'Failed':
message = sm.describe_training_job(TrainingJobName=distributed_job_name)['FailureReason']
print('Training failed with the following error: {}'.format(message))
raise Exception('Training job failed')
```
Let's confirm both jobs have finished.
```
print('Single Machine:', sm.describe_training_job(TrainingJobName=single_machine_job_name)['TrainingJobStatus'])
print('Distributed:', sm.describe_training_job(TrainingJobName=distributed_job_name)['TrainingJobStatus'])
```
## Set up hosting for the model
In order to set up hosting, we have to import the model from training to hosting. The steps below demonstrate hosting the model generated from the distributed training job. The same steps can be followed to host the model obtained from the single machine job.
### Import model into hosting
Next, you register the model with hosting. This allows you the flexibility of importing models trained elsewhere.
```
%%time
import boto3
from time import gmtime, strftime
model_name=distributed_job_name + '-mod'
print(model_name)
info = sm.describe_training_job(TrainingJobName=distributed_job_name)
model_data = info['ModelArtifacts']['S3ModelArtifacts']
print(model_data)
primary_container = {
'Image': container,
'ModelDataUrl': model_data
}
create_model_response = sm.create_model(
ModelName = model_name,
ExecutionRoleArn = role,
PrimaryContainer = primary_container)
print(create_model_response['ModelArn'])
```
### Create endpoint configuration
SageMaker supports configuring REST endpoints in hosting with multiple models, e.g. for A/B testing purposes. In order to support this, customers create an endpoint configuration that describes the distribution of traffic across the models, whether split, shadowed, or sampled in some way. In addition, the endpoint configuration describes the instance type required for model deployment.
```
from time import gmtime, strftime
endpoint_config_name = 'DEMO-XGBoostEndpointConfig-' + strftime("%Y-%m-%d-%H-%M-%S", gmtime())
print(endpoint_config_name)
create_endpoint_config_response = sm.create_endpoint_config(
EndpointConfigName = endpoint_config_name,
ProductionVariants=[{
'InstanceType':'ml.m4.xlarge',
'InitialVariantWeight':1,
'InitialInstanceCount':1,
'ModelName':model_name,
'VariantName':'AllTraffic'}])
print("Endpoint Config Arn: " + create_endpoint_config_response['EndpointConfigArn'])
```
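As an illustration of the A/B scenario mentioned above, two models can share one endpoint configuration by listing both in `ProductionVariants` and weighting the traffic between them; the second model name here is a placeholder, not something created in this notebook:
```
# Hypothetical two-variant (A/B) configuration -- 'my-challenger-model' is a placeholder.
ab_variants = [
    {'VariantName': 'ModelA', 'ModelName': model_name, 'InstanceType': 'ml.m4.xlarge',
     'InitialInstanceCount': 1, 'InitialVariantWeight': 0.9},
    {'VariantName': 'ModelB', 'ModelName': 'my-challenger-model', 'InstanceType': 'ml.m4.xlarge',
     'InitialInstanceCount': 1, 'InitialVariantWeight': 0.1}
]
# sm.create_endpoint_config(EndpointConfigName='DEMO-ab-config', ProductionVariants=ab_variants)
```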
### Create endpoint
Lastly, the customer creates the endpoint that serves up the model, through specifying the name and configuration defined above. The end result is an endpoint that can be validated and incorporated into production applications. This takes 9-11 minutes to complete.
```
%%time
import time
endpoint_name = 'DEMO-XGBoostEndpoint-' + strftime("%Y-%m-%d-%H-%M-%S", gmtime())
print(endpoint_name)
create_endpoint_response = sm.create_endpoint(
EndpointName=endpoint_name,
EndpointConfigName=endpoint_config_name)
print(create_endpoint_response['EndpointArn'])
resp = sm.describe_endpoint(EndpointName=endpoint_name)
status = resp['EndpointStatus']
print("Status: " + status)
while status=='Creating':
time.sleep(60)
resp = sm.describe_endpoint(EndpointName=endpoint_name)
status = resp['EndpointStatus']
print("Status: " + status)
print("Arn: " + resp['EndpointArn'])
print("Status: " + status)
```
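Instead of polling in a loop, the boto3 SageMaker client also exposes a waiter that blocks until the endpoint is in service; a minimal equivalent of the loop above:
```
waiter = sm.get_waiter('endpoint_in_service')
waiter.wait(EndpointName=endpoint_name)
print(sm.describe_endpoint(EndpointName=endpoint_name)['EndpointStatus'])
```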
## Validate the model for use
Finally, the customer can now validate the model for use. They can obtain the endpoint from the client library using the result from previous operations, and generate classifications from the trained model using that endpoint.
```
runtime_client = boto3.client('runtime.sagemaker', region_name=region)
```
In order to evaluate the model, we'll use the test dataset previously generated. Let us first download the data from S3 to the local host.
```
download_from_s3('test', 0, 'mnist.local.test') # reading the first part file within test
```
Start with a single prediction. Let's use the first record from the test file.
```
!head -1 mnist.local.test > mnist.single.test
%%time
import json
file_name = 'mnist.single.test' #customize to your test file 'mnist.single.test' if use the data above
with open(file_name, 'r') as f:
payload = f.read()
response = runtime_client.invoke_endpoint(EndpointName=endpoint_name,
ContentType='text/x-libsvm',
Body=payload)
result = response['Body'].read().decode('ascii')
print('Predicted label is {}.'.format(result))
```
OK, a single prediction works.
Let's do a whole batch and see how good the prediction accuracy is.
```
import sys
def do_predict(data, endpoint_name, content_type):
payload = '\n'.join(data)
response = runtime_client.invoke_endpoint(EndpointName=endpoint_name,
ContentType=content_type,
Body=payload)
result = response['Body'].read().decode('ascii')
preds = [float(num) for num in result.split(',')]
return preds
def batch_predict(data, batch_size, endpoint_name, content_type):
items = len(data)
arrs = []
for offset in range(0, items, batch_size):
arrs.extend(do_predict(data[offset:min(offset+batch_size, items)], endpoint_name, content_type))
sys.stdout.write('.')
return(arrs)
```
The following uses the helper above to calculate the error rate on the batch test dataset.
```
%%time
import json
file_name = 'mnist.local.test'
with open(file_name, 'r') as f:
payload = f.read().strip()
labels = [float(line.split(' ')[0]) for line in payload.split('\n')]
test_data = payload.split('\n')
preds = batch_predict(test_data, 100, endpoint_name, 'text/x-libsvm')
print ('\nerror rate=%f' % ( sum(1 for i in range(len(preds)) if preds[i]!=labels[i]) /float(len(preds))))
```
Here are a few predictions
```
preds[0:10]
```
and the corresponding labels
```
labels[0:10]
```
The following function helps us create the confusion matrix on the labeled batch test dataset.
```
import numpy
def error_rate(predictions, labels):
"""Return the error rate and confusions."""
correct = numpy.sum(predictions == labels)
total = predictions.shape[0]
error = 100.0 - (100 * float(correct) / float(total))
confusions = numpy.zeros([10, 10], numpy.int32)
bundled = zip(predictions, labels)
for predicted, actual in bundled:
confusions[int(predicted), int(actual)] += 1
return error, confusions
```
The following helps us visualize the errors that the XGBoost classifier is making.
```
import matplotlib.pyplot as plt
%matplotlib inline
NUM_LABELS = 10 # change it according to num_class in your dataset
test_error, confusions = error_rate(numpy.asarray(preds), numpy.asarray(labels))
print('Test error: %.1f%%' % test_error)
plt.xlabel('Actual')
plt.ylabel('Predicted')
plt.grid(False)
plt.xticks(numpy.arange(NUM_LABELS))
plt.yticks(numpy.arange(NUM_LABELS))
plt.imshow(confusions, cmap=plt.cm.jet, interpolation='nearest');
for i, cas in enumerate(confusions):
for j, count in enumerate(cas):
if count > 0:
xoff = .07 * len(str(count))
plt.text(j-xoff, i+.2, int(count), fontsize=9, color='white')
```
### Delete Endpoint
Once you are done using the endpoint, you can use the following to delete it.
```
sm.delete_endpoint(EndpointName=endpoint_name)
```
GAN (Generative Adversarial Network)
- https://github.com/taki0112/GAN-Tensorflow
- https://github.com/zalandoresearch/fashion-mnist
```
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
from skimage.io import imsave
import os
import shutil
import gzip
img_height = 28
img_width = 28
img_size = img_height * img_width
to_train = True
to_restore = False
output_path = "samples"
max_epoch = 500
h1_size = 150
h2_size = 300
z_size = 100
batch_size = 256
def load_mnist(path, kind='train'):
"""Load MNIST data from `path`"""
labels_path = os.path.join(path,
'%s-labels-idx1-ubyte.gz'
% kind)
images_path = os.path.join(path,
'%s-images-idx3-ubyte.gz'
% kind)
with gzip.open(labels_path, 'rb') as lbpath:
labels = np.frombuffer(lbpath.read(), dtype=np.uint8,
offset=8)
with gzip.open(images_path, 'rb') as imgpath:
images = np.frombuffer(imgpath.read(), dtype=np.uint8,
offset=16).reshape(len(labels), 784)
return images, labels
# Generator (G)
def generator(z):
w1 = tf.Variable(tf.truncated_normal([z_size, h1_size], stddev=0.1), name = "g_w1", dtype=tf.float32)
b1 = tf.Variable(tf.zeros([h1_size]), name = "g_b1", dtype=tf.float32)
h1 = tf.nn.relu(tf.matmul(z,w1)+b1)
w2 = tf.Variable(tf.truncated_normal([h1_size, h2_size], stddev=0.1), name = "g_w2", dtype=tf.float32)
b2 = tf.Variable(tf.zeros([h2_size]), name = "g_b2", dtype=tf.float32)
h2 = tf.nn.relu(tf.matmul(h1,w2)+b2)
w3 = tf.Variable(tf.truncated_normal([h2_size, img_size], stddev=0.1), name = "g_w3", dtype=tf.float32)
b3 = tf.Variable(tf.zeros([img_size]), name = "g_b3", dtype=tf.float32)
h3 = tf.matmul(h2,w3) + b3
x_generated = tf.nn.tanh(h3)
g_params = [w1, b1, w2, b2, w3, b3]
return x_generated, g_params
# Discriminator (D)
def discriminator(x_data, x_generated, keep_prob):
x_in = tf.concat([x_data, x_generated],0)
w1 = tf.Variable(tf.truncated_normal([img_size, h2_size], stddev=0.1), name = "d_w1", dtype=tf.float32)
b1 = tf.Variable(tf.zeros([h2_size]), name = "d_b1", dtype=tf.float32)
h1 = tf.nn.dropout(tf.nn.relu(tf.matmul(x_in,w1)+b1), keep_prob=keep_prob)
w2 = tf.Variable(tf.truncated_normal([h2_size, h1_size], stddev=0.1), name = "d_w2", dtype=tf.float32)
b2 = tf.Variable(tf.zeros([h1_size]), name = "d_b2", dtype=tf.float32)
h2 = tf.nn.dropout(tf.nn.relu(tf.matmul(h1,w2)+b2), keep_prob=keep_prob)
w3 = tf.Variable(tf.truncated_normal([h1_size, 1], stddev=0.1), name = "d_w3", dtype=tf.float32)
b3 = tf.Variable(tf.zeros([1]), name = "d_b3", dtype=tf.float32)
h3 = tf.matmul(h2,w3) + b3
y_data = tf.nn.sigmoid(tf.slice(h3, [0,0], [batch_size, -1], name=None))
y_generated = tf.nn.sigmoid(tf.slice(h3, [batch_size, 0], [-1,-1], name=None))
d_params = [w1, b1, w2, b2, w3, b3]
return y_data, y_generated, d_params
def show_result(batch_res, fname, grid_size=(8, 8), grid_pad=5):
batch_res = 0.5 * batch_res.reshape((batch_res.shape[0], img_height, img_width)) + 0.5
img_h, img_w = batch_res.shape[1], batch_res.shape[2]
grid_h = img_h * grid_size[0] + grid_pad * (grid_size[0] - 1)
grid_w = img_w * grid_size[1] + grid_pad * (grid_size[1] - 1)
img_grid = np.zeros((grid_h, grid_w), dtype=np.uint8)
for i, res in enumerate(batch_res):
if i >= grid_size[0] * grid_size[1]:
break
img = (res) * 255.
img = img.astype(np.uint8)
row = (i // grid_size[0]) * (img_h + grid_pad)
col = (i % grid_size[1]) * (img_w + grid_pad)
img_grid[row:row + img_h, col:col + img_w] = img
imsave(fname, img_grid)
def train():
global_step = tf.Variable(0, name="global_step", trainable=False)
train_x, train_y = load_mnist("fashion_mnist", kind='train')
size = train_x.shape[0]
x_data = tf.placeholder(tf.float32, [batch_size, img_size], name = "x_data")
z = tf.placeholder(tf.float32, [batch_size, z_size], name = 'z')
keep_prob = tf.placeholder(tf.float32, name = 'keep_prob')
x_generated, g_params = generator(z)
y_data, y_generated, d_params = discriminator(x_data, x_generated, keep_prob)
d_loss = - (tf.log(y_data) + tf.log(1 - y_generated))
g_loss = - tf.log(y_generated)
optimizer = tf.train.AdamOptimizer(0.0001)
d_trainer = optimizer.minimize(d_loss, var_list=d_params)
g_trainer = optimizer.minimize(g_loss, var_list=g_params)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
sess = tf.Session()
sess.run(init)
if to_restore:
chkpt_fname = tf.train.latest_checkpoint(output_path)
saver.restore(sess, chkpt_fname)
else:
if os.path.exists(output_path):
shutil.rmtree(output_path)
os.mkdir(output_path)
z_sample_val = np.random.normal(0, 1, size=(batch_size, z_size)).astype(np.float32)
for i in range(sess.run(global_step), max_epoch):
if i % 50 == 0:
print("epoch:%s" % (i))
for j in range(21870 // batch_size):
batch_end = j * batch_size + batch_size
if batch_end >= size:
batch_end = size - 1
x_value = train_x[ j * batch_size : batch_end ]
x_value = x_value / 255.
x_value = 2 * x_value - 1
z_value = np.random.normal(0, 1, size=(batch_size, z_size)).astype(np.float32)
sess.run(d_trainer,
feed_dict={x_data: x_value, z: z_value, keep_prob: np.sum(0.7).astype(np.float32)})
if j % 1 == 0:
sess.run(g_trainer,
feed_dict={x_data: x_value, z: z_value, keep_prob: np.sum(0.7).astype(np.float32)})
x_gen_val = sess.run(x_generated, feed_dict={z: z_sample_val})
if i % 10 == 0 or i == max_epoch-1:
show_result(x_gen_val, os.path.join(output_path, "sample%s.jpg" % i))
sess.run(tf.assign(global_step, i + 1))
train()
```
# NAACL'21 DLG4NLP Tutorial Demo: Text Classification
In this tutorial demo, we will use the Graph4NLP library to build a GNN-based text classification model. The model consists of
- graph construction module (e.g., dependency based static graph)
- graph embedding module (e.g., Bi-Fuse GraphSAGE)
- prediction module (e.g., graph pooling + MLP classifier)
We will use the built-in module APIs to build the model, and evaluate it on the TREC dataset.
### Environment setup
1. Create virtual environment
```
conda create --name graph4nlp python=3.7
conda activate graph4nlp
```
2. Install [graph4nlp](https://github.com/graph4ai/graph4nlp) library
- Clone the github repo
```
git clone -b stable https://github.com/graph4ai/graph4nlp.git
cd graph4nlp
```
- Then run `./configure` (or `./configure.bat` if you are using Windows 10) to configure your installation. The configuration program will ask you to specify your CUDA version. If you do not have a GPU, please choose 'cpu'.
```
./configure
```
- Finally, install the package
```
python setup.py install
```
3. Set up StanfordCoreNLP (for static graph construction only, unnecessary for this demo because preprocessed data is provided)
- Download [StanfordCoreNLP](https://stanfordnlp.github.io/CoreNLP/)
- Go to the root folder and start the server
```
java -mx4g -cp "*" edu.stanford.nlp.pipeline.StanfordCoreNLPServer -port 9000 -timeout 15000
```
```
import os
import time
import datetime
import yaml
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
import torch.optim as optim
from torch.optim.lr_scheduler import ReduceLROnPlateau
import torch.backends.cudnn as cudnn
from graph4nlp.pytorch.datasets.trec import TrecDataset
from graph4nlp.pytorch.modules.graph_construction import *
from graph4nlp.pytorch.modules.graph_construction.embedding_construction import WordEmbedding
from graph4nlp.pytorch.modules.graph_embedding import *
from graph4nlp.pytorch.modules.prediction.classification.graph_classification import FeedForwardNN
from graph4nlp.pytorch.modules.evaluation.base import EvaluationMetricBase
from graph4nlp.pytorch.modules.evaluation.accuracy import Accuracy
from graph4nlp.pytorch.modules.utils.generic_utils import EarlyStopping
from graph4nlp.pytorch.modules.loss.general_loss import GeneralLoss
from graph4nlp.pytorch.modules.utils.logger import Logger
from graph4nlp.pytorch.modules.utils import constants as Constants
class TextClassifier(nn.Module):
def __init__(self, vocab, config):
super(TextClassifier, self).__init__()
self.config = config
self.vocab = vocab
embedding_style = {'single_token_item': True if config['graph_type'] != 'ie' else False,
'emb_strategy': config.get('emb_strategy', 'w2v_bilstm'),
'num_rnn_layers': 1,
'bert_model_name': config.get('bert_model_name', 'bert-base-uncased'),
'bert_lower_case': True
}
assert not (config['graph_type'] in ('node_emb', 'node_emb_refined') and config['gnn'] == 'gat'), \
'dynamic graph construction does not support GAT'
use_edge_weight = False
if config['graph_type'] == 'dependency':
self.graph_topology = DependencyBasedGraphConstruction(
embedding_style=embedding_style,
vocab=vocab.in_word_vocab,
hidden_size=config['num_hidden'],
word_dropout=config['word_dropout'],
rnn_dropout=config['rnn_dropout'],
fix_word_emb=not config['no_fix_word_emb'],
fix_bert_emb=not config.get('no_fix_bert_emb', False))
elif config['graph_type'] == 'constituency':
self.graph_topology = ConstituencyBasedGraphConstruction(
embedding_style=embedding_style,
vocab=vocab.in_word_vocab,
hidden_size=config['num_hidden'],
word_dropout=config['word_dropout'],
rnn_dropout=config['rnn_dropout'],
fix_word_emb=not config['no_fix_word_emb'],
fix_bert_emb=not config.get('no_fix_bert_emb', False))
elif config['graph_type'] == 'ie':
self.graph_topology = IEBasedGraphConstruction(
embedding_style=embedding_style,
vocab=vocab.in_word_vocab,
hidden_size=config['num_hidden'],
word_dropout=config['word_dropout'],
rnn_dropout=config['rnn_dropout'],
fix_word_emb=not config['no_fix_word_emb'],
fix_bert_emb=not config.get('no_fix_bert_emb', False))
elif config['graph_type'] == 'node_emb':
self.graph_topology = NodeEmbeddingBasedGraphConstruction(
vocab.in_word_vocab,
embedding_style,
sim_metric_type=config['gl_metric_type'],
num_heads=config['gl_num_heads'],
top_k_neigh=config['gl_top_k'],
epsilon_neigh=config['gl_epsilon'],
smoothness_ratio=config['gl_smoothness_ratio'],
connectivity_ratio=config['gl_connectivity_ratio'],
sparsity_ratio=config['gl_sparsity_ratio'],
input_size=config['num_hidden'],
hidden_size=config['gl_num_hidden'],
fix_word_emb=not config['no_fix_word_emb'],
fix_bert_emb=not config.get('no_fix_bert_emb', False),
word_dropout=config['word_dropout'],
rnn_dropout=config['rnn_dropout'])
use_edge_weight = True
elif config['graph_type'] == 'node_emb_refined':
self.graph_topology = NodeEmbeddingBasedRefinedGraphConstruction(
vocab.in_word_vocab,
embedding_style,
config['init_adj_alpha'],
sim_metric_type=config['gl_metric_type'],
num_heads=config['gl_num_heads'],
top_k_neigh=config['gl_top_k'],
epsilon_neigh=config['gl_epsilon'],
smoothness_ratio=config['gl_smoothness_ratio'],
connectivity_ratio=config['gl_connectivity_ratio'],
sparsity_ratio=config['gl_sparsity_ratio'],
input_size=config['num_hidden'],
hidden_size=config['gl_num_hidden'],
fix_word_emb=not config['no_fix_word_emb'],
fix_bert_emb=not config.get('no_fix_bert_emb', False),
word_dropout=config['word_dropout'],
rnn_dropout=config['rnn_dropout'])
use_edge_weight = True
else:
raise RuntimeError('Unknown graph_type: {}'.format(config['graph_type']))
if 'w2v' in self.graph_topology.embedding_layer.word_emb_layers:
self.word_emb = self.graph_topology.embedding_layer.word_emb_layers['w2v'].word_emb_layer
else:
self.word_emb = WordEmbedding(
self.vocab.in_word_vocab.embeddings.shape[0],
self.vocab.in_word_vocab.embeddings.shape[1],
pretrained_word_emb=self.vocab.in_word_vocab.embeddings,
fix_emb=not config['no_fix_word_emb'],
device=config['device']).word_emb_layer
if config['gnn'] == 'gat':
heads = [config['gat_num_heads']] * (config['gnn_num_layers'] - 1) + [config['gat_num_out_heads']]
self.gnn = GAT(config['gnn_num_layers'],
config['num_hidden'],
config['num_hidden'],
config['num_hidden'],
heads,
direction_option=config['gnn_direction_option'],
feat_drop=config['gnn_dropout'],
attn_drop=config['gat_attn_dropout'],
negative_slope=config['gat_negative_slope'],
residual=config['gat_residual'],
activation=F.elu)
elif config['gnn'] == 'graphsage':
self.gnn = GraphSAGE(config['gnn_num_layers'],
config['num_hidden'],
config['num_hidden'],
config['num_hidden'],
config['graphsage_aggreagte_type'],
direction_option=config['gnn_direction_option'],
feat_drop=config['gnn_dropout'],
bias=True,
norm=None,
activation=F.relu,
use_edge_weight=use_edge_weight)
elif config['gnn'] == 'ggnn':
self.gnn = GGNN(config['gnn_num_layers'],
config['num_hidden'],
config['num_hidden'],
config['num_hidden'],
feat_drop=config['gnn_dropout'],
direction_option=config['gnn_direction_option'],
bias=True,
use_edge_weight=use_edge_weight)
else:
raise RuntimeError('Unknown gnn type: {}'.format(config['gnn']))
self.clf = FeedForwardNN(2 * config['num_hidden'] \
if config['gnn_direction_option'] == 'bi_sep' \
else config['num_hidden'],
config['num_classes'],
[config['num_hidden']],
graph_pool_type=config['graph_pooling'],
dim=config['num_hidden'],
use_linear_proj=config['max_pool_linear_proj'])
self.loss = GeneralLoss('CrossEntropy')
def forward(self, graph_list, tgt=None, require_loss=True):
# build graph topology
batch_gd = self.graph_topology(graph_list)
# run GNN encoder
self.gnn(batch_gd)
# run graph classifier
self.clf(batch_gd)
logits = batch_gd.graph_attributes['logits']
if require_loss:
loss = self.loss(logits, tgt)
return logits, loss
else:
return logits
class ModelHandler:
def __init__(self, config):
super(ModelHandler, self).__init__()
self.config = config
self.logger = Logger(self.config['out_dir'], config={k:v for k, v in self.config.items() if k != 'device'}, overwrite=True)
self.logger.write(self.config['out_dir'])
self._build_device()
self._build_dataloader()
self._build_model()
self._build_optimizer()
self._build_evaluation()
def _build_device(self):
if not self.config['no_cuda'] and torch.cuda.is_available():
print('[ Using CUDA ]')
self.config['device'] = torch.device('cuda' if self.config['gpu'] < 0 else 'cuda:%d' % self.config['gpu'])
torch.cuda.manual_seed(self.config['seed'])
torch.cuda.manual_seed_all(self.config['seed'])
torch.backends.cudnn.deterministic = True
cudnn.benchmark = False
else:
self.config['device'] = torch.device('cpu')
def _build_dataloader(self):
dynamic_init_topology_builder = None
if self.config['graph_type'] == 'dependency':
topology_builder = DependencyBasedGraphConstruction
graph_type = 'static'
merge_strategy = 'tailhead'
elif self.config['graph_type'] == 'constituency':
topology_builder = ConstituencyBasedGraphConstruction
graph_type = 'static'
merge_strategy = 'tailhead'
elif self.config['graph_type'] == 'ie':
topology_builder = IEBasedGraphConstruction
graph_type = 'static'
merge_strategy = 'global'
elif self.config['graph_type'] == 'node_emb':
topology_builder = NodeEmbeddingBasedGraphConstruction
graph_type = 'dynamic'
merge_strategy = None
elif self.config['graph_type'] == 'node_emb_refined':
topology_builder = NodeEmbeddingBasedRefinedGraphConstruction
graph_type = 'dynamic'
merge_strategy = 'tailhead'
if self.config['init_graph_type'] == 'line':
dynamic_init_topology_builder = None
elif self.config['init_graph_type'] == 'dependency':
dynamic_init_topology_builder = DependencyBasedGraphConstruction
elif self.config['init_graph_type'] == 'constituency':
dynamic_init_topology_builder = ConstituencyBasedGraphConstruction
elif self.config['init_graph_type'] == 'ie':
merge_strategy = 'global'
dynamic_init_topology_builder = IEBasedGraphConstruction
else:
raise RuntimeError('Define your own dynamic_init_topology_builder')
else:
raise RuntimeError('Unknown graph_type: {}'.format(self.config['graph_type']))
topology_subdir = '{}_graph'.format(self.config['graph_type'])
if self.config['graph_type'] == 'node_emb_refined':
topology_subdir += '_{}'.format(self.config['init_graph_type'])
dataset = TrecDataset(root_dir=self.config.get('root_dir', self.config['root_data_dir']),
pretrained_word_emb_name=self.config.get('pretrained_word_emb_name', "840B"),
merge_strategy=merge_strategy,
seed=self.config['seed'],
thread_number=4,
port=9000,
timeout=15000,
word_emb_size=300,
graph_type=graph_type,
topology_builder=topology_builder,
topology_subdir=topology_subdir,
dynamic_graph_type=self.config['graph_type'] if \
self.config['graph_type'] in ('node_emb', 'node_emb_refined') else None,
dynamic_init_topology_builder=dynamic_init_topology_builder,
dynamic_init_topology_aux_args={'dummy_param': 0})
self.train_dataloader = DataLoader(dataset.train, batch_size=self.config['batch_size'], shuffle=True,
num_workers=self.config['num_workers'],
collate_fn=dataset.collate_fn)
if hasattr(dataset, 'val')==False:
dataset.val = dataset.test
self.val_dataloader = DataLoader(dataset.val, batch_size=self.config['batch_size'], shuffle=False,
num_workers=self.config['num_workers'],
collate_fn=dataset.collate_fn)
self.test_dataloader = DataLoader(dataset.test, batch_size=self.config['batch_size'], shuffle=False,
num_workers=self.config['num_workers'],
collate_fn=dataset.collate_fn)
self.vocab = dataset.vocab_model
self.config['num_classes'] = dataset.num_classes
self.num_train = len(dataset.train)
self.num_val = len(dataset.val)
self.num_test = len(dataset.test)
print('Train size: {}, Val size: {}, Test size: {}'
.format(self.num_train, self.num_val, self.num_test))
self.logger.write('Train size: {}, Val size: {}, Test size: {}'
.format(self.num_train, self.num_val, self.num_test))
def _build_model(self):
self.model = TextClassifier(self.vocab, self.config).to(self.config['device'])
def _build_optimizer(self):
parameters = [p for p in self.model.parameters() if p.requires_grad]
self.optimizer = optim.Adam(parameters, lr=self.config['lr'])
self.stopper = EarlyStopping(os.path.join(self.config['out_dir'], Constants._SAVED_WEIGHTS_FILE), patience=self.config['patience'])
self.scheduler = ReduceLROnPlateau(self.optimizer, mode='max', factor=self.config['lr_reduce_factor'], \
patience=self.config['lr_patience'], verbose=True)
def _build_evaluation(self):
self.metric = Accuracy(['accuracy'])
def train(self):
dur = []
for epoch in range(self.config['epochs']):
self.model.train()
train_loss = []
train_acc = []
t0 = time.time()
for i, data in enumerate(self.train_dataloader):
tgt = data['tgt_tensor'].to(self.config['device'])
data['graph_data'] = data['graph_data'].to(self.config['device'])
logits, loss = self.model(data['graph_data'], tgt, require_loss=True)
# add graph regularization loss if available
if data['graph_data'].graph_attributes.get('graph_reg', None) is not None:
loss = loss + data['graph_data'].graph_attributes['graph_reg']
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
train_loss.append(loss.item())
pred = torch.max(logits, dim=-1)[1].cpu()
train_acc.append(self.metric.calculate_scores(ground_truth=tgt.cpu(), predict=pred.cpu(), zero_division=0)[0])
dur.append(time.time() - t0)
val_acc = self.evaluate(self.val_dataloader)
self.scheduler.step(val_acc)
print('Epoch: [{} / {}] | Time: {:.2f}s | Loss: {:.4f} | Train Acc: {:.4f} | Val Acc: {:.4f}'.
format(epoch + 1, self.config['epochs'], np.mean(dur), np.mean(train_loss), np.mean(train_acc), val_acc))
self.logger.write('Epoch: [{} / {}] | Time: {:.2f}s | Loss: {:.4f} | Train Acc: {:.4f} | Val Acc: {:.4f}'.
format(epoch + 1, self.config['epochs'], np.mean(dur), np.mean(train_loss), np.mean(train_acc), val_acc))
if self.stopper.step(val_acc, self.model):
break
return self.stopper.best_score
def evaluate(self, dataloader):
self.model.eval()
with torch.no_grad():
pred_collect = []
gt_collect = []
for i, data in enumerate(dataloader):
tgt = data['tgt_tensor'].to(self.config['device'])
data['graph_data'] = data['graph_data'].to(self.config["device"])
logits = self.model(data['graph_data'], require_loss=False)
pred_collect.append(logits)
gt_collect.append(tgt)
pred_collect = torch.max(torch.cat(pred_collect, 0), dim=-1)[1].cpu()
gt_collect = torch.cat(gt_collect, 0).cpu()
score = self.metric.calculate_scores(ground_truth=gt_collect, predict=pred_collect, zero_division=0)[0]
return score
def test(self):
# restored best saved model
self.stopper.load_checkpoint(self.model)
t0 = time.time()
acc = self.evaluate(self.test_dataloader)
dur = time.time() - t0
print('Test examples: {} | Time: {:.2f}s | Test Acc: {:.4f}'.
format(self.num_test, dur, acc))
self.logger.write('Test examples: {} | Time: {:.2f}s | Test Acc: {:.4f}'.
format(self.num_test, dur, acc))
return acc
def print_config(config):
print('**************** MODEL CONFIGURATION ****************')
for key in sorted(config.keys()):
val = config[key]
keystr = '{}'.format(key) + (' ' * (24 - len(key)))
print('{} --> {}'.format(keystr, val))
print('**************** MODEL CONFIGURATION ****************')
# config setup
config_file = '../config/trec/graphsage_bi_fuse_static_dependency.yaml'
config = yaml.load(open(config_file, 'r'), Loader=yaml.FullLoader)
print_config(config)
# run model
np.random.seed(config['seed'])
torch.manual_seed(config['seed'])
ts = datetime.datetime.now().timestamp()
config['out_dir'] += '_{}'.format(ts)
print('\n' + config['out_dir'])
runner = ModelHandler(config)
t0 = time.time()
val_acc = runner.train()
test_acc = runner.test()
runtime = time.time() - t0
print('Total runtime: {:.2f}s'.format(runtime))
runner.logger.write('Total runtime: {:.2f}s\n'.format(runtime))
runner.logger.close()
print('val acc: {}, test acc: {}'.format(val_acc, test_acc))
```
## Hemoglobin and Transfusion Extraction for RDD Projects
## C.V. Cosgriff, MD, MPH
### MIT Critical Data
## 0 - Environment Setup
```
import numpy as np
import pandas as pd
import qgrid as qg
import psycopg2
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
dbname = 'eicu'
schema_name = 'eicu_crd'
query_schema = 'SET search_path TO ' + schema_name + ';'
con = psycopg2.connect(dbname=dbname)
```
## 1 - Extract Hemoglobin and Transfusion Events
```
hb_query = query_schema + '''
SELECT l.patientunitstayid, l.labresult AS hb, l.labresultoffset
FROM lab l
WHERE l.labname = 'Hgb'
ORDER BY l.patientunitstayid, l.labresultoffset ASC;
'''
hb_df = pd.read_sql_query(hb_query, con)
print(hb_df.shape)
hb_df.head()
```
Next, we grab all transfusions of pRBCs.
```
prbc_query = query_schema + '''
SELECT t.patientunitstayid, t.treatmentoffset
FROM treatment t
WHERE LOWER(treatmentstring) LIKE '%prbc%'
ORDER BY t.patientunitstayid, t.treatmentoffset ASC;
'''
prbc_df = pd.read_sql_query(prbc_query, con)
print(prbc_df.shape)
prbc_df.head()
```
How we combine these data is one of the fundamental challenges of this work.
## 2 - Defining the Exposure
In clinical practice, hemoglobin values are checked both routinely and when there is concern for an active bleed or another process that consumes red cells, such as hemolysis. This work will examine the often-used threshold of $7~g/dL$ that has become standard practice since the TRICC trial, although there are various populations to which this cutoff does not apply. To address this we need an appropriate definition of _transfused_ that reflects whether a transfusion decision occurred as a result of an index hemoglobin level.
For example, if a hemoglobin of 7.4 is measured in the morning, and the plan is to repeat the measurement at 3PM and transfuse if the value has dropped below 7, then we would expect to find a second hemoglobin level in the data. If that subsequent value is below 7 and there is a temporally associated transfusion, then the transfusion goal of 7 was respected. The challenge when extracting the data is therefore which hemoglobin to extract for each patient. If we use the first, in this example 7.4, and then detect a transfusion on the same day in the data, we capture the wrong scenario. If we use the minimum, say 6.7 in this scenario, we may capture this episode correctly, but consider the following: what if the transfusion occurred before the lowest value, so that the threshold of 7 was not actually followed?
At the same time, we cannot simply look back at the preceding hemoglobin level for each transfusion, because we would then be conditioning on receiving treatment and would not correctly capture those who were never transfused.
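One illustrative option (not the final definition used here, and subject to exactly the conditioning caveat above) is to pair each transfusion with the most recent hemoglobin drawn before it within a fixed lookback window; the window length below is an arbitrary assumption:

```
# Hypothetical sketch: attach to each pRBC order the closest preceding Hgb
# within 12 hours, per ICU stay. Column names follow hb_df / prbc_df above.
paired = pd.merge_asof(
    prbc_df.sort_values('treatmentoffset'),
    hb_df.sort_values('labresultoffset'),
    left_on='treatmentoffset',
    right_on='labresultoffset',
    by='patientunitstayid',
    direction='backward',
    tolerance=12 * 60,  # offsets are recorded in minutes
)
```

Because this starts from the transfusion events, it cannot by itself characterize patients who were never transfused; it is only a building block for exploring the data.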
Before continuing, let's look at some example data.
```
test_hb = hb_df.loc[hb_df.patientunitstayid == 242816, :]
test_tf = prbc_df.loc[prbc_df.patientunitstayid == 242816, :]
test_hb
test_tf
```
This patient was transfused multiple times.
At 91 minutes before ICU admission they had a hemoglobin level of 5.3, and we have no data to suggest they were transfused, although it may be that treatments given before the ICU are not captured.
At 344 minutes from ICU admission the level is 6. The next recorded level is 6.3 at 629 minutes from ICU admission. In between these two time points are three orders corresponding to transfusion. __The patient appears not to be responding to transfusion.__
Another transfusion is noted at 654 minutes from ICU admission, soon after the prior hemoglobin of 6.3 at 629 minutes from admission. The next recorded hemoglobin level is 7 at 945 minutes from ICU admission.
Between times 945 and 1230 another two transfusion events occur, at 1223 and 1226; this is odd because they are much later than the 945 CBC and precede the next CBC by only 7 minutes. One might surmise that the timestamps are a bit off and that the 1230 CBC is a post-transfusion CBC, or that a transfusion was ordered to be given conditionally on the level of the CBC.
At 1599 we see one more transfusion, far after the 1230 CBC, and the next Hb we get, at 2344, is 7.2. However, looking forward we see that the next recorded Hb level goes up to 8.3 at 3113 minutes from admission and then stays stable.
The bigger question may be: __were hemoglobin levels driving any of the decision making at all?__ Let's look at why this patient was admitted in the first place.
```
query = query_schema + '''
SELECT * FROM patient WHERE patientunitstayid = 242816;
'''
pd.read_sql_query(query, con).T
```
This patient was admitted for an acute GI bleed, and was likely transfused as he was actively hemorrhaging. This would explain the timestamp discrepancy.
We will likely have to exclude patients who are actively hemorrhaging. Let's take a step back and see which patients got transfused.
```
query = query_schema + '''
WITH prbc AS (
SELECT t.patientunitstayid
FROM treatment t
WHERE LOWER(treatmentstring) LIKE '%prbc%'
)
SELECT * FROM patient p
INNER JOIN prbc
ON p.patientunitstayid = prbc.patientunitstayid;
'''
transfused_patients = pd.read_sql_query(query, con)
print(transfused_patients.shape)
display(transfused_patients.head())
```
We can then ask how many unique diagnoses there are.
```
admit_dx = pd.Categorical(transfused_patients.apacheadmissiondx)
unique_dx = admit_dx.value_counts()
unique_dx
```
There are 296; let's limit ourselves to the common ones for now.
```
unique_dx[unique_dx > 100]
```
We can plot this data.
```
unique_dx[unique_dx > 100].plot(kind='pie', figsize=(10,10))
```
A large number of patients who receive a transfusion come in for an acute GI bleed. I expect many of these patients will have a similar pattern of transfusion to the patient example above. Let's try a sepsis patient instead.
```
transfused_patients.loc[transfused_patients.apacheadmissiondx == 'Sepsis, pulmonary'].head()
test_hb = hb_df.loc[hb_df.patientunitstayid == 245240, :]
test_tf = prbc_df.loc[prbc_df.patientunitstayid == 245240, :]
test_hb
test_tf
```
This data is again very confusing. Before admission to the ICU this patient had normal hemoglobin levels. The first CBC in the ICU occurs ~7 hours into the stay and the patient has a hemoglobin of 7.7. A repeat is 8.5, and then there is a transfusion before the next recorded hemoglobin, which returns as 8.4. The hemoglobin trends down from 8.4 to 7.5 with repeated sampling, and then there is another transfusion before the final recorded value of 7.1. In both cases the transfusion is given above the threshold and a decrease in hemoglobin level follows.
For a last exploratory example, let's look at anemia.
```
transfused_patients.loc[transfused_patients.apacheadmissiondx == 'Anemia'].head()
test_hb = hb_df.loc[hb_df.patientunitstayid == 254027, :]
test_tf = prbc_df.loc[prbc_df.patientunitstayid == 254027, :]
test_hb
test_tf
```
This patient's admission diagnosis is _anemia_ and her initial hemoglobin ranges from 11.5 to 8.6 ahead of ICU entry. Her first Hb level in the ICU is 11.3. Between that level and her next level of 10.6 she is transfused. Again, this doesn't really make sense.
The most likely clinical trajectory is that she had a Hb level of 8.6 and was transfused ahead of ICU transfer.
Similarly, at time 545 her hemoglobin is 10.6 and at time 1086 it is 9.7. However, there is a transfusion at time 577.
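Given these examples, a first pass at the exclusion suggested earlier (dropping transfused admissions that look like active hemorrhage) might resemble the sketch below. The diagnosis substrings are guesses for illustration only and would need to be validated against the actual `apacheadmissiondx` values seen in `unique_dx`:

```
# Hypothetical first-pass exclusion of likely active bleeders among
# transfused admissions; the pattern below is an assumption, not a
# validated mapping of eICU admission diagnoses.
bleeding_pattern = 'bleed|hemorrhage|varice'
actively_bleeding = transfused_patients['apacheadmissiondx'] \
    .str.contains(bleeding_pattern, case=False, na=False)
transfused_no_bleed = transfused_patients.loc[~actively_bleeding]
print(transfused_patients.shape, transfused_no_bleed.shape)
```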
```
import numpy as np
import math
from scipy.stats import skew, kurtosis, kurtosistest
import matplotlib.pyplot as plt
from scipy.stats import norm, t
import FinanceDataReader as fdr
ticker = '005930' # Samsung Electronics in KRX
rtn = fdr.DataReader(ticker, '2015-01-01', '2017-12-31')['Change']
mu_gaussian, sig_gaussian = norm.fit(rtn)
dx = 0.0001 # resolution
x = np.arange(-0.1, 0.1, dx)
# Gaussian fitting
gaussian_pdf = norm.pdf(x, mu_gaussian, sig_gaussian)
print("Gaussian mean = %.5f" % mu_gaussian)
print("Gaussian std = %.5f" % sig_gaussian)
print()
# Student t fitting
parm = t.fit(rtn)
nu, mu_t, sig_t = parm
nu = np.round(nu)
t_pdf = t.pdf(x, nu, mu_t, sig_t)
print("nu = %.2f" % nu)
print("Student t mean = %.5f" % mu_t)
print("Student t std = %.5f" % sig_t)
# Compute VaRs and CVaRs
h = 1
alpha = 0.01 # significance level
lev = 100*(1-alpha)
xanu = t.ppf(alpha, nu)
CVaR_n = alpha**-1 * norm.pdf(norm.ppf(alpha))*sig_gaussian - mu_gaussian
VaR_n = norm.ppf(1-alpha)*sig_gaussian - mu_gaussian
VaR_t = np.sqrt((nu-2)/nu) * t.ppf(1-alpha, nu)*sig_gaussian - h*mu_gaussian
CVaR_t = -1/alpha * (1-nu)**(-1) * (nu-2+xanu**2) * \
t.pdf(xanu, nu)*sig_gaussian - h*mu_gaussian
print("%g%% %g-day Gaussian VaR = %.2f%%" % (lev, h, VaR_n*100))
print("%g%% %g-day Gaussian CVaR = %.2f%%" % (lev, h, CVaR_n*100))
print("%g%% %g-day Student t VaR = %.2f%%" % (lev, h, VaR_t *100))
print("%g%% %g-day Student t CVaR = %.2f%%" % (lev, h, CVaR_t*100))
plt.figure(num=1, figsize=(11, 6))
grey = .77, .77, .77
# main figure
plt.hist(rtn, bins=50, density=True, color=grey, edgecolor='none')
plt.axis("tight")
plt.plot(x, gaussian_pdf, 'b', label="Gaussian PDF")
plt.axis("tight")
plt.plot(x, t_pdf, 'g', label="Student t PDF")
plt.xlim([-0.2, 0.1])
plt.ylim([0, 50])
plt.legend(loc="best")
plt.xlabel("Daily Returns of Samsung Electronics")
plt.ylabel("Normalized Return Distribution")
# inset
a = plt.axes([.22, .35, .3, .4])
plt.hist(rtn, bins=50, density=True, color=grey, edgecolor='none')
plt.plot(x, gaussian_pdf, 'b')
plt.plot(x, t_pdf, 'g')
# Student VaR line
plt.plot([-CVaR_t, -CVaR_t], [0, 3], c='r')
# Gaussian VaR line
plt.plot([-CVaR_n, -CVaR_n], [0, 4], c='b')
plt.text(-CVaR_n-0.015, 4.1, "Gaussian CVaR", color='b')
plt.text(-CVaR_t-0.0171, 3.1, "Student t CVaR", color='r')
plt.xlim([-0.09, -0.02])
plt.ylim([0, 5])
plt.show()
```
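For reference, the closed-form expressions the cell above implements (with $\alpha$ the significance level, $\mu$ and $\sigma$ the Gaussian-fitted mean and standard deviation as used in the code, $\nu$ the fitted degrees of freedom, and $x_\alpha = t_\nu^{-1}(\alpha)$) are:

$$\mathrm{VaR}^{\mathrm{Gauss}}_{\alpha} = \sigma\,\Phi^{-1}(1-\alpha) - \mu, \qquad \mathrm{CVaR}^{\mathrm{Gauss}}_{\alpha} = \frac{\sigma}{\alpha}\,\phi\!\left(\Phi^{-1}(\alpha)\right) - \mu$$

$$\mathrm{VaR}^{t}_{\alpha} = \sigma\sqrt{\tfrac{\nu-2}{\nu}}\;t_{\nu}^{-1}(1-\alpha) - \mu, \qquad \mathrm{CVaR}^{t}_{\alpha} = -\frac{\sigma}{\alpha}\,\frac{\nu-2+x_{\alpha}^{2}}{1-\nu}\,f_{\nu}(x_{\alpha}) - \mu$$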
Reference: http://www.quantatrisk.com/2016/12/08/conditional-value-at-risk-normal-student-t-var-model-python/
```
import sqlite3
import pandas as pd
from urlparse import urlparse
import hashlib
from PIL import Image
import os
import re
from __future__ import unicode_literals
from nltk.stem.porter import PorterStemmer
import nltk
import string
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import euclidean_distances
import fastcluster
from scipy.spatial.distance import squareform
import matplotlib.pyplot as plt
from scipy.cluster.hierarchy import fcluster, dendrogram
from collections import defaultdict
import ast
from scipy.sparse import coo_matrix, hstack
import numpy as np
from sklearn.preprocessing import MinMaxScaler
def print_full(x):
pd.set_option('display.max_rows', len(x))
print(x)
pd.reset_option('display.max_rows')
```
## Read data about the sites
```
con = sqlite3.connect('2018-12-03_segmentation_pilot/2018-12-03_segmentation_pilot.sqlite')
sites = pd.read_sql_query('''SELECT * from site_visits''', con)
sites.shape
list(sites.columns.values)
```
## Read data about the segments
```
segments = pd.read_sql_query('''SELECT * from segments''', con)
segments.shape
list(segments.columns.values)
```
For this analysis, only consider those segments with text.
```
segments = segments.loc[segments['inner_text'] != '']
```
Let's also ignore all segments with body tags
```
segments = segments.loc[segments['node_name'] != 'BODY']
segments.shape
```
Let's perform some pre-processing. First, let's replace all numbers with a `DPNUM` placeholder.
```
segments['inner_text_processed'] = segments['inner_text'].str.replace(r'\d+', 'DPNUM')
segments['longest_text_processed'] = segments['longest_text'].str.replace(r'\d+', 'DPNUM')
```
Next let's consider the individual nodes, particularly those that were updated
```
def handle_node_update(gdata):
return gdata.drop_duplicates(subset=['inner_text_processed', 'longest_text_processed'], keep='last')
segments = segments.groupby(['visit_id']).apply(handle_node_update)
list(segments.columns.values)
segments.shape
```
## Join the two dataframes
```
dataset = segments.set_index('visit_id').join(sites.set_index('visit_id'), lsuffix='_1', rsuffix='_2')
dataset.shape
list(dataset.columns.values)
```
Let's get the visit_id back
```
dataset = dataset.reset_index()
list(dataset.columns.values)
```
Let's tokenize inner_text first:
```
stemmer = PorterStemmer()
def tokenize(line):
if (line is None):
line = ''
printable = set(string.printable)
line = ''.join(filter(lambda x: x in printable, line))
tokens = nltk.word_tokenize(line)
tokens = [f for f in tokens if f != '']
tokens = [stemmer.stem(f) for f in tokens]
return tokens
countVec = CountVectorizer(tokenizer=tokenize, binary=True).fit(dataset['inner_text_processed'])
```
What is the length of the vocabulary?
```
len(countVec.vocabulary_)
```
Let's create the bag of words representation.
```
lineVec = countVec.transform(dataset['inner_text_processed'])
lineVec.shape
```
Next, scale the remaining numeric columns so they can be combined with the bag-of-words features:
```
scaler = MinMaxScaler()
cols = dataset[['num_buttons', 'num_imgs', 'num_anchors', 'top', 'left']]
cols = scaler.fit_transform(cols)
cols
```
Add these columns in
```
lineVec = hstack((lineVec, cols))
```
Shape of the vector?
```
lineVec.shape
```
Let's compute the euclidean distance
```
dist = euclidean_distances(lineVec)
```
Next, let's convert this to condensed vector form, as the linkage method below requires that format.
```
distVec = squareform(dist, checks = False)
res = fastcluster.linkage(distVec, method = 'ward', preserve_input = False)
dataset['cluster'] = fcluster(res, 5, criterion='distance')
dataset.to_csv('clusters.csv', encoding='utf-8', columns=['site_url', 'cluster', 'inner_text', 'top', 'left', 'width', 'height', 'time_stamp'], index=False)
dataset.shape
```
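As a quick sanity check on the clustering, one might plot the dendrogram and inspect cluster sizes. The following is a hedged sketch that reuses the `dendrogram` function already imported above; the figure size and the reuse of the distance cutoff 5 are illustrative choices, not part of the original analysis.
```python
# Sketch: visualize the hierarchy and inspect cluster sizes.
# Assumes `res` (linkage matrix) and `dataset` from the cells above.
plt.figure(figsize=(12, 4))
dendrogram(res, no_labels=True, color_threshold=5)
plt.axhline(y=5, color='r', linestyle='--')  # the distance cutoff used in fcluster above
plt.ylabel('Ward linkage distance')
plt.show()

# Number of segments in the ten largest clusters
print(dataset['cluster'].value_counts().head(10))
```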
# Inference for Proportions
$$\newcommand{\ffrac}{\displaystyle \frac}
\newcommand{\Tran}[1]{{#1}^{\mathrm{T}}}
\newcommand{\d}[1]{\displaystyle{#1}}
\newcommand{\EE}[1]{\mathbb{E}\left[#1\right]}
\newcommand{\Var}[1]{\mathrm{Var}\left[#1\right]}
\newcommand{\using}[1]{\stackrel{\mathrm{#1}}{=}}
\newcommand{\I}[1]{\mathrm{I}\left( #1 \right)}
\newcommand{\N}[1]{\mathrm{N} \left( #1 \right)}$$
***
<center> Review </center>
When the sample size is sufficiently large, more specifically when $n p_0 \geq 10$ and $n(1-p_0) \geq 10$, we have the *count* $X$ and the *proportion* $\hat{p}$:
$$X \sim \N{np, \sqrt{np(1-p)}} ,\hat{p} \sim \N { p, \sqrt{\frac{p(1-p)} {n}}}$$
Now we need to estimate the parameter $p$. Similar to the $z\text{-test}$, $\sigma_{\hat{p}} = \sqrt{\ffrac{p(1-p)} {n}}$, and $\text{SE}_{\hat{p}} = \sqrt{\ffrac{\hat{p}\left(1-\hat{p}\right)} {n}}$.
So the margin of error is $m = z^* \times \text{SE}_{\hat{p}}$ and the **confidence interval** is $\hat{p} \pm m = \hat{p} \pm z^* \times \sqrt{\ffrac{\hat{p}\left(1-\hat{p}\right)} {n}}$. Here $z^*$ is the critical value for the standard Normal density curve with area $C$ between $-z^*$ and $z^*$.
***
When these prerequisites don't hold, we apply the plus-four estimate.
The **plus-four estimate** of the proportion is $\tilde{p} = \ffrac{2 + \text{number of successes}} {n+4}$, and the corresponding **plus-four confidence interval** for $p$ is $\tilde{p} \pm z^* \times\sqrt{\ffrac{\tilde{p}\left( 1-\tilde{p} \right)} {n+4}}$
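As a quick numerical illustration of both intervals, here is a hedged sketch; the counts (`x = 67` successes out of `n = 100`) are invented for the example, and `scipy` is assumed to be available.
```python
# Sketch: 95% CI for a proportion, large-sample and plus-four versions.
# The counts below are illustrative only.
from scipy.stats import norm

x, n, C = 67, 100, 0.95
z_star = norm.ppf(1 - (1 - C) / 2)      # critical value with central area C

p_hat = x / n
se = (p_hat * (1 - p_hat) / n) ** 0.5
print("large-sample CI:", (p_hat - z_star * se, p_hat + z_star * se))

p_tilde = (x + 2) / (n + 4)
se4 = (p_tilde * (1 - p_tilde) / (n + 4)) ** 0.5
print("plus-four CI:   ", (p_tilde - z_star * se4, p_tilde + z_star * se4))
```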
## Significance Test for a Proportion
The hypothesis takes the form $H_0: p = p_0$, so that $\text{SE}_{\hat{p}} = \sqrt{\ffrac{p_0 \left( 1 - p_0 \right)} {n}}$ and the test statistic is $z = \ffrac{\hat{p} - p_0} {\text{SE}_{\hat{p}}}$. About the $p\text{-value}$:
$$\begin{array}{cc}
\hline
H_a & p\text{-value} \\ \hline
p > p_0 & P(Z \geq z) \\
p < p_0 & P(Z \leq z) \\
p \neq p_0 & 2\cdot P(Z \geq \left|z\right|) \\ \hline
\end{array}$$
We can also invert the margin-of-error formula to find the sample size that achieves a desired margin $m$:
$$n = \left( \frac{z^*} {m} \right)^2 p^* \left( 1-p^* \right)$$
Here $p^*$ is a guessed value for the sample proportion (choosing $p^* = 0.5$ gives the most conservative, i.e. largest, $n$) if you want the margin to be at most the chosen value.
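A hedged sketch of the test and the sample-size calculation follows; all numbers are invented for illustration and `scipy` is assumed.
```python
# Sketch: one-sample z-test for a proportion, plus the sample size for a target margin.
# x, n, p0 and m are illustrative values.
import math
from scipy.stats import norm

x, n, p0 = 520, 1000, 0.5
p_hat = x / n
se0 = (p0 * (1 - p0) / n) ** 0.5
z = (p_hat - p0) / se0
print("z =", z, " two-sided p-value =", 2 * norm.sf(abs(z)))   # H_a: p != p_0

m, z_star, p_star = 0.03, norm.ppf(0.975), 0.5   # p* = 0.5 is the conservative guess
n_needed = (z_star / m) ** 2 * p_star * (1 - p_star)
print("required n =", math.ceil(n_needed))
```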
# Inference for Regression
## Simple Linear Regression
We first see what the model looks like
- **population** part
- $X = $ Independent (Explanatory or Predictor) variable
- $Y = $ Dependent (Response) variable
- Model: $Y_i = \beta_0 + \beta_1 \cdot X_i + \varepsilon_i$
- Mean: $\mu_Y = \beta_0 + \beta_1 \cdot X$
- residual: $\varepsilon_i = \text{noise} \sim \N{0,\sigma}$
- Parameters
- $\mu_Y = $ mean response for a given $X$
- $\beta_0 = y\text{-intercept}$
- $\beta_1 = $ slope
- $\sigma = $ Standard deviation of the model, both $Y$ and the residual.
- **sample** part
- size: $n$
- Data: $\left( x_1,y_1\right),\left( x_2,y_2\right),\dots,\left( x_n,y_n\right)$
- Estimate: $\hat{y}_i = b_0 + b_1 \cdot x_i$
    - Residual (error): $e_i = y_i - \hat{y}_i$
- Statistics
- $\hat{y} = $ estimate of the mean $\mu_Y$
- $b_0 = y\text{-intercept}$-estimate of $\beta_0$
- $b_1 = $ slope-estimate of $\beta_1$
- $s = $ Standard Error of estimate for $\sigma = \sqrt{\text{MSE}} = \text{RMSE}$
### Assumptions
1. The error terms $\varepsilon_i$ are *independent* and also, $\varepsilon_i \sim \N{0,\sigma^2}$
2. The underlying relationship between the $X$ and $Y$ is linear
### Estimated Regression Model
Regression Function: $\EE{Y_i\mid X_i} = \mu_Y = \beta_0 + \beta_1 \cdot X_i + \EE{\varepsilon_i} = \beta_0 + \beta_1 \cdot X_i$
Then the estimate: $\hat{Y}_{i} = b_0 + b_1 \cdot X_i$. Remember that the individual random error terms $e_i$ have a mean of $\mathbf{0}$
### Estimating the Parameters
Using the least-squares regression we can finally get the result: $\hat{y} = b_0 + b_1 \cdot x$, as the best estimate of the true regression line: $\mu_y = \beta_0 + \beta_1 \cdot x$.
- $\hat{y}$ is an unbiased estimate for mean response $\mu_y$
- $b_0$ is an unbiased estimate for intercept $\beta_0$
- $b_1$ is an unbiased estimate for slope $\beta_1$
The **population standard deviation** $\sigma$ for $y$ at any given value of $x$ represents the spread of the normal distribution of the $\varepsilon_i$ around the mean $\mu_y$. And for each **predicted value** $\hat{y}_i = b_0 + b_1 \cdot x_i$ there's a **residual** $y_i - \hat{y}_i$. The **regression standard error** $s$, for $n$ sample data points, is
$$s = \sqrt{\frac{\sum \text{residual}^2} {n-2}} = \sqrt{\frac{\sum \left( y_i - \hat{y}_i \right)^2} {n-2}}$$
and $s^2 = \text{MSE}$ is an unbiased estimate of the error variance $\sigma^2$; we use $s = \text{RMSE} = \sqrt{\text{MSE}}$ as the estimate of the regression standard deviation $\sigma$
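A minimal sketch of these estimates on simulated data (the data are made up for illustration, not taken from any example in the notes):
```python
# Sketch: least-squares estimates b0, b1 and the regression standard error s.
import numpy as np

rng = np.random.default_rng(0)
n = 40
x = rng.uniform(0, 10, n)
y = 2.0 + 0.5 * x + rng.normal(0, 1.0, n)        # true beta0 = 2, beta1 = 0.5, sigma = 1

b1 = np.sum((x - x.mean()) * (y - y.mean())) / np.sum((x - x.mean()) ** 2)
b0 = y.mean() - b1 * x.mean()
y_hat = b0 + b1 * x
s = np.sqrt(np.sum((y - y_hat) ** 2) / (n - 2))  # regression standard error (RMSE)
print("b0 =", b0, " b1 =", b1, " s =", s)
```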
### Checking the regression inference
- The relationship is linear in the population.
- The response varies Normally about the population regression line.
- Observations are independent.
- The standard deviation of the responses is the same for all values of $x$.
We can also check the residual plots.
### $CI$ for regression slope $\beta_1$
The standardized slope estimate $\ffrac{b_1 - \beta_1}{SE_{b_1}}$ has a $t$ distribution with $n-2$ degrees of freedom. The $CI$ for this parameter has the form $b_1 \pm t^* \times SE_{b_1}$
### Significance test for regression slope $\beta_1$
For the hypothesis $H_0: \beta_1 = \text{hypothesized value}$, first calculate the test statistic $t = \ffrac{b_1 - \text{hypothesized value}} {SE_{b_1}}$. Then use the $t$ table with $n-2$ degrees of freedom to find the $p\text{-value}$ by the rule
$$\begin{array}{cc}
\hline
H_a & p\text{-value} \\ \hline
\beta_1 > \text{hypothesized value} & P(T \geq t) \\
\beta_1 < \text{hypothesized value} & P(T \leq t) \\
\beta_1 \neq \text{hypothesized value} & 2\cdot P(T \geq \left|t\right|) \\ \hline
\end{array}$$
### Testing the hypothesis of no relationship
$H_0:\beta_1 = 0$, $H_1: \beta_1 \neq 0$. Why this test? Because for the slope, we have $b_1 = r \cdot \ffrac{s_y} {s_x}$, which means testing $\beta_1 = 0$ is equivalent to testing the hypothesis of no correlation between $x$ and $y$ in the population.
Besides, this statistic is the same as the one for testing $H_0: \rho = 0$, which can be written as $T = \ffrac{r\sqrt{n-2}} {\sqrt{1-r^2}}$
Note that $\beta_0$ normally has no practical interpretation so commonly people don't test the hypothesis on that.
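Continuing the simulated example above (assuming `x`, `y`, `b1`, `s`, `n` and `np` from the previous sketch), the slope inference might be sketched as:
```python
# Sketch: SE of the slope, 95% CI, and the test of H0: beta1 = 0.
from scipy.stats import t as t_dist

se_b1 = s / np.sqrt(np.sum((x - x.mean()) ** 2))
df = n - 2
t_stat = b1 / se_b1                               # hypothesized value is 0
t_star = t_dist.ppf(0.975, df)
print("95% CI for beta1:", (b1 - t_star * se_b1, b1 + t_star * se_b1))
print("t =", t_stat, " p-value =", 2 * t_dist.sf(abs(t_stat), df))
```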
### Analyse the JMP output
The JMP output slides follow; first, here are some very important formulae (a numerical sketch computing them is given after the list of points below)
- $\text{SST} = \text{SSM} + \text{SSE}$
- $\text{SST}$: Sum of squares of Total
- $\text{SSM}$: Sum of squares of Model
- $\text{SSE}$: Sum of squares of Error
- $\text{DF}_{\mathrm{T}} = \text{DF}_{\mathrm{M}} + \text{DF}_{\mathrm{E}}$
- $\text{DF}_{\mathrm{T}}$: Degree of freedom of Total
- $\text{DF}_{\mathrm{M}}$: Degree of freedom of Model
- $\text{DF}_{\mathrm{E}}$: Degree of freedom of Error$\\[0.7em]$
- $\text{MSM} = \ffrac{\text{SSM}} {\text{DF}_{\mathrm{M}}}\\[0.7em]$
- $\text{MSE} = \ffrac{\text{SSE}} {\text{DF}_{\mathrm{E}}}\\[0.7em]$
- $F\text{-ratio} = \ffrac{\text{MSM}} {\text{MSE}}\\[0.7em]$
- The standard deviation $s$ of the $n$ residuals $e_i = y_i - \hat{y}_i$ is calculated by$\\[0.7em]$
$$s^2 = \text{MSE} = \frac{\sum e^2_i} {n-2} = \frac{\sum \left( y_i - \hat{y}_i \right)^2} {n-2} = \frac{\text{SSE}} {\text{DF}_{\text{E}}} \\[0.7em]$$
- $R^2 = \ffrac{\text{SSM}} {\text{SST}} = \ffrac{\sum \left( \hat{y}_i - \bar{y} \right)^2} {\sum \left( y_i - \bar{y} \right)^2} \\[0.7em]$
Other points worth noting:
1. $R = \pm \sqrt{R^2}$; the sign matches that of the estimated slope $b_1$ (a positive or a negative relation)
2. $R^2$ is also called the **Coefficient of Determination**, and $R$ is also called the **Correlation coefficient**
3. $R^2$ can also be interpreted as the percentage of variation in the dependent variable $Y$ that is explained by the regression on the independent variable $X$
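Here is the promised numerical sketch of the quantities above, continuing the same simulated regression (assuming `y`, `y_hat`, `n` and `np` from the earlier sketches):
```python
# Sketch: ANOVA decomposition, F-ratio and R^2 for the simulated simple regression.
SST = np.sum((y - y.mean()) ** 2)
SSM = np.sum((y_hat - y.mean()) ** 2)
SSE = np.sum((y - y_hat) ** 2)
DFM, DFE = 1, n - 2
F_ratio = (SSM / DFM) / (SSE / DFE)
R2 = SSM / SST
print("SST =", SST, " SSM + SSE =", SSM + SSE)    # the identity SST = SSM + SSE
print("F =", F_ratio, " R^2 =", R2)
```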



# Multiple Regression
## Inference
In multiple regression, the response variable $y$ depends on $p$ explanatory variables, $x_1, x_2, \dots, x_p$: $\mu_y = \beta_0 + \beta_1 \cdot x_1 + \cdots + \beta_p \cdot x_p$. And the statistical model for this is: $y_i = \beta_0 + \beta_1 \cdot x_{i1} + \cdots +\beta_p \cdot x_{ip} + \varepsilon_i$.
The **mean response** $\mu_y$ is a linear function of the explanatory variables; the deviations $\varepsilon_i$ are independent and follow the same normal distribution.
The estimators are $b_0, b_1, \dots, b_p$, and the degrees of freedom are $n-p-1$.
For the $CI$ part, it's basically the same method as in simple regression.
### Significance test
We test $H_0: \beta_1 = \beta_2 = \cdots = \beta_p = 0$ against the alternative that at least one $\beta_j \neq 0$. If we reject $H_0$, we can at least conclude that one or more of the explanatory variables is linearly related to $y$. The way to find the $p\text{-value}$ is still similar (it is based on the ANOVA $F$ statistic). The caveat: a significant $p\text{-value}$ doesn't mean that all $p$ explanatory variables have a significant influence on $y$, only that at least one does.
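A hedged sketch of the overall $F$ test: the data below are simulated and the `statsmodels` package is assumed to be installed; this is not the case-study data from the slides.
```python
# Sketch: multiple regression and the F test of H0: all slopes equal 0.
import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(1)
n, p = 60, 3
X = rng.normal(size=(n, p))
y = 1.0 + X @ np.array([0.8, 0.0, -0.5]) + rng.normal(0, 1, n)

results = sm.OLS(y, sm.add_constant(X)).fit()     # degrees of freedom: n - p - 1
print("F =", results.fvalue, " p-value =", results.f_pvalue)
print(results.params)                             # b0, b1, ..., bp; per-slope t tests in results.tvalues
```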
## Case study
JMP output is in the last, several points before that is listed here.





# Exercise Set 3: Strings, requests and APIs
*Morning, August 13, 2019*
In this exercise set you will be working with collecting data from the web. We will start out with some basic string operations and build on that to make a query for fetching data.
In addition to DataCamp, you might find [this page](https://pythonprogramming.net/string-concatenation-formatting-intermediate-python-tutorial/) on pythonprogramming.net useful. [This page](https://www.python-course.eu/python3_sequential_data_types.php) give an introduction to the basics of strings and their related data types.
## Exercise Section 3.1: Basic string operations and dictionaries
Strings have multiple operations and functions associated. In this exercise we investigate a few of these. We also explore the sequence form of a string and how it can be sliced and accessed via indices.
> **Ex. 3.1.1**: Let `s1='Chameleon'` and `s2='ham'`. Check whether the string `s2` is a substring of `s1`. Is `'hello'` a substring `'goodbye'`?
> *Hint*: One string operation is to check whether a string `S` contains a substring `T`; this can be done with the `in` operator: `T in S`.
```
# [Answer to Ex. 3.1.1]
# EXAMPLE ANSWER
s1 = 'Chameleon'
s2 = 'ham'
if s2 in s1:
print("ham is a substring of Chameleon... See? C-ham-eleon")
```
> **Ex. 3.1.2**: From the string `s1` select the last four characters. What is the index of the character `a` in `s1`?
> *Hint*: We can select a substring by slicing with the `[]` notation, `[start:end]`, where start is included and end is excluded. Recall that Python has zero-based indexing, see explanation [here](https://softwareengineering.stackexchange.com/questions/110804/why-are-zero-based-arrays-the-norm).
```
# [Answer to Ex. 3.1.2]
# EXAMPLE ANSWER
last_four = s1[-4:]
print("The last four characters are '{l}', the 'a' sits at index {a}, since Python is 0-indexed".format(l= last_four, a = s1.find('a')))
```
In addition to the techniques above, strings are equipped with an array of _methods_ for solving more complex tasks. For example, the `str.join(list)` method inserts a string between each element of a list, while `str1.split(str2)` splits `str1` into a list. `.strip()` removes whitespace at the beginning and end of a string and `.format()` fills in specified blanks in a string. Below we illustrate the use of each function
```python
>>> ','.join(['a','b'])
'a,b'
>>> ' Hello World! '.strip()
'Hello World!'
>>> 'Hello {w}'.format(w='World!')
'Hello World!'
>>> 'a,b,c'.split(',')
['a','b','c']
```
> **Ex. 3.1.3:** Let `l1 = ['r ', 'Is', '>', ' < ', 'g ', '?']`. Create from `l1` the sentence "Is r > g?" using your knowledge about string formatting. Make sure there is only one space in between words.
>
>> _Hint:_ You should be able to combine the above informations to solve this exercise.
```
# [Answer to Ex. 3.1.3]
# HANDIN
```
### Dictionaries
Dictionaries (or simply dicts) are a central building block of Python. Python dicts are constructed from pairs of keys and values, making them extremely versatile for data storage. Furthermore, dicts correspond directly to the JSON file format.
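For example (a quick illustration only, not an answer to the exercises below):
```python
# Quick illustration: building a dict and its direct correspondence with JSON.
import json

person = dict()                  # empty dict; equivalently person = {}
person['name'] = 'Ada'
person['year'] = 1815
print(person['name'], person.get('missing_key', 'not found'))
print(json.dumps(person))        # '{"name": "Ada", "year": 1815}'
```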
> **Ex. 3.1.4**: Create an empty dictionary `words` using the `dict()`function. Then add each of the words in `['animal', 'coffee', 'python', 'unit', 'knowledge', 'tread', 'arise']` as a key, with the value being a boolean indicator for whether the word begins with a vowel. The results should look like `{'bacon': False, 'asynchronous': True ...}`
>
>> _Hint:_ You might want to first construct a function that assesses whether a given word begins with a vowel or not.
```
# [Answer to Ex. 3.1.4]
# HANDIN
```
> **Ex. 3.1.5:** Loop through the dictionary `words`. In each iteration you should print a proper sentence stating if the current word begins with a vowel or not.
> _Hint:_ You can loop through both keys and values simultaneously with the `.items()` method. [This](https://www.tutorialspoint.com/python/python_dictionary.htm) might help you.
```
# [Answer to Ex. 3.1.5]
# EXAMPLE ANSWER
for w,b in words.items():
if b:
print('{word} begins with a vowel'.format(word = w))
else:
print('{word} does not begin with a vowel'.format(word = w))
```
<br>
## Exercise Section 3.2: The punk API
The [punk API](https://punkapi.com/) serves information about _beers_. It is a well made and well documented API which makes it great for learning about APIs.
> **Ex. 3.2.1:** Read the documentation on the Punk API available [here](https://punkapi.com/documentation/v2). What is the server url (i.e. root endpoint) of the Punk API? Does it require authentication? Then use the Punk API to make a request for beers brewed before December, 2008 with an ABV of at least 8.
```
# [Answer to Ex. 3.2.1]
# EXAMPLE ANSWER
# Server URL is 'https://api.punkapi.com/v2/'
# No authentication required
import requests
response = requests.get('https://api.punkapi.com/v2/beers?brewed_before=12-2008&abv_gt=8')
```
> **Ex. 3.2.2:** What object type is the API's JSON response? What about the individual items in the container? Convert the response object to a suitable format and answer the following questions:
>> 1) How many beers are in the JSON object?
>>
>> 2) Print the names of the beers in the JSON object using lower case characters.
>>
>> 3) Select the beer called Paradox Islay from the JSON object.
>>
>> 4) Which hop ingredients does the Paradox Islay contain?
```
# [Answer to Ex. 3.2.2]
# EXAMPLE ANSWER
# format is json (see documentation)
beers = response.json()
# 1) How many beers are in the JSON object?
len(beers)
# 2) Print the names of the beers in the JSON object using lower case characters.
print('Beer names:', [b['name'].lower() for b in beers])
# 3) Select the beer called Paradox Islay from the JSON object.
print('Paradox Islay is the 2nd entry, i.e. index 1.')
b = beers[1]
# 4) Which hop ingredients does the Paradox Islay contain?
print('Ingredients in Paradox Islay:', set(i['name'] for i in b['ingredients']['hops']))
```
> **Ex. 3.2.3:** Save the beers as a JSON file on your machine.
> _Hint:_ you might want to take a look at the [json](https://docs.python.org/3/library/json.html) module.
```
# [Answer to Ex. 3.2.3]
# EXAMPLE ANSWER
import json
with open('beers.json', 'w') as f:
f.write(json.dumps(beers))
```
<br>
## Exercise Section 3.3: The DST API
DST (Statistics Denmark) provide an API access to their aggregate data. For developers they supply a [console](https://api.statbank.dk/console) for testing. In this exercise we will first code up a simple script which can collect data from the DST API, and then introduce the [PyDST](https://kristianuruplarsen.github.io/PyDST/) package.
> **Ex 3.3.1:** Use the API console to construct a GET request which retrieves the table FOLK1A split by quarter. The return should be in JSON format. We want all available dates.
>
>Then write a function `construct_link()` which takes as inputs: a table ID (e.g. `'FOLK1A'`) and a list of strings like `['var1=*', 'var2=somevalue']`. The function should return the proper URL for getting a dataset with the specified variables (e.g. in this case all levels of var1, but only where var2=somevalue).
> _Hint:_ The time variable is called 'tid'. To select all available values, set the value-id to '*'. Spend a little time with the console to get a sense of how the URLs are constructed.
```
# [Answer to Ex. 3.3.1]
# EXAMPLE ANSWER
# This is the manually constructed link
'https://api.statbank.dk/v1/data/FOLK1A/JSONSTAT?lang=en&Tid=*'
# This function will do it for you
def construct_link(table_id, variables):
base = 'https://api.statbank.dk/v1/data/{id}/JSONSTAT?lang=en'.format(id = table_id)
for var in variables:
base += '&{v}'.format(v = var)
return base
construct_link('FOLK1A', ['Tid=*'])
```
When executing the request in the console you should get a JSON file as output. Next, let's write some code to load these JSON files directly into Python.
> **Ex. 3.3.2:** use the `requests` module (get it with `pip install requests`) and `construct_link()` to request birth data from the "FOD" table. Get all available years (variable "Tid"), but only female births (BARNKON=P) . Unpack the json payload and store the result. Wrap the whole thing in a function which takes an url as input and returns the corresponding output.
> _Hint:_ The `requests.response` object has a `.json()` method.
```
# [Answer to Ex. 3.3.2]
# HANDIN
```
> **Ex. 3.3.3:** Extract the number of girls born each year. Store the results as a list.
```
# [Answer to Ex. 3.3.3]
# EXAMPLE ANSWER
girls = data['value']
girls
```
> **Ex.3.3.4:** Repeat 3.3.2 and 3.3.3 but this time only get boy births (BARNKON=D). Store the numbers in a new list and use the `plot_births` (supplied below) function to plot the data. If you don't already have matplotlib installed run `pip install matplotlib`.
```
# Just run this once, do not change it.
import matplotlib.pyplot as plt
def plot_births(boys, girls):
""" Plots lineplot of the number of births split by gender.
Args:
boys: a list of boy births by year
girls: a list of girl births by year
"""
if not len(boys) == len(girls):
raise ValueError('There must be the same number of observations for boys and girls')
labels = ['{y}'.format(y=year) for year in range(1973,2018)]
plt.plot(range(len(boys)), boys, color = 'blue', label = 'boys')
plt.plot(range(len(boys)), girls, color = 'red', label = 'girls')
plt.xticks([i for i in range(len(boys)) if i%4 == 0], [l for i,l in zip(range(len(boys)),labels) if i%4 == 0],
rotation = 'vertical')
plt.legend()
plt.show()
# [Answer to Ex. 3.3.4]
# EXAMPLE ANSWER
s2 = construct_link('FOD', ['Tid=*', 'BARNKON=D'])
boys = send_GET_request(s2)['value']
plot_births(boys, girls)
```
>**(Bonus question) Ex. 3.3.5:** Go to [https://kristianuruplarsen.github.io/PyDST/](https://kristianuruplarsen.github.io/PyDST/) follow the installation instructions and import PyDST. Try to replicate the birth figure from 3.3.4 using PyDST. Use [the documentation](https://kristianuruplarsen.github.io/PyDST/connection) to learn how the package works.
> _Hint:_ PyDST does not use json or dicts as its primary data format, instead it uses pandas DataFrames. When you install PyDST it will install pandas as a dependency. If this doesn't work simply run `pip install pandas` in your console. DataFrames are very intuitive to work with, for example accessing a column named 'name' is simply `data['name']` or `data.name`.
>
> In the next session you will learn more about pandas and DataFrames. If you are more comfortable with dicts, the data_response class has a `.dict` method you can use.
```
# [Answer to Ex. 3.3.5]
# EXAMPLE ANSWER
import PyDST
import seaborn as sns; sns.set_style("whitegrid")
conn = PyDST.connection(language = 'en')
resp = conn.get_data('FOD',
variables = ['Tid', 'BARNKON'],
values = {'Tid': ['*'], 'BARNKON': ['*']}
)
data = resp.df
sns.lineplot('TID', 'INDHOLD', data = data, hue = 'BARNKON', markers=True, palette = ['blue', 'red'])
```